Dataset columns:
  prompt      string, lengths 19 to 879k
  completion  string, lengths 3 to 53.8k
  api         string, lengths 8 to 59
# -*- coding: utf-8 -*-
"""
Created on Wed Sep 19 09:45:18 2018

@author: cs17809
"""
import matplotlib.pyplot as plt
import numpy as np
from scipy import ndimage
import os
import sys
import imageio

for i in range(11):
    im = np.zeros((128, 128))
    np.random.seed(i)
    num_pixels = np.random.randint(1, 8)
    x, y = (123*np.random.random((2, num_pixels))).astype(np.int)
    im[x, y] = np.random.randint(1, 2**8 - 1, size=(num_pixels))
    gx, gy =
np.random.randint(1, 30)
numpy.random.randint
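As an aside (not part of the dataset row above), a minimal standalone sketch of the target call, numpy.random.randint; the bounds and sizes below are arbitrary illustrations, not values from the record:

import numpy as np

single_val = np.random.randint(1, 30)            # one random integer in [1, 30); high bound is exclusive
pixel_vals = np.random.randint(1, 255, size=5)   # array of 5 random integers in [1, 255)
print(single_val, pixel_vals)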
#!/usr/bin/python import sys, getopt import os import pandas as pd import numpy as np import pyquaternion as pyq from pyquaternion import Quaternion from scipy import signal from scipy.spatial.transform import Slerp from scipy.spatial.transform import Rotation as R def main(argv): inputfile = '' calfile = '' outputfile = '' try: opts, args = getopt.getopt(argv,"hi:c:o:",["ifile=", "cfile=","ofile="]) except getopt.GetoptError: print('test.py -i <inputfile> -c <calfile> -o <outputfile>') sys.exit(2) for opt, arg in opts: if opt == '-h': print('test.py -i <inputfile> -c calfile -o <outputfile>') sys.exit() elif opt in ("-i", "--ifile"): inputfile = arg elif opt in ("-c", "--ifile"): calfile = arg elif opt in ("-o", "--ofile"): outputfile = arg # Creating Functions def orientation_matrix(q0, q1, q2, q3): # based on https://automaticaddison.com/how-to-convert-a-quaternion-to-a-rotation-matrix/ r11 = 2 * (q0 ** 2 + q1 ** 2) - 1 r12 = 2 * (q1 * q2 - q0 * q3) r13 = 2 * (q1 * q3 + q0 * q2) r21 = 2 * (q1 * q2 + q0 * q3) r22 = 2 * (q0 ** 2 + q2 ** 2) - 1 r23 = 2 * (q2 * q3 - q0 * q1) r31 = 2 * (q1 * q3 - q0 * q2) r32 = 2 * (q2 * q3 + q0 * q1) r33 = 2 * (q0 ** 2 + q3 ** 2) - 1 return r11, r12, r13, r21, r22, r23, r31, r32, r33 def compute_relative_orientation(seg, cal): ''' Calculating the relative orientation between two matrices. This is used for the initial normalization procedure using the standing calibration ''' R_11 = np.array([]) R_12 = np.array([]) R_13 = np.array([]) R_21 = np.array([]) R_22 = np.array([]) R_23 = np.array([]) R_31 = np.array([]) R_32 = np.array([]) R_33 = np.array([]) for i in range(seg.shape[0]): segment = np.asmatrix([ [np.array(seg['o11'])[i], np.array(seg['o12'])[i], np.array(seg['o13'])[i]], [np.array(seg['o21'])[i], np.array(seg['o22'])[i], np.array(seg['o23'])[i]], [np.array(seg['o31'])[i], np.array(seg['o32'])[i], np.array(seg['o33'])[i]] ]) segment_cal = np.asmatrix([ [np.array(cal['o11'])[i], np.array(cal['o12'])[i], np.array(cal['o13'])[i]], [np.array(cal['o21'])[i], np.array(cal['o22'])[i], np.array(cal['o23'])[i]], [np.array(cal['o31'])[i], np.array(cal['o32'])[i], np.array(cal['o33'])[i]] ]) # normalization r = np.matmul(segment, segment_cal.T) new_orientations = np.asarray(r).reshape(-1) R_11 = np.append(R_11, new_orientations[0]) R_12 = np.append(R_12, new_orientations[1]) R_13 = np.append(R_13, new_orientations[2]) R_21 = np.append(R_21, new_orientations[3]) R_22 = np.append(R_22, new_orientations[4]) R_23 = np.append(R_23, new_orientations[5]) R_31 = np.append(R_31, new_orientations[6]) R_32 = np.append(R_32, new_orientations[7]) R_33 = np.append(R_33, new_orientations[8]) return R_11, R_12, R_13, R_21, R_22, R_23, R_31, R_32, R_33 def compute_joint_angle(df, child, parent): c = df[df[' jointType'] == child] p = df[df[' jointType'] == parent] ml = np.array([]) ap = np.array([]) v = np.array([]) # Compute Rotation Matrix Components for i in range(c.shape[0]): segment = np.asmatrix([ [np.array(c['n_o11'])[i], np.array(c['n_o12'])[i], np.array(c['n_o13'])[i]], [np.array(c['n_o21'])[i], np.array(c['n_o22'])[i], np.array(c['n_o23'])[i]], [np.array(c['n_o31'])[i], np.array(c['n_o32'])[i], np.array(c['n_o33'])[i]] ]) reference_segment = np.asmatrix([ [np.array(p['n_o11'])[i], np.array(p['n_o12'])[i], np.array(p['n_o13'])[i]], [np.array(p['n_o21'])[i], np.array(p['n_o22'])[i], np.array(p['n_o23'])[i]], [np.array(p['n_o31'])[i], np.array(p['n_o32'])[i], np.array(p['n_o33'])[i]] ]) # transformation of segment to reference segment r = np.matmul(reference_segment.T, 
segment) # decomposition to Euler angles rotations = R.from_matrix(r).as_euler('xyz', degrees=True) ml = np.append(ml, rotations[0]) ap = np.append(ap, rotations[1]) v = np.append(v, rotations[2]) return ml, ap, v def resample_df(d, new_freq=30, method='linear'): # Resamples data at 30Hz unless otherwise specified joints_without_quats = [3, 15, 19, 21, 22, 23, 24] resampled_df = pd.DataFrame( columns=['# timestamp', ' jointType', ' orientation.X', ' orientation.Y', ' orientation.Z', ' orientation.W', ' position.X', ' position.Y', ' position.Z']) new_df = pd.DataFrame() for i in d[' jointType'].unique(): current_df = d.loc[d[' jointType'] == i].copy() old_times = np.array(current_df['# timestamp']) new_times = np.arange(min(current_df['# timestamp']), max(current_df['# timestamp']), 1 / new_freq) o_x = np.array(current_df[' orientation.X']) o_y = np.array(current_df[' orientation.Y']) o_z = np.array(current_df[' orientation.Z']) o_w = np.array(current_df[' orientation.W']) p_x = np.array(current_df[' position.X']) p_y = np.array(current_df[' position.Y']) p_z = np.array(current_df[' position.Z']) if i in joints_without_quats: orientation_x = np.repeat(0.0, len(new_times)) orientation_y = np.repeat(0.0, len(new_times)) orientation_z = np.repeat(0.0, len(new_times)) orientation_w = np.repeat(0.0, len(new_times)) else: if method == "linear": orientation_x = np.interp(new_times, old_times, o_x) orientation_y = np.interp(new_times, old_times, o_y) orientation_z = np.interp(new_times, old_times, o_z) orientation_w = np.interp(new_times, old_times, o_w) elif method == 'slerp': quats = [] for t in range(len(old_times)): quats.append([o_x[t], o_y[t], o_z[t], o_w[t]]) # Create rotation object quats_object = R.from_quat(quats) # Spherical Linear Interpolation slerp = Slerp(np.array(current_df['# timestamp']), quats_object) interp_rots = slerp(new_times) new_quats = interp_rots.as_quat() # Create new orientation objects orientation_x = np.array([item[0] for item in new_quats]) orientation_y = np.array([item[1] for item in new_quats]) orientation_z = np.array([item[2] for item in new_quats]) orientation_w = np.array([item[3] for item in new_quats]) else: raise ValueError("Method must be either linear or spherical (slerp) interpolation.") position_x = signal.resample(p_x, num=int(max(current_df['# timestamp']) * new_freq)) position_y = signal.resample(p_y, num=int(max(current_df['# timestamp']) * new_freq)) position_z = signal.resample(p_z, num=int(max(current_df['# timestamp']) * new_freq)) new_df['# timestamp'] = pd.Series(new_times) new_df[' jointType'] = pd.Series(np.repeat(i, len(new_times))) new_df[' orientation.X'] = pd.Series(orientation_x) new_df[' orientation.Y'] = pd.Series(orientation_y) new_df[' orientation.Z'] = pd.Series(orientation_z) new_df[' orientation.W'] = pd.Series(orientation_w) new_df[' position.X'] = pd.Series(position_x) new_df[' position.Y'] = pd.Series(position_y) new_df[' position.Z'] = pd.Series(position_z) resampled_df = resampled_df.append(new_df, ignore_index=True) return resampled_df def smooth_rotations(o_x, o_y, o_z, o_w): o_x = np.array(o_x) o_y = np.array(o_y) o_z = np.array(o_z) o_w = np.array(o_w) trajNoisy = [] for i in range(len(o_x)): trajNoisy.append([o_x[i], o_y[i], o_z[i], o_w[i]]) trajNoisy = np.array(trajNoisy) # This code was adapted from https://ww2.mathworks.cn/help/nav/ug/lowpass-filter-orientation-using-quaternion-slerp.html # As explained in the link above, "The interpolation parameter to slerp is in the closed-interval [0,1], so the output of dist # 
must be re-normalized to this range. However, the full range of [0,1] for the interpolation parameter gives poor performance, # so it is limited to a smaller range hrange centered at hbias." hrange = 0.4 hbias = 0.4 low = max(min(hbias - (hrange / 2), 1), 0) high = max(min(hbias + (hrange / 2), 1), 0) hrangeLimited = high - low # initial filter state is the quaternion at frame 0 y = trajNoisy[0] qout = [] for i in range(1, len(trajNoisy)): x = trajNoisy[i] # x = mathutils.Quaternion(x) # y = mathutils.Quaternion(y) # d = x.rotation_difference(y).angle x = pyq.Quaternion(x) y = pyq.Quaternion(y) d = (x.conjugate * y).angle # Renormalize dist output to the range [low, high] hlpf = (d / np.pi) * hrangeLimited + low # y = y.slerp(x, hlpf) y = Quaternion.slerp(y, x, hlpf).elements qout.append(np.array(y)) # because a frame of data is lost during this process, I've (arbitrarily) decided to append an extra quaternion at the end of the trial # that is identical to the n-1th frame. This keeps the length consistent (so there is no issues with merging later) and should not # negatively impact the data since the last frame is rarely of interest (and the data collector can decide to collect for a split second # after their trial of interest has completed to attenuate any of these "errors" that may propogate in the analyses) qout.append(qout[int(len(qout) - 1)]) orientation_x = [item[0] for item in qout] orientation_y = [item[1] for item in qout] orientation_z = [item[2] for item in qout] orientation_w = [item[3] for item in qout] return orientation_x, orientation_y, orientation_z, orientation_w def smooth_quaternions(d): for i in d[' jointType'].unique(): current_df = d.loc[d[' jointType'] == i].copy() current_df[' orientation.X'], current_df[' orientation.Y'], current_df[' orientation.Z'], current_df[ ' orientation.W'] = smooth_rotations(current_df[' orientation.X'], current_df[' orientation.Y'], current_df[' orientation.Z'], current_df[' orientation.W']) d[d[' jointType'] == i] = current_df return d def compute_segment_angle(df, SEGMENT): s = df[df[' jointType'] == SEGMENT] ml = np.array([]) ap = np.array([]) v = np.array([]) # Compute Rotation Matrix Components for i in range(s.shape[0]): segment = np.asmatrix([ [np.array(s['n_o11'])[i], np.array(s['n_o12'])[i], np.array(s['n_o13'])[i]], [np.array(s['n_o21'])[i], np.array(s['n_o22'])[i], np.array(s['n_o23'])[i]], [np.array(s['n_o31'])[i], np.array(s['n_o32'])[i], np.array(s['n_o33'])[i]] ]) # decomposition to Euler angles rotations = R.from_matrix(segment).as_euler('xyz', degrees=True) ml = np.append(ml, rotations[0]) ap = np.append(ap, rotations[1]) v = np.append(v, rotations[2]) return ml, ap, v dir = os.getcwd() # Loading Data print('... Loading data') cal = pd.read_csv(os.path.join(dir, calfile)) df = pd.read_csv(os.path.join(dir, inputfile)) df['# timestamp'] = df['# timestamp'] * 10 ** -3 cal['# timestamp'] = cal['# timestamp'] * 10 ** -3 df_reoriented = df.copy() cal_reoriented = cal.copy() print('... 
Reorienting LCSs') # Hips df_reoriented.loc[df[' jointType'] == 16, ' orientation.X'] = df.loc[df[' jointType'] == 16, ' orientation.Z'] df_reoriented.loc[df[' jointType'] == 16, ' orientation.Y'] = df.loc[df[' jointType'] == 16, ' orientation.X'] df_reoriented.loc[df[' jointType'] == 16, ' orientation.Z'] = df.loc[df[' jointType'] == 16, ' orientation.Y'] cal_reoriented.loc[cal[' jointType'] == 16, ' orientation.X'] = cal.loc[cal[' jointType'] == 16, ' orientation.Z'] cal_reoriented.loc[cal[' jointType'] == 16, ' orientation.Y'] = cal.loc[cal[' jointType'] == 16, ' orientation.X'] cal_reoriented.loc[cal[' jointType'] == 16, ' orientation.Z'] = cal.loc[cal[' jointType'] == 16, ' orientation.Y'] df_reoriented.loc[df[' jointType'] == 12, ' orientation.X'] = df.loc[df[' jointType'] == 12, ' orientation.Z'] df_reoriented.loc[df[' jointType'] == 12, ' orientation.Y'] = df.loc[df[' jointType'] == 12, ' orientation.X'] * -1 df_reoriented.loc[df[' jointType'] == 12, ' orientation.Z'] = df.loc[df[' jointType'] == 12, ' orientation.Y'] * -1 cal_reoriented.loc[cal[' jointType'] == 12, ' orientation.X'] = cal.loc[cal[' jointType'] == 12, ' orientation.Z'] cal_reoriented.loc[cal[' jointType'] == 12, ' orientation.Y'] = cal.loc[cal[' jointType'] == 12, ' orientation.X'] * -1 cal_reoriented.loc[cal[' jointType'] == 12, ' orientation.Z'] = cal.loc[cal[' jointType'] == 12, ' orientation.Y'] * -1 # Knees df_reoriented.loc[df[' jointType'] == 17, ' orientation.X'] = df.loc[df[' jointType'] == 17, ' orientation.X'] * -1 df_reoriented.loc[df[' jointType'] == 17, ' orientation.Y'] = df.loc[df[' jointType'] == 17, ' orientation.Y'] * -1 df_reoriented.loc[df[' jointType'] == 17, ' orientation.Z'] = df.loc[df[' jointType'] == 17, ' orientation.Z'] cal_reoriented.loc[cal[' jointType'] == 17, ' orientation.X'] = cal.loc[cal[' jointType'] == 17, ' orientation.X'] * -1 cal_reoriented.loc[cal[' jointType'] == 17, ' orientation.Y'] = cal.loc[cal[' jointType'] == 17, ' orientation.Y'] * -1 cal_reoriented.loc[cal[' jointType'] == 17, ' orientation.Z'] = cal.loc[cal[' jointType'] == 17, ' orientation.Z'] df_reoriented.loc[df[' jointType'] == 13, ' orientation.X'] = df.loc[df[' jointType'] == 13, ' orientation.X'] df_reoriented.loc[df[' jointType'] == 13, ' orientation.Y'] = df.loc[df[' jointType'] == 13, ' orientation.Y'] * -1 df_reoriented.loc[df[' jointType'] == 13, ' orientation.Z'] = df.loc[df[' jointType'] == 13, ' orientation.Z'] * -1 cal_reoriented.loc[cal[' jointType'] == 13, ' orientation.X'] = cal.loc[cal[' jointType'] == 13, ' orientation.X'] cal_reoriented.loc[cal[' jointType'] == 13, ' orientation.Y'] = cal.loc[cal[' jointType'] == 13, ' orientation.Y'] * -1 cal_reoriented.loc[cal[' jointType'] == 13, ' orientation.Z'] = cal.loc[cal[' jointType'] == 13, ' orientation.Z'] * -1 # Ankles df_reoriented.loc[df[' jointType'] == 18, ' orientation.X'] = df.loc[df[' jointType'] == 18, ' orientation.X'] * -1 df_reoriented.loc[df[' jointType'] == 18, ' orientation.Y'] = df.loc[df[' jointType'] == 18, ' orientation.Y'] * -1 df_reoriented.loc[df[' jointType'] == 18, ' orientation.Z'] = df.loc[df[' jointType'] == 18, ' orientation.Z'] cal_reoriented.loc[cal[' jointType'] == 18, ' orientation.X'] = cal.loc[cal[' jointType'] == 18, ' orientation.X'] * -1 cal_reoriented.loc[cal[' jointType'] == 18, ' orientation.Y'] = cal.loc[cal[' jointType'] == 18, ' orientation.Y'] * -1 cal_reoriented.loc[cal[' jointType'] == 18, ' orientation.Z'] = cal.loc[cal[' jointType'] == 18, ' orientation.Z'] df_reoriented.loc[df[' jointType'] == 14, 
' orientation.X'] = df.loc[df[' jointType'] == 14, ' orientation.X'] df_reoriented.loc[df[' jointType'] == 14, ' orientation.Y'] = df.loc[df[' jointType'] == 14, ' orientation.Y'] * -1 df_reoriented.loc[df[' jointType'] == 14, ' orientation.Z'] = df.loc[df[' jointType'] == 14, ' orientation.Z'] * -1 cal_reoriented.loc[cal[' jointType'] == 14, ' orientation.X'] = cal.loc[cal[' jointType'] == 14, ' orientation.X'] cal_reoriented.loc[cal[' jointType'] == 14, ' orientation.Y'] = cal.loc[cal[' jointType'] == 14, ' orientation.Y'] * -1 cal_reoriented.loc[cal[' jointType'] == 14, ' orientation.Z'] = cal.loc[cal[' jointType'] == 14, ' orientation.Z'] * -1 # Resampling data to 30Hz df_reoriented = resample_df(df_reoriented, new_freq=30, method='slerp') # Smooth Quaternion Rotations df_reoriented = smooth_quaternions(df_reoriented) # need to re-sort and reset the index following the resampling df_reoriented = df_reoriented.sort_values(by=['# timestamp', ' jointType']).reset_index() df_reoriented['o11'], df_reoriented['o12'], df_reoriented['o13'], df_reoriented['o21'], df_reoriented['o22'], \ df_reoriented['o23'], df_reoriented['o31'], df_reoriented['o32'], df_reoriented['o33'] \ = orientation_matrix(df_reoriented[' orientation.W'], df_reoriented[' orientation.X'], df_reoriented[' orientation.Y'], df_reoriented[' orientation.Z']) cal_reoriented['o11'], cal_reoriented['o12'], cal_reoriented['o13'], cal_reoriented['o21'], cal_reoriented['o22'], \ cal_reoriented['o23'], cal_reoriented['o31'], cal_reoriented['o32'], cal_reoriented['o33'] \ = orientation_matrix(cal_reoriented[' orientation.W'], cal_reoriented[' orientation.X'], cal_reoriented[' orientation.Y'], cal_reoriented[' orientation.Z']) df_reoriented.set_index(' jointType', inplace=True) cal_reoriented.set_index(' jointType', inplace=True) cal_reoriented = cal_reoriented.groupby(' jointType').mean().drop(columns=['# timestamp']) cal_reoriented = pd.concat([cal_reoriented] * np.int64(df_reoriented.shape[0] / 25)) print('... Normalizing to calibration pose') # Normalize orientations to calibration pose df_reoriented['n_o11'], df_reoriented['n_o12'], df_reoriented['n_o13'], df_reoriented['n_o21'], df_reoriented[ 'n_o22'], \ df_reoriented['n_o23'], df_reoriented['n_o31'], df_reoriented['n_o32'], df_reoriented['n_o33'] \ = np.array(compute_relative_orientation(cal_reoriented, df_reoriented)) df_reoriented.reset_index(inplace=True) print('... 
Computing joint angles') r_hipFlexion, r_hipAbduction, r_hipV = compute_joint_angle(df_reoriented, child=17, parent=16) l_hipFlexion, l_hipAbduction, l_hipV = compute_joint_angle(df_reoriented, child=13, parent=12) r_kneeFlexion, r_kneeAbduction, r_kneeV = compute_joint_angle(df_reoriented, child=18, parent=17) l_kneeFlexion, l_kneeAbduction, l_kneeV = compute_joint_angle(df_reoriented, child=14, parent=13) # Note that 16 or 12 can be used for the pelvis (given Kinect's definitions) pelvis_rotation = compute_segment_angle(df_reoriented, 16)[0] r_thigh_rotation = compute_segment_angle(df_reoriented, 17)[0] l_thigh_rotation = compute_segment_angle(df_reoriented, 13)[0] r_shank_rotation = compute_segment_angle(df_reoriented, 18)[0] l_shank_rotation = compute_segment_angle(df_reoriented, 14)[0] new_df = pd.DataFrame({ 'frame': np.arange(df_reoriented['# timestamp'].unique().shape[0]), 'timeStamp': df_reoriented['# timestamp'].unique(), # Below are adjusted for relatively easy anatomical interpretations 'r_hipFlexion' : r_hipFlexion, 'l_hipFlexion' : l_hipFlexion*-1, 'r_hipAbduction' : r_hipAbduction*-1, 'l_hipAbduction' : l_hipAbduction, 'r_hipV' : r_hipV *-1, 'l_hipV' : l_hipV *-1, 'r_kneeFlexion' : r_kneeFlexion*-1, 'l_kneeFlexion' : l_kneeFlexion, 'r_kneeAdduction' : r_kneeAbduction, 'l_kneeAdduction' : l_kneeAbduction*-1, 'r_kneeV' : r_kneeV*-1, 'l_kneeV' : l_kneeV, # Below are adjusted specifically for use with relative phase analyses 'pelvis_rotation': pelvis_rotation, 'r_thigh_rotation': r_thigh_rotation, 'l_thigh_rotation': l_thigh_rotation*-1, 'r_shank_rotation': r_shank_rotation, 'l_shank_rotation': l_shank_rotation*-1, # Below are left in the GCS 'r_hip_x': np.array(df_reoriented[df_reoriented[' jointType'] == 16][' position.X']), 'r_hip_y': np.array(df_reoriented[df_reoriented[' jointType'] == 16][' position.Y']), 'r_hip_z': np.array(df_reoriented[df_reoriented[' jointType'] == 16][' position.Z']), 'l_hip_x': np.array(df_reoriented[df_reoriented[' jointType'] == 12][' position.X']), 'l_hip_y': np.array(df_reoriented[df_reoriented[' jointType'] == 12][' position.Y']), 'l_hip_z': np.array(df_reoriented[df_reoriented[' jointType'] == 12][' position.Z']), 'r_knee_x': np.array(df_reoriented[df_reoriented[' jointType'] == 17][' position.X']), 'r_knee_y': np.array(df_reoriented[df_reoriented[' jointType'] == 17][' position.Y']), 'r_knee_z': np.array(df_reoriented[df_reoriented[' jointType'] == 17][' position.Z']), 'l_knee_x': np.array(df_reoriented[df_reoriented[' jointType'] == 13][' position.X']), 'l_knee_y': np.array(df_reoriented[df_reoriented[' jointType'] == 13][' position.Y']), 'l_knee_z': np.array(df_reoriented[df_reoriented[' jointType'] == 13][' position.Z']), 'r_ankle_x': np.array(df_reoriented[df_reoriented[' jointType'] == 18][' position.X']), 'r_ankle_y': np.array(df_reoriented[df_reoriented[' jointType'] == 18][' position.Y']), 'r_ankle_z': np.array(df_reoriented[df_reoriented[' jointType'] == 18][' position.Z']), 'l_ankle_x': np.array(df_reoriented[df_reoriented[' jointType'] == 14][' position.X']), 'l_ankle_y': np.array(df_reoriented[df_reoriented[' jointType'] == 14][' position.Y']), 'l_ankle_z': np.array(df_reoriented[df_reoriented[' jointType'] == 14][' position.Z']), 'r_foot_x': np.array(df_reoriented[df_reoriented[' jointType'] == 19][' position.X']), 'r_foot_y': np.array(df_reoriented[df_reoriented[' jointType'] == 19][' position.Y']), 'r_foot_z': np.array(df_reoriented[df_reoriented[' jointType'] == 19][' position.Z']), 'l_foot_x': 
np.array(df_reoriented[df_reoriented[' jointType'] == 15][' position.X']), 'l_foot_y': np.array(df_reoriented[df_reoriented[' jointType'] == 15][' position.Y']), 'l_foot_z': np.array(df_reoriented[df_reoriented[' jointType'] == 15][' position.Z']), 'spinebase_x': np.array(df_reoriented[df_reoriented[' jointType'] == 0][' position.X']), 'spinebase_y': np.array(df_reoriented[df_reoriented[' jointType'] == 0][' position.Y']), 'spinebase_z':
np.array(df_reoriented[df_reoriented[' jointType'] == 0][' position.Z'])
numpy.array
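For reference, a minimal standalone sketch of the target call, numpy.array, outside the motion-capture context of the record above; the values are arbitrary:

import numpy as np

positions = np.array([0.1, 0.2, 0.3])                 # build an ndarray from a Python list
rotation = np.array([[1, 0], [0, 1]], dtype=float)    # 2x2 float array from nested lists
print(positions.shape, rotation.shape)                # (3,) (2, 2)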
import numpy as np from pytest import approx from proteus import equivalent_polynomials as eqp def test_1D(): from proteus.Quadrature import GaussEdge polyOrders = [1,2,3] quadOrderMax = 9 elements = [[0.,1.],[-2.,2.],[9.,10.]] phiList = [[-1.,-1.],[1.,1.],[-1.,1.],[0.,1.],[1.,0.]] for nP in polyOrders: for qO in range(nP,quadOrderMax): quad = GaussEdge(order=qO) gf = eqp.Simplex(nSpace=1, nP=nP, nQ=len(quad.points)) for phi in phiList: for e in elements: print(e,phi) dV = e[1]-e[0] assert(dV > 0) if phi[0]*phi[1] < 0.0: theta = -phi[0]/(phi[1] - phi[0]) x_0 = (1-theta)*e[0] + theta*e[1] int_H_exact = abs(e[1] - x_0) int_ImH_exact = abs(x_0 - e[0]) int_D_exact = 1.0 if phi[0] > 0.0: tmp=int_H_exact int_H_exact = int_ImH_exact int_ImH_exact = int_H_exact elif phi[0] < 0.0: int_H_exact = 0.0 int_ImH_exact = dV int_D_exact = 0.0 elif phi[0] > 0.0: int_H_exact = dV int_ImH_exact = 0.0 int_D_exact = 0.0 elif phi[0] == 0.0: if phi[1] > 0.0: int_H_exact = dV int_ImH_exact = 0.0 int_D_exact = 0.0 if phi[1] < 0.0: int_H_exact = 0.0 int_ImH_exact = dV int_D_exact = 1.0 elif phi[1] == 0.0: if phi[0] > 0.0: int_H_exact = dV int_ImH_exact = 0.0 int_D_exact = 0.0 if phi[0] < 0.0: int_H_exact = 0.0 int_ImH_exact = dV int_D_exact = 1.0 gf.calculate(np.array(phi), np.array([[e[0],0.,0.], [e[1],0.,0.]]), np.array(quad.points)) int_H=0.0 int_ImH=0.0 int_D=0.0 for k in range(len(quad.points)): gf.set_quad(k) int_H += quad.weights[k]*gf.H*dV int_ImH += quad.weights[k]*gf.ImH*dV int_D += quad.weights[k]*gf.D*dV assert(int_H == approx(int_H_exact,1e-15)) assert(int_ImH == approx(int_ImH_exact,1e-15)) assert(int_D == approx(int_D_exact,1e-15)) def test_2D(): from proteus.Quadrature import GaussTriangle polyOrders = [1,2,3] quadOrderMax = 6 elements = [ np.array([[0.,0.],[0.,1.],[1.,0.]]) ] phiList = [[-1.,-1.,-1.], [1.,1.,1.], [-1.,1.,1.], [0.,1.,1.], [1.,0.,0.]] for nP in polyOrders: for qO in range(nP,quadOrderMax): quad = GaussTriangle(order=qO) gf = eqp.Simplex(nSpace=2, nP=nP, nQ=len(quad.points)) for phi in phiList: for e in elements: b_0 = e[2,:] - e[0,:] b_1 = e[1,:] - e[0,:] Jac = np.array([b_0,b_1]).transpose() dV = abs(np.linalg.det(Jac)) area = dV/2.0 if phi[0]*phi[1] < 0.0 and phi[0]*phi[2] < 0.0: theta0 = -phi[0]/(phi[1] - phi[0]) theta1 = -phi[0]/(phi[2] - phi[0]) x_0 = (1-theta0)*e[0,:] + theta0*e[1,:] x_1 = (1-theta1)*e[0,:] + theta1*e[2,:] b_0 = x_1 - e[0,:] b_1 = x_0 - e[0,:] Jac_0 = np.array([b_0,b_1]).transpose() int_ImH_exact = np.linalg.det(Jac_0)/2.0 int_H_exact = area - int_ImH_exact int_D_exact = np.linalg.norm(x_1 - x_0) if phi[0] > 0.0: tmp=int_H_exact int_H_exact = int_ImH_exact int_ImH_exact = int_H_exact elif phi[0] < 0.0: int_H_exact = 0.0 int_ImH_exact = area int_D_exact = 0.0 elif phi[0] > 0.0: int_H_exact = area int_ImH_exact = 0.0 int_D_exact = 0.0 elif phi[0] == 0.0: if phi[1] > 0.0: int_H_exact = area int_ImH_exact = 0.0 int_D_exact = 0.0 if phi[1] < 0.0: int_H_exact = 0.0 int_ImH_exact = area int_D_exact = 1.0 elif phi[1] == 0.0: if phi[0] > 0.0: int_H_exact = area int_ImH_exact = 0.0 int_D_exact = 0.0 if phi[0] < 0.0: int_H_exact = 0.0 int_ImH_exact = area int_D_exact = 1.0 gf.calculate(np.array(phi), np.array([[e[0,0],e[0,1],0.], [e[1,0],e[1,1],0.], [e[2,0],e[2,1],0.]]), np.array(quad.points)) int_H=0.0 int_ImH=0.0 int_D=0.0 for k in range(len(quad.points)): gf.set_quad(k) int_H += quad.weights[k]*gf.H*dV int_ImH += quad.weights[k]*gf.ImH*dV int_D += quad.weights[k]*gf.D*dV assert(int_H == approx(int_H_exact,1e-15)) assert(int_ImH == approx(int_ImH_exact,1e-15)) 
assert(int_D == approx(int_D_exact,1e-15)) def test_3D(): from proteus.Quadrature import GaussTetrahedron polyOrders = [2]#1,2,3] quadOrderMax = 8 elements = [ np.array([[0.,0.,0.], [0.,0.,1.], [0.,1.,0.], [1.,0.,0.]]), #np.array([[312.90104741, 109.12205319, 82.38957441], # [371.40050898, 837.09283397, 117.87194133], # [666.41881358, 107.49166421, 419.82032267], # [659.82702468, 583.95327591, 217.72111178]]) ] #phiList = [[-348.3143481, -10.31124012, -2.73674249, 9.52267983]] phiList = [[-1.,-1.,-1.,-1.], [1.,1.,1.,1.], [-1.,1.,1.,1.], [0.,1.,1.,1.], [1.,0.,0.,0.]] for nP in polyOrders: #cek hack: start at nP+1 because I think 2nd order tet rule is single precisionx #therefore np==2 and q0 == 2 fails unless tol is reduced below for qO in [4]:#range(nP+1,quadOrderMax): quad = GaussTetrahedron(order=qO) gf = eqp.Simplex(nSpace=3, nP=nP, nQ=len(quad.points)) for phi in phiList: for e in elements: b_0 = e[3,:] - e[0,:] b_1 = e[2,:] - e[0,:] b_2 = e[1,:] - e[0,:] Jac = np.array([b_0, b_1, b_2]).transpose() dV = abs(
np.linalg.det(Jac)
numpy.linalg.det
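A minimal standalone sketch of the target call, numpy.linalg.det, mirroring the Jacobian-volume use in the record above; the matrix below is an arbitrary example, not data from the test:

import numpy as np

Jac = np.array([[1.0, 0.5, 0.0],
                [0.0, 1.0, 0.5],
                [0.0, 0.0, 1.0]])
dV = abs(np.linalg.det(Jac))   # absolute determinant: the volume scale factor of the mapping
print(dV)                      # 1.0 for this upper-triangular matrix with unit diagonal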
""" Utilities to manipulate numpy arrays """ import sys from distutils.version import LooseVersion import numpy as np from nibabel.volumeutils import endian_codes, native_code, swapped_code NUMPY_LESS_1_8 = LooseVersion(np.version.short_version) < '1.8' def as_native_array(arr): """ Return `arr` as native byteordered array If arr is already native byte ordered, return unchanged. If it is opposite endian, then make a native byte ordered copy and return that Parameters ---------- arr : ndarray Returns ------- native_arr : ndarray If `arr` was native order, this is just `arr`. Otherwise it's a new array such that ``np.all(native_arr == arr)``, with native byte ordering. """ if endian_codes[arr.dtype.byteorder] == native_code: return arr return arr.byteswap().newbyteorder() def pinv(a, rcond=1e-15): """Vectorized version of `numpy.linalg.pinv` If numpy version is less than 1.8, it falls back to iterating over `np.linalg.pinv` since there isn't a vectorized version of `np.linalg.svd` available. Parameters ---------- a : array_like (..., M, N) Matrix to be pseudo-inverted. rcond : float Cutoff for small singular values. Returns ------- B : ndarray (..., N, M) The pseudo-inverse of `a`. Raises ------ LinAlgError If the SVD computation does not converge. See Also -------- np.linalg.pinv """ a = np.asarray(a) if NUMPY_LESS_1_8: if a.ndim <= 2: # properly handle the case of a single 2D array return np.linalg.pinv(a, rcond) shape = a.shape[:-2] a = a.reshape(-1, a.shape[-2], a.shape[-1]) result = np.empty((a.shape[0], a.shape[2], a.shape[1])) for i, item in enumerate(a): result[i] = np.linalg.pinv(item, rcond) return result.reshape(shape + (a.shape[2], a.shape[1])) else: swap = np.arange(a.ndim) swap[[-2, -1]] = swap[[-1, -2]] u, s, v = np.linalg.svd(a, full_matrices=False) cutoff = np.maximum.reduce(s, axis=-1, keepdims=True) * rcond mask = s > cutoff s[mask] = 1. / s[mask] s[~mask] = 0 return np.einsum('...ij,...jk', np.transpose(v, swap) * s[..., None, :], np.transpose(u, swap)) def eigh(a, UPLO='L'): """Iterate over `np.linalg.eigh` if it doesn't support vectorized operation Parameters ---------- a : array_like (..., M, M) Hermitian/Symmetric matrices whose eigenvalues and eigenvectors are to be computed. UPLO : {'L', 'U'}, optional Specifies whether the calculation is done with the lower triangular part of `a` ('L', default) or the upper triangular part ('U'). Returns ------- w : ndarray (..., M) The eigenvalues in ascending order, each repeated according to its multiplicity. v : ndarray (..., M, M) The column ``v[..., :, i]`` is the normalized eigenvector corresponding to the eigenvalue ``w[..., i]``. Raises ------ LinAlgError If the eigenvalue computation does not converge. See Also -------- np.linalg.eigh """ a = np.asarray(a) if a.ndim > 2 and NUMPY_LESS_1_8: shape = a.shape[:-2] a = a.reshape(-1, a.shape[-2], a.shape[-1]) evals = np.empty((a.shape[0], a.shape[1])) evecs = np.empty((a.shape[0], a.shape[1], a.shape[1])) for i, item in enumerate(a): evals[i], evecs[i] =
np.linalg.eigh(item, UPLO)
numpy.linalg.eigh
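A minimal standalone sketch of the target call, numpy.linalg.eigh, on a small symmetric matrix chosen only for illustration:

import numpy as np

a = np.array([[2.0, 1.0],
              [1.0, 2.0]])                    # symmetric (Hermitian) matrix
evals, evecs = np.linalg.eigh(a, UPLO='L')    # eigenvalues in ascending order, orthonormal eigenvectors
print(evals)                                  # [1. 3.]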
""" Auhtor: <NAME> (<EMAIL>) """ from __future__ import print_function import locale from warnings import warn import time from sklearn.base import BaseEstimator from sklearn.utils import check_random_state, check_array from sklearn.neighbors import KDTree from sklearn.decomposition import PCA try: import joblib except ImportError: # sklearn.externals.joblib is deprecated in 0.21, will be removed in 0.23 from sklearn.externals import joblib import numpy as np import scipy.sparse import scipy.sparse.csgraph import numba import umato.distances as dist import umato.sparse as sparse from umato.utils import ( adjacency_matrix, ts, csr_unique, plot_tmptmp, ) from umato.layouts import ( optimize_global_layout, nn_layout_optimize, ) from umato.umap_ import ( nearest_neighbors, fuzzy_simplicial_set, make_epochs_per_sample, find_ab_params, ) try: # Use pynndescent, if installed (python 3 only) from pynndescent import NNDescent from pynndescent.distances import named_distances as pynn_named_distances from pynndescent.sparse import sparse_named_distances as pynn_sparse_named_distances _HAVE_PYNNDESCENT = True except ImportError: _HAVE_PYNNDESCENT = False locale.setlocale(locale.LC_NUMERIC, "C") INT32_MIN = np.iinfo(np.int32).min + 1 INT32_MAX = np.iinfo(np.int32).max - 1 @numba.njit( # parallel=True, # can SABOTAGE the array order (should be used with care) fastmath=True, ) def build_knn_graph( data, sorted_index, hub_num, ): sorted_index_c = sorted_index.copy() leaf_num = int(np.ceil(data.shape[0] / hub_num)) disjoints = [] for i in range(hub_num): tmp = 0 source = -1 disjoint = [] # append the first element for j in range(len(sorted_index_c)): if sorted_index_c[j] > -1: source = sorted_index_c[j] disjoint.append(source) sorted_index_c[j] = -1 tmp += 1 break if source == -1: break # break if all indices == -1 # get distance for each element distances = np.ones(len(sorted_index_c)) * np.inf for k in range(len(sorted_index_c)): distance = 0.0 if sorted_index_c[k] > -1: target = sorted_index_c[k] for d in range(data.shape[1]): distance += (data[source][d] - data[target][d]) ** 2 distances[target] = np.sqrt(distance) # append other elements for _ in range(leaf_num - 1): val = min(distances) if np.isinf(val): disjoint = disjoint + [-1] * (leaf_num - tmp) break else: min_index = np.argmin(distances) disjoint.append(min_index) distances[min_index] = np.inf sorted_index_c[sorted_index_c == min_index] = -1 tmp += 1 disjoints.append(disjoint) return np.array(disjoints) def pick_hubs( disjoints, random_state, popular=False, ): if popular: return disjoints[:, 0] else: hubs = [] (hub_num, _) = disjoints.shape # append until second to last element for i in range(hub_num - 1): choice = random_state.choice(disjoints[i]) hubs.append(choice) # append last element last = disjoints[hub_num - 1] last = last[last != -1] choice = random_state.choice(last) hubs.append(choice) if hub_num != len(hubs): ValueError(f"hub_num({hub_num}) is not the same as hubs({hubs})!") return hubs def build_global_structure( data, hubs, n_components, a, b, random_state, alpha=0.0065, n_epochs=30, verbose=False, label=None, init_global="pca", ): if init_global == "pca": Z = PCA(n_components=n_components).fit_transform(data[hubs]) Z /= Z.max() elif init_global == "random": Z = np.random.random((len(hubs), n_components)) else: raise ValueError("Check hub node initializing method!") P = adjacency_matrix(data[hubs]) # P /= np.sum(P, axis=1, keepdims=True) P /= P.max() if verbose: result = optimize_global_layout( P=P, Z=Z, a=a, b=b, alpha=alpha, 
n_epochs=n_epochs, verbose=True, savefig=False, label=label[hubs], ) else: result = optimize_global_layout( P, Z, a, b, alpha=alpha, n_epochs=n_epochs ) # (TODO) how to optimize n_epochs & alpha? return result def embed_others_nn_progressive( data, init_global, original_hubs, hubs, knn_indices, nn_consider, random_state, label, last=False ): init = np.zeros((data.shape[0], init_global.shape[1])) init[hubs] = init_global if last: while True: val = len(hubs) hubs = hub_nn_num( data=data, hubs=hubs, knn_indices=knn_indices, nn_consider=nn_consider, ) if val == len(hubs): if len(init) > len(hubs): print(f"len(hubs) {len(hubs)} is smaller than len(init) {len(init)}") break else: hubs = hub_nn_num( data=data, hubs=hubs, knn_indices=knn_indices, nn_consider=nn_consider, ) if len(init) > len(hubs): print(f"len(hubs) {len(hubs)} is smaller than len(init) {len(init)}") # generate random normal distribution random_normal = random_state.normal( loc=0.0, scale=0.05, size=list(init.shape) ).astype(np.float32) hub_nn = set(hubs) - set(original_hubs) hub_nn = np.array(list(hub_nn)) # initialize other nodes' position using only hub information init = nn_initialize( data=data, init=init, original_hubs=original_hubs, hub_nn=hub_nn, random=random_normal, nn_consider=10, # number of hubs to consider ) # np.array of hub information (hubs = 2, hub_nn = 1, outliers = 0) hub_info = np.zeros(data.shape[0]) hub_info[hub_nn] = 1 hub_info[original_hubs] = 2 # save figure2 plot_tmptmp(data=init[hubs], label=label[hubs], name=f"pic2") return init, hub_info, hubs def embed_outliers( data, init, hubs, disjoints, random_state, label, ): # generate random normal distribution random_normal = random_state.normal(scale=0.02, size=list(init.shape)).astype( np.float32 ) # append other nodes using NN disjoint information init, nodes_number = disjoint_initialize( data=data, init=init, hubs=hubs, disjoints=disjoints, random=random_normal, ) if len(init) != len(nodes_number): raise ValueError( f"total data # ({len(init)}) != total embedded # ({len(nodes_number)})!" 
) # save figure3 plot_tmptmp(data=init, label=label, name="pic4_disjoint") return init @numba.njit() def disjoint_initialize( data, init, hubs, disjoints, random, nn_consider=1.0, ): hubs_true = np.zeros(data.shape[0]) hubs_true[hubs] = True hubs = set(hubs) nndist = np.sum(init[:, 1]) / len(hubs) for disjoint in disjoints: for j in disjoint: # j == -1 means we've run all the iteration if j == -1: break # if it is not a hub node, we should embed this using NN in disjoint set if not hubs_true[j]: distances = [] indices = [] # we use its neighbors for k in disjoint: if hubs_true[k]: distance = 0.0 for l in range(data.shape[1]): distance += (data[j][l] - data[k][l]) ** 2 distance = np.sqrt(distance) distances.append(distance) indices.append(k) nn_consider_tmp = nn_consider if len(distances) < nn_consider: nn_consider_tmp = len(distances) ixs = np.array(distances).argsort()[:nn_consider_tmp] init[j] = np.zeros(init.shape[1]) for ix in ixs: target_ix = indices[ix] init[j] += init[target_ix] init[j] /= nn_consider_tmp init[j] += random[j] # add random value hubs.add(j) return init, hubs @numba.njit() def hub_nn_num( data, hubs, knn_indices, nn_consider=10, ): num_log = np.zeros(data.shape[0]) num_log[hubs] = -1 hubs = set(hubs) hubs_fin = hubs.copy() for i in hubs: for j, e in enumerate(knn_indices[i]): if j > nn_consider: break if num_log[e] > -1: hubs_fin.add(e) return np.array(list(hubs_fin)) @numba.njit( locals={ "num_log": numba.types.float32[::1], "index": numba.types.int32, "dists": numba.types.float32[::1], "dist": numba.types.float32, }, parallel=True, fastmath=True, ) def nn_initialize( data, init, original_hubs, hub_nn, random, nn_consider=10, ): num_log = np.zeros(data.shape[0], dtype=np.float32) num_log[original_hubs] = -1 num_log[hub_nn] = -1 for i in numba.prange(len(hub_nn)): # find nearest hub nodes dists = np.zeros(len(original_hubs), dtype=np.float32) for j in numba.prange(len(original_hubs)): dist = 0.0 for d in numba.prange(data.shape[1]): e = original_hubs[j] dist += (data[e][d] - data[hub_nn[i]][d]) ** 2 dists[j] = dist # sorted hub indices dists_arg = dists.argsort(kind="quicksort") for k in numba.prange(nn_consider): index = original_hubs[dists_arg[k]] init[hub_nn[i]] += init[index] num_log[hub_nn[i]] += 1 # add random value before break init[hub_nn[i]] += random[hub_nn[i]] for l in numba.prange(data.shape[0]): if num_log[l] > 0: init[l] /= num_log[l] return init @numba.njit( locals={ "out_indices": numba.types.int32[:, ::1], "out_dists": numba.types.float32[:, ::1], "counts": numba.types.int32[::1], }, parallel=True, fastmath=True, ) def select_from_knn( knn_indices, knn_dists, hub_info, n_neighbors, n, ): out_indices = np.zeros((n, n_neighbors), dtype=np.int32) out_dists = np.zeros((n, n_neighbors), dtype=np.float32) counts = np.zeros(n, dtype=np.int32) for i in numba.prange(knn_indices.shape[0]): if hub_info[i] > 0: for j in numba.prange(knn_indices.shape[1]): # append directly if it is not an outlier if hub_info[knn_indices[i, j]] > 0: out_indices[i, counts[i]] = knn_indices[i, j] out_dists[i, counts[i]] = knn_dists[i, j] counts[i] += 1 if counts[i] == n_neighbors: break return out_indices, out_dists, counts @numba.njit( # locals={"dists": numba.types.float32[::1],}, parallel=True, fastmath=True, ) def apppend_knn( data, knn_indices, knn_dists, hub_info, n_neighbors, counts, counts_sum, ): for i in numba.prange(data.shape[0]): num = n_neighbors - counts[i] if hub_info[i] > 0 and num > 0: # found neighbors (# of neighbors < n_neighbors) neighbors = knn_indices[i][: 
counts[i]] # find unique target indices indices = set() for ci in range(counts[i]): # cannot use numba.prange; malloc error occurs cx = neighbors[ci] for cy in range(counts[cx]): indices.add(knn_indices[cx][cy]) # get target indices targets = indices - set(neighbors) targets = np.array(list(targets)) # if there is not enough target, it is a corner case (raise error) if len(targets) < num: return knn_indices, knn_dists, -1 else: # calculate distances dists = np.zeros(len(targets), dtype=np.float32) for k in numba.prange(len(targets)): dist = 0.0 for d in numba.prange(data.shape[1]): dist += (data[i][d] - data[targets[k]][d]) ** 2 dists[k] = np.sqrt(dist) sorted_dists_index = dists.argsort(kind="quicksort") # add more knns for j in numba.prange(num): knn_indices[i][counts[i] + j] = targets[ sorted_dists_index[counts[i] + j] ] knn_dists[i][counts[i] + j] = dists[ sorted_dists_index[counts[i] + j] ] # re-sort index sorted_knn_index = knn_dists[i].argsort(kind="quicksort") knn_indices[i] = knn_indices[i][sorted_knn_index] knn_dists[i] = knn_dists[i][sorted_knn_index] # for double check counts_sum -= 1 return knn_indices, knn_dists, counts_sum def local_optimize_nn( data, graph, hub_info, n_components, learning_rate, a, b, gamma, negative_sample_rate, n_epochs, init, random_state, parallel=False, verbose=False, label=None, k=0, ): graph = graph.tocoo() graph.sum_duplicates() n_vertices = graph.shape[1] graph.data[ hub_info[graph.col] == 2 ] = 1.0 # current (NNs) -- other (hubs): 1.0 weight graph.data[ hub_info[graph.row] == 2 ] = 0.0 # current (hubs) -- other (hubs, nns): 0.0 weight (remove) graph.data[graph.data < (graph.data.max() / float(n_epochs))] = 0.0 graph.eliminate_zeros() init_data = np.array(init) if len(init_data.shape) == 2: if np.unique(init_data, axis=0).shape[0] < init_data.shape[0]: tree = KDTree(init_data) dist, ind = tree.query(init_data, k=2) nndist = np.mean(dist[:, 1]) embedding = init_data + random_state.normal( scale=0.001 * nndist, size=init_data.shape ).astype(np.float32) else: embedding = init_data epochs_per_sample = make_epochs_per_sample(graph.data, n_epochs) head = graph.row tail = graph.col embedding = ( 10.0 * (embedding - np.min(embedding, 0)) / (
np.max(embedding, 0)
numpy.max
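A minimal standalone sketch of the target call, numpy.max, reproducing the column-wise min-max rescaling pattern from the record above on arbitrary toy data:

import numpy as np

embedding = np.array([[0.0, 2.0],
                      [1.0, 4.0],
                      [3.0, 6.0]])
col_max = np.max(embedding, 0)                # column-wise maxima: [3. 6.]
col_min = np.min(embedding, 0)                # column-wise minima: [0. 2.]
scaled = 10.0 * (embedding - col_min) / (col_max - col_min)
print(scaled.min(), scaled.max())             # 0.0 10.0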
# -*- coding: utf-8 -*- # ----------------------------------------------------------------------------- # (C) British Crown Copyright 2017-2021 Met Office. # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # * Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # * Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # * Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE # ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE # LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR # CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF # SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN # CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # POSSIBILITY OF SUCH DAMAGE. """ This module contains a plugin to calculate the enhancement of precipitation over orography. """ from typing import Tuple import iris import numpy as np from iris.analysis.cartography import rotate_winds from iris.cube import Cube from numpy import ndarray from scipy.ndimage import uniform_filter1d from improver import BasePlugin from improver.constants import R_WATER_VAPOUR from improver.metadata.constants.mo_attributes import MOSG_GRID_ATTRIBUTES from improver.metadata.utilities import generate_mandatory_attributes from improver.nbhood.nbhood import NeighbourhoodProcessing from improver.psychrometric_calculations.psychrometric_calculations import ( calculate_svp_in_air, ) from improver.utilities.cube_checker import check_for_x_and_y_axes from improver.utilities.cube_manipulation import ( compare_coords, enforce_coordinate_ordering, sort_coord_in_cube, ) from improver.utilities.spatial import ( GradientBetweenAdjacentGridSquares, number_of_grid_cells_to_distance, ) class OrographicEnhancement(BasePlugin): """ Class to calculate orographic enhancement from horizontal wind components, temperature and relative humidity. References: <NAME>. and <NAME>., 1989: Meso-Gamma-Scale Distribution of Orographic Precipitation: Numerical Study and Comparison with Precipitation Derived from Radar Measurements. Journal of Applied Meteorology, 28, 1105-1117. <NAME>., 2005: Orographic Precipitation. Annual Review of Earth and Planetary Sciences, 33, 645-671. """ def __init__(self) -> None: """ Initialise the plugin with thresholds from STEPS code. 
Usage as follows: Criteria for site orographic enhancement calculation: - 3x3 mean topography height >= self.orog_thresh_m (20 m) - Relative humidity (fraction) >= self.rh_thresh_ratio (0.8) - v dot grad z (wind x topography gradient) >= self.vgradz_thresh_ms (0.0005 m/s) Parameters for calculating upstream contribution: - Maximum range of an upstream cell to contribute to the total enhancement (self.upstream_range_of_influence_km). This is 15 km in STEPS. - Cloud lifetime (self.cloud_lifetime_s) defines the standard deviation of the distance weighting function for upstream enhancement contributions. This is 102 seconds in STEPS. - Scaling factor by which to multiply the weighted sum of upstream contributions (self.efficiency_factor). This is 0.23265 in STEPS. Create placeholder class members for regridded variable cubes (orography, temperature, humidity, pressure and wind components), saturation vapour pressure, V.gradZ (uplift) array and grid spacing. """ self.orog_thresh_m = 20.0 self.rh_thresh_ratio = 0.8 self.vgradz_thresh_ms = 0.0005 self.upstream_range_of_influence_km = 15.0 self.cloud_lifetime_s = 102.0 self.efficiency_factor = 0.23265 # initialise class members to store regridded variables for # orographic enhancement calculation self.topography = None self.temperature = None self.humidity = None self.pressure = None self.uwind = None self.vwind = None # initialise class members for derived variables and metadata self.vgradz = None self.svp = None self.grid_spacing_km = None def __repr__(self) -> str: """Represent the plugin instance as a string""" return "<OrographicEnhancement()>" def _orography_gradients(self) -> Tuple[Cube, Cube]: """ Calculates the dimensionless gradient of self.topography along both spatial axes, smoothed along the perpendicular axis. If spatial coordinates are not in the same units as topography height (m), converts coordinate units in place. Returns: - 2D cube of dimensionless topography gradients in the positive x direction - 2D cube of dimensionless topography gradients in the positive y direction """ self.topography.coord(axis="x").convert_units(self.topography.units) xdim = self.topography.coord_dims(self.topography.coord(axis="x"))[0] self.topography.coord(axis="y").convert_units(self.topography.units) ydim = self.topography.coord_dims(self.topography.coord(axis="y"))[0] # smooth topography by +/- one grid cell along the perpendicular axis # before calculating each gradient (as done in STEPS) topo_smx = uniform_filter1d(self.topography.data, 3, axis=ydim) topo_smx_cube = self.topography.copy(data=topo_smx) gradx, _ = GradientBetweenAdjacentGridSquares(regrid=True)(topo_smx_cube) gradx.units = "1" topo_smy = uniform_filter1d(self.topography.data, 3, axis=xdim) topo_smy_cube = self.topography.copy(data=topo_smy) _, grady = GradientBetweenAdjacentGridSquares(regrid=True)(topo_smy_cube) grady.units = "1" return gradx, grady def _regrid_variable(self, var_cube: Cube, unit: str) -> Cube: """ Sorts spatial coordinates in ascending order, regrids the input variable onto the topography grid and converts to the required units. This function does not modify the input variable cube. 
Args: var_cube: Cube containing input variable data unit: Required unit for this variable Returns: Cube containing regridded variable data """ for axis in ["x", "y"]: var_cube = sort_coord_in_cube(var_cube, var_cube.coord(axis=axis)) enforce_coordinate_ordering( var_cube, [var_cube.coord(axis="y").name(), var_cube.coord(axis="x").name()] ) regridder = iris.analysis.Linear() out_cube = var_cube.regrid(self.topography, regridder) out_cube.data = out_cube.data.astype(np.float32) out_cube.convert_units(unit) return out_cube def _regrid_and_populate( self, temperature: Cube, humidity: Cube, pressure: Cube, uwind: Cube, vwind: Cube, topography: Cube, ) -> None: """ Regrids input variables onto the high resolution orography field, then populates the class instance with regridded variables before converting to SI units. Also calculates V.gradZ as a class member. Args: temperature: Temperature at top of boundary layer humidity: Relative humidity at top of boundary layer pressure: Pressure at top of boundary layer uwind: Positive eastward wind vector component at top of boundary layer vwind: Positive northward wind vector component at top of boundary layer topography: Height of topography above sea level on 1 km UKPP domain grid """ # convert topography grid, datatype and units for axis in ["x", "y"]: topography = sort_coord_in_cube(topography, topography.coord(axis=axis)) enforce_coordinate_ordering( topography, [topography.coord(axis="y").name(), topography.coord(axis="x").name()], ) self.topography = topography.copy(data=topography.data.astype(np.float32)) self.topography.convert_units("m") # rotate winds try: uwind, vwind = rotate_winds(uwind, vwind, topography.coord_system()) except ValueError as err: if "Duplicate coordinates are not permitted" in str(err): # ignore error raised if uwind and vwind do not need rotating pass else: raise ValueError(str(err)) else: # remove auxiliary spatial coordinates from rotated winds for cube in [uwind, vwind]: for axis in ["x", "y"]: cube.remove_coord(cube.coord(axis=axis, dim_coords=False)) # regrid and convert input variables self.temperature = self._regrid_variable(temperature, "kelvin") self.humidity = self._regrid_variable(humidity, "1") self.pressure = self._regrid_variable(pressure, "Pa") self.uwind = self._regrid_variable(uwind, "m s-1") self.vwind = self._regrid_variable(vwind, "m s-1") # calculate orography gradients gradx, grady = self._orography_gradients() # calculate v.gradZ self.vgradz = np.multiply(gradx.data, self.uwind.data) + np.multiply( grady.data, self.vwind.data ) def _generate_mask(self) -> ndarray: """ Generates a boolean mask of areas NOT to calculate orographic enhancement. Criteria for calculating orographic enhancement are that all of the following are true: - 3x3 mean topography height >= threshold (20 m) - Relative humidity (fraction) >= threshold (0.8) - v dot grad z (wind x topography gradient) >= threshold (0.0005) The mask is therefore "True" if any of these conditions are false. 
Returns: Boolean mask - where True, set orographic enhancement to a default zero value """ # calculate mean 3x3 (square nbhood) orography heights radius = number_of_grid_cells_to_distance(self.topography, 1) topo_nbhood = NeighbourhoodProcessing("square", radius)(self.topography) topo_nbhood.convert_units("m") # create mask mask = np.full(topo_nbhood.shape, False, dtype=bool) mask = np.where(topo_nbhood.data < self.orog_thresh_m, True, mask) mask = np.where(self.humidity.data < self.rh_thresh_ratio, True, mask) mask = np.where(abs(self.vgradz) < self.vgradz_thresh_ms, True, mask) return mask def _point_orogenh(self) -> ndarray: """ Calculate the grid-point precipitation enhancement contribution due to orographic uplift using: orogenh = ((humidity * svp * vgradz) / (R_WATER_VAPOUR * temperature)) * 60 * 60 Returns: Orographic enhancement values in mm/h """ mask = np.logical_not(self._generate_mask()) point_orogenh = np.zeros(self.temperature.data.shape, dtype=np.float32) prefactor = 3600.0 / R_WATER_VAPOUR numerator = np.multiply(self.humidity.data, self.svp) numerator = np.multiply(numerator, self.vgradz) point_orogenh[mask] = prefactor * np.divide( numerator[mask], self.temperature.data[mask] ) return np.where(point_orogenh > 0, point_orogenh, 0) def _get_point_distances( self, wind_speed: ndarray, max_sin_cos: ndarray ) -> ndarray: """ Generate 3d array of distances to upstream components Args: wind_speed: 2D array of wind speeds max_sin_cos: 2D array containing the larger of sin(wind_direction) or cos(wind_direction) with respect to grid north Returns: 3D array of source-to-destination distances in grid points, with np.nan filled in for out of range values """ # calculate maximum upstream radius of influence at each grid cell upstream_roi = self.upstream_range_of_influence_km / self.grid_spacing_km max_roi = (upstream_roi * max_sin_cos).astype(int) length = np.amax(max_roi) shape = (length, wind_speed.shape[0], wind_speed.shape[1]) distance = np.full(shape, np.nan, dtype=np.float32) for y in range(distance.shape[1]): for x in range(distance.shape[2]): distance[: max_roi[y, x], y, x] = ( np.arange(max_roi[y, x]) / max_sin_cos[y, x] ) return distance @staticmethod def _locate_source_points( wind_speed: ndarray, distance: ndarray, sin_wind_dir: ndarray, cos_wind_dir: ndarray, ) -> Tuple[ndarray, ndarray]: """ Generate 3D arrays of source points from which to add upstream orographic enhancement contribution. Assumes spatial coordinate ordering [y, x]. 
Args: wind_speed: 2D array of wind speed magnitudes distance: 3D array of grid point source-to-destination distances sin_wind_dir: 2D array of sin wind direction wrt grid north cos_wind_dir: 2D array of cos wind direction wrt grid north Returns: - 3D array of source point x-coordinates - 3D array of source point y-coordinates """ xpos, ypos = np.meshgrid( np.arange(wind_speed.shape[1]), np.arange(wind_speed.shape[0]) ) x_source = np.around(xpos - np.multiply(distance, sin_wind_dir)).astype(int) y_source = np.around(ypos - np.multiply(distance, cos_wind_dir)).astype(int) # force coordinates into bounds to avoid truncation at domain edges x_source = np.where(x_source < 0, 0, x_source) x_source = np.where( x_source > wind_speed.shape[1] - 1, wind_speed.shape[1] - 1, x_source ) y_source = np.where(y_source < 0, 0, y_source) y_source = np.where( y_source > wind_speed.shape[0] - 1, wind_speed.shape[0] - 1, y_source ) return x_source, y_source def _compute_weighted_values( self, point_orogenh: ndarray, x_source: ndarray, y_source: ndarray, distance: ndarray, wind_speed: ndarray, ) -> Tuple[ndarray, ndarray]: """ Extract orographic enhancement values from source points and weight according to source-destination distance. Args: point_orogenh: 2D array of point orographic enhancement values x_source: 3D array of x-coordinates of source points from which to read upstream contribution y_source: 3D array of y-coordinates of source points from which to read upstream contribution distance: 3D array of grid point source-to-destination distances wind_speed: 2D array of wind speeds Returns: - 2D array containing a weighted sum of orographic enhancement components from upstream source points - 2D array containing weights for normalisation """ source_values = np.fromiter( ( point_orogenh[y, x] for (x, y) in zip(x_source.flatten(), y_source.flatten()) ), np.float32, count=x_source.size, ).reshape(x_source.shape) # set standard deviation for Gaussian weighting function in grid # squares grid_spacing_m = 1000.0 * self.grid_spacing_km stddev = wind_speed * self.cloud_lifetime_s / grid_spacing_m variance = np.square(stddev) # calculate weighted values at source points value_weight = np.where( (np.isfinite(distance)) & (variance > 0), np.exp(np.divide(-0.5 * np.square(distance), variance)), 0, ) sum_of_weights =
np.sum(value_weight, axis=0)
numpy.sum
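A minimal standalone sketch of the target call, numpy.sum, showing the axis-0 reduction used for the weight normalisation above; the weight values are arbitrary:

import numpy as np

value_weight = np.array([[0.5, 1.0],
                         [0.25, 0.0],
                         [0.25, 1.0]])
sum_of_weights = np.sum(value_weight, axis=0)  # collapse the leading (upstream-distance) axis
print(sum_of_weights)                          # [1. 2.]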
# -------------------------------------------------------- # Faster R-CNN # Copyright (c) 2015 Microsoft # Licensed under The MIT License [see LICENSE for details] # Written by <NAME> and <NAME> # -------------------------------------------------------- import os import yaml import numpy as np import numpy.random as npr from .generate_anchors import generate_anchors from ..utils.cython_bbox import bbox_overlaps, bbox_intersections # TODO: make fast_rcnn irrelevant # >>>> obsolete, because it depends on sth outside of this project from ..fast_rcnn.config import cfg from ..fast_rcnn.bbox_transform import bbox_transform # <<<< obsolete def anchor_target_layer(rpn_cls_score, gt_boxes, gt_ishard, dontcare_areas, im_info, feat_stride_vec=[4,8,16,32,64], anchor_scales=[2, 4, 8, 16, 32]): """ Assign anchors to ground-truth targets. Produces anchor classification labels and bounding-box regression targets. Parameters ---------- rpn_cls_score: for pytorch (1, Ax2, H, W) bg/fg scores of previous conv layer gt_boxes: (G, 5) vstack of [x1, y1, x2, y2, class] gt_ishard: (G, 1), 1 or 0 indicates difficult or not dontcare_areas: (D, 4), some areas may contains small objs but no labelling. D may be 0 im_info: a list of [image_height, image_width, scale_ratios] _feat_stride: the downsampling ratio of feature map to the original input image anchor_scales: the scales to the basic_anchor (basic anchor is [16, 16]) ---------- Returns ---------- rpn_labels : (HxWxA, 1), for each anchor, 0 denotes bg, 1 fg, -1 dontcare rpn_bbox_targets: (HxWxA, 4), distances of the anchors to the gt_boxes(may contains some transform) that are the regression objectives rpn_bbox_inside_weights: (HxWxA, 4) weights of each boxes, mainly accepts hyper param in cfg rpn_bbox_outside_weights: (HxWxA, 4) used to balance the fg/bg, beacuse the numbers of bgs and fgs mays significiantly different """ # allow boxes to sit over the edge by a small amount _allowed_border = 1000 im_info = im_info[0] fpn_args = [] fpn_anchors_fid = np.zeros(0).astype(int) fpn_anchors = np.zeros([0, 4]) fpn_labels = np.zeros(0) fpn_inds_inside = [] fpn_size = len(rpn_cls_score) #[P2,P3,P4,P5,P6] for i in range(fpn_size): _anchors = generate_anchors(scales=np.array([anchor_scales[i]])) _num_anchors = _anchors.shape[0] _feat_stride = feat_stride_vec[i] # map of shape (..., H, W) #height, width = rpn_cls_score.shape[1:3] # Algorithm: # # for each (H, W) location i # generate 9 anchor boxes centered on cell i # apply predicted bbox deltas at cell i to each of the 9 anchors # filter out-of-image anchors # measure GT overlap assert rpn_cls_score[i].shape[0] == 1, \ 'Only single item batches are supported' # map of shape (..., H, W) # pytorch (bs, c, h, w) height, width = rpn_cls_score[i].shape[2:4] # 1. 
Generate proposals from bbox deltas and shifted anchors shift_x = np.arange(0, width) * _feat_stride shift_y = np.arange(0, height) * _feat_stride shift_x, shift_y = np.meshgrid(shift_x, shift_y) # in W H order # K is H x W shifts = np.vstack((shift_x.ravel(), shift_y.ravel(), shift_x.ravel(), shift_y.ravel())).transpose() # add A anchors (1, A, 4) to # cell K shifts (K, 1, 4) to get # shift anchors (K, A, 4) # reshape to (K*A, 4) shifted anchors A = _num_anchors K = shifts.shape[0] all_anchors = (_anchors.reshape((1, A, 4)) + shifts.reshape((1, K, 4)).transpose((1, 0, 2))) all_anchors = all_anchors.reshape((K * A, 4)) total_anchors = int(K * A) # only keep anchors inside the image inds_inside = np.where( (all_anchors[:, 0] >= -_allowed_border) & (all_anchors[:, 1] >= -_allowed_border) & (all_anchors[:, 2] < im_info[1] + _allowed_border) & # width (all_anchors[:, 3] < im_info[0] + _allowed_border) # height )[0] # keep only inside anchors anchors = all_anchors[inds_inside, :] # label: 1 is positive, 0 is negative, -1 is dont care # (A) labels = np.empty((len(inds_inside),), dtype=np.float32) labels.fill(-1) fpn_anchors_fid = np.hstack((fpn_anchors_fid, len(inds_inside))) fpn_anchors = np.vstack((fpn_anchors, anchors)) fpn_labels = np.hstack((fpn_labels, labels)) fpn_inds_inside.append(inds_inside) fpn_args.append([height, width, A, total_anchors]) if len(gt_boxes) > 0: # overlaps between the anchors and the gt boxes # overlaps (ex, gt), shape is A x G overlaps = bbox_overlaps( np.ascontiguousarray(fpn_anchors, dtype=np.float), np.ascontiguousarray(gt_boxes, dtype=np.float)) argmax_overlaps = overlaps.argmax(axis=1) # (A) max_overlaps = overlaps[np.arange(len(fpn_anchors)), argmax_overlaps] gt_argmax_overlaps = overlaps.argmax(axis=0) # G gt_max_overlaps = overlaps[gt_argmax_overlaps, np.arange(overlaps.shape[1])] gt_argmax_overlaps = np.where(overlaps == gt_max_overlaps)[0] if not cfg.TRAIN.RPN_CLOBBER_POSITIVES: # assign bg labels first so that positive labels can clobber them fpn_labels[max_overlaps < cfg.TRAIN.RPN_NEGATIVE_OVERLAP] = 0 # fg label: for each gt, anchor with highest overlap fpn_labels[gt_argmax_overlaps] = 1 # fg label: above threshold IOU fpn_labels[max_overlaps >= cfg.TRAIN.RPN_POSITIVE_OVERLAP] = 1 if cfg.TRAIN.RPN_CLOBBER_POSITIVES: # assign bg labels last so that negative labels can clobber positives fpn_labels[max_overlaps < cfg.TRAIN.RPN_NEGATIVE_OVERLAP] = 0 else: fpn_labels[:]=0 # preclude dontcare areas if dontcare_areas is not None and dontcare_areas.shape[0] > 0: # intersec shape is D x A intersecs = bbox_intersections( np.ascontiguousarray(dontcare_areas, dtype=np.float), # D x 4 np.ascontiguousarray(fpn_anchors, dtype=np.float) # A x 4 ) intersecs_ = intersecs.sum(axis=0) # A x 1 fpn_labels[intersecs_ > cfg.TRAIN.DONTCARE_AREA_INTERSECTION_HI] = -1 # preclude hard samples that are highly occlusioned, truncated or difficult to see if cfg.TRAIN.PRECLUDE_HARD_SAMPLES and gt_ishard is not None and gt_ishard.shape[0] > 0: assert gt_ishard.shape[0] == gt_boxes.shape[0] gt_ishard = gt_ishard.astype(int) gt_hardboxes = gt_boxes[gt_ishard == 1, :] if gt_hardboxes.shape[0] > 0: # H x A hard_overlaps = bbox_overlaps( np.ascontiguousarray(gt_hardboxes, dtype=np.float), # H x 4 np.ascontiguousarray(fpn_anchors, dtype=np.float)) # A x 4 hard_max_overlaps = hard_overlaps.max(axis=0) # (A) fpn_labels[hard_max_overlaps >= cfg.TRAIN.RPN_POSITIVE_OVERLAP] = -1 max_intersec_label_inds = hard_overlaps.argmax(axis=1) # H x 1 fpn_labels[max_intersec_label_inds] = -1 # # subsample 
positive labels if we have too many #num_fg = fpn_labels.shape[0] if cfg.TRAIN.RPN_BATCHSIZE == -1 else int(cfg.TRAIN.RPN_FG_FRACTION * cfg.TRAIN.RPN_BATCHSIZE) num_fg = fpn_labels.shape[0] if cfg.TRAIN.RPN_BATCHSIZE == -1 else int(cfg.TRAIN.RPN_FG_FRACTION * cfg.TRAIN.RPN_BATCHSIZE) fg_inds = np.where(fpn_labels >= 1)[0] if len(fg_inds) > num_fg: disable_inds = npr.choice(fg_inds, size=(len(fg_inds) - num_fg), replace=False) fpn_labels[disable_inds] = -1 # subsample negative labels if we have too many #num_bg = fpn_labels.shape[0] if cfg.TRAIN.RPN_BATCHSIZE == -1 else cfg.TRAIN.RPN_BATCHSIZE - np.sum(labels == 1) num_bg = fpn_labels.shape[0] if cfg.TRAIN.RPN_BATCHSIZE == -1 else cfg.TRAIN.RPN_BATCHSIZE - np.sum(fpn_labels >= 1) bg_inds = np.where(fpn_labels == 0)[0] fpn_anchors_fid = np.hstack((0, fpn_anchors_fid.cumsum())) if len(bg_inds) > num_bg: disable_inds = npr.choice(bg_inds, size=(len(bg_inds) - num_bg), replace=False) fpn_labels[disable_inds] = -1 # print "was %s inds, disabling %s, now %s inds" % ( # len(bg_inds), len(disable_inds), np.sum(labels == 0)) fpn_bbox_targets = np.zeros((len(fpn_anchors), 4), dtype=np.float32) if gt_boxes.size > 0: fpn_bbox_targets[fpn_labels >= 1, :] = bbox_transform(fpn_anchors[fpn_labels >= 1, :], gt_boxes[argmax_overlaps[fpn_labels >= 1], :4]) # fpn_bbox_targets[:] = bbox_transform(fpn_anchors, gt_boxes[argmax_overlaps, :4]) # fpn_bbox_targets = (fpn_bbox_targets - np.array(cfg.TRAIN.BBOX_MEANS)) / np.array(cfg.TRAIN.BBOX_STDS) fpn_bbox_weights = np.zeros((len(fpn_anchors), 4), dtype=np.float32) fpn_bbox_weights[fpn_labels >= 1, :] = np.array(cfg.TRAIN.RPN_BBOX_INSIDE_WEIGHTS) fpn_bbox_outside_weights = np.zeros((len(fpn_anchors), 4), dtype=np.float32) if cfg.TRAIN.RPN_POSITIVE_WEIGHT < 0: # uniform weighting of examples (given non-uniform sampling) # num_examples = np.sum(labels >= 0) + 1 # positive_weights = np.ones((1, 4)) * 1.0 / num_examples # negative_weights = np.ones((1, 4)) * 1.0 / num_examples positive_weights = np.ones((1, 4)) negative_weights = np.zeros((1, 4)) else: assert ((cfg.TRAIN.RPN_POSITIVE_WEIGHT > 0) & (cfg.TRAIN.RPN_POSITIVE_WEIGHT < 1)) positive_weights = (cfg.TRAIN.RPN_POSITIVE_WEIGHT / (np.sum(fpn_labels == 1)) + 1) negative_weights = ((1.0 - cfg.TRAIN.RPN_POSITIVE_WEIGHT) / (
np.sum(fpn_labels == 0)
numpy.sum
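# --- Editor's note: a minimal, self-contained NumPy sketch of the IoU-based anchor
# labelling rule that the anchor_target_layer code above implements via the compiled
# bbox_overlaps helper. The 0.7 / 0.3 thresholds and the function names here are
# illustrative assumptions, not values taken from this project's cfg; ties for the
# best anchor per gt box are also handled more carefully in the original.
import numpy as np

def iou_matrix(anchors, gt_boxes):
    # Pairwise IoU between (A, 4) anchors and (G, 4) gt boxes, boxes given as [x1, y1, x2, y2].
    ax1, ay1, ax2, ay2 = [anchors[:, i, None] for i in range(4)]   # (A, 1) columns
    gx1, gy1, gx2, gy2 = [gt_boxes[None, :, i] for i in range(4)]  # (1, G) rows
    iw = np.clip(np.minimum(ax2, gx2) - np.maximum(ax1, gx1) + 1, 0, None)
    ih = np.clip(np.minimum(ay2, gy2) - np.maximum(ay1, gy1) + 1, 0, None)
    inter = iw * ih
    area_a = (ax2 - ax1 + 1) * (ay2 - ay1 + 1)
    area_g = (gx2 - gx1 + 1) * (gy2 - gy1 + 1)
    return inter / (area_a + area_g - inter)

def assign_anchor_labels(anchors, gt_boxes, pos_thresh=0.7, neg_thresh=0.3):
    # Returns labels (1 = fg, 0 = bg, -1 = ignore) and the best-matching gt index per anchor.
    # Assumes gt_boxes is non-empty; the original code special-cases the empty case.
    overlaps = iou_matrix(anchors, gt_boxes)                      # (A, G)
    best_gt = overlaps.argmax(axis=1)                             # best gt for each anchor
    best_iou = overlaps[np.arange(len(anchors)), best_gt]
    labels = np.full(len(anchors), -1, dtype=np.float32)
    labels[best_iou < neg_thresh] = 0                             # low overlap -> background
    labels[overlaps.argmax(axis=0)] = 1                           # best anchor for every gt -> fg
    labels[best_iou >= pos_thresh] = 1                            # high-overlap anchors -> fg
    return labels, best_gt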
from __future__ import print_function import numpy as np import yt from hyperion.model import Model import matplotlib as mpl mpl.use('Agg') import powderday.config as cfg from powderday.grid_construction import yt_octree_generate from powderday.find_order import find_order import powderday.powderday_test_octree as pto import powderday.hyperion_octree_stats as hos from hyperion.dust import SphericalDust from powderday.helpers import energy_density_absorbed_by_CMB from powderday.analytics import dump_cell_info def sph_m_gen(fname,field_add): refined,dustdens,fc1,fw1,reg,ds = yt_octree_generate(fname,field_add) if yt.__version__ == '4.0.dev0': xmin = (fc1[:,0]-fw1[:,0]/2.).to('cm') #in proper cm xmax = (fc1[:,0]+fw1[:,0]/2.).to('cm') ymin = (fc1[:,1]-fw1[:,1]/2.).to('cm') ymax = (fc1[:,1]+fw1[:,1]/2.).to('cm') zmin = (fc1[:,2]-fw1[:,2]/2.).to('cm') zmax = (fc1[:,2]+fw1[:,2]/2.).to('cm') else: xmin = (fc1[:,0]-fw1[:,0]/2.).convert_to_units('cm') #in proper cm xmax = (fc1[:,0]+fw1[:,0]/2.).convert_to_units('cm') ymin = (fc1[:,1]-fw1[:,1]/2.).convert_to_units('cm') ymax = (fc1[:,1]+fw1[:,1]/2.).convert_to_units('cm') zmin = (fc1[:,2]-fw1[:,2]/2.).convert_to_units('cm') zmax = (fc1[:,2]+fw1[:,2]/2.).convert_to_units('cm') #dx,dy,dz are the edges of the parent grid dx = (np.max(xmax)-np.min(xmin)).value dy = (np.max(ymax)-np.min(ymin)).value dz = (np.max(zmax)-np.min(zmin)).value xcent = float(ds.quan(cfg.model.x_cent,"code_length").to('cm').value) ycent = float(ds.quan(cfg.model.y_cent,"code_length").to('cm').value) zcent = float(ds.quan(cfg.model.z_cent,"code_length").to('cm').value) boost = np.array([xcent,ycent,zcent]) print ('[sph_tributary] boost = ',boost) print ('[sph_tributary] xmin (pc)= ',np.min(xmin.to('pc'))) print ('[sph_tributary] xmax (pc)= ',np.max(xmax.to('pc'))) print ('[sph_tributary] ymin (pc)= ',np.min(ymin.to('pc'))) print ('[sph_tributary] ymax (pc)= ',np.max(ymax.to('pc'))) print ('[sph_tributary] zmin (pc)= ',np.min(zmin.to('pc'))) print ('[sph_tributary] zmax (pc)= ',np.max(zmax.to('pc'))) #<NAME>'s conversion from z-first ordering (yt's default) to #x-first ordering (the script should work both ways) refined_array =
np.array(refined)
numpy.array
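# --- Editor's note: the sph_m_gen code above derives per-cell edges and the parent-grid
# extent from yt face-centers (fc1) and cell widths (fw1). Below is a unit-free NumPy sketch
# of that bookkeeping (yt unit conversion omitted; the function name is illustrative only):
import numpy as np

def cell_edges_and_extent(centers, widths):
    # centers, widths: (N, 3) arrays of cell centers and full cell widths.
    lo = centers - widths / 2.0                    # lower cell edges (xmin, ymin, zmin per cell)
    hi = centers + widths / 2.0                    # upper cell edges (xmax, ymax, zmax per cell)
    extent = hi.max(axis=0) - lo.min(axis=0)       # dx, dy, dz of the enclosing parent grid
    return lo, hi, extent

# Example: two unit cells side by side along x -> extent [2., 1., 1.]
centers = np.array([[0.5, 0.5, 0.5], [1.5, 0.5, 0.5]])
widths = np.ones_like(centers)
lo, hi, extent = cell_edges_and_extent(centers, widths)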
# Copyright (c) 2003-2019 by <NAME> # # TreeCorr is free software: redistribution and use in source and binary forms, # with or without modification, are permitted provided that the following # conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, this # list of conditions, and the disclaimer given in the accompanying LICENSE # file. # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions, and the disclaimer given in the documentation # and/or other materials provided with the distribution. from __future__ import print_function import numpy as np import treecorr import os import coord import fitsio from test_helper import get_script_name, do_pickle, assert_raises, CaptureLog, timer from test_helper import is_ccw, is_ccw_3d @timer def test_log_binning(): import math # Test some basic properties of the base class def check_arrays(nnn): np.testing.assert_almost_equal(nnn.bin_size * nnn.nbins, math.log(nnn.max_sep/nnn.min_sep)) np.testing.assert_almost_equal(nnn.ubin_size * nnn.nubins, nnn.max_u-nnn.min_u) np.testing.assert_almost_equal(nnn.vbin_size * nnn.nvbins, nnn.max_v-nnn.min_v) #print('logr = ',nnn.logr1d) np.testing.assert_equal(nnn.logr1d.shape, (nnn.nbins,) ) np.testing.assert_almost_equal(nnn.logr1d[0], math.log(nnn.min_sep) + 0.5*nnn.bin_size) np.testing.assert_almost_equal(nnn.logr1d[-1], math.log(nnn.max_sep) - 0.5*nnn.bin_size) np.testing.assert_equal(nnn.logr.shape, (nnn.nbins, nnn.nubins, 2*nnn.nvbins) ) np.testing.assert_almost_equal(nnn.logr[:,0,0], nnn.logr1d) np.testing.assert_almost_equal(nnn.logr[:,-1,-1], nnn.logr1d) assert len(nnn.logr) == nnn.nbins #print('u = ',nnn.u1d) np.testing.assert_equal(nnn.u1d.shape, (nnn.nubins,) ) np.testing.assert_almost_equal(nnn.u1d[0], nnn.min_u + 0.5*nnn.ubin_size) np.testing.assert_almost_equal(nnn.u1d[-1], nnn.max_u - 0.5*nnn.ubin_size) np.testing.assert_equal(nnn.u.shape, (nnn.nbins, nnn.nubins, 2*nnn.nvbins) ) np.testing.assert_almost_equal(nnn.u[0,:,0], nnn.u1d) np.testing.assert_almost_equal(nnn.u[-1,:,-1], nnn.u1d) #print('v = ',nnn.v1d) np.testing.assert_equal(nnn.v1d.shape, (2*nnn.nvbins,) ) np.testing.assert_almost_equal(nnn.v1d[0], -nnn.max_v + 0.5*nnn.vbin_size) np.testing.assert_almost_equal(nnn.v1d[-1], nnn.max_v - 0.5*nnn.vbin_size) np.testing.assert_almost_equal(nnn.v1d[nnn.nvbins], nnn.min_v + 0.5*nnn.vbin_size) np.testing.assert_almost_equal(nnn.v1d[nnn.nvbins-1], -nnn.min_v - 0.5*nnn.vbin_size) np.testing.assert_equal(nnn.v.shape, (nnn.nbins, nnn.nubins, 2*nnn.nvbins) ) np.testing.assert_almost_equal(nnn.v[0,0,:], nnn.v1d) np.testing.assert_almost_equal(nnn.v[-1,-1,:], nnn.v1d) def check_defaultuv(nnn): assert nnn.min_u == 0. assert nnn.max_u == 1. assert nnn.nubins == np.ceil(1./nnn.ubin_size) assert nnn.min_v == 0. assert nnn.max_v == 1. assert nnn.nvbins == np.ceil(1./nnn.vbin_size) # Check the different ways to set up the binning: # Omit bin_size nnn = treecorr.NNNCorrelation(min_sep=5, max_sep=20, nbins=20, bin_type='LogRUV') #print(nnn.min_sep,nnn.max_sep,nnn.bin_size,nnn.nbins) #print(nnn.min_u,nnn.max_u,nnn.ubin_size,nnn.nubins) #print(nnn.min_v,nnn.max_v,nnn.vbin_size,nnn.nvbins) assert nnn.min_sep == 5. assert nnn.max_sep == 20. assert nnn.nbins == 20 check_defaultuv(nnn) check_arrays(nnn) # Specify min, max, n for u,v too. 
nnn = treecorr.NNNCorrelation(min_sep=5, max_sep=20, nbins=20, min_u=0.2, max_u=0.9, nubins=12, min_v=0., max_v=0.2, nvbins=2) #print(nnn.min_sep,nnn.max_sep,nnn.bin_size,nnn.nbins) #print(nnn.min_u,nnn.max_u,nnn.ubin_size,nnn.nubins) #print(nnn.min_v,nnn.max_v,nnn.vbin_size,nnn.nvbins) assert nnn.min_sep == 5. assert nnn.max_sep == 20. assert nnn.nbins == 20 assert nnn.min_u == 0.2 assert nnn.max_u == 0.9 assert nnn.nubins == 12 assert nnn.min_v == 0. assert nnn.max_v == 0.2 assert nnn.nvbins == 2 check_arrays(nnn) # Omit min_sep nnn = treecorr.NNNCorrelation(max_sep=20, nbins=20, bin_size=0.1) #print(nnn.min_sep,nnn.max_sep,nnn.bin_size,nnn.nbins) #print(nnn.min_u,nnn.max_u,nnn.ubin_size,nnn.nubins) #print(nnn.min_v,nnn.max_v,nnn.vbin_size,nnn.nvbins) assert nnn.bin_size == 0.1 assert nnn.max_sep == 20. assert nnn.nbins == 20 check_defaultuv(nnn) check_arrays(nnn) # Specify max, n, bs for u,v too. nnn = treecorr.NNNCorrelation(max_sep=20, nbins=20, bin_size=0.1, max_u=0.9, nubins=3, ubin_size=0.05, max_v=0.4, nvbins=4, vbin_size=0.05) #print(nnn.min_sep,nnn.max_sep,nnn.bin_size,nnn.nbins) #print(nnn.min_u,nnn.max_u,nnn.ubin_size,nnn.nubins) #print(nnn.min_v,nnn.max_v,nnn.vbin_size,nnn.nvbins) assert nnn.bin_size == 0.1 assert nnn.max_sep == 20. assert nnn.nbins == 20 assert np.isclose(nnn.ubin_size, 0.05) assert np.isclose(nnn.min_u, 0.75) assert nnn.max_u == 0.9 assert nnn.nubins == 3 assert np.isclose(nnn.vbin_size, 0.05) assert np.isclose(nnn.min_v, 0.2) assert nnn.max_v == 0.4 assert nnn.nvbins == 4 check_arrays(nnn) # Omit max_sep nnn = treecorr.NNNCorrelation(min_sep=5, nbins=20, bin_size=0.1) #print(nnn.min_sep,nnn.max_sep,nnn.bin_size,nnn.nbins) #print(nnn.min_u,nnn.max_u,nnn.ubin_size,nnn.nubins) #print(nnn.min_v,nnn.max_v,nnn.vbin_size,nnn.nvbins) assert nnn.bin_size == 0.1 assert nnn.min_sep == 5. assert nnn.nbins == 20 check_defaultuv(nnn) check_arrays(nnn) # Specify min, n, bs for u,v too. nnn = treecorr.NNNCorrelation(min_sep=5, nbins=20, bin_size=0.1, min_u=0.7, nubins=4, ubin_size=0.05, min_v=0.2, nvbins=4, vbin_size=0.05) #print(nnn.min_sep,nnn.max_sep,nnn.bin_size,nnn.nbins) #print(nnn.min_u,nnn.max_u,nnn.ubin_size,nnn.nubins) #print(nnn.min_v,nnn.max_v,nnn.vbin_size,nnn.nvbins) assert nnn.min_sep == 5. assert nnn.bin_size == 0.1 assert nnn.nbins == 20 assert nnn.min_u == 0.7 assert np.isclose(nnn.ubin_size, 0.05) assert nnn.nubins == 4 assert nnn.min_v == 0.2 assert nnn.max_v == 0.4 assert np.isclose(nnn.vbin_size, 0.05) assert nnn.nvbins == 4 check_arrays(nnn) # Omit nbins nnn = treecorr.NNNCorrelation(min_sep=5, max_sep=20, bin_size=0.1) #print(nnn.min_sep,nnn.max_sep,nnn.bin_size,nnn.nbins) #print(nnn.min_u,nnn.max_u,nnn.ubin_size,nnn.nubins) #print(nnn.min_v,nnn.max_v,nnn.vbin_size,nnn.nvbins) assert nnn.bin_size <= 0.1 assert nnn.min_sep == 5. assert nnn.max_sep == 20. check_defaultuv(nnn) check_arrays(nnn) # Specify min, max, bs for u,v too. nnn = treecorr.NNNCorrelation(min_sep=5, max_sep=20, bin_size=0.1, min_u=0.2, max_u=0.9, ubin_size=0.03, min_v=0.1, max_v=0.3, vbin_size=0.07) #print(nnn.min_sep,nnn.max_sep,nnn.bin_size,nnn.nbins) #print(nnn.min_u,nnn.max_u,nnn.ubin_size,nnn.nubins) #print(nnn.min_v,nnn.max_v,nnn.vbin_size,nnn.nvbins) assert nnn.min_sep == 5. assert nnn.max_sep == 20. 
assert nnn.bin_size <= 0.1 assert nnn.min_u == 0.2 assert nnn.max_u == 0.9 assert nnn.nubins == 24 assert np.isclose(nnn.ubin_size, 0.7/24) assert nnn.min_v == 0.1 assert nnn.max_v == 0.3 assert nnn.nvbins == 3 assert np.isclose(nnn.vbin_size, 0.2/3) check_arrays(nnn) # If only one of min/max v are set, respect that nnn = treecorr.NNNCorrelation(min_sep=5, max_sep=20, bin_size=0.1, min_u=0.2, ubin_size=0.03, min_v=0.2, vbin_size=0.07) #print(nnn.min_sep,nnn.max_sep,nnn.bin_size,nnn.nbins) #print(nnn.min_u,nnn.max_u,nnn.ubin_size,nnn.nubins) #print(nnn.min_v,nnn.max_v,nnn.vbin_size,nnn.nvbins) assert nnn.min_u == 0.2 assert nnn.max_u == 1. assert nnn.nubins == 27 assert np.isclose(nnn.ubin_size, 0.8/27) assert nnn.min_v == 0.2 assert nnn.max_v == 1. assert nnn.nvbins == 12 assert np.isclose(nnn.vbin_size, 0.8/12) check_arrays(nnn) nnn = treecorr.NNNCorrelation(min_sep=5, max_sep=20, bin_size=0.1, max_u=0.2, ubin_size=0.03, max_v=0.2, vbin_size=0.07) #print(nnn.min_sep,nnn.max_sep,nnn.bin_size,nnn.nbins) #print(nnn.min_u,nnn.max_u,nnn.ubin_size,nnn.nubins) #print(nnn.min_v,nnn.max_v,nnn.vbin_size,nnn.nvbins) assert nnn.min_u == 0. assert nnn.max_u == 0.2 assert nnn.nubins == 7 assert np.isclose(nnn.ubin_size, 0.2/7) assert nnn.min_v == 0. assert nnn.max_v == 0.2 assert nnn.nvbins == 3 assert np.isclose(nnn.vbin_size, 0.2/3) check_arrays(nnn) # If only vbin_size is set for v, automatically figure out others. # (And if necessary adjust the bin_size down a bit.) nnn = treecorr.NNNCorrelation(min_sep=5, max_sep=20, bin_size=0.1, ubin_size=0.3, vbin_size=0.3) #print(nnn.min_sep,nnn.max_sep,nnn.bin_size,nnn.nbins) #print(nnn.min_u,nnn.max_u,nnn.ubin_size,nnn.nubins) #print(nnn.min_v,nnn.max_v,nnn.vbin_size,nnn.nvbins) assert nnn.bin_size <= 0.1 assert nnn.min_sep == 5. assert nnn.max_sep == 20. assert nnn.min_u == 0. assert nnn.max_u == 1. assert nnn.nubins == 4 assert np.isclose(nnn.ubin_size, 0.25) assert nnn.min_v == 0. assert nnn.max_v == 1. assert nnn.nvbins == 4 assert np.isclose(nnn.vbin_size, 0.25) check_arrays(nnn) # If only nvbins is set for v, automatically figure out others. nnn = treecorr.NNNCorrelation(min_sep=5, max_sep=20, bin_size=0.1, nubins=5, nvbins=5) #print(nnn.min_sep,nnn.max_sep,nnn.bin_size,nnn.nbins) #print(nnn.min_u,nnn.max_u,nnn.ubin_size,nnn.nubins) #print(nnn.min_v,nnn.max_v,nnn.vbin_size,nnn.nvbins) assert nnn.bin_size <= 0.1 assert nnn.min_sep == 5. assert nnn.max_sep == 20. assert nnn.min_u == 0. assert nnn.max_u == 1. assert nnn.nubins == 5 assert np.isclose(nnn.ubin_size,0.2) assert nnn.min_v == 0. assert nnn.max_v == 1. assert nnn.nvbins == 5 assert np.isclose(nnn.vbin_size,0.2) check_arrays(nnn) # If both nvbins and vbin_size are set, set min/max automatically nnn = treecorr.NNNCorrelation(min_sep=5, max_sep=20, bin_size=0.1, ubin_size=0.1, nubins=5, vbin_size=0.1, nvbins=5) #print(nnn.min_sep,nnn.max_sep,nnn.bin_size,nnn.nbins) #print(nnn.min_u,nnn.max_u,nnn.ubin_size,nnn.nubins) #print(nnn.min_v,nnn.max_v,nnn.vbin_size,nnn.nvbins) assert nnn.bin_size <= 0.1 assert nnn.min_sep == 5. assert nnn.max_sep == 20. assert nnn.ubin_size == 0.1 assert nnn.nubins == 5 assert nnn.max_u == 1. assert np.isclose(nnn.min_u,0.5) assert nnn.vbin_size == 0.1 assert nnn.nvbins == 5 assert nnn.min_v == 0. 
assert np.isclose(nnn.max_v,0.5) check_arrays(nnn) assert_raises(TypeError, treecorr.NNNCorrelation) assert_raises(TypeError, treecorr.NNNCorrelation, min_sep=5) assert_raises(TypeError, treecorr.NNNCorrelation, max_sep=20) assert_raises(TypeError, treecorr.NNNCorrelation, bin_size=0.1) assert_raises(TypeError, treecorr.NNNCorrelation, nbins=20) assert_raises(TypeError, treecorr.NNNCorrelation, min_sep=5, max_sep=20) assert_raises(TypeError, treecorr.NNNCorrelation, min_sep=5, bin_size=0.1) assert_raises(TypeError, treecorr.NNNCorrelation, min_sep=5, nbins=20) assert_raises(TypeError, treecorr.NNNCorrelation, max_sep=20, bin_size=0.1) assert_raises(TypeError, treecorr.NNNCorrelation, max_sep=20, nbins=20) assert_raises(TypeError, treecorr.NNNCorrelation, bin_size=0.1, nbins=20) assert_raises(TypeError, treecorr.NNNCorrelation, min_sep=5, max_sep=20, bin_size=0.1, nbins=20) assert_raises(ValueError, treecorr.NNNCorrelation, min_sep=20, max_sep=5, bin_size=0.1) assert_raises(ValueError, treecorr.NNNCorrelation, min_sep=20, max_sep=5, nbins=20) assert_raises(ValueError, treecorr.NNNCorrelation, min_sep=20, max_sep=5, nbins=20, bin_type='Log') assert_raises(ValueError, treecorr.NNNCorrelation, min_sep=20, max_sep=5, nbins=20, bin_type='Linear') assert_raises(ValueError, treecorr.NNNCorrelation, min_sep=20, max_sep=5, nbins=20, bin_type='TwoD') assert_raises(ValueError, treecorr.NNNCorrelation, min_sep=20, max_sep=5, nbins=20, bin_type='Invalid') assert_raises(TypeError, treecorr.NNNCorrelation, min_sep=5, max_sep=20, bin_size=0.1, min_u=0.3, max_u=0.9, ubin_size=0.1, nubins=6) assert_raises(ValueError, treecorr.NNNCorrelation, min_sep=5, max_sep=20, bin_size=0.1, min_u=0.9, max_u=0.3) assert_raises(ValueError, treecorr.NNNCorrelation, min_sep=5, max_sep=20, bin_size=0.1, min_u=-0.1, max_u=0.3) assert_raises(ValueError, treecorr.NNNCorrelation, min_sep=5, max_sep=20, bin_size=0.1, min_u=0.1, max_u=1.3) assert_raises(TypeError, treecorr.NNNCorrelation, min_sep=5, max_sep=20, bin_size=0.1, min_v=0.1, max_v=0.9, vbin_size=0.1, nvbins=9) assert_raises(ValueError, treecorr.NNNCorrelation, min_sep=5, max_sep=20, bin_size=0.1, min_v=0.9, max_v=0.3) assert_raises(ValueError, treecorr.NNNCorrelation, min_sep=5, max_sep=20, bin_size=0.1, min_v=-0.1, max_v=0.3) assert_raises(ValueError, treecorr.NNNCorrelation, min_sep=5, max_sep=20, bin_size=0.1, min_v=0.1, max_v=1.3) assert_raises(ValueError, treecorr.NNNCorrelation, min_sep=20, max_sep=5, nbins=20, split_method='invalid') # Check the use of sep_units # radians nnn = treecorr.NNNCorrelation(min_sep=5, max_sep=20, nbins=20, sep_units='radians') #print(nnn.min_sep,nnn.max_sep,nnn.bin_size,nnn.nbins) #print(nnn.min_u,nnn.max_u,nnn.ubin_size,nnn.nubins) #print(nnn.min_v,nnn.max_v,nnn.vbin_size,nnn.nvbins) np.testing.assert_almost_equal(nnn.min_sep, 5.) np.testing.assert_almost_equal(nnn.max_sep, 20.) np.testing.assert_almost_equal(nnn._min_sep, 5.) np.testing.assert_almost_equal(nnn._max_sep, 20.) assert nnn.min_sep == 5. assert nnn.max_sep == 20. assert nnn.nbins == 20 check_defaultuv(nnn) check_arrays(nnn) # arcsec nnn = treecorr.NNNCorrelation(min_sep=5, max_sep=20, nbins=20, sep_units='arcsec') #print(nnn.min_sep,nnn.max_sep,nnn.bin_size,nnn.nbins) #print(nnn.min_u,nnn.max_u,nnn.ubin_size,nnn.nubins) #print(nnn.min_v,nnn.max_v,nnn.vbin_size,nnn.nvbins) np.testing.assert_almost_equal(nnn.min_sep, 5.) np.testing.assert_almost_equal(nnn.max_sep, 20.) np.testing.assert_almost_equal(nnn._min_sep, 5. 
* math.pi/180/3600) np.testing.assert_almost_equal(nnn._max_sep, 20. * math.pi/180/3600) assert nnn.nbins == 20 np.testing.assert_almost_equal(nnn.bin_size * nnn.nbins, math.log(nnn.max_sep/nnn.min_sep)) # Note that logr is in the separation units, not radians. np.testing.assert_almost_equal(nnn.logr[0], math.log(5) + 0.5*nnn.bin_size) np.testing.assert_almost_equal(nnn.logr[-1], math.log(20) - 0.5*nnn.bin_size) assert len(nnn.logr) == nnn.nbins check_defaultuv(nnn) # arcmin nnn = treecorr.NNNCorrelation(min_sep=5, max_sep=20, nbins=20, sep_units='arcmin') #print(nnn.min_sep,nnn.max_sep,nnn.bin_size,nnn.nbins) #print(nnn.min_u,nnn.max_u,nnn.ubin_size,nnn.nubins) #print(nnn.min_v,nnn.max_v,nnn.vbin_size,nnn.nvbins) np.testing.assert_almost_equal(nnn.min_sep, 5.) np.testing.assert_almost_equal(nnn.max_sep, 20.) np.testing.assert_almost_equal(nnn._min_sep, 5. * math.pi/180/60) np.testing.assert_almost_equal(nnn._max_sep, 20. * math.pi/180/60) assert nnn.nbins == 20 np.testing.assert_almost_equal(nnn.bin_size * nnn.nbins, math.log(nnn.max_sep/nnn.min_sep)) np.testing.assert_almost_equal(nnn.logr[0], math.log(5) + 0.5*nnn.bin_size) np.testing.assert_almost_equal(nnn.logr[-1], math.log(20) - 0.5*nnn.bin_size) assert len(nnn.logr) == nnn.nbins check_defaultuv(nnn) # degrees nnn = treecorr.NNNCorrelation(min_sep=5, max_sep=20, nbins=20, sep_units='degrees') #print(nnn.min_sep,nnn.max_sep,nnn.bin_size,nnn.nbins) #print(nnn.min_u,nnn.max_u,nnn.ubin_size,nnn.nubins) #print(nnn.min_v,nnn.max_v,nnn.vbin_size,nnn.nvbins) np.testing.assert_almost_equal(nnn.min_sep, 5.) np.testing.assert_almost_equal(nnn.max_sep, 20.) np.testing.assert_almost_equal(nnn._min_sep, 5. * math.pi/180) np.testing.assert_almost_equal(nnn._max_sep, 20. * math.pi/180) assert nnn.nbins == 20 np.testing.assert_almost_equal(nnn.bin_size * nnn.nbins, math.log(nnn.max_sep/nnn.min_sep)) np.testing.assert_almost_equal(nnn.logr[0], math.log(5) + 0.5*nnn.bin_size) np.testing.assert_almost_equal(nnn.logr[-1], math.log(20) - 0.5*nnn.bin_size) assert len(nnn.logr) == nnn.nbins check_defaultuv(nnn) # hours nnn = treecorr.NNNCorrelation(min_sep=5, max_sep=20, nbins=20, sep_units='hours') #print(nnn.min_sep,nnn.max_sep,nnn.bin_size,nnn.nbins) #print(nnn.min_u,nnn.max_u,nnn.ubin_size,nnn.nubins) #print(nnn.min_v,nnn.max_v,nnn.vbin_size,nnn.nvbins) np.testing.assert_almost_equal(nnn.min_sep, 5.) np.testing.assert_almost_equal(nnn.max_sep, 20.) np.testing.assert_almost_equal(nnn._min_sep, 5. * math.pi/12) np.testing.assert_almost_equal(nnn._max_sep, 20. * math.pi/12) assert nnn.nbins == 20 np.testing.assert_almost_equal(nnn.bin_size * nnn.nbins, math.log(nnn.max_sep/nnn.min_sep)) np.testing.assert_almost_equal(nnn.logr[0], math.log(5) + 0.5*nnn.bin_size) np.testing.assert_almost_equal(nnn.logr[-1], math.log(20) - 0.5*nnn.bin_size) assert len(nnn.logr) == nnn.nbins check_defaultuv(nnn) # Check bin_slop # Start with default behavior nnn = treecorr.NNNCorrelation(min_sep=5, nbins=14, bin_size=0.1, min_u=0., max_u=0.9, ubin_size=0.03, min_v=0., max_v=0.21, vbin_size=0.07) #print(nnn.bin_size,nnn.bin_slop,nnn.b) #print(nnn.ubin_size,nnn.bu) #print(nnn.vbin_size,nnn.bv) assert nnn.bin_slop == 1.0 assert nnn.bin_size == 0.1 assert np.isclose(nnn.ubin_size, 0.03) assert np.isclose(nnn.vbin_size, 0.07) np.testing.assert_almost_equal(nnn.b, 0.1) np.testing.assert_almost_equal(nnn.bu, 0.03) np.testing.assert_almost_equal(nnn.bv, 0.07) # Explicitly set bin_slop=1.0 does the same thing. 
nnn = treecorr.NNNCorrelation(min_sep=5, nbins=14, bin_size=0.1, bin_slop=1.0, min_u=0., max_u=0.9, ubin_size=0.03, min_v=0., max_v=0.21, vbin_size=0.07) #print(nnn.bin_size,nnn.bin_slop,nnn.b) #print(nnn.ubin_size,nnn.bu) #print(nnn.vbin_size,nnn.bv) assert nnn.bin_slop == 1.0 assert nnn.bin_size == 0.1 assert np.isclose(nnn.ubin_size, 0.03) assert np.isclose(nnn.vbin_size, 0.07) np.testing.assert_almost_equal(nnn.b, 0.1) np.testing.assert_almost_equal(nnn.bu, 0.03) np.testing.assert_almost_equal(nnn.bv, 0.07) # Use a smaller bin_slop nnn = treecorr.NNNCorrelation(min_sep=5, nbins=14, bin_size=0.1, bin_slop=0.2, min_u=0., max_u=0.9, ubin_size=0.03, min_v=0., max_v=0.21, vbin_size=0.07) #print(nnn.bin_size,nnn.bin_slop,nnn.b) #print(nnn.ubin_size,nnn.bu) #print(nnn.vbin_size,nnn.bv) assert nnn.bin_slop == 0.2 assert nnn.bin_size == 0.1 assert np.isclose(nnn.ubin_size, 0.03) assert np.isclose(nnn.vbin_size, 0.07) np.testing.assert_almost_equal(nnn.b, 0.02) np.testing.assert_almost_equal(nnn.bu, 0.006) np.testing.assert_almost_equal(nnn.bv, 0.014) # Use bin_slop == 0 nnn = treecorr.NNNCorrelation(min_sep=5, nbins=14, bin_size=0.1, bin_slop=0.0, min_u=0., max_u=0.9, ubin_size=0.03, min_v=0., max_v=0.21, vbin_size=0.07) #print(nnn.bin_size,nnn.bin_slop,nnn.b) #print(nnn.ubin_size,nnn.bu) #print(nnn.vbin_size,nnn.bv) assert nnn.bin_slop == 0.0 assert nnn.bin_size == 0.1 assert np.isclose(nnn.ubin_size, 0.03) assert np.isclose(nnn.vbin_size, 0.07) np.testing.assert_almost_equal(nnn.b, 0.0) np.testing.assert_almost_equal(nnn.bu, 0.0) np.testing.assert_almost_equal(nnn.bv, 0.0) # Bigger bin_slop nnn = treecorr.NNNCorrelation(min_sep=5, nbins=14, bin_size=0.1, bin_slop=2.0, min_u=0., max_u=0.9, ubin_size=0.03, min_v=0., max_v=0.21, vbin_size=0.07, verbose=0) #print(nnn.bin_size,nnn.bin_slop,nnn.b) #print(nnn.ubin_size,nnn.bu) #print(nnn.vbin_size,nnn.bv) assert nnn.bin_slop == 2.0 assert nnn.bin_size == 0.1 assert np.isclose(nnn.ubin_size, 0.03) assert np.isclose(nnn.vbin_size, 0.07) np.testing.assert_almost_equal(nnn.b, 0.2) np.testing.assert_almost_equal(nnn.bu, 0.06) np.testing.assert_almost_equal(nnn.bv, 0.14) # With bin_size > 0.1, explicit bin_slop=1.0 is accepted. 
nnn = treecorr.NNNCorrelation(min_sep=5, nbins=14, bin_size=0.4, bin_slop=1.0, min_u=0., max_u=0.9, ubin_size=0.03, min_v=0., max_v=0.21, vbin_size=0.07, verbose=0) #print(nnn.bin_size,nnn.bin_slop,nnn.b) #print(nnn.ubin_size,nnn.bu) #print(nnn.vbin_size,nnn.bv) assert nnn.bin_slop == 1.0 assert nnn.bin_size == 0.4 assert np.isclose(nnn.ubin_size, 0.03) assert np.isclose(nnn.vbin_size, 0.07) np.testing.assert_almost_equal(nnn.b, 0.4) np.testing.assert_almost_equal(nnn.bu, 0.03) np.testing.assert_almost_equal(nnn.bv, 0.07) # But implicit bin_slop is reduced so that b = 0.1 nnn = treecorr.NNNCorrelation(min_sep=5, nbins=14, bin_size=0.4, min_u=0., max_u=0.9, ubin_size=0.03, min_v=0., max_v=0.21, vbin_size=0.07) #print(nnn.bin_size,nnn.bin_slop,nnn.b) #print(nnn.ubin_size,nnn.bu) #print(nnn.vbin_size,nnn.bv) assert nnn.bin_size == 0.4 assert np.isclose(nnn.ubin_size, 0.03) assert np.isclose(nnn.vbin_size, 0.07) np.testing.assert_almost_equal(nnn.b, 0.1) np.testing.assert_almost_equal(nnn.bu, 0.03) np.testing.assert_almost_equal(nnn.bv, 0.07) np.testing.assert_almost_equal(nnn.bin_slop, 0.25) # Separately for each of the three parameters nnn = treecorr.NNNCorrelation(min_sep=5, nbins=14, bin_size=0.05, min_u=0., max_u=0.9, ubin_size=0.3, min_v=0., max_v=0.17, vbin_size=0.17) #print(nnn.bin_size,nnn.bin_slop,nnn.b) #print(nnn.ubin_size,nnn.bu) #print(nnn.vbin_size,nnn.bv) assert nnn.bin_size == 0.05 assert np.isclose(nnn.ubin_size, 0.3) assert np.isclose(nnn.vbin_size, 0.17) np.testing.assert_almost_equal(nnn.b, 0.05) np.testing.assert_almost_equal(nnn.bu, 0.1) np.testing.assert_almost_equal(nnn.bv, 0.1) np.testing.assert_almost_equal(nnn.bin_slop, 1.0) # The stored bin_slop is just for lnr @timer def test_direct_count_auto(): # If the catalogs are small enough, we can do a direct count of the number of triangles # to see if comes out right. This should exactly match the treecorr code if bin_slop=0. ngal = 50 s = 10. rng = np.random.RandomState(8675309) x = rng.normal(0,s, (ngal,) ) y = rng.normal(0,s, (ngal,) ) cat = treecorr.Catalog(x=x, y=y) min_sep = 1. max_sep = 50. 
nbins = 50 min_u = 0.13 max_u = 0.89 nubins = 10 min_v = 0.13 max_v = 0.59 nvbins = 10 ddd = treecorr.NNNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins, min_u=min_u, max_u=max_u, nubins=nubins, min_v=min_v, max_v=max_v, nvbins=nvbins, brute=True, verbose=1) ddd.process(cat) log_min_sep = np.log(min_sep) log_max_sep = np.log(max_sep) true_ntri = np.zeros( (nbins, nubins, 2*nvbins) ) bin_size = (log_max_sep - log_min_sep) / nbins ubin_size = (max_u-min_u) / nubins vbin_size = (max_v-min_v) / nvbins for i in range(ngal): for j in range(i+1,ngal): for k in range(j+1,ngal): dij = np.sqrt((x[i]-x[j])**2 + (y[i]-y[j])**2) dik = np.sqrt((x[i]-x[k])**2 + (y[i]-y[k])**2) djk = np.sqrt((x[j]-x[k])**2 + (y[j]-y[k])**2) if dij == 0.: continue if dik == 0.: continue if djk == 0.: continue if dij < dik: if dik < djk: d3 = dij; d2 = dik; d1 = djk ccw = is_ccw(x[i],y[i],x[j],y[j],x[k],y[k]) elif dij < djk: d3 = dij; d2 = djk; d1 = dik ccw = is_ccw(x[j],y[j],x[i],y[i],x[k],y[k]) else: d3 = djk; d2 = dij; d1 = dik ccw = is_ccw(x[j],y[j],x[k],y[k],x[i],y[i]) else: if dij < djk: d3 = dik; d2 = dij; d1 = djk ccw = is_ccw(x[i],y[i],x[k],y[k],x[j],y[j]) elif dik < djk: d3 = dik; d2 = djk; d1 = dij ccw = is_ccw(x[k],y[k],x[i],y[i],x[j],y[j]) else: d3 = djk; d2 = dik; d1 = dij ccw = is_ccw(x[k],y[k],x[j],y[j],x[i],y[i]) r = d2 u = d3/d2 v = (d1-d2)/d3 if r < min_sep or r >= max_sep: continue if u < min_u or u >= max_u: continue if v < min_v or v >= max_v: continue if not ccw: v = -v kr = int(np.floor( (np.log(r)-log_min_sep) / bin_size )) ku = int(np.floor( (u-min_u) / ubin_size )) if v > 0: kv = int(np.floor( (v-min_v) / vbin_size )) + nvbins else: kv = int(np.floor( (v-(-max_v)) / vbin_size )) assert 0 <= kr < nbins assert 0 <= ku < nubins assert 0 <= kv < 2*nvbins true_ntri[kr,ku,kv] += 1 nz = np.where((ddd.ntri > 0) | (true_ntri > 0)) print('non-zero at:') print(nz) print('d1 = ',ddd.meand1[nz]) print('d2 = ',ddd.meand2[nz]) print('d3 = ',ddd.meand3[nz]) print('rnom = ',ddd.rnom[nz]) print('u = ',ddd.u[nz]) print('v = ',ddd.v[nz]) print('ddd.ntri = ',ddd.ntri[nz]) print('true_ntri = ',true_ntri[nz]) print('diff = ',ddd.ntri[nz] - true_ntri[nz]) np.testing.assert_array_equal(ddd.ntri, true_ntri) # Check that running via the corr3 script works correctly. file_name = os.path.join('data','nnn_direct_data.dat') with open(file_name, 'w') as fid: for i in range(ngal): fid.write(('%.20f %.20f\n')%(x[i],y[i])) L = 10*s nrand = ngal rx = (rng.random_sample(nrand)-0.5) * L ry = (rng.random_sample(nrand)-0.5) * L rcat = treecorr.Catalog(x=rx, y=ry) rand_file_name = os.path.join('data','nnn_direct_rand.dat') with open(rand_file_name, 'w') as fid: for i in range(nrand): fid.write(('%.20f %.20f\n')%(rx[i],ry[i])) rrr = treecorr.NNNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins, min_u=min_u, max_u=max_u, nubins=nubins, min_v=min_v, max_v=max_v, nvbins=nvbins, brute=True, verbose=0, rng=rng) rrr.process(rcat) zeta, varzeta = ddd.calculateZeta(rrr) # Semi-gratuitous check of BinnedCorr3.rng access. assert rrr.rng is rng assert ddd.rng is not rng # First do this via the corr3 function. 
config = treecorr.config.read_config('configs/nnn_direct.yaml') logger = treecorr.config.setup_logger(0) treecorr.corr3(config, logger) corr3_output = np.genfromtxt(os.path.join('output','nnn_direct.out'), names=True, skip_header=1) print('corr3_output = ',corr3_output) print('corr3_output.dtype = ',corr3_output.dtype) print('rnom = ',ddd.rnom.flatten()) print(' ',corr3_output['r_nom']) np.testing.assert_allclose(corr3_output['r_nom'], ddd.rnom.flatten(), rtol=1.e-3) print('unom = ',ddd.u.flatten()) print(' ',corr3_output['u_nom']) np.testing.assert_allclose(corr3_output['u_nom'], ddd.u.flatten(), rtol=1.e-3) print('vnom = ',ddd.v.flatten()) print(' ',corr3_output['v_nom']) np.testing.assert_allclose(corr3_output['v_nom'], ddd.v.flatten(), rtol=1.e-3) print('DDD = ',ddd.ntri.flatten()) print(' ',corr3_output['DDD']) np.testing.assert_allclose(corr3_output['DDD'], ddd.ntri.flatten(), rtol=1.e-3) np.testing.assert_allclose(corr3_output['ntri'], ddd.ntri.flatten(), rtol=1.e-3) print('RRR = ',rrr.ntri.flatten()) print(' ',corr3_output['RRR']) np.testing.assert_allclose(corr3_output['RRR'], rrr.ntri.flatten(), rtol=1.e-3) print('zeta = ',zeta.flatten()) print('from corr3 output = ',corr3_output['zeta']) print('diff = ',corr3_output['zeta']-zeta.flatten()) diff_index = np.where(np.abs(corr3_output['zeta']-zeta.flatten()) > 1.e-5)[0] print('different at ',diff_index) print('zeta[diffs] = ',zeta.flatten()[diff_index]) print('corr3.zeta[diffs] = ',corr3_output['zeta'][diff_index]) print('diff[diffs] = ',zeta.flatten()[diff_index] - corr3_output['zeta'][diff_index]) np.testing.assert_allclose(corr3_output['zeta'], zeta.flatten(), rtol=1.e-3) np.testing.assert_allclose(corr3_output['sigma_zeta'], np.sqrt(varzeta).flatten(), rtol=1.e-3) # Now calling out to the external corr3 executable. # This is the only time we test the corr3 executable. All other tests use corr3 function. 
import subprocess corr3_exe = get_script_name('corr3') p = subprocess.Popen( [corr3_exe,"configs/nnn_direct.yaml","verbose=0"] ) p.communicate() corr3_output = np.genfromtxt(os.path.join('output','nnn_direct.out'), names=True, skip_header=1) np.testing.assert_allclose(corr3_output['zeta'], zeta.flatten(), rtol=1.e-3) # Also check compensated drr = treecorr.NNNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins, min_u=min_u, max_u=max_u, nubins=nubins, min_v=min_v, max_v=max_v, nvbins=nvbins, brute=True, verbose=0) rdd = treecorr.NNNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins, min_u=min_u, max_u=max_u, nubins=nubins, min_v=min_v, max_v=max_v, nvbins=nvbins, brute=True, verbose=0) drr.process(cat, rcat) rdd.process(rcat, cat) zeta, varzeta = ddd.calculateZeta(rrr,drr,rdd) config['nnn_statistic'] = 'compensated' treecorr.corr3(config, logger) corr3_output = np.genfromtxt(os.path.join('output','nnn_direct.out'), names=True, skip_header=1) np.testing.assert_allclose(corr3_output['r_nom'], ddd.rnom.flatten(), rtol=1.e-3) np.testing.assert_allclose(corr3_output['u_nom'], ddd.u.flatten(), rtol=1.e-3) np.testing.assert_allclose(corr3_output['v_nom'], ddd.v.flatten(), rtol=1.e-3) np.testing.assert_allclose(corr3_output['DDD'], ddd.ntri.flatten(), rtol=1.e-3) np.testing.assert_allclose(corr3_output['ntri'], ddd.ntri.flatten(), rtol=1.e-3) print('rrr.tot = ',rrr.tot) print('ddd.tot = ',ddd.tot) print('drr.tot = ',drr.tot) print('rdd.tot = ',rdd.tot) rrrf = ddd.tot / rrr.tot drrf = ddd.tot / drr.tot rddf = ddd.tot / rdd.tot np.testing.assert_allclose(corr3_output['RRR'], rrr.ntri.flatten() * rrrf, rtol=1.e-3) np.testing.assert_allclose(corr3_output['DRR'], drr.ntri.flatten() * drrf, rtol=1.e-3) np.testing.assert_allclose(corr3_output['RDD'], rdd.ntri.flatten() * rddf, rtol=1.e-3) np.testing.assert_allclose(corr3_output['zeta'], zeta.flatten(), rtol=1.e-3) np.testing.assert_allclose(corr3_output['sigma_zeta'], np.sqrt(varzeta).flatten(), rtol=1.e-3) # Repeat with binslop = 0, since the code flow is different from bture=True ddd = treecorr.NNNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins, min_u=min_u, max_u=max_u, nubins=nubins, min_v=min_v, max_v=max_v, nvbins=nvbins, bin_slop=0, verbose=1) ddd.process(cat) #print('ddd.ntri = ',ddd.ntri) #print('true_ntri => ',true_ntri) #print('diff = ',ddd.ntri - true_ntri) np.testing.assert_array_equal(ddd.ntri, true_ntri) # And again with no top-level recursion ddd = treecorr.NNNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins, min_u=min_u, max_u=max_u, nubins=nubins, min_v=min_v, max_v=max_v, nvbins=nvbins, bin_slop=0, verbose=1, max_top=0) ddd.process(cat) #print('ddd.ntri = ',ddd.ntri) #print('true_ntri => ',true_ntri) #print('diff = ',ddd.ntri - true_ntri) np.testing.assert_array_equal(ddd.ntri, true_ntri) # And compare to the cross correlation # Here, we get 6x as much, since each triangle is discovered 6 times. ddd.clear() ddd.process(cat,cat,cat, num_threads=2) #print('ddd.ntri = ',ddd.ntri) #print('true_ntri => ',true_ntri) #print('diff = ',ddd.ntri - true_ntri) np.testing.assert_array_equal(ddd.ntri, 6*true_ntri) # With the real CrossCorrelation class, each of the 6 correlations should end up being # the same thing (without the extra factor of 6). dddc = treecorr.NNNCrossCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins, min_u=min_u, max_u=max_u, nubins=nubins, min_v=min_v, max_v=max_v, nvbins=nvbins, bin_slop=0, verbose=1, max_top=0) dddc.process(cat,cat,cat, num_threads=2) # All 6 correlations are equal. 
for d in [dddc.n1n2n3, dddc.n1n3n2, dddc.n2n1n3, dddc.n2n3n1, dddc.n3n1n2, dddc.n3n2n1]: #print('ddd.ntri = ',ddd.ntri) #print('true_ntri => ',true_ntri) #print('diff = ',ddd.ntri - true_ntri) np.testing.assert_array_equal(d.ntri, true_ntri) # Or with 2 argument version, finds each triangle 3 times. ddd.process(cat,cat, num_threads=2) np.testing.assert_array_equal(ddd.ntri, 3*true_ntri) # Again, NNNCrossCorrelation gets it right in each permutation. dddc.process(cat,cat, num_threads=2) for d in [dddc.n1n2n3, dddc.n1n3n2, dddc.n2n1n3, dddc.n2n3n1, dddc.n3n1n2, dddc.n3n2n1]: np.testing.assert_array_equal(d.ntri, true_ntri) # Invalid to omit file_name config['verbose'] = 0 del config['file_name'] with assert_raises(TypeError): treecorr.corr3(config) config['file_name'] = 'data/nnn_direct_data.dat' # OK to not have rand_file_name # Also, check the automatic setting of output_dots=True when verbose=2. # It's not too annoying if we also set max_top = 0. del config['rand_file_name'] config['verbose'] = 2 config['max_top'] = 0 treecorr.corr3(config) data = np.genfromtxt(config['nnn_file_name'], names=True, skip_header=1) np.testing.assert_array_equal(data['ntri'], true_ntri.flatten()) assert 'zeta' not in data.dtype.names # Check a few basic operations with a NNNCorrelation object. do_pickle(ddd) ddd2 = ddd.copy() ddd2 += ddd np.testing.assert_allclose(ddd2.ntri, 2*ddd.ntri) np.testing.assert_allclose(ddd2.weight, 2*ddd.weight) np.testing.assert_allclose(ddd2.meand1, 2*ddd.meand1) np.testing.assert_allclose(ddd2.meand2, 2*ddd.meand2) np.testing.assert_allclose(ddd2.meand3, 2*ddd.meand3) np.testing.assert_allclose(ddd2.meanlogd1, 2*ddd.meanlogd1) np.testing.assert_allclose(ddd2.meanlogd2, 2*ddd.meanlogd2) np.testing.assert_allclose(ddd2.meanlogd3, 2*ddd.meanlogd3) np.testing.assert_allclose(ddd2.meanu, 2*ddd.meanu) np.testing.assert_allclose(ddd2.meanv, 2*ddd.meanv) ddd2.clear() ddd2 += ddd np.testing.assert_allclose(ddd2.ntri, ddd.ntri) np.testing.assert_allclose(ddd2.weight, ddd.weight) np.testing.assert_allclose(ddd2.meand1, ddd.meand1) np.testing.assert_allclose(ddd2.meand2, ddd.meand2) np.testing.assert_allclose(ddd2.meand3, ddd.meand3) np.testing.assert_allclose(ddd2.meanlogd1, ddd.meanlogd1) np.testing.assert_allclose(ddd2.meanlogd2, ddd.meanlogd2) np.testing.assert_allclose(ddd2.meanlogd3, ddd.meanlogd3) np.testing.assert_allclose(ddd2.meanu, ddd.meanu) np.testing.assert_allclose(ddd2.meanv, ddd.meanv) ascii_name = 'output/nnn_ascii.txt' ddd.write(ascii_name, precision=16) ddd3 = treecorr.NNNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins, min_u=min_u, max_u=max_u, nubins=nubins, min_v=min_v, max_v=max_v, nvbins=nvbins) ddd3.read(ascii_name) np.testing.assert_allclose(ddd3.ntri, ddd.ntri) np.testing.assert_allclose(ddd3.weight, ddd.weight) np.testing.assert_allclose(ddd3.meand1, ddd.meand1) np.testing.assert_allclose(ddd3.meand2, ddd.meand2) np.testing.assert_allclose(ddd3.meand3, ddd.meand3) np.testing.assert_allclose(ddd3.meanlogd1, ddd.meanlogd1) np.testing.assert_allclose(ddd3.meanlogd2, ddd.meanlogd2) np.testing.assert_allclose(ddd3.meanlogd3, ddd.meanlogd3) np.testing.assert_allclose(ddd3.meanu, ddd.meanu) np.testing.assert_allclose(ddd3.meanv, ddd.meanv) with assert_raises(TypeError): ddd2 += config ddd4 = treecorr.NNNCorrelation(min_sep=min_sep/2, max_sep=max_sep, nbins=nbins, min_u=min_u, max_u=max_u, nubins=nubins, min_v=min_v, max_v=max_v, nvbins=nvbins) with assert_raises(ValueError): ddd2 += ddd4 ddd5 = treecorr.NNNCorrelation(min_sep=min_sep, max_sep=max_sep*2, 
nbins=nbins, min_u=min_u, max_u=max_u, nubins=nubins, min_v=min_v, max_v=max_v, nvbins=nvbins) with assert_raises(ValueError): ddd2 += ddd5 ddd6 = treecorr.NNNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins*2, min_u=min_u, max_u=max_u, nubins=nubins, min_v=min_v, max_v=max_v, nvbins=nvbins) with assert_raises(ValueError): ddd2 += ddd6 ddd7 = treecorr.NNNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins, min_u=min_u-0.1, max_u=max_u, nubins=nubins, min_v=min_v, max_v=max_v, nvbins=nvbins) with assert_raises(ValueError): ddd2 += ddd7 ddd8 = treecorr.NNNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins, min_u=min_u, max_u=max_u+0.1, nubins=nubins, min_v=min_v, max_v=max_v, nvbins=nvbins) with assert_raises(ValueError): ddd2 += ddd8 ddd9 = treecorr.NNNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins, min_u=min_u, max_u=max_u, nubins=nubins*2, min_v=min_v, max_v=max_v, nvbins=nvbins) with assert_raises(ValueError): ddd2 += ddd9 ddd10 = treecorr.NNNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins, min_u=min_u, max_u=max_u, nubins=nubins, min_v=min_v-0.1, max_v=max_v, nvbins=nvbins) with assert_raises(ValueError): ddd2 += ddd10 ddd11 = treecorr.NNNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins, min_u=min_u, max_u=max_u, nubins=nubins, min_v=min_v, max_v=max_v+0.1, nvbins=nvbins) with assert_raises(ValueError): ddd2 += ddd11 ddd12 = treecorr.NNNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins, min_u=min_u, max_u=max_u, nubins=nubins, min_v=min_v, max_v=max_v, nvbins=nvbins*2) with assert_raises(ValueError): ddd2 += ddd12 # Check that adding results with different coords or metric emits a warning. cat2 = treecorr.Catalog(x=x, y=y, z=x) with CaptureLog() as cl: ddd13 = treecorr.NNNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins, min_u=min_u, max_u=max_u, nubins=nubins, min_v=min_v, max_v=max_v, nvbins=nvbins, logger=cl.logger) ddd13.process_auto(cat2) ddd13 += ddd2 print(cl.output) assert "Detected a change in catalog coordinate systems" in cl.output with CaptureLog() as cl: ddd14 = treecorr.NNNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins, min_u=min_u, max_u=max_u, nubins=nubins, min_v=min_v, max_v=max_v, nvbins=nvbins, logger=cl.logger) ddd14.process_auto(cat2, metric='Arc') ddd14 += ddd2 assert "Detected a change in metric" in cl.output fits_name = 'output/nnn_fits.fits' ddd.write(fits_name) ddd15 = treecorr.NNNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins, min_u=min_u, max_u=max_u, nubins=nubins, min_v=min_v, max_v=max_v, nvbins=nvbins) ddd15.read(fits_name) np.testing.assert_allclose(ddd15.ntri, ddd.ntri) np.testing.assert_allclose(ddd15.weight, ddd.weight) np.testing.assert_allclose(ddd15.meand1, ddd.meand1) np.testing.assert_allclose(ddd15.meand2, ddd.meand2) np.testing.assert_allclose(ddd15.meand3, ddd.meand3) np.testing.assert_allclose(ddd15.meanlogd1, ddd.meanlogd1) np.testing.assert_allclose(ddd15.meanlogd2, ddd.meanlogd2) np.testing.assert_allclose(ddd15.meanlogd3, ddd.meanlogd3) np.testing.assert_allclose(ddd15.meanu, ddd.meanu) np.testing.assert_allclose(ddd15.meanv, ddd.meanv) @timer def test_direct_count_cross(): # If the catalogs are small enough, we can do a direct count of the number of triangles # to see if comes out right. This should exactly match the treecorr code if brute=True ngal = 50 s = 10. 
rng = np.random.RandomState(8675309) x1 = rng.normal(0,s, (ngal,) ) y1 = rng.normal(0,s, (ngal,) ) cat1 = treecorr.Catalog(x=x1, y=y1) x2 = rng.normal(0,s, (ngal,) ) y2 = rng.normal(0,s, (ngal,) ) cat2 = treecorr.Catalog(x=x2, y=y2) x3 = rng.normal(0,s, (ngal,) ) y3 = rng.normal(0,s, (ngal,) ) cat3 = treecorr.Catalog(x=x3, y=y3) min_sep = 1. max_sep = 50. nbins = 50 min_u = 0.13 max_u = 0.89 nubins = 10 min_v = 0.13 max_v = 0.59 nvbins = 10 ddd = treecorr.NNNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins, min_u=min_u, max_u=max_u, nubins=nubins, min_v=min_v, max_v=max_v, nvbins=nvbins, brute=True, verbose=1) ddd.process(cat1, cat2, cat3) #print('ddd.ntri = ',ddd.ntri) log_min_sep = np.log(min_sep) log_max_sep = np.log(max_sep) true_ntri_123 = np.zeros( (nbins, nubins, 2*nvbins) ) true_ntri_132 = np.zeros( (nbins, nubins, 2*nvbins) ) true_ntri_213 = np.zeros( (nbins, nubins, 2*nvbins) ) true_ntri_231 = np.zeros( (nbins, nubins, 2*nvbins) ) true_ntri_312 = np.zeros( (nbins, nubins, 2*nvbins) ) true_ntri_321 = np.zeros( (nbins, nubins, 2*nvbins) ) bin_size = (log_max_sep - log_min_sep) / nbins ubin_size = (max_u-min_u) / nubins vbin_size = (max_v-min_v) / nvbins for i in range(ngal): for j in range(ngal): for k in range(ngal): dij = np.sqrt((x1[i]-x2[j])**2 + (y1[i]-y2[j])**2) dik = np.sqrt((x1[i]-x3[k])**2 + (y1[i]-y3[k])**2) djk = np.sqrt((x2[j]-x3[k])**2 + (y2[j]-y3[k])**2) if dij == 0.: continue if dik == 0.: continue if djk == 0.: continue if dij < dik: if dik < djk: d3 = dij; d2 = dik; d1 = djk ccw = is_ccw(x1[i],y1[i],x2[j],y2[j],x3[k],y3[k]) true_ntri = true_ntri_123 elif dij < djk: d3 = dij; d2 = djk; d1 = dik ccw = is_ccw(x2[j],y2[j],x1[i],y1[i],x3[k],y3[k]) true_ntri = true_ntri_213 else: d3 = djk; d2 = dij; d1 = dik ccw = is_ccw(x2[j],y2[j],x3[k],y3[k],x1[i],y1[i]) true_ntri = true_ntri_231 else: if dij < djk: d3 = dik; d2 = dij; d1 = djk ccw = is_ccw(x1[i],y1[i],x3[k],y3[k],x2[j],y2[j]) true_ntri = true_ntri_132 elif dik < djk: d3 = dik; d2 = djk; d1 = dij ccw = is_ccw(x3[k],y3[k],x1[i],y1[i],x2[j],y2[j]) true_ntri = true_ntri_312 else: d3 = djk; d2 = dik; d1 = dij ccw = is_ccw(x3[k],y3[k],x2[j],y2[j],x1[i],y1[i]) true_ntri = true_ntri_321 r = d2 u = d3/d2 v = (d1-d2)/d3 if r < min_sep or r >= max_sep: continue if u < min_u or u >= max_u: continue if v < min_v or v >= max_v: continue if not ccw: v = -v kr = int(np.floor( (np.log(r)-log_min_sep) / bin_size )) ku = int(np.floor( (u-min_u) / ubin_size )) if v > 0: kv = int(np.floor( (v-min_v) / vbin_size )) + nvbins else: kv = int(np.floor( (v-(-max_v)) / vbin_size )) assert 0 <= kr < nbins assert 0 <= ku < nubins assert 0 <= kv < 2*nvbins true_ntri[kr,ku,kv] += 1 # With the regular NNNCorrelation class, we end up with the sum of all permutations. true_ntri_sum = true_ntri_123 + true_ntri_132 + true_ntri_213 + true_ntri_231 +\ true_ntri_312 + true_ntri_321 #print('true_ntri = ',true_ntri_sum) #print('diff = ',ddd.ntri - true_ntri_sum) np.testing.assert_array_equal(ddd.ntri, true_ntri_sum) # Now repeat with the full CrossCorrelation class, which distinguishes the permutations. 
dddc = treecorr.NNNCrossCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins, min_u=min_u, max_u=max_u, nubins=nubins, min_v=min_v, max_v=max_v, nvbins=nvbins, brute=True, verbose=1) dddc.process(cat1, cat2, cat3) #print('true_ntri_123 = ',true_ntri_123) #print('diff = ',dddc.n1n2n3.ntri - true_ntri_123) np.testing.assert_array_equal(dddc.n1n2n3.ntri, true_ntri_123) np.testing.assert_array_equal(dddc.n1n3n2.ntri, true_ntri_132) np.testing.assert_array_equal(dddc.n2n1n3.ntri, true_ntri_213) np.testing.assert_array_equal(dddc.n2n3n1.ntri, true_ntri_231) np.testing.assert_array_equal(dddc.n3n1n2.ntri, true_ntri_312) np.testing.assert_array_equal(dddc.n3n2n1.ntri, true_ntri_321) # Repeat with binslop = 0 ddd = treecorr.NNNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins, min_u=min_u, max_u=max_u, nubins=nubins, min_v=min_v, max_v=max_v, nvbins=nvbins, bin_slop=0, verbose=1) ddd.process(cat1, cat2, cat3) #print('binslop > 0: ddd.ntri = ',ddd.ntri) #print('diff = ',ddd.ntri - true_ntri_sum) np.testing.assert_array_equal(ddd.ntri, true_ntri_sum) # And again with no top-level recursion ddd = treecorr.NNNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins, min_u=min_u, max_u=max_u, nubins=nubins, min_v=min_v, max_v=max_v, nvbins=nvbins, bin_slop=0, verbose=1, max_top=0) ddd.process(cat1, cat2, cat3) #print('max_top = 0: ddd.ntri = ',ddd.ntri) #print('true_ntri = ',true_ntri_sum) #print('diff = ',ddd.ntri - true_ntri_sum) np.testing.assert_array_equal(ddd.ntri, true_ntri_sum) # Error to have cat3, but not cat2 with assert_raises(ValueError): ddd.process(cat1, cat3=cat3) # Check a few basic operations with a NNCrossCorrelation object. do_pickle(dddc) dddc2 = dddc.copy() dddc2 += dddc for perm in ['n1n2n3', 'n1n3n2', 'n2n1n3', 'n2n3n1', 'n3n1n2', 'n3n2n1']: d2 = getattr(dddc2, perm) d1 = getattr(dddc, perm) np.testing.assert_allclose(d2.ntri, 2*d1.ntri) np.testing.assert_allclose(d2.ntri, 2*d1.ntri) np.testing.assert_allclose(d2.ntri, 2*d1.ntri) np.testing.assert_allclose(d2.ntri, 2*d1.ntri) np.testing.assert_allclose(d2.ntri, 2*d1.ntri) np.testing.assert_allclose(d2.ntri, 2*d1.ntri) np.testing.assert_allclose(d2.meand1, 2*d1.meand1) np.testing.assert_allclose(d2.meand2, 2*d1.meand2) np.testing.assert_allclose(d2.meand3, 2*d1.meand3) np.testing.assert_allclose(d2.meanlogd1, 2*d1.meanlogd1) np.testing.assert_allclose(d2.meanlogd2, 2*d1.meanlogd2) np.testing.assert_allclose(d2.meanlogd3, 2*d1.meanlogd3) np.testing.assert_allclose(d2.meanu, 2*d1.meanu) np.testing.assert_allclose(d2.meanv, 2*d1.meanv) dddc2.clear() dddc2 += dddc for perm in ['n1n2n3', 'n1n3n2', 'n2n1n3', 'n2n3n1', 'n3n1n2', 'n3n2n1']: d2 = getattr(dddc2, perm) d1 = getattr(dddc, perm) np.testing.assert_allclose(d2.ntri, d1.ntri) np.testing.assert_allclose(d2.ntri, d1.ntri) np.testing.assert_allclose(d2.ntri, d1.ntri) np.testing.assert_allclose(d2.ntri, d1.ntri) np.testing.assert_allclose(d2.ntri, d1.ntri) np.testing.assert_allclose(d2.ntri, d1.ntri) np.testing.assert_allclose(d2.meand1, d1.meand1) np.testing.assert_allclose(d2.meand2, d1.meand2) np.testing.assert_allclose(d2.meand3, d1.meand3) np.testing.assert_allclose(d2.meanlogd1, d1.meanlogd1) np.testing.assert_allclose(d2.meanlogd2, d1.meanlogd2) np.testing.assert_allclose(d2.meanlogd3, d1.meanlogd3) np.testing.assert_allclose(d2.meanu, d1.meanu) np.testing.assert_allclose(d2.meanv, d1.meanv) with assert_raises(TypeError): dddc2 += {} # not an NNNCrossCorrelation with assert_raises(TypeError): dddc2 += ddd # not an NNNCrossCorrelation dddc4 = 
treecorr.NNNCrossCorrelation(min_sep=min_sep/2, max_sep=max_sep, nbins=nbins, min_u=min_u, max_u=max_u, nubins=nubins, min_v=min_v, max_v=max_v, nvbins=nvbins) with assert_raises(ValueError): dddc2 += dddc4 # binning doesn't match # Test I/O ascii_name = 'output/nnnc_ascii.txt' dddc.write(ascii_name, precision=16) dddc3 = treecorr.NNNCrossCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins, min_u=min_u, max_u=max_u, nubins=nubins, min_v=min_v, max_v=max_v, nvbins=nvbins) dddc3.read(ascii_name) for perm in ['n1n2n3', 'n1n3n2', 'n2n1n3', 'n2n3n1', 'n3n1n2', 'n3n2n1']: d2 = getattr(dddc3, perm) d1 = getattr(dddc, perm) np.testing.assert_allclose(d2.ntri, d1.ntri) np.testing.assert_allclose(d2.ntri, d1.ntri) np.testing.assert_allclose(d2.ntri, d1.ntri) np.testing.assert_allclose(d2.ntri, d1.ntri) np.testing.assert_allclose(d2.ntri, d1.ntri) np.testing.assert_allclose(d2.ntri, d1.ntri) np.testing.assert_allclose(d2.meand1, d1.meand1) np.testing.assert_allclose(d2.meand2, d1.meand2) np.testing.assert_allclose(d2.meand3, d1.meand3) np.testing.assert_allclose(d2.meanlogd1, d1.meanlogd1) np.testing.assert_allclose(d2.meanlogd2, d1.meanlogd2) np.testing.assert_allclose(d2.meanlogd3, d1.meanlogd3) np.testing.assert_allclose(d2.meanu, d1.meanu) np.testing.assert_allclose(d2.meanv, d1.meanv) fits_name = 'output/nnnc_fits.fits' dddc.write(fits_name) dddc4 = treecorr.NNNCrossCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins, min_u=min_u, max_u=max_u, nubins=nubins, min_v=min_v, max_v=max_v, nvbins=nvbins) dddc4.read(fits_name) for perm in ['n1n2n3', 'n1n3n2', 'n2n1n3', 'n2n3n1', 'n3n1n2', 'n3n2n1']: d2 = getattr(dddc4, perm) d1 = getattr(dddc, perm) np.testing.assert_allclose(d2.ntri, d1.ntri) np.testing.assert_allclose(d2.ntri, d1.ntri) np.testing.assert_allclose(d2.ntri, d1.ntri) np.testing.assert_allclose(d2.ntri, d1.ntri) np.testing.assert_allclose(d2.ntri, d1.ntri) np.testing.assert_allclose(d2.ntri, d1.ntri) np.testing.assert_allclose(d2.meand1, d1.meand1) np.testing.assert_allclose(d2.meand2, d1.meand2) np.testing.assert_allclose(d2.meand3, d1.meand3) np.testing.assert_allclose(d2.meanlogd1, d1.meanlogd1) np.testing.assert_allclose(d2.meanlogd2, d1.meanlogd2) np.testing.assert_allclose(d2.meanlogd3, d1.meanlogd3) np.testing.assert_allclose(d2.meanu, d1.meanu) np.testing.assert_allclose(d2.meanv, d1.meanv) try: import h5py except ImportError: print('Skipping hdf5 output file, since h5py not installed.') return hdf5_name = 'output/nnnc_hdf5.hdf5' dddc.write(hdf5_name) dddc5 = treecorr.NNNCrossCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins, min_u=min_u, max_u=max_u, nubins=nubins, min_v=min_v, max_v=max_v, nvbins=nvbins) dddc5.read(hdf5_name) for perm in ['n1n2n3', 'n1n3n2', 'n2n1n3', 'n2n3n1', 'n3n1n2', 'n3n2n1']: d2 = getattr(dddc5, perm) d1 = getattr(dddc, perm) np.testing.assert_allclose(d2.ntri, d1.ntri) np.testing.assert_allclose(d2.ntri, d1.ntri) np.testing.assert_allclose(d2.ntri, d1.ntri) np.testing.assert_allclose(d2.ntri, d1.ntri) np.testing.assert_allclose(d2.ntri, d1.ntri) np.testing.assert_allclose(d2.ntri, d1.ntri) np.testing.assert_allclose(d2.meand1, d1.meand1) np.testing.assert_allclose(d2.meand2, d1.meand2) np.testing.assert_allclose(d2.meand3, d1.meand3) np.testing.assert_allclose(d2.meanlogd1, d1.meanlogd1) np.testing.assert_allclose(d2.meanlogd2, d1.meanlogd2) np.testing.assert_allclose(d2.meanlogd3, d1.meanlogd3) np.testing.assert_allclose(d2.meanu, d1.meanu) np.testing.assert_allclose(d2.meanv, d1.meanv) @timer def test_direct_count_cross12(): # Check 
the 1-2 cross correlation ngal = 50 s = 10. rng = np.random.RandomState(8675309) x1 = rng.normal(0,s, (ngal,) ) y1 = rng.normal(0,s, (ngal,) ) cat1 = treecorr.Catalog(x=x1, y=y1) x2 = rng.normal(0,s, (ngal,) ) y2 = rng.normal(0,s, (ngal,) ) cat2 = treecorr.Catalog(x=x2, y=y2) min_sep = 1. max_sep = 50. nbins = 50 min_u = 0.13 max_u = 0.89 nubins = 10 min_v = 0.13 max_v = 0.59 nvbins = 10 ddd = treecorr.NNNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins, min_u=min_u, max_u=max_u, nubins=nubins, min_v=min_v, max_v=max_v, nvbins=nvbins, brute=True, verbose=1) ddd.process(cat1, cat2) log_min_sep = np.log(min_sep) log_max_sep = np.log(max_sep) true_ntri_122 = np.zeros( (nbins, nubins, 2*nvbins) ) true_ntri_212 = np.zeros( (nbins, nubins, 2*nvbins) ) true_ntri_221 = np.zeros( (nbins, nubins, 2*nvbins) ) bin_size = (log_max_sep - log_min_sep) / nbins ubin_size = (max_u-min_u) / nubins vbin_size = (max_v-min_v) / nvbins for i in range(ngal): for j in range(ngal): for k in range(j+1,ngal): dij = np.sqrt((x1[i]-x2[j])**2 + (y1[i]-y2[j])**2) dik = np.sqrt((x1[i]-x2[k])**2 + (y1[i]-y2[k])**2) djk = np.sqrt((x2[j]-x2[k])**2 + (y2[j]-y2[k])**2) if dij == 0.: continue if dik == 0.: continue if djk == 0.: continue if dij < dik: if dik < djk: d3 = dij; d2 = dik; d1 = djk ccw = is_ccw(x1[i],y1[i],x2[j],y2[j],x2[k],y2[k]) true_ntri = true_ntri_122 elif dij < djk: d3 = dij; d2 = djk; d1 = dik ccw = is_ccw(x2[j],y2[j],x1[i],y1[i],x2[k],y2[k]) true_ntri = true_ntri_212 else: d3 = djk; d2 = dij; d1 = dik ccw = is_ccw(x2[j],y2[j],x2[k],y2[k],x1[i],y1[i]) true_ntri = true_ntri_221 else: if dij < djk: d3 = dik; d2 = dij; d1 = djk ccw = is_ccw(x1[i],y1[i],x2[k],y2[k],x2[j],y2[j]) true_ntri = true_ntri_122 elif dik < djk: d3 = dik; d2 = djk; d1 = dij ccw = is_ccw(x2[k],y2[k],x1[i],y1[i],x2[j],y2[j]) true_ntri = true_ntri_212 else: d3 = djk; d2 = dik; d1 = dij ccw = is_ccw(x2[k],y2[k],x2[j],y2[j],x1[i],y1[i]) true_ntri = true_ntri_221 r = d2 u = d3/d2 v = (d1-d2)/d3 if r < min_sep or r >= max_sep: continue if u < min_u or u >= max_u: continue if v < min_v or v >= max_v: continue if not ccw: v = -v kr = int(np.floor( (np.log(r)-log_min_sep) / bin_size )) ku = int(np.floor( (u-min_u) / ubin_size )) if v > 0: kv = int(np.floor( (v-min_v) / vbin_size )) + nvbins else: kv = int(np.floor( (v-(-max_v)) / vbin_size )) assert 0 <= kr < nbins assert 0 <= ku < nubins assert 0 <= kv < 2*nvbins true_ntri[kr,ku,kv] += 1 # With the regular NNNCorrelation class, we end up with the sum of all permutations. true_ntri_sum = true_ntri_122 + true_ntri_212 + true_ntri_221 #print('ddd.ntri = ',ddd.ntri) #print('true_ntri = ',true_ntri_sum) #print('diff = ',ddd.ntri - true_ntri_sum) np.testing.assert_array_equal(ddd.ntri, true_ntri_sum) # Now repeat with the full CrossCorrelation class, which distinguishes the permutations. 
dddc = treecorr.NNNCrossCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins, min_u=min_u, max_u=max_u, nubins=nubins, min_v=min_v, max_v=max_v, nvbins=nvbins, brute=True, verbose=1) dddc.process(cat1, cat2) #print('true_ntri_122 = ',true_ntri_122) #print('diff = ',dddc.n1n2n3.ntri - true_ntri_122) np.testing.assert_array_equal(dddc.n1n2n3.ntri, true_ntri_122) np.testing.assert_array_equal(dddc.n1n3n2.ntri, true_ntri_122) np.testing.assert_array_equal(dddc.n2n1n3.ntri, true_ntri_212) np.testing.assert_array_equal(dddc.n2n3n1.ntri, true_ntri_221) np.testing.assert_array_equal(dddc.n3n1n2.ntri, true_ntri_212) np.testing.assert_array_equal(dddc.n3n2n1.ntri, true_ntri_221) # Repeat with binslop = 0 ddd = treecorr.NNNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins, min_u=min_u, max_u=max_u, nubins=nubins, min_v=min_v, max_v=max_v, nvbins=nvbins, bin_slop=0, verbose=1) ddd.process(cat1, cat2) #print('binslop > 0: ddd.ntri = ',ddd.ntri) #print('diff = ',ddd.ntri - true_ntri_sum) np.testing.assert_array_equal(ddd.ntri, true_ntri_sum) # And again with no top-level recursion ddd = treecorr.NNNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins, min_u=min_u, max_u=max_u, nubins=nubins, min_v=min_v, max_v=max_v, nvbins=nvbins, bin_slop=0, verbose=1, max_top=0) ddd.process(cat1, cat2) #print('max_top = 0: ddd.ntri = ',ddd.ntri) #print('true_ntri = ',true_ntri_sum) #print('diff = ',ddd.ntri - true_ntri_sum) np.testing.assert_array_equal(ddd.ntri, true_ntri_sum) # Split into patches to test the list-based version of the code. cat1 = treecorr.Catalog(x=x1, y=y1, npatch=10) cat2 = treecorr.Catalog(x=x2, y=y2, npatch=10) ddd.process(cat1, cat2) np.testing.assert_array_equal(ddd.ntri, true_ntri_sum) dddc.process(cat1, cat2) np.testing.assert_array_equal(dddc.n1n2n3.ntri, true_ntri_122) np.testing.assert_array_equal(dddc.n1n3n2.ntri, true_ntri_122) np.testing.assert_array_equal(dddc.n2n1n3.ntri, true_ntri_212) np.testing.assert_array_equal(dddc.n2n3n1.ntri, true_ntri_221) np.testing.assert_array_equal(dddc.n3n1n2.ntri, true_ntri_212) np.testing.assert_array_equal(dddc.n3n2n1.ntri, true_ntri_221) @timer def test_direct_spherical(): # Repeat in spherical coords ngal = 50 s = 10. rng = np.random.RandomState(8675309) x = rng.normal(0,s, (ngal,) ) y = rng.normal(0,s, (ngal,) ) + 200 # Put everything at large y, so small angle on sky z = rng.normal(0,s, (ngal,) ) w = rng.random_sample(ngal) ra, dec = coord.CelestialCoord.xyz_to_radec(x,y,z) cat = treecorr.Catalog(ra=ra, dec=dec, ra_units='rad', dec_units='rad', w=w) min_sep = 1. 
bin_size = 0.2 nrbins = 10 nubins = 5 nvbins = 5 ddd = treecorr.NNNCorrelation(min_sep=min_sep, bin_size=bin_size, nbins=nrbins, sep_units='deg', brute=True) ddd.process(cat, num_threads=2) r = np.sqrt(x**2 + y**2 + z**2) x /= r; y /= r; z /= r true_ntri = np.zeros((nrbins, nubins, 2*nvbins), dtype=int) true_weight = np.zeros((nrbins, nubins, 2*nvbins), dtype=float) rad_min_sep = min_sep * coord.degrees / coord.radians for i in range(ngal): for j in range(i+1,ngal): for k in range(j+1,ngal): d12 = np.sqrt((x[i]-x[j])**2 + (y[i]-y[j])**2 + (z[i]-z[j])**2) d23 = np.sqrt((x[j]-x[k])**2 + (y[j]-y[k])**2 + (z[j]-z[k])**2) d31 = np.sqrt((x[k]-x[i])**2 + (y[k]-y[i])**2 + (z[k]-z[i])**2) d3, d2, d1 = sorted([d12, d23, d31]) rindex = np.floor(np.log(d2/rad_min_sep) / bin_size).astype(int) if rindex < 0 or rindex >= nrbins: continue if [d1, d2, d3] == [d23, d31, d12]: ii,jj,kk = i,j,k elif [d1, d2, d3] == [d23, d12, d31]: ii,jj,kk = i,k,j elif [d1, d2, d3] == [d31, d12, d23]: ii,jj,kk = j,k,i elif [d1, d2, d3] == [d31, d23, d12]: ii,jj,kk = j,i,k elif [d1, d2, d3] == [d12, d23, d31]: ii,jj,kk = k,i,j elif [d1, d2, d3] == [d12, d31, d23]: ii,jj,kk = k,j,i else: assert False # Now use ii, jj, kk rather than i,j,k, to get the indices # that correspond to the points in the right order. u = d3/d2 v = (d1-d2)/d3 if ( ((x[jj]-x[ii])*(y[kk]-y[ii]) - (x[kk]-x[ii])*(y[jj]-y[ii])) * z[ii] + ((y[jj]-y[ii])*(z[kk]-z[ii]) - (y[kk]-y[ii])*(z[jj]-z[ii])) * x[ii] + ((z[jj]-z[ii])*(x[kk]-x[ii]) - (z[kk]-z[ii])*(x[jj]-x[ii])) * y[ii] ) > 0: v = -v uindex = np.floor(u / bin_size).astype(int) assert 0 <= uindex < nubins vindex = np.floor((v+1) / bin_size).astype(int) assert 0 <= vindex < 2*nvbins www = w[i] * w[j] * w[k] true_ntri[rindex,uindex,vindex] += 1 true_weight[rindex,uindex,vindex] += www np.testing.assert_array_equal(ddd.ntri, true_ntri) np.testing.assert_allclose(ddd.weight, true_weight, rtol=1.e-5, atol=1.e-8) # Check that running via the corr3 script works correctly. config = treecorr.config.read_config('configs/nnn_direct_spherical.yaml') cat.write(config['file_name']) treecorr.corr3(config) data = fitsio.read(config['nnn_file_name']) np.testing.assert_allclose(data['r_nom'], ddd.rnom.flatten()) np.testing.assert_allclose(data['u_nom'], ddd.u.flatten()) np.testing.assert_allclose(data['v_nom'], ddd.v.flatten()) np.testing.assert_allclose(data['ntri'], ddd.ntri.flatten()) np.testing.assert_allclose(data['DDD'], ddd.weight.flatten()) # Repeat with binslop = 0 # And don't do any top-level recursion so we actually test not going to the leaves. ddd = treecorr.NNNCorrelation(min_sep=min_sep, bin_size=bin_size, nbins=nrbins, sep_units='deg', bin_slop=0, max_top=0) ddd.process(cat) np.testing.assert_array_equal(ddd.ntri, true_ntri) np.testing.assert_allclose(ddd.weight, true_weight, rtol=1.e-5, atol=1.e-8) @timer def test_direct_arc(): # Repeat the spherical test with metric='Arc' ngal = 5 s = 10. rng = np.random.RandomState(8675309) x = rng.normal(0,s, (ngal,) ) y = rng.normal(0,s, (ngal,) ) + 200 # Large angles this time. z = rng.normal(0,s, (ngal,) ) w = rng.random_sample(ngal) ra, dec = coord.CelestialCoord.xyz_to_radec(x,y,z) cat = treecorr.Catalog(ra=ra, dec=dec, ra_units='rad', dec_units='rad', w=w) min_sep = 1. max_sep = 180. 
nrbins = 50 nubins = 5 nvbins = 5 bin_size = np.log((max_sep / min_sep)) / nrbins ubin_size = 0.2 vbin_size = 0.2 ddd = treecorr.NNNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nrbins, nubins=nubins, ubin_size=ubin_size, nvbins=nvbins, vbin_size=vbin_size, sep_units='deg', brute=True) ddd.process(cat, metric='Arc') r = np.sqrt(x**2 + y**2 + z**2) x /= r; y /= r; z /= r true_ntri = np.zeros((nrbins, nubins, 2*nvbins), dtype=int) true_weight = np.zeros((nrbins, nubins, 2*nvbins), dtype=float) c = [coord.CelestialCoord(r*coord.radians, d*coord.radians) for (r,d) in zip(ra, dec)] for i in range(ngal): for j in range(i+1,ngal): for k in range(j+1,ngal): d12 = c[i].distanceTo(c[j]) / coord.degrees d23 = c[j].distanceTo(c[k]) / coord.degrees d31 = c[k].distanceTo(c[i]) / coord.degrees d3, d2, d1 = sorted([d12, d23, d31]) rindex = np.floor(np.log(d2/min_sep) / bin_size).astype(int) if rindex < 0 or rindex >= nrbins: continue if [d1, d2, d3] == [d23, d31, d12]: ii,jj,kk = i,j,k elif [d1, d2, d3] == [d23, d12, d31]: ii,jj,kk = i,k,j elif [d1, d2, d3] == [d31, d12, d23]: ii,jj,kk = j,k,i elif [d1, d2, d3] == [d31, d23, d12]: ii,jj,kk = j,i,k elif [d1, d2, d3] == [d12, d23, d31]: ii,jj,kk = k,i,j elif [d1, d2, d3] == [d12, d31, d23]: ii,jj,kk = k,j,i else: assert False # Now use ii, jj, kk rather than i,j,k, to get the indices # that correspond to the points in the right order. u = d3/d2 v = (d1-d2)/d3 if ( ((x[jj]-x[ii])*(y[kk]-y[ii]) - (x[kk]-x[ii])*(y[jj]-y[ii])) * z[ii] + ((y[jj]-y[ii])*(z[kk]-z[ii]) - (y[kk]-y[ii])*(z[jj]-z[ii])) * x[ii] + ((z[jj]-z[ii])*(x[kk]-x[ii]) - (z[kk]-z[ii])*(x[jj]-x[ii])) * y[ii] ) > 0: v = -v uindex = np.floor(u / ubin_size).astype(int) assert 0 <= uindex < nubins vindex = np.floor((v+1) / vbin_size).astype(int) assert 0 <= vindex < 2*nvbins www = w[i] * w[j] * w[k] true_ntri[rindex,uindex,vindex] += 1 true_weight[rindex,uindex,vindex] += www np.testing.assert_array_equal(ddd.ntri, true_ntri) np.testing.assert_allclose(ddd.weight, true_weight, rtol=1.e-5, atol=1.e-8) # Check that running via the corr3 script works correctly. config = treecorr.config.read_config('configs/nnn_direct_arc.yaml') cat.write(config['file_name']) treecorr.corr3(config) data = fitsio.read(config['nnn_file_name']) np.testing.assert_allclose(data['r_nom'], ddd.rnom.flatten()) np.testing.assert_allclose(data['u_nom'], ddd.u.flatten()) np.testing.assert_allclose(data['v_nom'], ddd.v.flatten()) np.testing.assert_allclose(data['ntri'], ddd.ntri.flatten()) np.testing.assert_allclose(data['DDD'], ddd.weight.flatten()) # Repeat with binslop = 0 # And don't do any top-level recursion so we actually test not going to the leaves. ddd = treecorr.NNNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nrbins, nubins=nubins, ubin_size=ubin_size, nvbins=nvbins, vbin_size=vbin_size, sep_units='deg', bin_slop=0, max_top=0) ddd.process(cat) np.testing.assert_array_equal(ddd.ntri, true_ntri) np.testing.assert_allclose(ddd.weight, true_weight, rtol=1.e-5, atol=1.e-8) @timer def test_direct_partial(): # Test the two ways to only use parts of a catalog: ngal = 100 s = 10. rng = np.random.RandomState(8675309) x1 = rng.normal(0,s, (ngal,) ) y1 = rng.normal(0,s, (ngal,) ) cat1a = treecorr.Catalog(x=x1, y=y1, first_row=28, last_row=84) x2 = rng.normal(0,s, (ngal,) ) y2 = rng.normal(0,s, (ngal,) ) cat2a = treecorr.Catalog(x=x2, y=y2, first_row=48, last_row=99) x3 = rng.normal(0,s, (ngal,) ) y3 = rng.normal(0,s, (ngal,) ) cat3a = treecorr.Catalog(x=x3, y=y3, first_row=22, last_row=67) min_sep = 1. max_sep = 50. 
nbins = 50 min_u = 0.13 max_u = 0.89 nubins = 10 min_v = 0.13 max_v = 0.59 nvbins = 10 ddda = treecorr.NNNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins, min_u=min_u, max_u=max_u, nubins=nubins, min_v=min_v, max_v=max_v, nvbins=nvbins, brute=True) ddda.process(cat1a, cat2a, cat3a) #print('ddda.ntri = ',ddda.ntri) log_min_sep = np.log(min_sep) log_max_sep = np.log(max_sep) true_ntri_123 = np.zeros( (nbins, nubins, 2*nvbins) ) true_ntri_132 = np.zeros( (nbins, nubins, 2*nvbins) ) true_ntri_213 = np.zeros( (nbins, nubins, 2*nvbins) ) true_ntri_231 = np.zeros( (nbins, nubins, 2*nvbins) ) true_ntri_312 = np.zeros( (nbins, nubins, 2*nvbins) ) true_ntri_321 = np.zeros( (nbins, nubins, 2*nvbins) ) bin_size = (log_max_sep - log_min_sep) / nbins ubin_size = (max_u-min_u) / nubins vbin_size = (max_v-min_v) / nvbins for i in range(27,84): for j in range(47,99): for k in range(21,67): dij = np.sqrt((x1[i]-x2[j])**2 + (y1[i]-y2[j])**2) dik = np.sqrt((x1[i]-x3[k])**2 + (y1[i]-y3[k])**2) djk = np.sqrt((x2[j]-x3[k])**2 + (y2[j]-y3[k])**2) if dij == 0.: continue if dik == 0.: continue if djk == 0.: continue if dij < dik: if dik < djk: d3 = dij; d2 = dik; d1 = djk ccw = is_ccw(x1[i],y1[i],x2[j],y2[j],x3[k],y3[k]) true_ntri = true_ntri_123 elif dij < djk: d3 = dij; d2 = djk; d1 = dik ccw = is_ccw(x2[j],y2[j],x1[i],y1[i],x3[k],y3[k]) true_ntri = true_ntri_213 else: d3 = djk; d2 = dij; d1 = dik ccw = is_ccw(x2[j],y2[j],x3[k],y3[k],x1[i],y1[i]) true_ntri = true_ntri_231 else: if dij < djk: d3 = dik; d2 = dij; d1 = djk ccw = is_ccw(x1[i],y1[i],x3[k],y3[k],x2[j],y2[j]) true_ntri = true_ntri_132 elif dik < djk: d3 = dik; d2 = djk; d1 = dij ccw = is_ccw(x3[k],y3[k],x1[i],y1[i],x2[j],y2[j]) true_ntri = true_ntri_312 else: d3 = djk; d2 = dik; d1 = dij ccw = is_ccw(x3[k],y3[k],x2[j],y2[j],x1[i],y1[i]) true_ntri = true_ntri_321 assert d1 >= d2 >= d3 r = d2 u = d3/d2 v = (d1-d2)/d3 if r < min_sep or r >= max_sep: continue if u < min_u or u >= max_u: continue if v < min_v or v >= max_v: continue if not ccw: v = -v kr = int(np.floor( (np.log(r)-log_min_sep) / bin_size )) ku = int(np.floor( (u-min_u) / ubin_size )) if v > 0: kv = int(np.floor( (v-min_v) / vbin_size )) + nvbins else: kv = int(np.floor( (v-(-max_v)) / vbin_size )) assert 0 <= kr < nbins assert 0 <= ku < nubins assert 0 <= kv < 2*nvbins true_ntri[kr,ku,kv] += 1 true_ntri_sum = true_ntri_123 + true_ntri_132 + true_ntri_213 + true_ntri_231 +\ true_ntri_312 + true_ntri_321 print('true_ntri = ',true_ntri_sum) print('diff = ',ddda.ntri - true_ntri_sum) np.testing.assert_array_equal(ddda.ntri, true_ntri_sum) # Now with real CrossCorrelation ddda = treecorr.NNNCrossCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins, min_u=min_u, max_u=max_u, nubins=nubins, min_v=min_v, max_v=max_v, nvbins=nvbins, brute=True) ddda.process(cat1a, cat2a, cat3a) #print('132 = ',ddda.n1n3n2.ntri) #print('true 132 = ',true_ntri_132) #print('213 = ',ddda.n2n1n3.ntri) #print('true 213 = ',true_ntri_213) #print('231 = ',ddda.n2n3n1.ntri) #print('true 231 = ',true_ntri_231) #print('311 = ',ddda.n3n1n2.ntri) #print('true 312 = ',true_ntri_312) #print('321 = ',ddda.n3n2n1.ntri) #print('true 321 = ',true_ntri_321) np.testing.assert_array_equal(ddda.n1n2n3.ntri, true_ntri_123) np.testing.assert_array_equal(ddda.n1n3n2.ntri, true_ntri_132) np.testing.assert_array_equal(ddda.n2n1n3.ntri, true_ntri_213) np.testing.assert_array_equal(ddda.n2n3n1.ntri, true_ntri_231) np.testing.assert_array_equal(ddda.n3n1n2.ntri, true_ntri_312) np.testing.assert_array_equal(ddda.n3n2n1.ntri, 
true_ntri_321) # Now check that we get the same thing with all the points, but with w=0 for the ones # we don't want. w1 = np.zeros(ngal) w1[27:84] = 1. w2 = np.zeros(ngal) w2[47:99] = 1. w3 = np.zeros(ngal) w3[21:67] = 1. cat1b = treecorr.Catalog(x=x1, y=y1, w=w1) cat2b = treecorr.Catalog(x=x2, y=y2, w=w2) cat3b = treecorr.Catalog(x=x3, y=y3, w=w3) dddb = treecorr.NNNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins, min_u=min_u, max_u=max_u, nubins=nubins, min_v=min_v, max_v=max_v, nvbins=nvbins, brute=True) dddb.process(cat1b, cat2b, cat3b) #print('dddb.ntri = ',dddb.ntri) #print('diff = ',dddb.ntri - true_ntri_sum) np.testing.assert_array_equal(dddb.ntri, true_ntri_sum) dddb = treecorr.NNNCrossCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins, min_u=min_u, max_u=max_u, nubins=nubins, min_v=min_v, max_v=max_v, nvbins=nvbins, brute=True) dddb.process(cat1b, cat2b, cat3b) #print('dddb.n1n2n3.ntri = ',dddb.n1n2n3.ntri) #print('diff = ',dddb.n1n2n3.ntri - true_ntri) np.testing.assert_array_equal(dddb.n1n2n3.ntri, true_ntri_123) np.testing.assert_array_equal(dddb.n1n3n2.ntri, true_ntri_132) np.testing.assert_array_equal(dddb.n2n1n3.ntri, true_ntri_213) np.testing.assert_array_equal(dddb.n2n3n1.ntri, true_ntri_231) np.testing.assert_array_equal(dddb.n3n1n2.ntri, true_ntri_312) np.testing.assert_array_equal(dddb.n3n2n1.ntri, true_ntri_321) @timer def test_direct_3d_auto(): # This is the same as test_direct_count_auto, but using the 3d correlations ngal = 50 s = 10. rng = np.random.RandomState(8675309) x = rng.normal(312, s, (ngal,) ) y = rng.normal(728, s, (ngal,) ) z = rng.normal(-932, s, (ngal,) ) r = np.sqrt( x*x + y*y + z*z ) dec = np.arcsin(z/r) ra = np.arctan2(y,x) cat = treecorr.Catalog(ra=ra, dec=dec, r=r, ra_units='rad', dec_units='rad') min_sep = 1. max_sep = 50. 
nbins = 50 min_u = 0.13 max_u = 0.89 nubins = 10 min_v = 0.13 max_v = 0.59 nvbins = 10 ddd = treecorr.NNNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins, min_u=min_u, max_u=max_u, nubins=nubins, min_v=min_v, max_v=max_v, nvbins=nvbins, brute=True, verbose=1) ddd.process(cat) #print('ddd.ntri = ',ddd.ntri) log_min_sep = np.log(min_sep) log_max_sep = np.log(max_sep) true_ntri = np.zeros( (nbins, nubins, 2*nvbins) ) bin_size = (log_max_sep - log_min_sep) / nbins ubin_size = (max_u-min_u) / nubins vbin_size = (max_v-min_v) / nvbins for i in range(ngal): for j in range(i+1,ngal): for k in range(j+1,ngal): dij = np.sqrt((x[i]-x[j])**2 + (y[i]-y[j])**2 + (z[i]-z[j])**2) dik = np.sqrt((x[i]-x[k])**2 + (y[i]-y[k])**2 + (z[i]-z[k])**2) djk = np.sqrt((x[j]-x[k])**2 + (y[j]-y[k])**2 + (z[j]-z[k])**2) if dij == 0.: continue if dik == 0.: continue if djk == 0.: continue if dij < dik: if dik < djk: d3 = dij; d2 = dik; d1 = djk ccw = is_ccw_3d(x[i],y[i],z[i],x[j],y[j],z[j],x[k],y[k],z[k]) elif dij < djk: d3 = dij; d2 = djk; d1 = dik ccw = is_ccw_3d(x[j],y[j],z[j],x[i],y[i],z[i],x[k],y[k],z[k]) else: d3 = djk; d2 = dij; d1 = dik ccw = is_ccw_3d(x[j],y[j],z[j],x[k],y[k],z[k],x[i],y[i],z[i]) else: if dij < djk: d3 = dik; d2 = dij; d1 = djk ccw = is_ccw_3d(x[i],y[i],z[i],x[k],y[k],z[k],x[j],y[j],z[j]) elif dik < djk: d3 = dik; d2 = djk; d1 = dij ccw = is_ccw_3d(x[k],y[k],z[k],x[i],y[i],z[i],x[j],y[j],z[j]) else: d3 = djk; d2 = dik; d1 = dij ccw = is_ccw_3d(x[k],y[k],z[k],x[j],y[j],z[j],x[i],y[i],z[i]) r = d2 u = d3/d2 v = (d1-d2)/d3 if r < min_sep or r >= max_sep: continue if u < min_u or u >= max_u: continue if v < min_v or v >= max_v: continue if not ccw: v = -v kr = int(np.floor( (np.log(r)-log_min_sep) / bin_size )) ku = int(np.floor( (u-min_u) / ubin_size )) if v > 0: kv = int(np.floor( (v-min_v) / vbin_size )) + nvbins else: kv = int(np.floor( (v-(-max_v)) / vbin_size )) assert 0 <= kr < nbins assert 0 <= ku < nubins assert 0 <= kv < 2*nvbins true_ntri[kr,ku,kv] += 1 #print('true_ntri => ',true_ntri) #print('diff = ',ddd.ntri - true_ntri) np.testing.assert_array_equal(ddd.ntri, true_ntri) # Repeat with binslop = 0 ddd = treecorr.NNNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins, min_u=min_u, max_u=max_u, nubins=nubins, min_v=min_v, max_v=max_v, nvbins=nvbins, bin_slop=0, verbose=1) ddd.process(cat) #print('ddd.ntri = ',ddd.ntri) #print('diff = ',ddd.ntri - true_ntri) np.testing.assert_array_equal(ddd.ntri, true_ntri) # And again with no top-level recursion ddd = treecorr.NNNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins, min_u=min_u, max_u=max_u, nubins=nubins, min_v=min_v, max_v=max_v, nvbins=nvbins, bin_slop=0, verbose=1, max_top=0) ddd.process(cat) #print('ddd.ntri = ',ddd.ntri) #print('true_ntri => ',true_ntri) #print('diff = ',ddd.ntri - true_ntri) np.testing.assert_array_equal(ddd.ntri, true_ntri) # And compare to the cross correlation # Here, we get 6x as much, since each triangle is discovered 6 times. 
ddd.clear() ddd.process(cat,cat,cat) #print('ddd.ntri = ',ddd.ntri) #print('true_ntri => ',true_ntri) #print('diff = ',ddd.ntri - true_ntri) np.testing.assert_array_equal(ddd.ntri, 6*true_ntri) dddc = treecorr.NNNCrossCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins, min_u=min_u, max_u=max_u, nubins=nubins, min_v=min_v, max_v=max_v, nvbins=nvbins, bin_slop=0, verbose=1, max_top=0) dddc.process(cat,cat,cat) #print('ddd.ntri = ',ddd.ntri) #print('true_ntri => ',true_ntri) #print('diff = ',ddd.ntri - true_ntri) np.testing.assert_array_equal(dddc.n1n2n3.ntri, true_ntri) np.testing.assert_array_equal(dddc.n1n3n2.ntri, true_ntri) np.testing.assert_array_equal(dddc.n2n1n3.ntri, true_ntri) np.testing.assert_array_equal(dddc.n2n3n1.ntri, true_ntri) np.testing.assert_array_equal(dddc.n3n1n2.ntri, true_ntri) np.testing.assert_array_equal(dddc.n3n2n1.ntri, true_ntri) # Also compare to using x,y,z rather than ra,dec,r cat = treecorr.Catalog(x=x, y=y, z=z) ddd.process(cat) np.testing.assert_array_equal(ddd.ntri, true_ntri) @timer def test_direct_3d_cross(): # This is the same as test_direct_count_cross, but using the 3d correlations ngal = 50 s = 10. rng = np.random.RandomState(8675309) x1 = rng.normal(312, s, (ngal,) ) y1 = rng.normal(728, s, (ngal,) ) z1 = rng.normal(-932, s, (ngal,) ) r1 = np.sqrt( x1*x1 + y1*y1 + z1*z1 ) dec1 = np.arcsin(z1/r1) ra1 = np.arctan2(y1,x1) cat1 = treecorr.Catalog(ra=ra1, dec=dec1, r=r1, ra_units='rad', dec_units='rad') x2 = rng.normal(312, s, (ngal,) ) y2 = rng.normal(728, s, (ngal,) ) z2 = rng.normal(-932, s, (ngal,) ) r2 = np.sqrt( x2*x2 + y2*y2 + z2*z2 ) dec2 = np.arcsin(z2/r2) ra2 = np.arctan2(y2,x2) cat2 = treecorr.Catalog(ra=ra2, dec=dec2, r=r2, ra_units='rad', dec_units='rad') x3 = rng.normal(312, s, (ngal,) ) y3 = rng.normal(728, s, (ngal,) ) z3 = rng.normal(-932, s, (ngal,) ) r3 = np.sqrt( x3*x3 + y3*y3 + z3*z3 ) dec3 = np.arcsin(z3/r3) ra3 = np.arctan2(y3,x3) cat3 = treecorr.Catalog(ra=ra3, dec=dec3, r=r3, ra_units='rad', dec_units='rad') min_sep = 1. max_sep = 50. 
nbins = 50 min_u = 0.13 max_u = 0.89 nubins = 10 min_v = 0.13 max_v = 0.59 nvbins = 10 ddd = treecorr.NNNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins, min_u=min_u, max_u=max_u, nubins=nubins, min_v=min_v, max_v=max_v, nvbins=nvbins, brute=True, verbose=1) ddd.process(cat1, cat2, cat3) #print('ddd.ntri = ',ddd.ntri) log_min_sep = np.log(min_sep) log_max_sep = np.log(max_sep) true_ntri_123 = np.zeros( (nbins, nubins, 2*nvbins) ) true_ntri_132 = np.zeros( (nbins, nubins, 2*nvbins) ) true_ntri_213 = np.zeros( (nbins, nubins, 2*nvbins) ) true_ntri_231 = np.zeros( (nbins, nubins, 2*nvbins) ) true_ntri_312 = np.zeros( (nbins, nubins, 2*nvbins) ) true_ntri_321 = np.zeros( (nbins, nubins, 2*nvbins) ) bin_size = (log_max_sep - log_min_sep) / nbins ubin_size = (max_u-min_u) / nubins vbin_size = (max_v-min_v) / nvbins for i in range(ngal): for j in range(ngal): for k in range(ngal): djk = np.sqrt((x2[j]-x3[k])**2 + (y2[j]-y3[k])**2 + (z2[j]-z3[k])**2) dik = np.sqrt((x1[i]-x3[k])**2 + (y1[i]-y3[k])**2 + (z1[i]-z3[k])**2) dij = np.sqrt((x1[i]-x2[j])**2 + (y1[i]-y2[j])**2 + (z1[i]-z2[j])**2) if dij == 0.: continue if dik == 0.: continue if djk == 0.: continue if dij < dik: if dik < djk: d3 = dij; d2 = dik; d1 = djk ccw = is_ccw_3d(x1[i],y1[i],z1[i],x2[j],y2[j],z2[j],x3[k],y3[k],z3[k]) true_ntri = true_ntri_123 elif dij < djk: d3 = dij; d2 = djk; d1 = dik ccw = is_ccw_3d(x2[j],y2[j],z2[j],x1[i],y1[i],z1[i],x3[k],y3[k],z3[k]) true_ntri = true_ntri_213 else: d3 = djk; d2 = dij; d1 = dik ccw = is_ccw_3d(x2[j],y2[j],z2[j],x3[k],y3[k],z3[k],x1[i],y1[i],z1[i]) true_ntri = true_ntri_231 else: if dij < djk: d3 = dik; d2 = dij; d1 = djk ccw = is_ccw_3d(x1[i],y1[i],z1[i],x3[k],y3[k],z3[k],x2[j],y2[j],z2[j]) true_ntri = true_ntri_132 elif dik < djk: d3 = dik; d2 = djk; d1 = dij ccw = is_ccw_3d(x3[k],y3[k],z3[k],x1[i],y1[i],z1[i],x2[j],y2[j],z2[j]) true_ntri = true_ntri_312 else: d3 = djk; d2 = dik; d1 = dij ccw = is_ccw_3d(x3[k],y3[k],z3[k],x2[j],y2[j],z2[j],x1[i],y1[i],z1[i]) true_ntri = true_ntri_321 r = d2 u = d3/d2 v = (d1-d2)/d3 if r < min_sep or r >= max_sep: continue if u < min_u or u >= max_u: continue if v < min_v or v >= max_v: continue if not ccw: v = -v kr = int(np.floor( (np.log(r)-log_min_sep) / bin_size )) ku = int(np.floor( (u-min_u) / ubin_size )) if v > 0: kv = int(np.floor( (v-min_v) / vbin_size )) + nvbins else: kv = int(np.floor( (v-(-max_v)) / vbin_size )) assert 0 <= kr < nbins assert 0 <= ku < nubins assert 0 <= kv < 2*nvbins true_ntri[kr,ku,kv] += 1 # With the regular NNNCorrelation class, we end up with the sum of all permutations. true_ntri_sum = true_ntri_123 + true_ntri_132 + true_ntri_213 + true_ntri_231 +\ true_ntri_312 + true_ntri_321 #print('true_ntri = ',true_ntri_sum) #print('diff = ',ddd.ntri - true_ntri_sum) np.testing.assert_array_equal(ddd.ntri, true_ntri_sum) # Now repeat with the full CrossCorrelation class, which distinguishes the permutations. 
ddd = treecorr.NNNCrossCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins, min_u=min_u, max_u=max_u, nubins=nubins, min_v=min_v, max_v=max_v, nvbins=nvbins, brute=True, verbose=1) ddd.process(cat1, cat2, cat3) #print('true_ntri = ',true_ntri_123) #print('diff = ',ddd.n1n2n3.ntri - true_ntri_123) np.testing.assert_array_equal(ddd.n1n2n3.ntri, true_ntri_123) np.testing.assert_array_equal(ddd.n1n3n2.ntri, true_ntri_132) np.testing.assert_array_equal(ddd.n2n1n3.ntri, true_ntri_213) np.testing.assert_array_equal(ddd.n2n3n1.ntri, true_ntri_231) np.testing.assert_array_equal(ddd.n3n1n2.ntri, true_ntri_312) np.testing.assert_array_equal(ddd.n3n2n1.ntri, true_ntri_321) # Repeat with binslop = 0 ddd = treecorr.NNNCrossCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins, min_u=min_u, max_u=max_u, nubins=nubins, min_v=min_v, max_v=max_v, nvbins=nvbins, bin_slop=0, verbose=1) ddd.process(cat1, cat2, cat3) #print('binslop = 0: ddd.n1n2n3.ntri = ',ddd.n1n2n3.ntri) #print('diff = ',ddd.n1n2n3.ntri - true_ntri_123) np.testing.assert_array_equal(ddd.n1n2n3.ntri, true_ntri_123) np.testing.assert_array_equal(ddd.n1n3n2.ntri, true_ntri_132) np.testing.assert_array_equal(ddd.n2n1n3.ntri, true_ntri_213) np.testing.assert_array_equal(ddd.n2n3n1.ntri, true_ntri_231) np.testing.assert_array_equal(ddd.n3n1n2.ntri, true_ntri_312) np.testing.assert_array_equal(ddd.n3n2n1.ntri, true_ntri_321) # And again with no top-level recursion ddd = treecorr.NNNCrossCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins, min_u=min_u, max_u=max_u, nubins=nubins, min_v=min_v, max_v=max_v, nvbins=nvbins, bin_slop=0, verbose=1, max_top=0) ddd.process(cat1, cat2, cat3) #print('max_top = 0: ddd.n1n2n3.ntri = ',ddd.n1n2n3n.ntri) #print('true_ntri = ',true_ntri_123) #print('diff = ',ddd.n1n2n3.ntri - true_ntri_123) np.testing.assert_array_equal(ddd.n1n2n3.ntri, true_ntri_123) np.testing.assert_array_equal(ddd.n1n3n2.ntri, true_ntri_132)
np.testing.assert_array_equal(ddd.n2n1n3.ntri, true_ntri_213)
numpy.testing.assert_array_equal
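The api for the row above resolves to numpy.testing.assert_array_equal. As a hedged, minimal sketch of the comparison pattern the prompt leans on (checking a computed triangle-count histogram against a brute-force reference), using small hypothetical arrays rather than real treecorr results:

import numpy as np

# Toy stand-ins for ddd.ntri and the brute-force true_ntri histogram.
computed = np.zeros((2, 3), dtype=int)
expected = np.zeros((2, 3), dtype=int)
computed[1, 2] = expected[1, 2] = 5

# Passes silently when every element matches exactly; raises AssertionError
# (with a per-element mismatch report) otherwise.
np.testing.assert_array_equal(computed, expected)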
""" Name: Coordinates.py Description: Marie-Annes Python code for converting between Horizon and Sky coordinate systems. """ from __future__ import division import numpy as np from astropy.time import TimeDelta from numpy.random import random_sample as randomu jd2000 = 2451545.0 def _premat(equinox1, equinox2, FK4=True): """ Return precession matrix needed to go from equinox1 to equinox2 equinox1 - original equinox of coordinates equinox2 - equinox to precess to returns 3x3 precession matrix Shameless stolen from astrolib premat.pro """ d2r = np.pi/180. s2r = d2r/3600. # convert seconds to radians T = 1e-3*(equinox2 - equinox1) if FK4: ST = 1e-3*(equinox1 - 2000.) # Compute 3 rotation angles A = s2r * T * \ (23062.181 + ST * (139.656 + 0.0139*ST) \ + T * (30.188 - 0.344 * ST + 17.998 * T)) B = s2r * T * T * (79.280 + 0.410*ST + 0.205*T) + A C = s2r * T * (20043.109 - ST*(85.33 + 0.217*ST) \ + T*(-42.665 - 0.217*ST - 41.833*T)) else: A = 0. B = 0. C = 0. sina = np.sin(A) sinb = np.sin(B) sinc = np.sin(C) cosa = np.cos(A) cosb = np.cos(B) cosc = np.cos(C) R = np.zeros((3, 3)) R[:,0] = np.array([ cosa*cosb*cosc - sina*sinb, sina*cosb+cosa*sinb*cosc, cosa*sinc]).flatten() R[:,1] = np.array([-cosa*sinb - sina*cosb*cosc, cosa*cosb - sina*sinb*cosc, -sina*sinc]).flatten() R[:,2] = np.array([-cosb*sinc, -sinb*sinc, cosc]).flatten() return R def _precess(_ra, _dec, equinox1, equinox2): """ Precess coordinate system from equinox1 to equinox2. ra and dec are inputs in radians. Shameless stolen from astrolib routine precess.pro (Based on procedure from Computational Spherical Astronomy by Taff (1983). p. 24.) """ ra = _ra*np.pi/180. dec= _dec*np.pi/180. a = np.cos(dec) vec1 = np.array([a*np.cos(ra), a*np.sin(ra), np.sin(dec)]).T # cartesian vector on sphere R0 = _premat(equinox1, equinox2) # get rotation matrix print(vec1.shape) vec1 = np.reshape(vec1, (vec1.shape[0], 1, vec1.shape[1])) R0 = np.reshape(R0, (1, R0.shape[0], R0.shape[1])) vec2 = np.sum(R0*vec1, axis=-1) #vec2 = R0.dot(vec1) ra_out = np.arctan2(vec2[:,1], vec2[:,0]) dec_out= np.arcsin(vec2[:,2]) #if ra_out < 0: # ra_out += 2.*np.pi #w = np.where((ra_out < 0))[0] #if len(w) > 0: # ra_out[w] += 2.*np.pi return np.mod(ra_out*180./np.pi, 360), dec_out*180./np.pi def _jd2gst(jd): """ Convert julian dates into Greenwich Sidereal Time. From Practical Astronomy With Your Calculator. """ jd0 = np.floor(jd - 0.5) + 0.5 T = (jd0 - 2451545.0) / 36525 T0 = 6.697374558 + 2400.051336 * T + 0.000025862 * T**2 T0 %= 24 ut = (jd - jd0) * 24 T0 += ut * 1.002737909 T0 %= 24 return T0 def _gst2lst(gst, geolon): """ Convert Greenwich Sidereal Time into Local Sidereal Time. """ # geolon: Geographic longitude EAST in degrees. return (gst + geolon / 15.) % 24 def _jd2lst(lng, jd): c = [280.46061837, 360.98564736629, 0.000387933, 38710000.0 ] t0 = jd - jd2000 t = t0/36525. theta = c[0] + (c[1] * t0) + t**2*(c[2] - t/ c[3] ) lst = ( theta + lng)/15.0 neg = np.where(lst < 0.0) n = len(neg) if n > 0: lst[neg] = 24. + np.mod(lst[neg] , 24) lst = np.mod(lst , 24.) return lst def _pang(el, dec, geolat): """ Generate parallactic angle from elevation and declination """ d2r = np.pi/180.0 r2d = 180.0/np.pi top = np.sin(geolat*d2r) - np.sin(el*d2r)*np.sin(dec*d2r) bot = np.cos(el*d2r)*np.cos(dec*d2r) p = np.arccos(top/bot) if isinstance(p, type(np.array([]))): p[np.isnan(p)] = 0 p[p > np.pi/2.] 
-= np.pi else: if np.isnan(p): p = 0 else: if p > np.pi/2.: p -= np.pi return p*r2d def _equ2hor(_ra, _dec, _jd, geolat, geolon, precess=False): """ Convert from ra/dec to az/el (by <NAME>). All inputs as degrees """ # Imports from numpy import arccos, arcsin, cos, pi, sin, where ra = np.array([_ra]).flatten() dec= np.array([_dec]).flatten() jd = np.array([_jd]).flatten() lst = _gst2lst(_jd2gst(jd), geolon) if precess: J_now = np.mean( (jd - 2451545.)/365.25 + 2000.0 ) ra, dec = _precess(ra, dec, 2000., J_now) #for i in range(len(J_now)): # ra[i], dec[i] = _precess(ra[i], dec[i], 2000., J_now[i]) az, el = _equ2hor_lst(ra, dec, lst, geolat) # Later return az, el def _equ2hor_lst(_ra, _dec, _lst, geolat): from numpy import arccos, arcsin, cos, pi, sin, where ra = np.array([_ra]).flatten() dec= np.array([_dec]).flatten() lst = np.array([_lst]).flatten() d2r = pi/180.0 r2d = 180.0/pi sin_dec = sin(dec*d2r) cos_dec = cos(dec*d2r) phi_rad = geolat*d2r sin_phi = sin(phi_rad) cos_phi = cos(phi_rad) ha = 15.0*_ra2ha(ra, lst) sin_ha = sin(ha*d2r) cos_ha = cos(ha*d2r) x = - cos_ha * cos_dec * sin_phi + sin_dec * cos_phi y = - sin_ha * cos_dec z = cos_ha * cos_dec * cos_phi + sin_dec * sin_phi r = np.sqrt(x**2 + y**2) az = np.arctan2(y, x)*180./np.pi el = np.arctan2(z, r)*180./np.pi w = (az < 0) az[w] = az[w] + 360. return az, el def _hor2equ(_az, _el, _jd, geolat, geolon, precess=False): """ Convert from az/el to ra/dec (by <NAME>). All inputs in degrees """ # Imports from numpy import arccos, arcsin, cos, pi, sin, where az = np.array([_az]).flatten() el = np.array([_el]).flatten() jd = np.array([_jd]).flatten() #lst = _gst2lst(_jd2gst(jd), geolon) lst = _jd2lst(geolon, jd) ra, dec = _hor2equ_lst(az, el, lst, geolat) if precess: J_now = np.mean( (jd - 2451545.)/365.25 + 2000.0 ) ra, dec = _precess(ra, dec, 2000., J_now) # Later return ra, dec def _hor2equ_lst(_az, _el, _lst, geolat): az = np.array([_az]).flatten() el = np.array([_el]).flatten() lst = np.array([_lst]).flatten() from numpy import arccos, arcsin, cos, pi, sin, where d2r = pi/180.0 r2d = 180.0/pi az_r = az*np.pi/180. el_r = el*np.pi/180. geolat_r = geolat*np.pi/180. # Convert to equatorial coordinates cos_el = cos(el_r) sin_el = sin(el_r) cos_phi = cos(geolat_r) sin_phi = sin(geolat_r) cos_az = cos(az_r) sin_az = sin(az_r) sin_dec = sin_el*sin_phi + cos_el*cos_phi*cos_az dec = arcsin(sin_dec) ha = [-sin_az*cos_el, -cos_az*sin_phi*cos_el + sin_el*cos_phi] ha = np.arctan2(ha[0], ha[1]) w = np.where(ha < 0)[0] if len(w) != 0: ha[w] = ha[w] + np.pi*2. ha = np.mod(ha, np.pi*2.) ra = lst*15.0*np.pi/180.-ha ra = where(ra >= 2.*np.pi, ra - 2.*np.pi, ra) ra = where(ra < 0.0, ra + 2.*np.pi, ra) ra *= 180./np.pi dec *= 180./np.pi return ra, dec def _ra2ha(ra, lst): """ Converts a right ascension to an hour angle. """ return (lst - ra / 15.0) % 24 def _equ2gal(_ra, _dec): """ Converts right ascension and declination to Galactic lon and lat. Uses rotation matrix Rg from 'Spherical Astronomy by <NAME>, Chapter 14, page 355' """ ra = _ra*np.pi/180. dec= _dec*np.pi/180. equVec = np.array([[np.cos(ra)*np.cos(dec)], [np.sin(ra)*np.cos(dec)], [np.sin(dec)]]) tg = 0. ag = (17. + 45.6/60.)*15.*np.pi/180. dg = -28.94*np.pi/180. Rg = np.array([[-0.054876, -0.873437, -0.483835], [ 0.494109, -0.444830, 0.746982], [-0.867666, -0.198076, 0.455984]]) Rg = np.reshape(Rg, (3, 3, 1)) Rg = np.transpose(Rg, [1,0,2]) test = Rg*equVec galVec = np.sum(Rg*equVec, axis=0)#Rg.dot(equVec) lon = np.arctan2(galVec[1],galVec[0]) lat = np.pi/2. 
- np.arctan2(np.sqrt(galVec[0]**2 + galVec[1]**2), galVec[2]) lon = lon *180./np.pi lat = lat*180./np.pi return lon, lat def _gal2equ(gl, gb, year=2000): """ Shamelessly copied from the IDL glactc routine. """ rapol = (12. + 49./60.)*np.pi/180.*15. decpol= (27.4)*np.pi/180. dlon = (123.0)*np.pi/180. sdp = np.sin(decpol) cdp = np.sqrt(1.0 - sdp*sdp) sgb = np.sin(gb) cgb = np.sqrt(1. - sgb**2) sdec = sgb*sdp + cgb*cdp*np.cos(dlon - gl) dec = np.arcsin(sdec) cdec = np.sqrt(1.-sdec**2) sinf = cgb * np.sin(dlon-gl)/cdec cosf = (sgb-sdp*sdec)/(cdp*cdec) ra = rapol + np.arctan2(sinf, cosf) ra *= 180./np.pi dec *= 180./np.pi if year != 2000: ra, dec = _precess(ra, dec, 2000., year) ra[ra > 360] -= 360 return np.mod(ra, 360), dec def _nutate(jd): dtor = np.pi/180. T = (jd[:] - 2451545.0)/36525.0 # Mean elongation of the Moon coeff1 = np.array([297.85036, 445267.111480, -0.0019142, 1./189474.]) d = np.mod(np.polyval(coeff1[::-1], T)*dtor, 2*np.pi) d = np.reshape(d, (d.size, 1)) # Sun's mean anomaly coeff2 = np.array([357.5277, 35999.050340, -0.0001603, -1./3e5 ]) m = np.mod(np.polyval(coeff2[::-1], T)*dtor, 2.*np.pi) m = np.reshape(m, (m.size, 1)) # Moon's mean anomaly coeff3 = np.array([134.96298, 477198.867398, 0.0086972, 1.0/5.625e4 ]) mprime = np.mod(np.polyval(coeff3[::-1], T)*dtor, 2.*np.pi) mprime = np.reshape(mprime, (mprime.size, 1)) # Moon's argument of latitude coeff4 = np.array([93.27191, 483202.017538, -0.0036825, -1.0/3.27270e5 ]) f = np.mod(np.polyval(coeff4[::-1], T)*dtor, 2.*np.pi) f = np.reshape(f, (f.size, 1)) # Longitude of the ascending node of the Moon's mean orbit on the ecliptic, # measured from the mean equinox of the date coeff5 = np.array([125.04452, -1934.136261, 0.0020708, 1./4.5e5]) omega = np.mod(np.polyval(coeff5[::-1], T)*dtor, 2.*np.pi) omega = np.reshape(omega, (omega.size, 1)) d_lng = np.array([0,-2,0,0,0,0,-2,0,0,-2,-2,-2,0,2,0,2,0,0,-2,0,2,0,0,-2,0,-2,0,0,2, -2,0,-2,0,0,2,2,0,-2,0,2,2,-2,-2,2,2,0,-2,-2,0,-2,-2,0,-1,-2,1,0,0,-1,0,0, 2,0,2]) d_lng = np.reshape(d_lng, (d_lng.size, 1)) m_lng = np.concatenate(( np.array([0,0,0,0,1,0,1,0,0,-1]),np.zeros(17),np.array([2,0,2,1,0,-1,0,0,0,1,1,-1,0, 0,0,0,0,0,-1,-1,0,0,0,1,0,0,1,0,0,0,-1,1,-1,-1,0,-1]) )) m_lng = np.reshape(m_lng, (m_lng.size, 1)) mp_lng = np.array([0,0,0,0,0,1,0,0,1,0,1,0,-1,0,1,-1,-1,1,2,-2,0,2,2,1,0,0,-1,0,-1, 0,0,1,0,2,-1,1,0,1,0,0,1,2,1,-2,0,1,0,0,2,2,0,1,1,0,0,1,-2,1,1,1,-1,3,0]) mp_lng = np.reshape(mp_lng, (mp_lng.size, 1)) f_lng = np.array([0,2,2,0,0,0,2,2,2,2,0,2,2,0,0,2,0,2,0,2,2,2,0,2,2,2,2,0,0,2,0,0, 0,-2,2,2,2,0,2,2,0,2,2,0,0,0,2,0,2,0,2,-2,0,0,0,2,2,0,0,2,2,2,2]) f_lng = np.reshape(f_lng, (f_lng.size, 1)) om_lng = np.array([1,2,2,2,0,0,2,1,2,2,0,1,2,0,1,2,1,1,0,1,2,2,0,2,0,0,1,0,1,2,1, 1,1,0,1,2,2,0,2,1,0,2,1,1,1,0,1,1,1,1,1,0,0,0,0,0,2,0,0,2,2,2,2]) om_lng = np.reshape(om_lng, (om_lng.size, 1)) sin_lng = np.array([-171996, -13187, -2274, 2062, 1426, 712, -517, -386, -301, 217, -158, 129, 123, 63, 63, -59, -58, -51, 48, 46, -38, -31, 29, 29, 26, -22, 21, 17, 16, -16, -15, -13, -12, 11, -10, -8, 7, -7, -7, -7, 6,6,6,-6,-6,5,-5,-5,-5,4,4,4,-4,-4,-4,3,-3,-3,-3,-3,-3,-3,-3 ]) sin_lng = np.reshape(sin_lng, (sin_lng.size, 1)) sdelt = np.concatenate(( np.array([-174.2, -1.6, -0.2, 0.2, -3.4, 0.1, 1.2, -0.4, 0, -0.5, 0, 0.1, 0,0,0.1, 0,-0.1]), np.zeros(10), np.array([-0.1, 0, 0.1]), np.zeros(33) )) sdelt = np.reshape(sdelt, (sdelt.size, 1)) cos_lng = np.concatenate(( np.array([ 92025, 5736, 977, -895, 54, -7, 224, 200, 129, -95,0,-70,-53,0, -33, 26, 32, 27, 0, -24, 
16,13,0,-12,0,0,-10,0,-8,7,9,7,6,0,5,3,-3,0,3,3, 0,-3,-3,3,3,0,3,3,3]), np.zeros(14) )) cos_lng = np.reshape(cos_lng, (cos_lng.size, 1)) cdelt = np.concatenate(( np.array([8.9, -3.1, -0.5, 0.5, -0.1, 0.0, -0.6, 0.0, -0.1, 0.3]), np.zeros(53) )) cdelt = np.reshape(cdelt, (cdelt.size, 1)) n = len(jd) nut_long = np.zeros(n) nut_obliq = np.zeros(n) arg = d_lng.dot(d.T) + m_lng.dot(m.T) + mp_lng.dot(mprime.T) + f_lng.dot(f.T) + om_lng.dot(omega.T) sarg = np.sin(arg) carg = np.cos(arg) for i in range(n): nut_long[i] = 1e-4*np.sum( (sdelt.flatten()*T[i] + sin_lng.flatten())*sarg[:,i].flatten()) nut_obliq[i] = 1e-4*np.sum( (cdelt.flatten()*T[i] + cos_lng.flatten())*carg[:,i].flatten()) return nut_long, nut_obliq def _co_nutate(jd, ra, dec): d2r = np.pi/180. d2as = np.pi/(180.*3600.) T = (jd - 2451545.0)/36525.0 # Julian centures from J2000 of JD # must calculate obliquity of ecliptic d_psi, d_eps = _nutate(jd) eps0 = 23.4392911*3600. - 46.8150*T - 0.00059*T**2 + 0.001813*T**3 eps = (eps0 + d_eps)/3600.*d2r # true obliquity of the ecliptic in radians #useful numbers ce =
np.cos(eps)
numpy.cos
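A minimal sketch of the step the completion above performs in _co_nutate: form the obliquity of the ecliptic in radians and take its cosine. T is an illustrative Julian-century value and the nutation term d_eps is ignored here; the eps0 polynomial is the one in the prompt.

import numpy as np

T = 0.25                      # illustrative; the prompt derives T from the input JD
d2r = np.pi / 180.0

# mean obliquity of the ecliptic in arcseconds, as in the prompt
eps0 = 23.4392911 * 3600. - 46.8150 * T - 0.00059 * T**2 + 0.001813 * T**3
eps = eps0 / 3600. * d2r      # obliquity in radians (nutation correction omitted)

ce = np.cos(eps)              # cosine of the obliquity
se = np.sin(eps)              # sine of the obliquity
print(ce, se)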
import numpy as np from PIL import Image, ImageOps from skimage.feature import hog from segmenter.symbol_segmenter import segment_image from utils.image import img_to_binary # convert to a size similar across all images def normalize_image(img_array): img = img_to_binary(Image.fromarray(img_array)) crops_images = segment_image(img) crops, cropped_images = list(zip(*crops_images)) if len(crops) != 1: # FIXME: manual crop, since only one symbol per picture w, h = img.size img_arr = np.asarray(img) left, top, right, down = 1000, 1000, -1000, -1000 for x in range(w): for y in range(h): # black if not img_arr[y, x]: if y > down: down = y if y < top: top = y if x > right: right = x if x < left: left = x crop = (left, top, right + 1, down + 1) cropped_images = [img.crop(crop)] cropped_img = cropped_images[0] w, h = cropped_img.size resize_ratio = min(128 / w, 128 / h) new_size = [int(resize_ratio * w), int(resize_ratio * h)] # sometimes, the frac will have 0 height, because of resize, so make sure to at least leave one pixel if new_size[0] == 0: new_size[0] = 1 if new_size[1] == 0: new_size[1] = 1 resized_img = cropped_img.resize(new_size) gray_img = resized_img.convert('L') inverted_gray_image = ImageOps.invert(gray_img) w, h = inverted_gray_image.size final_img = Image.new('L', (128, 128)) final_img.paste(inverted_gray_image, ((128 - w) // 2, (128 - h) // 2)) return np.asarray(final_img) def extract_hog_features(img): img = normalize_image(
np.asarray(img)
numpy.asarray
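A minimal sketch of the PIL-to-NumPy conversion the completion performs, with a small synthetic grayscale image standing in for the segmented symbol crop:

import numpy as np
from PIL import Image

img = Image.new('L', (8, 8), color=255)   # hypothetical 8x8 white grayscale image

arr = np.asarray(img)                     # expose the image as an (H, W) uint8 array
print(arr.shape, arr.dtype)               # (8, 8) uint8

back = Image.fromarray(arr)               # round-trip back to PIL when needed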
import numpy as np import pandas as pd from utils import unison_shuffled_copies def oversample_minority(X_data, y_data): labels = np.argmax(y_data[:, :-1],axis=1) counts = pd.Series(labels).value_counts(sort=False) max_count = counts.max() new_X = [] new_y = [] for c, class_count in enumerate(counts): num_data_to_add = max_count - class_count q = num_data_to_add // class_count r = num_data_to_add % class_count X_c = X_data[labels == c].copy() y_c = y_data[labels == c].copy() # duplicate all minority classes new_X_c = np.concatenate((X_c, np.tile(X_c, (q, 1, 1)), X_c[:r,:,:])) new_y_c = np.concatenate((y_c, np.tile(y_c, (q, 1)), y_c[:r,:])) new_X.append(new_X_c) new_y.append(new_y_c) new_X = np.vstack(new_X) new_y =
np.vstack(new_y)
numpy.vstack
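A minimal sketch of the final stacking step: np.vstack concatenates the per-class blocks gathered in the list along the first axis. The two small label arrays are hypothetical stand-ins for the entries of new_y.

import numpy as np

y_class0 = np.array([[1, 0], [1, 0]])             # two samples of class 0
y_class1 = np.array([[0, 1], [0, 1], [0, 1]])     # three samples of class 1

stacked = np.vstack([y_class0, y_class1])         # rows from both blocks, in order
print(stacked.shape)                              # (5, 2)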
""" Masks areas to be carved out based on contour """ import itertools import numpy, scipy.interpolate, numpy.random import vec narrowing_factor = 1.5 # Used when river occupies both sides of a chunk corner_radius_offset = 0.9 river_deviation_centre = (-2, 2) river_deviation_width = (-1, 1) river_frequency_centre = 5.1 river_frequency_width = 2.8 class ChunkSeed(object): """ Used to seed generation of chunk specific features such as winding rivers. """ def __init__(self, level_seed, location): self.level_seed = numpy.cast[int](numpy.array(level_seed)) self.location = numpy.cast[int](numpy.array(location)) def offset(self, relative): """ Returns another ChunkSeed object for a chunk offset by the specified amount. """ return ChunkSeed(self.level_seed, self.location + numpy.array(relative)) def __side_seed(self, side): # Generated seeds will be the same for shared edges side = self.location + numpy.cast[int]((side + numpy.ones(len(side)))/2) return side*self.level_seed def centre_seed(self, side): """ Seed for river centre generation """ return numpy.cast[numpy.int32](self.__side_seed(side)) def width_seed(self, side): """ Seed for river width generation """ return numpy.cast[numpy.int32](self.__side_seed(side)*2) class Meander(object): """ Using the 'seed' integer, used to produce a series of values sampled at an integral interval, interpolated from a random series at interval 'step' found in the specified 'range'. If a final value is specified for the output series then it's allowed to deviate by the 'final_precision' fraction of the full range. """ def __init__(self, seed, step, range=(-1, 1), final_precision=0.05): self.seed = seed self.step = step self.range = range self.final_precision = final_precision @property def seed(self): return self._seed @seed.setter def seed(self, val): # Numpy now enforces mtrand 32-bit seed integer restriction self._seed = val & 0xffffffff def first(self): """ Return value of the first point of the generated series. """ gen = numpy.random.mtrand.RandomState(self.seed) return int(numpy.round(gen.uniform(self.range[0], self.range[1], 1)[0])) def series(self, points, final=None): """ Produces a 'points' number long series of interpolated values. If a 'final' vale is supplied then the last value in the returned series will match this value to within the precision specified by 'final_precision'. """ # Get the source random samples source_points = int(numpy.ceil(float(points)/self.step)) gen = numpy.random.mtrand.RandomState(self.seed) y1 = gen.uniform(self.range[0], self.range[1], source_points) #x1 = numpy.linspace(-(float(source_points) % step), float(points) - 1, source_points) x1 = numpy.linspace(0, float(points) + float(source_points) % self.step - 1, source_points) # Adjust final sample to meet required result if final is not None: accept = abs(self.range[1] - self.range[0])*self.final_precision for i in xrange(0, 20): # Really shouldn't go deeper than this but let's be sure f = scipy.interpolate.interp1d(x1, y1, kind='cubic') error = final - f(float(points) - 1) if abs(error) < accept: break else: y1[-1] = y1[-1] + error # Find interpolated points x2 = numpy.linspace(0.0, float(points) - 1, points) y2 = scipy.interpolate.interp1d(x1, y1, kind='cubic')(x2) return numpy.cast[int](
numpy.round(y2)
numpy.round
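A minimal sketch of the rounding-and-casting step applied to the interpolated series; y2 here is a tiny hypothetical array, and astype(int) stands in for numpy.cast[int], which newer NumPy releases deprecate.

import numpy as np

y2 = np.array([0.2, 1.7, -0.4, 2.6])

rounded = np.round(y2)          # nearest-integer values, still float dtype
as_int = rounded.astype(int)    # integer series, as the prompt's cast produces
print(as_int)                   # [0 2 0 3]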
import numpy as np import numpy.random as npr from test_util import * from funkyyak import grad npr.seed(1) def test_dot(): def fun(x, y): return to_scalar(np.dot(x, y)) mat1 = npr.randn(10, 11) mat2 = npr.randn(10, 11) vect1 = npr.randn(10) vect2 = npr.randn(11) vect3 = npr.randn(11) check_grads(fun, mat1, vect2) check_grads(fun, mat1, mat2.T) check_grads(fun, vect1, mat1) check_grads(fun, vect2, vect3) def test_max(): def fun(x): return to_scalar(np.max(x)) d_fun = lambda x : to_scalar(grad(fun)(x)) mat = npr.randn(10, 11) check_grads(fun, mat) check_grads(d_fun, mat) def test_sum_1(): def fun(x): return to_scalar(np.sum(x)) d_fun = lambda x : to_scalar(grad(fun)(x)) mat = npr.randn(10, 11) check_grads(fun, mat) check_grads(d_fun, mat) def test_sum_2(): def fun(x): return to_scalar(
np.sum(x, axis=0)
numpy.sum
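A minimal sketch of the axis-wise reduction the completion calls inside the gradient-checked function, on a small hypothetical matrix:

import numpy as np

x = np.arange(6).reshape(2, 3)   # [[0 1 2], [3 4 5]]

print(np.sum(x))                 # 15, sum over every element
print(np.sum(x, axis=0))         # [3 5 7], collapse the rows
print(np.sum(x, axis=1))         # [ 3 12], collapse the columns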
"""Edge filtering function""" import numpy as np from scipy import stats, sparse import sys from cidre import utils from functools import partial class EdgeFilter: def __init__( self, min_edge_weight=0, alpha=0.01, remove_selfloop=True, min_expected_weight=1 ): self.alpha = alpha self.min_edge_weight = min_edge_weight self.remove_selfloop = remove_selfloop self.min_expected_weight = min_expected_weight def fit(self, A, group_membership, mask=None): """Find the excessive edges in the network using the dcSBM as a null model. :param A: Network :type A: scipy sparse matrix :param group_membership: group membership of nodes. group_membership[i] is the group to which node i belongs. :type group_membership: numpy.array :param mask: mask[i,j] = 1 to set edge (i,j) to be insignificant, defaults to None :type mask: scipy sparse matrix, optional """ if group_membership is None: group_membership = np.zeros(A.shape[0]).astype(int) p_value, src, dst, w = self._calc_p_values_dcsbm( A, group_membership, self.min_expected_weight ) if self.remove_selfloop: s = src != dst p_value, src, dst, w = p_value[s], src[s], dst[s], w[s] # Remove edges less than minimum weight if self.min_edge_weight > 0: s = w >= self.min_edge_weight p_value, src, dst, w = p_value[s], src[s], dst[s], w[s] # Mask pre-selected edges if mask is not None: wth = np.array(mask[(src_, trg_)]).reshape(-1) s = np.isclose(wth, 0) p_value, src, dst, w = p_value[s], src[s], dst[s], w[s] # Perform the Benjamini-Hochberg statistical test is_significant = self._benjamini_hochberg_test(p_value) # Find the excessive edges src, dst, w = src[is_significant], dst[is_significant], w[is_significant] # Construct the filter self.filter = self._make_filter_func(src, dst, None, A.shape[0]) def transform(self, src, dst, w): return self.filter(src, dst, w) def _calc_p_values_dcsbm(self, A, group_membership, min_expected_weight=1): """Calculate the p_values using the degree-corrected stochastic block model. :param A: Adjacency matrix. Adjacency matrix, where A[i,j] indicates the weight of the edge from node i to node j. :type A: scipy sparse matrix :param group_membership: group_membership[i] indicates the ID of the group to which node i belongs :type group_membership: numpy.array :return: p-values :rtype: float """ N = A.shape[0] indeg = np.array(A.sum(axis=0)).reshape(-1) outdeg = np.array(A.sum(axis=1)).reshape(-1) C_SBM = utils.to_community_matrix(group_membership) Lambda = C_SBM.T @ A @ C_SBM Din = np.array(Lambda.sum(axis=0)).reshape(-1) Dout = np.array(Lambda.sum(axis=1)).reshape(-1) theta_in = indeg / np.maximum(C_SBM @ Din, 1.0) theta_out = outdeg / np.maximum(C_SBM @ Dout, 1.0) src, dst, w = utils.find_non_self_loop_edges(A) lam = ( np.array(Lambda[group_membership[src], group_membership[dst]]).reshape(-1) * theta_out[src] * theta_in[dst] ) lam = np.maximum(lam, min_expected_weight) pvals = 1.0 - stats.poisson.cdf(w - 1, lam) return pvals, src, dst, w def _benjamini_hochberg_test(self, pvals): """Benjamini-Hochberg statistical test :param pvals: p-values :type pvals: numpy.array :return: significant[i] = True if the ith element is significant. Otherwise signfiicant[i] = False. 
:rtype: numpy.array (bool) """ order = np.argsort(pvals) M = pvals.size is_sig = pvals[order] <= (self.alpha * np.arange(1, M + 1) / M) if np.any(is_sig) == False: return is_sig last_true_id = np.where(is_sig)[0][-1] is_sig = np.zeros(M) is_sig[order[: (last_true_id + 1)]] = 1 return is_sig > 0 def _make_filter_func(self, src, trg, wth, N): """Make a filter function :param src: Source node :type src: np.array :param trg: Target node :type trg: np.array :param wth: Minimum Weight of edges between source and target nodes :type wth: numpy.array :param N: Number of nodes :type N: int :return: Filtering function :rtype: function """ if wth is None: wth = 1e-8 *
np.ones_like(src)
numpy.ones_like
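A minimal sketch of the threshold-array construction in _make_filter_func: one tiny weight threshold per edge, shaped like the source-node array. src is a hypothetical edge list.

import numpy as np

src = np.array([0, 2, 5, 5, 7])   # source node of each edge

# ones_like copies src's shape; multiplying by 1e-8 promotes the integer
# ones to float, giving a uniform near-zero threshold per edge.
wth = 1e-8 * np.ones_like(src)
print(wth)                        # [1.e-08 1.e-08 1.e-08 1.e-08 1.e-08]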
#!/usr/bin/env python3 # -*- coding: utf-8 -*- """ Created on Sat Mar 16 11:57:09 2019 @author: smrak """ from pyGnss import pyGnss from pyGnss import gnssUtils as gu from datetime import datetime, timedelta import georinex as gr import numpy as np from glob import glob from dateutil import parser import yaml import os import h5py from argparse import ArgumentParser #import matplotlib.pyplot as plt if __name__ == '__main__': p = ArgumentParser() p.add_argument('date') p.add_argument('rxlist', type = str, help = 'Rxlist as a .yaml file') p.add_argument('--elmask', type = int, default = 30) p.add_argument('--tlim', default = None, help = "start, stop times example 06:00 08:00", nargs=2, type=str) p.add_argument('-o', '--ofn', help = 'Output filename with or withou root folder.', default=None) p.add_argument('--ts', help = 'sampling rate', default = 30, type = int) p.add_argument('--cfg', help = 'Path to the config (yaml) file', default = None) p.add_argument('--log', help = 'If you prefer to make a .log file?', action = 'store_true') p.add_argument('--stec', help = 'Save slant TEC?', action = 'store_true') P = p.parse_args() # GLOBAL VARIABLES if P.cfg is None: OBSFOLDER = '/media/smrak/gnss/obs/' NAVFOLDER = '/media/smrak/gnss/nav/' SBFOLDER = '/media/smrak/gnss/jplg/' SAVEFOLDER = '/media/smrak/gnss/hdf/' else: yamlcfg = yaml.load(open(P.cfg, 'r')) OBSFOLDER = yamlcfg.get('obsfolder') NAVFOLDER = yamlcfg.get('navfolder') SBFOLDER = yamlcfg.get('sbfolder') SAVEFOLDER = yamlcfg.get('savefolder') date = parser.parse(P.date) year = date.year day = date.strftime('%j') rxlist = os.path.expanduser(P.rxlist) el_mask = P.elmask tlim = P.tlim Ts = P.ts weights=[1, 4, 7, 10] # Obs nav nc_root = os.path.join(OBSFOLDER, str(year)) # Filter input files stream = yaml.load(open(rxlist, 'r')) rxn = stream.get('rx') rx_total = stream.get('total') nc_folder = os.path.join(nc_root, str(day)) + '/' nc_list = np.array(sorted(glob(nc_folder + '*.nc'))) nc_rx_name = np.array([os.path.split(r)[1][:4] for r in nc_list]) idn = np.isin(nc_rx_name, rxn) fnc = nc_list[idn] # Nav file nav_root = NAVFOLDER fnav = os.path.join(nav_root, 'brdc' + str(day) + '0.' + str(year)[2:] + 'n') # jplg file jplg_root = SBFOLDER fjplg = os.path.join(jplg_root, 'jplg' + str(day) + '0.' 
+ str(year)[2:] + 'i') satbias = pyGnss.getSatBias(fjplg) # Processing options satpos = True args = ['L1', 'L2'] #Common time array if tlim is None: t0 = datetime.strptime('{} {}'.format(year,int(day)),'%Y %j') t1 = datetime.strptime('{} {}'.format(year,int(day) + 1),'%Y %j') else: assert len(tlim) == 2 t0 = datetime.strptime('{} {}-{}'.format(year,int(day),tlim[0]),'%Y %j-%H:%M') t1 = datetime.strptime('{} {}-{}'.format(year,int(day),tlim[1]),'%Y %j-%H:%M') t = np.arange(t0, t1, Ts, dtype='datetime64[s]') #datetime64[s] tlim = [t0, t1] tl = t.shape[0] # Savename if P.ofn is None: sfn = str(year) + '_' + tlim[0].strftime('%m%dT%H%M') + '-' + tlim[1].strftime('%m%dT%H%M') + '_' + os.path.split(rxlist)[1] + '_' + str(el_mask) +'el_' + str(Ts) + 's.h5' savefn = os.path.join(SAVEFOLDER, sfn) else: if os.path.isfile(P.ofn): assert os.file.splitext(P.ofn)[1] in ('.h5', '.hdf5') savefn = P.ofn elif os.path.isdir(P.ofn): assert os.file.splitext(P.ofn)[1] in ('.h5', '.hdf5') savefn = os.path.join(P.ofn, os.path.split(rxlist)[1] + '_' + str(year) + '.h5') else: assert os.file.splitext(P.ofn)[1] in ('.h5', '.hdf5') savefn = os.path.join(SAVEFOLDER, P.ofn) # Open log file is choosen so if P.log is not None: logfn = os.path.splitext(savefn)[0] + '.log' LOG = open(logfn, 'w') LOG.close() # Correct tlim for processing purpuses: if P.tlim is not None: tlim[0] -= timedelta(hours=1) tlim[1] += timedelta(hours=1) # Output arrays svl = 32 #gr.load(fnc[0]).sv.values.shape[0] rxl = fnc.shape[0] if P.stec : slanttec = np.nan * np.zeros((tl, svl, rxl)) residuals = np.nan * np.zeros((tl, svl, rxl)) if Ts == 1: snr = np.nan * np.zeros((tl, svl, rxl)) el = np.nan * np.zeros((tl, svl, rxl)) az = np.nan * np.zeros((tl, svl, rxl)) rxpos = np.nan * np.zeros((rxl, 3)) for irx, fnc in enumerate(fnc): # New Array TEC = np.nan * np.zeros(t.shape[0], dtype=np.float16) TECD = np.nan *
np.zeros(t.shape[0], dtype=np.float16)
numpy.zeros
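A minimal sketch of the all-NaN preallocation pattern used for the per-receiver output arrays; nt is a hypothetical number of epochs, and np.full is shown as the more direct equivalent.

import numpy as np

nt = 10

tec = np.nan * np.zeros(nt, dtype=np.float16)     # the prompt's pattern
tec_alt = np.full(nt, np.nan, dtype=np.float16)   # same result, stated directly

print(np.isnan(tec).all(), np.isnan(tec_alt).all())   # True True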
# coding: utf-8 # Copyright (c) Pymatgen Development Team. # Distributed under the terms of the MIT License. import logging import numpy as np import scipy from scipy import stats from pymatgen.analysis.defects.core import DefectCorrection from pymatgen.analysis.defects.utils import ang_to_bohr, hart_to_ev, eV_to_k, \ generate_reciprocal_vectors_squared, QModel, converge import matplotlib.pyplot as plt __author__ = "<NAME>, <NAME>" __copyright__ = "Copyright 2018, The Materials Project" __version__ = "1.0" __maintainer__ = "<NAME>" __email__ = "<EMAIL>" __status__ = "Development" __date__ = "Mar 15, 2018" logger = logging.getLogger(__name__) class FreysoldtCorrection(DefectCorrection): """ A class for FreysoldtCorrection class. Largely adapated from PyCDT code """ def __init__(self, dielectric_const, q_model=None, energy_cutoff=520, madetol=0.0001, axis=None): """ Initializes the Freysoldt Correction Args: dielectric_const (float or 3x3 matrix): Dielectric constant for the structure q_mode (QModel): instantiated QModel object or None. Uses default parameters to instantiate QModel if None supplied energy_cutoff (int): Maximum energy in eV in recipripcol space to perform integration for potential correction madeltol(float): Convergence criteria for the Madelung energy for potential correction axis (int): Axis to calculate correction. Averages over all three if not supplied. """ self.q_model = QModel() if not q_model else q_model self.energy_cutoff = energy_cutoff self.madetol = madetol self.dielectric_const = dielectric_const if isinstance(dielectric_const, int) or \ isinstance(dielectric_const, float): self.dielectric = float(dielectric_const) else: self.dielectric = float(np.mean(np.diag(dielectric_const))) self.axis = axis self.metadata = {"pot_plot_data": {}, "pot_corr_uncertainty_md": {}} def get_correction(self, entry): """ Gets the Freysoldt correction for a defect entry Args: entry (DefectEntry): defect entry to compute Freysoldt correction on. Requires following parameters in the DefectEntry to exist: axis_grid (3 x NGX where NGX is the length of the NGX grid in the x,y and z axis directions. Same length as planar average lists): A list of 3 numpy arrays which contain the cartesian axis values (in angstroms) that correspond to each planar avg potential supplied. bulk_planar_averages (3 x NGX where NGX is the length of the NGX grid in the x,y and z axis directions.): A list of 3 numpy arrays which contain the planar averaged electrostatic potential for the bulk supercell. defect_planar_averages (3 x NGX where NGX is the length of the NGX grid in the x,y and z axis directions.): A list of 3 numpy arrays which contain the planar averaged electrostatic potential for the defective supercell. 
scaling_matrix (3 x 1 matrix): scaling matrix required to convert the entry.defect.bulk_structure object into the lattice which is used by the bulk_planar_average and defect_planar_average """ if not self.axis: list_axis_grid = np.array(entry.parameters["axis_grid"]) list_bulk_plnr_avg_esp = np.array(entry.parameters["bulk_planar_averages"]) list_defect_plnr_avg_esp = np.array(entry.parameters["defect_planar_averages"]) list_axes = range(len(list_axis_grid)) else: list_axes = np.array(self.axis) list_axis_grid, list_bulk_plnr_avg_esp, list_defect_plnr_avg_esp = [], [], [] for ax in list_axes: list_axis_grid.append(np.array(entry.parameters["axis_grid"][ax])) list_bulk_plnr_avg_esp.append(np.array(entry.parameters["bulk_planar_averages"][ax])) list_defect_plnr_avg_esp.append(np.array(entry.parameters["defect_planar_averages"][ax])) bulk_struct = entry.defect.bulk_structure.copy() if "scaling_matrix" in entry.parameters.keys(): bulk_struct.make_supercell(entry.parameters["scaling_matrix"]) lattice = bulk_struct.lattice q = entry.defect.charge es_corr = self.perform_es_corr(lattice, entry.charge) pot_corr_tracker = [] for x, pureavg, defavg, axis in zip(list_axis_grid, list_bulk_plnr_avg_esp, list_defect_plnr_avg_esp, list_axes): tmp_pot_corr = self.perform_pot_corr( x, pureavg, defavg, lattice, entry.charge, entry.site.coords, axis, widthsample=1.0) pot_corr_tracker.append(tmp_pot_corr) pot_corr =
np.mean(pot_corr_tracker)
numpy.mean
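A minimal sketch of averaging the per-axis potential corrections gathered in a plain Python list, which is what the completion does; the three values are hypothetical corrections in eV.

import numpy as np

pot_corr_tracker = [0.12, 0.15, 0.13]

pot_corr = np.mean(pot_corr_tracker)   # np.mean accepts a list and returns a scalar
print(round(pot_corr, 4))              # 0.1333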
import numpy as np import pyvista as pv import matplotlib.pyplot as plt from scipy.interpolate import RegularGridInterpolator from scipy.ndimage import gaussian_filter1d import time import copy from tqdm import tqdm class gridmod3d: _subprops = None _nprops = None _ncells = None _npoints = None _deltas = None _gorigin = None _rotdeg = None _rotrad = None _axorder = None shape = None def __init__(self,subprops,nprops,axorder,dims,deltas,gorigin=(0,0,0),rotdeg=0): assert len(subprops) == nprops assert len(subprops[0,:,0,0]) == dims[0] assert len(subprops[0,0,:,0]) == dims[1] assert len(subprops[0,0,0,:]) == dims[2] assert len(dims) == len(deltas) assert len(dims) == len(gorigin) assert len(dims) == len(axorder) assert self._checkAxOrderDict(axorder) self._subprops = np.copy(subprops) self._nprops = nprops self._deltas = deltas self._gorigin = gorigin self._npoints = dims self._ncells = (dims[0]-1,dims[1]-1,dims[2]-1) self._axorder = axorder self._rotdeg = rotdeg self._rotrad = self._rotdeg*np.pi/180 self.shape = self._subprops.shape def __getitem__(self,key): return self._subprops[key] def __str__(self): str_dict = { 'ncells':self._ncells,'npoints':self._npoints, \ 'deltas':self._deltas,'origin':self._gorigin, \ 'rotation (degres)':self._rotdeg, \ 'rotation (rads)':self._rotrad, \ 'shape':self.shape,'Axis Order':self._axorder } return str(str_dict) def _rotate_xy_coords(self,xyc,deg): if deg != 0: rad = deg*np.pi/180 rrm = np.array([[np.cos(rad),-np.sin(rad)],[np.sin(rad),np.cos(rad)]]) for i in range(xyc.shape[0]): xyc[i,:] = rrm.dot(xyc[i,:]) return xyc def _rotate_xyz_coords(self,xyzc,deg): if deg != 0: rad = deg*np.pi/180 rrm = np.array([[np.cos(rad),-np.sin(rad), 0],[np.sin(rad),np.cos(rad),0],[0, 0, 1]]) for i in range(xyzc.shape[0]): xyzc[i,:] = rrm.dot(xyzc[i,:]) return xyzc def _rotate_translate_xy_coords(self,xyc,deg): xyc = self._rotate_xy_coords(xyc,deg) xyc[:,0] += self._gorigin[0] xyc[:,1] += self._gorigin[1] return xyc def _rotate_translate_xyz_coords(self,xyzc,deg): xyzc = self._rotate_xyz_coords(xyzc,deg) xyzc[:,0] += self._gorigin[0] xyzc[:,1] += self._gorigin[1] xyzc[:,2] += self._gorigin[2] return xyzc def subsample(self, isx=0,iex=None,idx=2, \ isy=0,iey=None,idy=2, \ isz=0,iez=None,idz=2 ): _isx = int(isx+0.5) _isy = int(isy+0.5) _isz = int(isz+0.5) _iex = iex _iey = iey _iez = iez if iex is not None: _iex = int(iex+0.5) if iey is not None: _iey = int(iey+0.5) if iez is not None: _iez = int(iez+0.5) _idx = int(idx+0.5) _idy = int(idy+0.5) _idz = int(idz+0.5) save_axorder = self._axorder.copy() self.changeAxOrder({'X':0,'Y':1,'Z':2}) self._subprops = np.copy(self._subprops[:,_isx:_iex:_idx,_isy:_iey:_idy,_isz:_iez:_idz]) nx = self._subprops.shape[1] ny = self._subprops.shape[2] nz = self._subprops.shape[3] dx = self._deltas[0]*_idx dy = self._deltas[1]*_idy dz = self._deltas[2]*_idz ox = self._gorigin[0] + _isx*self._deltas[0] oy = self._gorigin[1] + _isy*self._deltas[1] oz = self._gorigin[2] + _isz*self._deltas[2] self._npoints = (nx,ny,nz) self._ncells = (nx-1,ny-1,nz-1) self._deltas = (dx,dy,dz) self._gorigin = (ox,oy,oz) self.changeAxOrder(save_axorder) self.shape = self._subprops.shape def _getLocalCoordsCellsByAxis(self,key): assert (key == 'X') or (key == 'Y') or (key == 'Z') ax_dict = {'X':0,'Y':1,'Z':2} i = ax_dict[key] i = self._axorder[key] ld = self._deltas[i] ln = self._ncells[i] imin = 0.5*ld imax = imin + (ln-1)*ld + 0.5*ld return np.arange(imin,imax,ld) def _getLocalCoordsPointsByAxis(self,key): assert (key == 'X') or (key == 'Y') or (key == 'Z') ax_dict 
= {'X':0,'Y':1,'Z':2} i = ax_dict[key] ld = self._deltas[i] ln = self._npoints[i] imin = 0 if key == 'Z': imin = self._gorigin[i] imax = imin + (ln-1)*ld + 0.5*ld return np.arange(imin,imax,ld) def getLocalCoordsCellsX(self): return self._getLocalCoordsCellsByAxis('X') def get_local_coords_cells_x(self): return self.getLocalCoordsCellsX() def getLocalCoordsCellsY(self): return self._getLocalCoordsCellsByAxis('Y') def get_local_coords_cells_y(self): return self.getLocalCoordsCellsY() def getLocalCoordsCellsZ(self): return self._getLocalCoordsCellsByAxis('Z') def get_local_coords_cells_z(self): return self.getLocalCoordsCellsZ() def getLocalCoordsPointsX(self): return self._getLocalCoordsPointsByAxis('X') def get_local_coords_points_x(self): return self.getLocalCoordsPointsX() def getLocalCoordsPointsY(self): return self._getLocalCoordsPointsByAxis('Y') def get_local_coords_points_y(self): return self.getLocalCoordsPointsY() def getLocalCoordsPointsZ(self): return self._getLocalCoordsPointsByAxis('Z') def get_local_coords_points_z(self): return self.getLocalCoordsPointsZ() def getLocalCoordsCellsXY(self): lcx = self.getLocalCoordsCellsX() lcy = self.getLocalCoordsCellsY() return np.transpose([np.tile(lcx, len(lcy)), np.repeat(lcy, len(lcx))]) def get_local_coords_cells_xy(self): return self.getLocalCoordsCellsXY() def getLocalCoordsPointsXY(self): lcx = self.getLocalCoordsPointsX() lcy = self.getLocalCoordsPointsY() return np.transpose([np.tile(lcx, len(lcy)), np.repeat(lcy, len(lcx))]) def get_local_coords_points_xy(self): return self.getLocalCoordsPointsXY() def getLocalCoordsCellsXYZ(self): lcx = self.getLocalCoordsCellsX() lcy = self.getLocalCoordsCellsY() lcz = self.getLocalCoordsCellsZ() return np.vstack(np.meshgrid(lcx,lcy,lcz)).reshape(3,-1).T def get_local_coords_cells_xyz(self): return self.getLocalCoordsCellsXYZ() def getLocalCoordsPointsXYZ(self): lcx = self.getLocalCoordsPointsX() lcy = self.getLocalCoordsPointsY() lcz = self.getLocalCoordsPointsZ() return np.vstack(np.meshgrid(lcx,lcy,lcz)).reshape(3,-1).T def get_local_coords_points_xyz(self): return self.getLocalCoordsPointsXYZ() def _getGlobalCoordsXY(self,as_points=True): if as_points: lxy = self.getLocalCoordsPointsXY() else: lxy = self.getLocalCoordsCellsXY() gxy = self._rotate_translate_xy_coords(lxy,self._rotdeg) return gxy def _getGlobalCoordsXYZ(self,as_points=True): if as_points: lxyz = self.getLocalCoordsPointsXYZ() else: lxyz = self.getLocalCoordsCellsXYZ() gxyz = self._rotate_translate_xyz_coords(lxyz,self._rotdeg) return gxyz def getGlobalCoordsPointsXY(self): return self._getGlobalCoordsXY(as_points=True) def get_global_coords_points_xy(self): return self.getGlobalCoordsPointsXY() def getGlobalCoordsCellsXY(self): return self._getGlobalCoordsXY(as_points=False) def get_global_coords_cells_xy(self): return self.getGlobalCoordsCellsXY() def getGlobalCoordsPointsXYZ(self): return self._getGlobalCoordsXYZ(as_points=True) def get_global_coords_points_xyz(self): return self.getGlobalCoordsPointsXYZ() def getGlobalCoordsCellsXYZ(self): return self._getGlobalCoordsXYZ(as_points=False) def get_global_coords_cells_xyz(self): return self.getGlobalCoordsCellsXYZ() def _checkAxOrderDict(self,dic): isgood = False isgood = isinstance(dic, dict) if not isgood: return False isgood = isgood & (len(dic) == 3) if not isgood: return False isgood = isgood & ('X' in dic.keys()) if not isgood: return False dicX = dic['X'] isgood = isgood & ((dicX == 0) | (dicX == 1) | (dicX == 2)) if not isgood: return False isgood = isgood & ('Y' in 
dic.keys()) if not isgood: return False dicY = dic['Y'] isgood = isgood & ((dicY == 0) | (dicY == 1) | (dicY == 2)) if not isgood: return False isgood = isgood & ('Z' in dic.keys()) if not isgood: return False dicZ = dic['Z'] isgood = isgood & ((dicZ == 0) | (dicZ == 1) | (dicZ == 2)) if not isgood: return False isgood = isgood & (dicX != dicY) & (dicX != dicZ) & (dicY != dicZ) return isgood def changeAxOrder(self,dic): assert self._checkAxOrderDict(dic) itrans = np.zeros((4),dtype=np.int) odicX = self._axorder['X']+1 odicY = self._axorder['Y']+1 odicZ = self._axorder['Z']+1 #print('old axorder:',self._axorder) ndicX = dic['X']+1 ndicY = dic['Y']+1 ndicZ = dic['Z']+1 itrans[ndicX] = odicX itrans[ndicY] = odicY itrans[ndicZ] = odicZ #print('itrans:',itrans) temp_props = np.copy(self._subprops.transpose(itrans),order='C') del self._subprops # clean up memory because thses can be big self._subprops = temp_props self._axorder['X'] = dic['X'] self._axorder['Y'] = dic['Y'] self._axorder['Z'] = dic['Z'] self.shape = self._subprops.shape def change_ax_order(self,dic): return self.changeAxOrder(dic) def getNPArray(self): return np.copy(self._subprops) def get_np_array(self): return self.getNPArray() def depthValsSliceFromZIndex(self,iz): assert (0 <= iz) & (iz <= self._npoints[2]) save_axorder = self._axorder.copy() self.changeAxOrder({'X':2,'Y':1,'Z':0}) slice_dprops =
np.copy(self._subprops[:,iz,:,:])
numpy.copy
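The prompt in this record reorders the axes of a gridded property array and then pulls a depth slice; the completion and api rows show that the expected call is np.copy. A minimal sketch, on a hypothetical toy array rather than the original _subprops data, of why the class copies the transposed view into a fresh C-ordered block:

import numpy as np

# Hypothetical stand-in for the class's internal property grid.
props = np.arange(2 * 3 * 4, dtype=np.float64).reshape(2, 3, 4)

# transpose() only returns a view with permuted strides ...
view = props.transpose(2, 1, 0)
print(view.flags['C_CONTIGUOUS'])        # False

# ... so copying with order='C' materialises a new contiguous array,
# which is what changeAxOrder() does before deleting the old one.
reordered = np.copy(view, order='C')
print(reordered.flags['C_CONTIGUOUS'])   # True
print(reordered.shape)                   # (4, 3, 2)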
import pickle import gym import time import numpy as np import random import torch import torch.nn as nn import torch.nn.functional as F import argparse from sklearn.model_selection import train_test_split # num_comps specifies the number of trajectories to use in our training set # pair_delta=1 recovers original (just that pairwise comps can't be the same) # if all_pairs=True, rather than generating num_comps pairwise comps with pair_delta ranking difference, # we simply generate all (num_demos choose 2) possible pairs from the dataset. def create_training_data(demonstrations, num_comps, pair_delta, all_pairs): # collect training data max_traj_length = 0 training_obs = [] training_labels = [] num_demos = len(demonstrations) if all_pairs: for ti in range(num_demos): for tj in range(ti+1, num_demos): traj_i = demonstrations[ti] traj_j = demonstrations[tj] # In other words, label = (traj_i < traj_j) if ti > tj: label = 0 # 0 indicates that traj_i is better than traj_j else: label = 1 # 1 indicates that traj_j is better than traj_i training_obs.append((traj_i, traj_j)) training_labels.append(label) # We shouldn't need max_traj_length, since all our trajectories our fixed at length 200. max_traj_length = max(max_traj_length, len(traj_i), len(traj_j)) else: for n in range(num_comps): ti = 0 tj = 0 # only add trajectories that are different (in sorted reward ranking) by pair_delta while abs(ti - tj) < pair_delta: # pick two random demonstrations ti = np.random.randint(num_demos) tj = np.random.randint(num_demos) traj_i = demonstrations[ti] traj_j = demonstrations[tj] # In other words, label = (traj_i < traj_j) if ti > tj: label = 0 # 0 indicates that traj_i is better than traj_j else: label = 1 # 1 indicates that traj_j is better than traj_i training_obs.append((traj_i, traj_j)) training_labels.append(label) # We shouldn't need max_traj_length, since all our trajectories our fixed at length 200. 
max_traj_length = max(max_traj_length, len(traj_i), len(traj_j)) print("maximum traj length", max_traj_length) return training_obs, training_labels # NOTE: the 'handpicked' features are comprised of # 1) spoon-mouth distance # 2) amount of food particles in mouth # 3) amount of food particles on the floor class Net(nn.Module): def __init__(self, augmented=False, num_rawfeatures=25, state_action=False): super().__init__() if augmented and state_action: input_dim = 35 elif augmented: input_dim = num_rawfeatures + 3 elif state_action: input_dim = 32 else: input_dim = 3 self.fc1 = nn.Linear(input_dim, 1, bias=False) # We have a single linear layer, with no nonlinearities def cum_return(self, traj): '''calculate cumulative return of trajectory''' sum_rewards = 0 #compute forward pass of reward network (we parallelize across frames so batch size is length of full trajectory) r = self.fc1(traj) sum_rewards += torch.sum(r) return sum_rewards def forward(self, traj_i, traj_j): '''compute cumulative return for each trajectory and return logits''' cum_r_i = self.cum_return(traj_i) cum_r_j = self.cum_return(traj_j) return torch.cat((cum_r_i.unsqueeze(0), cum_r_j.unsqueeze(0)),0) def learn_reward(reward_network, optimizer, training_inputs, training_outputs, num_iter, l1_reg, checkpoint_dir, val_obs, val_labels, patience): # check if gpu available device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") # Assume that we are on a CUDA machine, then this should print a CUDA device: print(device) loss_criterion = nn.CrossEntropyLoss() trigger_times = 0 prev_min_val_loss = 100 training_data = list(zip(training_inputs, training_outputs)) for epoch in range(num_iter): np.random.shuffle(training_data) training_obs, training_labels = zip(*training_data) for i in range(len(training_labels)): traj_i, traj_j = training_obs[i] # Question: why was it called labels? 
label = np.array([training_labels[i]]) traj_i = np.array(traj_i) traj_j = np.array(traj_j) traj_i = torch.from_numpy(traj_i).float().to(device) traj_j = torch.from_numpy(traj_j).float().to(device) label = torch.from_numpy(label).to(device) # zero out gradient optimizer.zero_grad() # forward + backward + optimize outputs = reward_network.forward(traj_i, traj_j) outputs = outputs.unsqueeze(0) # print("train outputs", outputs.shape) # print("train label", label.shape) # Calculate loss cross_entropy_loss = loss_criterion(outputs, label) # got rid of the l1_reg * abs_rewards from this line l1_loss = l1_reg * torch.linalg.vector_norm(torch.cat([param.view(-1) for param in reward_network.parameters()]), 1) loss = cross_entropy_loss + l1_loss # Backpropagate loss.backward() # Take one optimizer step optimizer.step() val_loss = calc_val_loss(reward_network, val_obs, val_labels) val_acc = calc_accuracy(reward_network, val_obs, val_labels) print("end of epoch {}: val_loss {}, val_acc {}".format(epoch, val_loss, val_acc)) # Early Stopping if val_loss > prev_min_val_loss: trigger_times += 1 print('trigger times:', trigger_times) if trigger_times >= patience: print("Early stopping.") return else: trigger_times = 0 print('trigger times:', trigger_times) print("saving model weights...") torch.save(reward_net.state_dict(), checkpoint_dir) print("Weights:", reward_net.state_dict()) prev_min_val_loss = min(prev_min_val_loss, val_loss) print("Finished training.") def calc_val_loss(reward_network, training_inputs, training_outputs): device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") loss_criterion = nn.CrossEntropyLoss() losses = [] with torch.no_grad(): for i in range(len(training_inputs)): label = np.array([training_outputs[i]]) traj_i, traj_j = training_inputs[i] traj_i = np.array(traj_i) traj_j = np.array(traj_j) traj_i = torch.from_numpy(traj_i).float().to(device) traj_j = torch.from_numpy(traj_j).float().to(device) label = torch.from_numpy(label).to(device) #forward to get logits outputs = reward_network.forward(traj_i, traj_j) outputs = outputs.unsqueeze(0) # print("val outputs", outputs.shape) # print("val label", label.shape) loss = loss_criterion(outputs, label) losses.append(loss.item()) return np.mean(losses) def calc_accuracy(reward_network, training_inputs, training_outputs): device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") loss_criterion = nn.CrossEntropyLoss() num_correct = 0. with torch.no_grad(): for i in range(len(training_inputs)): label = training_outputs[i] traj_i, traj_j = training_inputs[i] traj_i = np.array(traj_i) traj_j = np.array(traj_j) traj_i = torch.from_numpy(traj_i).float().to(device) traj_j = torch.from_numpy(traj_j).float().to(device) #forward to get logits outputs = reward_network.forward(traj_i, traj_j) _, pred_label = torch.max(outputs,0) if pred_label.item() == label: num_correct += 1. return num_correct / len(training_inputs) def predict_reward_sequence(net, traj): device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") rewards_from_obs = [] with torch.no_grad(): for s in traj: r = net.cum_return(torch.from_numpy(np.array([s])).float().to(device)).item() rewards_from_obs.append(r) return rewards_from_obs def predict_traj_return(net, traj): return sum(predict_reward_sequence(net, traj)) if __name__ == "__main__": parser = argparse.ArgumentParser(description=None) # parser.add_argument('--env_name', default='', help='Select the environment name to run, i.e. 
pong') parser.add_argument('--reward_model_path', default='', help="name and location for learned model params, e.g. ./learned_models/breakout.params") parser.add_argument('--seed', default=0, help="random seed for experiments") parser.add_argument('--num_comps', default=0, type=int, help="number of pairwise comparisons") parser.add_argument('--num_demos', default=120, type=int, help="the number of demos to sample pairwise comps from") parser.add_argument('--num_epochs', default=100, type=int, help="number of training epochs") parser.add_argument('--lr', default=0.00005, type=float, help="learning rate") parser.add_argument('--weight_decay', default=0.0, type=float, help="weight decay") parser.add_argument('--l1_reg', default=0.0, type=float, help="l1 regularization") parser.add_argument('--patience', default=100, type=int, help="number of iterations we wait before early stopping") parser.add_argument('--pair_delta', default=10, type=int, help="min difference between trajectory rankings in our dataset") parser.add_argument('--all_pairs', dest='all_pairs', default=False, action='store_true', help="whether we generate all pairs from the dataset (num_demos choose 2)") # NOTE: type=bool doesn't work, value is still true. parser.add_argument('--state_action', dest='state_action', default=False, action='store_true', help="whether data consists of state-action pairs rather that just states") # NOTE: type=bool doesn't work, value is still true. parser.add_argument('--augmented', dest='augmented', default=False, action='store_true', help="whether data consists of states + linear features pairs rather that just states") # NOTE: type=bool doesn't work, value is still true. parser.add_argument('--num_rawfeatures', default=25, type=int, help="the number of raw features to keep in the augmented space") parser.add_argument('--handtuned_preferences', dest='handtuned_preferences', default=False, action='store_true', help="option to use preferences derived from the handtuned reward") # NOTE: type=bool doesn't work, value is still true. args = parser.parse_args() seed = args.seed torch.manual_seed(seed) ## HYPERPARAMS ## num_comps = args.num_comps # the number of pairwise comparisons we draw num_demos = args.num_demos lr = args.lr weight_decay = args.weight_decay l1_reg = args.l1_reg num_iter = args.num_epochs # num times through training data patience = args.patience pair_delta = args.pair_delta all_pairs = args.all_pairs state_action = args.state_action augmented = args.augmented num_rawfeatures = args.num_rawfeatures handtuned_preferences = args.handtuned_preferences ################# if augmented and state_action: demos = np.load("data/augmented_stateactions/demos.npy") demo_rewards = np.load("data/augmented_stateactions/demo_rewards.npy") demo_reward_per_timestep =
np.load("data/augmented_stateactions/demo_reward_per_timestep.npy")
numpy.load
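This record trains a linear reward network from pairwise trajectory preferences and, at the completion point, reads the demonstration arrays from disk with np.load. A small sketch of that save/load round trip, using made-up file names and toy shapes instead of the original data/augmented_stateactions/ files:

import numpy as np

# Hypothetical toy data standing in for the feeding-task demos:
# 5 trajectories, 200 timesteps, 3 handpicked features each.
demos = np.random.rand(5, 200, 3)
demo_rewards = demos.sum(axis=(1, 2))

np.save("demos.npy", demos)
np.save("demo_rewards.npy", demo_rewards)

# The training script then reads the arrays back exactly like the record does.
demos_loaded = np.load("demos.npy")
rewards_loaded = np.load("demo_rewards.npy")
print(demos_loaded.shape, rewards_loaded.shape)  # (5, 200, 3) (5,)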
# AUTOGENERATED! DO NOT EDIT! File to edit: nbs/schedulers.fit_flat_varied.ipynb (unless otherwise specified). __all__ = [] # Cell import warnings import numpy as np from fastcore.basics import even_mults from fastcore.foundation import patch, L from fastai.learner import Learner from fastai.callback.schedule import SchedCos, SchedNo, combine_scheds, ParamScheduler from ..basics import is_listish # Cell @patch def fit_flat_varied(self:Learner, n_epoch, start_lr=None, div_final=1e5, pct_start=0.75, wd=None, next_lr=None, change_by=None, change_time=1, change_sched=None, cbs=None, reset_opt=False): """ Fit `self.model` for `n_epoch` at flat `start_lr`, then change to flat `next_lr` at `change_by`, optionally with cosine annealing or custom `change_sched` over `change_time`. Final cosine annealing at `pct_start`. `n_epoch`, `start_lr`, `div_final`, `pct_start`, `wd`, `cbs`, & `reset_opt` are all same as fit_flat_cos from fast.ai. `next_lr` single or list of learning rates to switch to at change_by. Must be same length as `change_by`. `change_by` single or list of epochs or percent of steps to switch to `next_lr` by. Must be same length as `next_lr`. `change_time` if greater than 0 (percent of steps or epochs), how long to cosine anneal to `next_lr`. Can be single or list of same length as `next_lr`. `change_sched` optional single or list of fast.ai schedules. If `None` defaults to `SchedCos`. Must be same length as `next_lr`. `SchedPoly` must be passed as partial: `partial(SchedPoly, power=0.5)`. """ assert isinstance(next_lr, (float, slice)) or (is_listish(next_lr) and len(next_lr)>=1), '`next_lr` must be float, slice, or list of float or slice' assert isinstance(change_by, (int, float, slice)) or (is_listish(change_by) and len(change_by)>=1), '`change_by` must be int, float, slice, or list of int, float, or slice' if self.opt is None: self.create_opt() self.opt.set_hyper('lr', self.lr if start_lr is None else start_lr) start_lr = np.array([h['lr'] for h in self.opt.hypers]) params_len = len(start_lr) if not is_listish(next_lr): next_lr = [next_lr] if not is_listish(change_by): change_by = [change_by] change_by = [i/n_epoch if i>=1 else i for i in change_by] assert len(change_by)==len(next_lr), '`next_lr` & `change_by` need to be same length' if not is_listish(change_time): change_time = [change_time]*len(change_by) else: assert len(change_by)==len(change_time), '`change_time` list needs to be same length as `next_lr` & `change_by`' change_time = [i/n_epoch if i>=1 else i for i in change_time] if change_sched is not None: if not is_listish(change_sched): change_sched = [change_sched] assert len(change_by)==len(change_sched), '`next_lr` & `change_sched` need to be same length' pcts, scheds, last_lr, last_pct = [], [SchedNo(start_lr, start_lr)], start_lr, 0 for i, cb in enumerate(change_by): if cb < pct_start: nlr = next_lr[i] if isinstance(nlr, slice): if nlr.start: nlr = even_mults(nlr.start, nlr.stop, params_len) else: nlr = [nlr.stop/10]*(params_len-1) + [nlr.stop] nlr=
np.array(nlr)
numpy.array
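In the scheduler record above, a slice learning rate is expanded to one value per parameter group before being wrapped in np.array. A sketch of that expansion using fastcore.basics.even_mults, the same helper the record imports; the group count of 3 is an assumption for illustration:

import numpy as np
from fastcore.basics import even_mults

params_len = 3               # assumed number of parameter groups
next_lr = slice(1e-5, 1e-3)  # discriminative learning rate, fastai-style

if next_lr.start:
    # Geometrically spaced rates from start to stop, one per group.
    nlr = even_mults(next_lr.start, next_lr.stop, params_len)
else:
    # Only a stop value: earlier groups get stop/10, the last gets stop.
    nlr = [next_lr.stop / 10] * (params_len - 1) + [next_lr.stop]

nlr = np.array(nlr)
print(nlr)  # e.g. [1.e-05 1.e-04 1.e-03]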
import numpy as np
import nifty
import vigra


def graph_watershed(graph, edge_weigths, seed_nodes):
    """
    """
    # TODO do we need to cast to 'nifty.graph.undirectedGraph' if we get a rag?
    assert len(edge_weigths) == graph.numberOfEdges
    assert len(seed_nodes) == graph.numberOfNodes
    # run graph watershed
    node_labels = nifty.graph.edgeWeightedWatershedsSegmentation(graph, seed_nodes, edge_weigths)
    return node_labels


def graph_size_filter(graph, edge_weigths, node_sizes, min_size, node_labels=None, relabel=False):
    """
    """
    n_nodes = graph.numberOfNodes
    if node_labels is None:
        seeds = np.zeros(n_nodes, dtype='uint64')
        assert n_nodes == len(node_sizes)
        keep_nodes = node_sizes >= min_size
        seeds[keep_nodes] =
np.arange(0, n_nodes)
numpy.arange
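graph_size_filter above seeds each sufficiently large node with its own label and leaves small nodes unlabelled for the watershed to absorb; the completion row is the np.arange call that produces those labels. The record is cut off right after that call, so the boolean indexing and the +1 offset in this sketch are assumptions made to keep label 0 free for unseeded nodes:

import numpy as np

# Toy region sizes standing in for node_sizes of the region graph.
node_sizes = np.array([120, 8, 45, 3, 77])
min_size = 10
n_nodes = len(node_sizes)

seeds = np.zeros(n_nodes, dtype='uint64')
keep_nodes = node_sizes >= min_size

# Every kept node gets a unique non-zero seed; nodes below min_size stay 0
# and are later flooded from their neighbours by the edge-weighted watershed.
seeds[keep_nodes] = np.arange(1, n_nodes + 1, dtype='uint64')[keep_nodes]
print(seeds)  # [1 0 3 0 5]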
import numpy as np import matplotlib.pyplot as plt from tqdm import tqdm from wireframe.utils import argsort2d DX = [0, 0, 1, -1, 1, 1, -1, -1] DY = [1, -1, 0, 0, 1, -1, 1, -1] def ap(tp, fp, npos): recall = tp / npos precision = tp / np.maximum(tp + fp, 1e-9) recall = np.concatenate(([0.0], recall, [1.0])) precision = np.concatenate(([0.0], precision, [0.0])) for i in range(precision.size - 1, 0, -1): precision[i - 1] = max(precision[i - 1], precision[i]) i = np.where(recall[1:] != recall[:-1])[0] return np.sum((recall[i + 1] - recall[i]) * precision[i + 1]) def eval_depth(pred, pred_depth, gt, gt_depth, max_distance): confidence = pred[:, -1] sorted_ind = np.argsort(-confidence) nd = len(pred) pred = pred[sorted_ind, :-1] pred_depth = pred_depth[sorted_ind] d = np.sqrt(np.sum(pred ** 2, 1)[:, None] + np.sum(gt ** 2, 1)[None, :] - 2 * pred @ gt.T) choice = np.argmin(d, 1) hit = np.zeros(len(gt), np.bool) dist = np.min(d, 1) depth_diff = np.zeros(len(pred)) for i in range(nd): if dist[i] < max_distance and not hit[choice[i]]: hit[choice[i]] = True a = np.maximum(-pred_depth[i], 1e-5) b = -gt_depth[choice[i]] depth_diff[i] = np.log(a) - np.log(b) n = np.maximum(np.sum(hit), 1) rst = np.sum(depth_diff @ depth_diff.T) / n - np.sum(depth_diff) * np.sum(depth_diff) / (n * n) return rst def mAP_jlist(v0, v1, max_distance, im_ids, weight, pred_dirs=None, gt_dirs=None, weight_dirs=None): if len(v0) == 0: return 0 # whether simultaneously evaluate direction prediction eval_dir = False if pred_dirs is not None: assert (gt_dirs is not None) and (weight_dirs is not None) eval_dir = True weight_dir_sum = sum([np.sum(j) for j in weight_dirs]) gt_num = sum([np.sum(len(j)) for j in weight_dirs]) weight_dirs = [_ / weight_dir_sum * gt_num for _ in weight_dirs] v0 = np.array(v0) v1 = np.array(v1) weight_sum = sum([np.sum(j) for j in weight]) gt_num = sum([np.sum(len(j)) for j in weight]) weight = [_ / weight_sum * gt_num for _ in weight] confidence = v0[:, -1] # sort by confidence sorted_ind = np.argsort(-confidence) v0 = v0[sorted_ind, :] im_ids = im_ids[sorted_ind] nd = len(im_ids) tp, fp = np.zeros(nd, dtype=np.float), np.zeros(nd, dtype=np.float) hit = [[False for _ in j] for j in v1] if eval_dir: pred_dirs = pred_dirs[sorted_ind] tp_dir, fp_dir = np.zeros(nd, dtype=np.float), np.zeros(nd, dtype=np.float) hit_dir = [[False for _ in j] for j in v1] # go down dets and mark TPs and FPs for i in range(nd): gt_juns = v1[im_ids[i]] pred_juns = v0[i][:-1] if len(gt_juns) > 0: # compute overlaps dists = np.linalg.norm((pred_juns[None, :] - gt_juns), axis=1) choice = np.argmin(dists) dist = np.min(dists) if dist < max_distance and not hit[im_ids[i]][choice]: tp[i] = weight[im_ids[i]][choice] hit[im_ids[i]][choice] = True # theta is correct only when junction is correct first if eval_dir: gt_dir = gt_dirs[im_ids[i]][choice] pred_dir = pred_dirs[i] d_theta = np.fmod(gt_dir - pred_dir, 2 * np.pi) d_theta = d_theta + 2 * np.pi if d_theta < 0 else d_theta d_theta = np.minimum(np.abs(d_theta), np.abs(2 * np.pi - d_theta)) if d_theta < 2 * np.pi / 48.0 and \ not hit_dir[im_ids[i]][choice]: tp_dir[i] = weight_dirs[im_ids[i]][choice] hit_dir[im_ids[i]][choice] = True else: fp_dir[i] = 1 else: fp[i] = 1 if eval_dir: fp_dir[i] = 1 tp = np.cumsum(tp) fp = np.cumsum(fp) if eval_dir: tp_dir = np.cumsum(tp_dir) fp_dir = np.cumsum(fp_dir) return ap(tp, fp, gt_num), ap(tp_dir, fp_dir, gt_num) else: return ap(tp, fp, gt_num) def nms_junction(heatmap, delta=1): heatmap = heatmap.copy() disable = np.zeros_like(heatmap, 
dtype=np.bool) for x, y in argsort2d(heatmap): for dx, dy in zip(DX, DY): xp, yp = x + dx, y + dy if not (0 <= xp < heatmap.shape[0] and 0 <= yp < heatmap.shape[1]): continue if heatmap[x, y] >= heatmap[xp, yp]: disable[xp, yp] = True heatmap[disable] = 0 return heatmap def ap_jheatmap(pred, truth, distances, im_ids, weight, pred_dir=None, gt_dir=None, weight_dir=None): # note the distance is junction prediction requirement # theta requirement is always fixed for now if pred_dir is not None: assert (gt_dir is not None) and (weight_dir is not None) ap_jt, ap_dirt = [], [] for d in distances: j, d = mAP_jlist(pred, truth, d, im_ids, weight, pred_dir, gt_dir, weight_dir) ap_jt.append(j) ap_dirt.append(d) return sum(ap_jt) / len(ap_jt) * 100, \ sum(ap_dirt) / len(ap_dirt) * 100 else: return sum(mAP_jlist(pred, truth, d, im_ids, weight) for d in distances) / len(distances) * 100 def post_jheatmap(heatmap, offset=None, delta=1, dir_map=None, jdep_map=None): # heatmap = nms_junction(heatmap, delta=delta) # only select the best 1000 junctions for efficiency v0 = argsort2d(-heatmap)[:1000] confidence = -np.sort(-heatmap.ravel())[:1000] keep_id = np.where(confidence >= 1e-2)[0] if len(keep_id) == 0: return np.zeros((0, 3)) v0 = v0[keep_id] confidence = confidence[keep_id] if offset is not None: v0 = np.array([v + offset[:, v[0], v[1]] for v in v0]) v0 = np.hstack((v0, confidence[:, np.newaxis])) if dir_map is not None: assert offset is None # take the theta corresponding to v0 # currently only support T direction so if len(dir_map.shape) == 2: dir = np.array([dir_map[int(v[0]), int(v[1])] for v in v0]) else: raise NotImplementedError return v0, dir if jdep_map is not None: if len(jdep_map.shape) == 2: jdep = np.array([jdep_map[int(v[0]), int(v[1])] for v in v0]) else: raise NotImplementedError return v0, jdep return v0 def get_confusion_mat(pred, gt): index = gt * 2 + pred label_count = np.bincount(index.reshape(-1).astype(np.int32)) confusion_mat =
np.zeros((2, 2))
numpy.zeros
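get_confusion_mat in the record ends right after allocating the 2x2 matrix with np.zeros; how the bincount over gt * 2 + pred is written into that matrix is cut off, so the assignment order below is an assumption that follows from the index encoding:

import numpy as np

# Toy binary ground-truth and prediction maps.
gt = np.array([[1, 0], [1, 1]])
pred = np.array([[1, 0], [0, 1]])

# Encode each (gt, pred) pixel pair as a single index in 0..3 and count them.
index = gt * 2 + pred
label_count = np.bincount(index.reshape(-1).astype(np.int32), minlength=4)

confusion_mat = np.zeros((2, 2))
confusion_mat[0, 0] = label_count[0]  # gt=0, pred=0: true negatives
confusion_mat[0, 1] = label_count[1]  # gt=0, pred=1: false positives
confusion_mat[1, 0] = label_count[2]  # gt=1, pred=0: false negatives
confusion_mat[1, 1] = label_count[3]  # gt=1, pred=1: true positives
print(confusion_mat)  # [[1. 0.] [1. 2.]]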
# # .uni files IO # import gzip, struct import numpy as np from collections import namedtuple def _read_particle_data(bytestream, head, data_type=None): # data_type = {None: BasicParticleSystem; "float32": Real; "int32": Int} assert(head['bytesPerElement']==16 or head['bytesPerElement']==12 or head['bytesPerElement']==4) if(head['elementType']==0): # BasicParticleSystem print('(BasicParticleSystem) ', end='') data = np.frombuffer(bytestream.read(), dtype=np.dtype([('f1',(np.float32,3)),('f2',(np.int32,1))]))['f1'] else: # head['elementType']==1: ParticleDataImpl<T>, where T = {float32: Real(4) or Vec3(12); int32: Int(4)} print('(ParticleDataImpl<T={}{}>) '.format(data_type, 'x3' if (head['bytesPerElement']==12) else ''), end='') data = np.reshape(np.frombuffer(bytestream.read(), dtype=data_type), (-1, 3 if (head['bytesPerElement']==12) else 1)) return data def _read_grid_data(bytestream, head, data_type=None): assert(head['bytesPerElement']==12 or head['bytesPerElement']==4) print('(Grid<T={}{}>) '.format(data_type, 'x3' if (head['bytesPerElement']==12) else ''), end='') data = np.frombuffer(bytestream.read(), dtype=data_type) if head['bytesPerElement']==12: return data.reshape((head['dimX'], head['dimY'], head['dimZ'], 3)) else: return data.reshape((head['dimX'], head['dimY'], head['dimZ'])) def _read_particle_head(bytestream): ID = bytestream.read(4) # NOTE: useless # unpack header struct object head = namedtuple('UniPartHeader', 'dim, dimX, dimY, dimZ, elementType, bytesPerElement, info, timestamp') # convert to namedtuple and then directly to a dict head = head._asdict(head._make(struct.unpack('iiiiii256sQ', bytestream.read(288)))) return head def _read_grid_head(bytestream): ID = bytestream.read(4) # unpack header struct object head = namedtuple('UniHeader', 'dimX, dimY, dimZ, gridType, elementType, bytesPerElement, info, dimT, timestamp') # convert to namedtuple and then directly to a dict head = head._asdict(head._make(struct.unpack('iiiiii252siQ', bytestream.read(288)))) return head # use this to read the .uni file. It will return the header as dictionary and the content as a numpy array def read_particles(filename, data_type=None): print('Reading {} ... '.format(filename), end='') with gzip.open(filename, 'rb') as bytestream: head = _read_particle_head(bytestream) data = _read_particle_data(bytestream, head, data_type) print('Done.') return head, data def read_grid(filename, data_type=None): print('Reading {} ... '.format(filename), end='') with gzip.open(filename, 'rb') as bytestream: head = _read_grid_head(bytestream) data = _read_grid_data(bytestream, head, data_type) print('Done.') return head, data def drop_zdim(data): return
np.delete(data, -1, 1)
numpy.delete
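drop_zdim in the .uni reader simply removes the last column of a particle array with np.delete, turning (x, y, z) rows from a flat 3-D simulation into (x, y) rows. A minimal sketch with hypothetical particle positions:

import numpy as np

# Hypothetical particle positions with a constant z column, as produced
# by a 2-D mantaflow simulation stored in the 3-D .uni format.
particles = np.array([[0.1, 0.2, 0.5],
                      [0.3, 0.4, 0.5]])

# Dropping the last column along axis 1 leaves only the (x, y) pairs.
xy = np.delete(particles, -1, 1)
print(xy.shape)  # (2, 2)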
import matplotlib matplotlib.use('Agg') import numpy as np import os import matplotlib.pyplot as plt import matplotlib.dates as mdates import scipy.io import sunpy.time import pickle import seaborn as sns import shutil import datetime from astropy.time import Time import pdb from sympy.solvers import solve from sympy import Symbol import multiprocessing from matplotlib.collections import LineCollection from matplotlib.colors import ListedColormap, BoundaryNorm import glob import h5py import astropy.units as u # ###################################################### functions def getShockNormalAngle(pos, longell, rell, timeind, frameTime, ArrTime, plotLines): # print('TI: ', mdates.num2date(frameTime)) # print('AT: ', mdates.num2date(ArrTime)) # print('Tdiff [min]: ', TimeDiff) LonEarth = pos.earth[1, timeind]# + 0.55 # print('LonEll: ', longell) # print('lonEarth: ', LonEarth) minDiffLonEll = min(abs(longell-LonEarth)) indMinLon = np.where(abs(longell-LonEarth) == minDiffLonEll)[0] EarthHit = False if indMinLon < np.size(longell)-1 and indMinLon > 0: EarthHit = True TimeDiff = 100 # if ArrTime != b' -1': if ArrTime != float('Nan'): TimeDiff = abs(frameTime - ArrTime)*60*24 if EarthHit and TimeDiff < 30: REarth = pos.earth[0, timeind] # plt.plot([0, LonEarth], [0, REarth], color='pink', lw=0.8, alpha=1) #if plotLines: # plt.scatter(longell[indMinLon-1], rell[indMinLon-1], s=2) # plt.scatter(longell[indMinLon+1], rell[indMinLon+1], s=2) x = rell[indMinLon]*np.cos(longell[indMinLon]) y = rell[indMinLon]*np.sin(longell[indMinLon]) x = REarth*np.cos(LonEarth) y = REarth*np.sin(LonEarth) x1 = rell[indMinLon-1]*np.cos(longell[indMinLon-1]) x2 = rell[indMinLon+1]*np.cos(longell[indMinLon+1]) y1 = rell[indMinLon-1]*np.sin(longell[indMinLon-1]) y2 = rell[indMinLon+1]*np.sin(longell[indMinLon+1]) k = (y1-y2)/(x1-x2) d = y1-k*x1 #normale: steigung = -1/k fact = 1 #if x[ind] < 0: # fact = -1 kNew = -1/k dNew = y-kNew*x dCent = 0 kCent = y/x alpha = np.arctan(kCent) # print('kCent [°]: ', np.rad2deg(alpha)) # alpha = arctan(abs((m1-m2)/(1+m1*m2))) angleDiff = np.arctan((kNew-kCent)/(1+kNew*kCent)) angleDiffDeg = np.rad2deg(angleDiff) alpha = np.arctan(kNew) # print('kNew [°]: ', np.rad2deg(alpha)) dist = 0.2 #print('x: ', x) #print('y: ', y) #print('rell: ', rell[indMinLon]) #print('longell: ', longell[indMinLon]) tmpXN = dist*np.cos(alpha) + x tmpYN = dist*np.sin(alpha) + y rellNew = np.sqrt(tmpXN ** 2 + tmpYN ** 2) longellNew = np.arctan2(tmpYN, tmpXN) r1 = np.sqrt(x1 ** 2 + y1 ** 2) l1 = np.arctan2(y1, x1) r2 = np.sqrt(x2 ** 2 + y2 ** 2) l2 = np.arctan2(y2, x2) # if plotLines: # plt.plot([LonEarth, longellNew], [REarth, rellNew], color='black', lw=0.3, alpha=1) # print('angle Diff [°]= ', angleDiffDeg) return angleDiffDeg[0] def plot_bgsw_speed(time, speed, angle, label, vmin, vmax, plotPath): #arr = np.array(np.size(time_b), max(speed_b) - min(speed_b)) ysize = np.int(max(speed) - min(speed)) xsize = np.size(time) arr = np.zeros(shape=(xsize, ysize)) for i in np.arange(0, xsize): arr[i,:] = speed[i] elons = np.zeros(xsize) for i in np.arange(0, np.size(elons)): elons[i] = i +1 fig = plt.figure(figsize=(16, 5)) ax1 = fig.add_subplot(111) ax1.grid(b = None, axis='both') #cf = ax1.imshow(arr.T, cmap=plt.cm.get_cmap('rainbow'), vmin=vmin, vmax=vmax, aspect = (xsize / ysize), origin='lower') cf = ax1.imshow(arr.T, cmap=plt.cm.get_cmap('coolwarm'), vmin=vmin, vmax=vmax, aspect = (xsize / ysize), origin='lower') #ax = plt.axes() plt.yticks([]) plt.xticks(np.arange(xsize), time, rotation = 45) 
ax1.xaxis.set_major_locator(plt.MaxNLocator(np.int(xsize/8))) ax2 = ax1.twinx() # instantiate a second axes that shares the same x-axis ax2.grid(b = None, axis='both') ax2.set_ylabel('Elongation [°]') # we already handled the x-label with ax1 ax2.plot(time, np.rad2deg(angle), 'black') ax2.yaxis.set_ticks_position('left') ax2.yaxis.set_label_position('left') ax2.xaxis.set_major_locator(plt.MaxNLocator(np.int(xsize/8))) ax2.legend([label], handlelength=0, handletextpad=0, loc='upper left') cax = plt.axes([-0.01, 0.125, 0.02, 0.75]) cbar = plt.colorbar(cf, cax=cax, ticks=np.arange(vmin, vmax, 50)) cbar.set_label('Solar wind speed [km/s]') plt.savefig(plotPath + 'BGSW_' + label + '.png', dpi=300, bbox_inches='tight') # clears plot window plt.clf() def plot_BGSW_tangent(path): ###################################################### ###################################################### # FOR a nicer plot see 'PlotAmbientSolarWinds.ipynb' # ###################################################### ###################################################### #path = 'HI_animate/events/test/20100203_AB/' [tpWind_a, tpWind_b, et_time_a, et_time_b, angle_a, angle_b, tp_a, tp_b] = pickle.load( open(path + 'tpWind_AB.p', "rb")) #[tpWind_a, et_time_a] = pickle.load( # open('HI_animate/events/test/20100203_A/tpWind_A.p', "rb")) fig = plt.figure(figsize=(16, 8)) time_a = [] speed_a = [] for i in np.arange(0, np.int(np.size(tpWind_a)/2)): #print((tpWind_a[i][0])[0:19]) time_a.append((tpWind_a[i][0])[0:19]) speed_a.append(tpWind_a[i][1]) time_b = [] speed_b = [] for i in np.arange(0, np.int(np.size(tpWind_b)/2)): time_b.append((tpWind_b[i][0])[0:19]) speed_b.append(tpWind_b[i][1]) #x = time_a x = mdates.date2num(Time.strptime(time_a, '%Y-%m-%d %H:%M:%S').datetime) x = x - x.min() y = np.arange(0, len(x), 1) y = np.array(np.rad2deg(angle_a)) speeds = np.array(speed_a) ymin = 0 ymax = np.round(np.nanmax([np.rad2deg(angle_a), np.rad2deg(angle_b)]),-1)+10 # Create a set of line segments so that we can color them individually # This creates the points as a N x 1 x 2 array so that we can stack points # together easily to get the segments. The segments array for line collection # needs to be (numlines) x (points per line) x 2 (for x and y) points = np.array([x, y]).T.reshape(-1, 1, 2) segments = np.concatenate([points[:-1], points[1:]], axis=1) plt.rcParams.update({'font.size': 21}) fig, axs = plt.subplots(2, 1, sharex=True, sharey=True, figsize=[16,10]) # Create a continuous norm to map from data points to colors norm = plt.Normalize(vmin, vmax) lc = LineCollection(segments, cmap='coolwarm', norm=norm) # Set the values used for colormapping lc.set_array(speeds) lc.set_linewidth(7) line = axs[0].add_collection(lc) #fig.colorbar(line, ax=axs[0]) axs[0].set_xlim(x.min(), x.max()) axs[0].set_ylim(ymin, ymax) axs[0].set_ylabel('Elongation [°]') #x = time_a x = mdates.date2num(Time.strptime(time_b, '%Y-%m-%d %H:%M:%S').datetime) x = x - x.min() y = np.array(np.rad2deg(angle_b)) speeds = np.array(speed_b) # Create a set of line segments so that we can color them individually # This creates the points as a N x 1 x 2 array so that we can stack points # together easily to get the segments. 
The segments array for line collection # needs to be (numlines) x (points per line) x 2 (for x and y) points = np.array([x, y]).T.reshape(-1, 1, 2) segments = np.concatenate([points[:-1], points[1:]], axis=1) norm = plt.Normalize(vmin, vmax) lc = LineCollection(segments, cmap='coolwarm', norm=norm) # Set the values used for colormapping lc.set_array(speeds) lc.set_linewidth(7) line = axs[1].add_collection(lc) axs[1].set_xlim(x.min(), x.max()) axs[1].set_ylim(ymin, ymax) plt.yticks(np.arange(ymin, ymax, 20.0)) #plt.xticks(np.arange(x.min(), x.max(), 0.083)) plt.xticks(x[0::12], time_a[0::12]) axs[1].set_ylabel('Elongation [°]') plt.setp(axs[1].xaxis.get_majorticklabels(), rotation=25) #fig.text(0.02, 0.5, 'Elongation [°]', ha='center', va='center', rotation='vertical') cax = plt.axes([0.92, 0.125, 0.015, 0.755]) cbar = plt.colorbar(line, cax=cax, ticks=np.arange(vmin, vmax, 40)) cbar.set_label('Solar wind speed [km/s]') axs[0].text(0.2, ymax-5, 'a)', fontsize=28, ha='center', va='top', wrap=True) axs[1].text(0.2, ymax-5, 'b)', fontsize=28, ha='center', va='top', wrap=True) fig.savefig(path + '/BGSW_elon.png', bbox_inches="tight") fig.clf() plt.close('all') print('done') # ###################################################### functions # for reading catalogues def getcat(filename): print('reading CAT ' + filename) cat = scipy.io.readsav(filename) # , verbose='false') print('done reading CAT') return cat def decode_array(bytearrin): # for decoding the strings from the IDL .sav file to a list of python # strings, not bytes make list of python lists with arbitrary length bytearrout = ['' for x in range(len(bytearrin))] for i in range(0, len(bytearrin) - 1): bytearrout[i] = bytearrin[i].decode() # has to be np array so to be used with numpy "where" bytearrout = np.array(bytearrout) return bytearrout def time_to_num_cat(time_in): # for time conversion from catalogue .sav to numerical time # this for 1-minute data or lower time resolution # for all catalogues # time_in is the time in format: 2007-11-17T07:20:00 or 2007-11-17T07:20Z # for times help see: # http://docs.sunpy.org/en/latest/guide/time.html # http://matplotlib.org/examples/pylab_examples/date_demo2.html j = 0 # time_str=np.empty(np.size(time_in),dtype='S19') time_str = ['' for x in range(len(time_in))] # =np.chararray(np.size(time_in),itemsize=19) time_num = np.zeros(np.size(time_in)) for i in time_in: # convert from bytes (output of scipy.readsav) to string time_str[j] = time_in[j][0:16].decode() + ':00' year = int(time_str[j][0:4]) time_str[j] # convert time to sunpy friendly time and to matplotlibdatetime # only for valid times so 9999 in year is not converted # pdb.set_trace() if year < 2100: time_num[j] = mdates.date2num(Time.strptime(time_str[j], '%Y-%m-%dT%H:%M:%S').datetime) j = j + 1 # the date format in matplotlib is e.g. 735202.67569444 # this is time in days since 0001-01-01 UTC, plus 1. # return time_num which is already an array and convert the list of strings # to an array return time_num, np.array(time_str) def roundTime(dt=None, roundTo=60): # Round a datetime object to any time lapse in seconds # dt : datetime.datetime object, default now. # roundTo : Closest number of seconds to round to, default 1 minute. # Author: <NAME> 2012 - Use it as you want but don't blame me. 
if dt is None: dt = datetime.datetime.now() seconds = (dt.replace(tzinfo=None) - dt.min).seconds rounding = (seconds + roundTo / 2) // roundTo * roundTo return dt + datetime.timedelta(0, rounding - seconds, -dt.microsecond) def getTangentPoint(a, b, xc, yc, px, py, elon, sc, plot): tilt = 90 pxOri = px pyOri = py px = px - xc py = py - yc ti = np.deg2rad(elon) pxRot = px*np.cos(ti) - py*np.sin(ti) pyRot = px*np.sin(ti) + py*np.cos(ti) px = pxRot py = pyRot ellipseResolution = 211 circ_ang = ((np.arange(ellipseResolution) * 2 - (ellipseResolution-1)) * np.pi / 180) xe = b * np.cos(circ_ang) # Parameterized equation of ellipse ye = a * np.sin(circ_ang) cosang = np.cos(tilt * np.pi / 180) sinang = np.sin(tilt * np.pi / 180) xell = xe * cosang - ye * sinang # Rotate to desired # position angle yell = xe * sinang + ye * cosang if py != 0: xSolve = Symbol('xSolve') xSol = solve(b**2*xSolve**2 + a**2*((a**2*b**2-b**2*xSolve*px)/(a**2*py))**2-a**2*b**2, xSolve) #print(xSol) xs = [] for xst in xSol: xs.append(float(xst)) #print(xs) xs =[np.max(xs)] ys = [] ytmp = Symbol('ytmp') for xtmp in xs: tmp = solve((b**2*xtmp**2 + a**2*ytmp**2 - a**2*b**2)) ys.append(tmp) if sc == 'A': if np.max(xell) < px: ys =
np.min(ys)
numpy.min
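getTangentPoint in this record solves the ellipse equation symbolically for the candidate y values at a given x and, for spacecraft A, keeps one root with np.min. A reduced sketch of just that root-selection step, with toy semi-axes and a hand-picked x instead of the fitted CME front geometry:

import numpy as np
from sympy import Symbol, solve

a, b = 2.0, 1.0   # assumed ellipse semi-axes, for illustration only
xtmp = 1.2        # x coordinate of a candidate tangent point

# Solve b^2*x^2 + a^2*y^2 = a^2*b^2 for y at that x, as the record does.
ytmp = Symbol('ytmp')
ys = [float(s) for s in solve(b**2 * xtmp**2 + a**2 * ytmp**2 - a**2 * b**2, ytmp)]

# For spacecraft 'A' the record keeps the smaller root via np.min.
y_tangent = np.min(ys)
print(ys, y_tangent)  # [-0.8, 0.8] -0.8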
""" Mask R-CNN Common utility functions and classes. Copyright (c) 2017 Matterport, Inc. Licensed under the MIT License (see LICENSE_MATTERPORT for details) Written by <NAME> Copyright (c) 2021 Skinet Team Licensed under the MIT License (see LICENSE for details) Updated/Modified by <NAME> """ import json import logging import os import random import shutil import urllib.request import warnings import zipfile from distutils.version import LooseVersion import cv2 import numpy as np import scipy import skimage.color import skimage.io import skimage.transform from mrcnn.Config import Config from mrcnn.visualize import create_multiclass_mask from datasetTools import datasetDivider as dD # URL from which to download the latest trained weights WEIGHTS_URL = [] ############################################################ # Masks ############################################################ def reduce_memory(results, config: Config, allow_sparse=True): """ Minimize all masks in the results dict from inference :param results: dict containing results of the inference :param config: the config object :param allow_sparse: if False, will only keep biggest region of a mask :return: """ _masks = results['masks'] _bbox = results['rois'] if not allow_sparse: emptyMasks = [] for idx in range(results['masks'].shape[-1]): mask = unsparse_mask(results['masks'][:, :, idx]) if mask is None: emptyMasks.append(idx) else: results['masks'][:, :, idx] = mask if len(emptyMasks) > 0: results['scores'] = np.delete(results['scores'], emptyMasks) results['class_ids'] = np.delete(results['class_ids'], emptyMasks) results['masks'] = np.delete(results['masks'], emptyMasks, axis=2) results['rois'] = np.delete(results['rois'], emptyMasks, axis=0) results['rois'] = extract_bboxes(results['masks']) results['masks'] = minimize_mask(results['rois'], results['masks'], config.get_mini_mask_shape()) return results def get_mask_area(mask, verbose=0): """ Computes mask area :param mask: the array representing the mask :param verbose: 0 : nothing, 1+ : errors/problems :return: the area of the mask and verbose output (None when nothing to print) """ maskHistogram = dD.getBWCount(mask) display = None if verbose > 0: nbPx = mask.shape[0] * mask.shape[1] tempSum = maskHistogram[0] + maskHistogram[1] if tempSum != nbPx: display = "Histogram pixels {} != total pixels {}".format(tempSum, nbPx) return maskHistogram[1], display def unsparse_mask(base_mask): """ Return mask with only its biggest part :param base_mask: the mask image as np.bool or np.uint8 :return: the main part of the mask as a same shape image and type """ # http://www.learningaboutelectronics.com/Articles/How-to-find-the-largest-or-smallest-object-in-an-image-Python-OpenCV.php # https://stackoverflow.com/a/19222620/9962046 # Convert to np.uint8 if not before processing convert = False if type(base_mask[0, 0]) is np.bool_: convert = True base_mask = base_mask.astype(np.uint8) * 255 # Padding the mask so that parts on edges will get correct area base_mask = np.pad(base_mask, 1, mode='constant', constant_values=0) res = np.zeros_like(base_mask, dtype=np.uint8) # Detecting contours and keeping only one with biggest area contours, _ = cv2.findContours(base_mask.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE) if len(contours) > 0: if len(contours) > 1: # If only one region, reconstructing mask is useless biggest_part = sorted(contours, key=cv2.contourArea, reverse=True)[0] # Drawing the biggest part on the result mask cv2.fillPoly(res, pts=[biggest_part], color=255) else: res = 
base_mask # Removing padding of the mask res = res[1:-1, 1:-1] return res.astype(np.bool) if convert else res else: return None ############################################################ # Bounding Boxes ############################################################ def in_roi(roi_to_test, roi, epsilon=0): """ Tests if the RoI to test is included in the given RoI :param roi_to_test: the RoI/bbox to test :param roi: the RoI that should include the one to test :param epsilon: margin of the RoI to allow boxes that are not exactly inside :return: True if roi_to_test is included in roi """ res = True i = 0 while i < 4 and res: res = res and (roi[i % 2] - epsilon <= roi_to_test[i] <= roi[i % 2 + 2] + epsilon) i += 1 return res def get_bbox_area(roi): """ Returns the bbox area :param roi: the bbox to use :return: area of the given bbox """ return (roi[3] - roi[1]) * (roi[2] - roi[0]) def get_bboxes_intersection(roiA, roiB): """ Computes the intersection area of two bboxes :param roiA: the first bbox :param roiB: the second bbox :return: the area of the intersection """ xInter = min(roiA[3], roiB[3]) - max(roiA[1], roiB[1]) yInter = min(roiA[2], roiB[2]) - max(roiA[0], roiB[0]) return max(xInter, 0) * max(yInter, 0) def global_bbox(roiA, roiB): """ Returns the bbox enclosing two given bboxes :param roiA: the first bbox :param roiB: the second bbox :return: the enclosing bbox """ return np.array([min(roiA[0], roiB[0]), min(roiA[1], roiB[1]), max(roiA[2], roiB[2]), max(roiA[3], roiB[3])]) def shift_bbox(roi, customShift=None): """ Shifts bbox coordinates so that min x and min y equal 0 :param roi: the roi/bbox to transform :param customShift: custom x and y shift as (yShift, xShift) :return: the shifted bbox """ yMin, xMin, yMax, xMax = roi if customShift is None: return np.array([0, 0, yMax - yMin, xMax - xMin]) else: return np.array([max(yMin - customShift[0], 0), max(xMin - customShift[1], 0), max(yMax - customShift[0], 0), max(xMax - customShift[1], 0)]) def expand_masks(mini_mask1, roi1, mini_mask2, roi2): """ Expands two masks while keeping their relative position :param mini_mask1: the first mini mask :param roi1: the first mask bbox/roi :param mini_mask2: the second mini mask :param roi2: the second mask bbox/roi :return: mask1, mask2 """ roi1And2 = global_bbox(roi1, roi2) shifted_roi1And2 = shift_bbox(roi1And2) shifted_roi1 = shift_bbox(roi1, customShift=roi1And2[:2]) shifted_roi2 = shift_bbox(roi2, customShift=roi1And2[:2]) mask1 = expand_mask(shifted_roi1, mini_mask1, shifted_roi1And2[2:]) mask2 = expand_mask(shifted_roi2, mini_mask2, shifted_roi1And2[2:]) return mask1, mask2 def extract_bboxes(mask): """Compute bounding boxes from masks. mask: [height, width, num_instances]. Mask pixels are either 1 or 0. Returns: bbox array [num_instances, (y1, x1, y2, x2)]. """ soleMask = False if len(mask.shape) != 3: _mask = np.expand_dims(mask, 2) soleMask = True else: _mask = mask boxes = np.zeros([_mask.shape[-1], 4], dtype=np.int32) for i in range(_mask.shape[-1]): m = _mask[:, :, i] # Bounding box. horizontal_indicies = np.where(np.any(m, axis=0))[0] vertical_indicies = np.where(np.any(m, axis=1))[0] if horizontal_indicies.shape[0]: x1, x2 = horizontal_indicies[[0, -1]] y1, y2 = vertical_indicies[[0, -1]] # x2 and y2 should not be part of the box. Increment by 1. x2 += 1 y2 += 1 else: # No mask for this instance. Might happen due to # resizing or cropping. 
Set bbox to zeros x1, x2, y1, y2 = 0, 0, 0, 0 boxes[i] = np.array([y1, x1, y2, x2]).astype(np.int32) return boxes[0] if soleMask else boxes def compute_iou(box, boxes, box_area, boxes_area): """Calculates IoU of the given box with the array of the given boxes. box: 1D vector [y1, x1, y2, x2] boxes: [boxes_count, (y1, x1, y2, x2)] box_area: float. the area of 'box' boxes_area: array of length boxes_count. Note: the areas are passed in rather than calculated here for efficiency. Calculate once in the caller to avoid duplicate work. """ # Calculate intersection areas y1 = np.maximum(box[0], boxes[:, 0]) y2 = np.minimum(box[2], boxes[:, 2]) x1 = np.maximum(box[1], boxes[:, 1]) x2 = np.minimum(box[3], boxes[:, 3]) intersection = np.maximum(x2 - x1, 0) * np.maximum(y2 - y1, 0) union = box_area + boxes_area[:] - intersection[:] iou = intersection / union return iou def compute_overlaps(boxes1, boxes2): """Computes IoU overlaps between two sets of boxes. boxes1, boxes2: [N, (y1, x1, y2, x2)]. For better performance, pass the largest set first and the smaller second. """ # TODO Possible improvements: using another structure to save overlaps as a lot of bboxes overlaps with only a few ? # Areas of anchors and GT boxes area1 = (boxes1[:, 2] - boxes1[:, 0]) * (boxes1[:, 3] - boxes1[:, 1]) area2 = (boxes2[:, 2] - boxes2[:, 0]) * (boxes2[:, 3] - boxes2[:, 1]) # Compute overlaps to generate matrix [boxes1 count, boxes2 count] # Each cell contains the IoU value. overlaps = np.zeros((boxes1.shape[0], boxes2.shape[0])) for i in range(overlaps.shape[1]): box2 = boxes2[i] overlaps[:, i] = compute_iou(box2, boxes1, area2[i], area1) return overlaps def compute_overlaps_masks(masks1, boxes1, masks2, boxes2): """Computes IoU overlaps between two sets of masks. masks1, masks2: [Height, Width, instances] """ res = np.zeros((masks1.shape[-1], masks2.shape[-1])) # If either set of masks is empty return empty result if masks1.shape[-1] == 0 or masks2.shape[-1] == 0: return res matching_boxes = compute_overlaps(boxes1, boxes2) idx, idy = np.nonzero(matching_boxes) matching_boxes = set(zip(idx, idy)) for idMask1, idMask2 in matching_boxes: mask1, mask2 = expand_masks(masks1[:, :, idMask1], boxes1[idMask1], masks2[:, :, idMask2], boxes2[idMask2]) mask1Area, _ = get_mask_area(mask1) mask2Area, _ = get_mask_area(mask2) if mask1Area != 0 and mask2Area != 0: mask1AND2 = np.logical_and(mask1, mask2) intersection, _ = get_mask_area(mask1AND2) union = mask1Area + mask2Area - intersection res[idMask1, idMask2] = intersection / union return res def non_max_suppression(boxes, scores, threshold): """ Performs non-maximum suppression :param boxes: [N, (y1, x1, y2, x2)]. Notice that (y2, x2) lays outside the box. :param scores: 1-D array of box scores. :param threshold: Float. IoU threshold to use for filtering. :return: indices of kept boxes """ assert boxes.shape[0] > 0 if boxes.dtype.kind != "f": boxes = boxes.astype(np.float32) # Compute box areas y1 = boxes[:, 0] x1 = boxes[:, 1] y2 = boxes[:, 2] x2 = boxes[:, 3] area = (y2 - y1) * (x2 - x1) # Get indices of boxes sorted by scores (highest first) ixs = scores.argsort()[::-1] pick = [] while len(ixs) > 0: # Pick top box and add its index to the list i = ixs[0] pick.append(i) # Compute IoU of the picked box with the rest iou = compute_iou(boxes[i], boxes[ixs[1:]], area[i], area[ixs[1:]]) # Identify boxes with IoU over the threshold. This # returns indices into ixs[1:], so add 1 to get # indices into ixs. 
remove_ixs = np.where(iou > threshold)[0] + 1 # Remove indices of the picked and overlapped boxes. ixs = np.delete(ixs, remove_ixs) ixs = np.delete(ixs, 0) return np.array(pick, dtype=np.int32) ############################################################ # Dataset ############################################################ class Dataset(object): """The base class for dataset classes. To use it, create a new class that adds functions specific to the dataset you want to use. For example: class CatsAndDogsDataset(Dataset): def load_cats_and_dogs(self): ... def load_mask(self, image_id): ... def image_reference(self, image_id): ... See COCODataset and ShapesDataset as examples. """ def __init__(self, class_map=None): self._image_ids = [] self.image_info = [] # Background is always the first class self.class_info = [{"source": "", "id": 0, "name": "BG"}] self.source_class_ids = {} def add_class(self, source, class_id, class_name): assert "." not in source, "Source name cannot contain a dot" # Does the class exist already? for info in self.class_info: if info['source'] == source and info["id"] == class_id: # source.class_id combination already available, skip return # Add the class self.class_info.append({ "source": source, "id": class_id, "name": class_name, }) def add_image(self, source, image_id, path, **kwargs): image_info = { "id": image_id, "source": source, "path": path, } image_info.update(kwargs) self.image_info.append(image_info) def image_reference(self, image_id): """Return a link to the image in its source Website or details about the image that help looking it up or debugging it. Override for your dataset, but pass to this function if you encounter images not in your dataset. """ return "" def prepare(self, class_map=None): """Prepares the Dataset class for use. TODO: class map is not supported yet. When done, it should handle mapping classes from different datasets to the same class ID. """ def clean_name(name): """Returns a shorter version of object names for cleaner display.""" return ",".join(name.split(",")[:1]) # Build (or rebuild) everything else from the info dicts. self.num_classes = len(self.class_info) self.class_ids = np.arange(self.num_classes) self.class_names = [clean_name(c["name"]) for c in self.class_info] self.num_images = len(self.image_info) self._image_ids = np.arange(self.num_images) # Mapping from source class and image IDs to internal IDs self.class_from_source_map = {"{}.{}".format(info['source'], info['id']): id for info, id in zip(self.class_info, self.class_ids)} self.image_from_source_map = {"{}.{}".format(info['source'], info['id']): id for info, id in zip(self.image_info, self.image_ids)} # Map sources to class_ids they support self.sources = list(set([i['source'] for i in self.class_info])) self.source_class_ids = {} # Loop over datasets for source in self.sources: self.source_class_ids[source] = [] # Find classes that belong to this dataset for i, info in enumerate(self.class_info): # Include BG class in all datasets if i == 0 or source == info['source']: self.source_class_ids[source].append(i) def map_source_class_id(self, source_class_id): """Takes a source class ID and returns the int class ID assigned to it. 
For example: dataset.map_source_class_id("coco.12") -> 23 """ return self.class_from_source_map[source_class_id] def get_source_class_id(self, class_id, source): """Map an internal class ID to the corresponding class ID in the source dataset.""" info = self.class_info[class_id] assert info['source'] == source return info['id'] @property def image_ids(self): return self._image_ids def source_image_link(self, image_id): """Returns the path or URL to the image. Override this to return a URL to the image if it's available online for easy debugging. """ return self.image_info[image_id]["path"] def load_image(self, image_id): """Load the specified image and return a [H,W,3] Numpy array. """ # Load image image = skimage.io.imread(self.image_info[image_id]['path']) # If grayscale. Convert to RGB for consistency. if image.ndim != 3: image = skimage.color.gray2rgb(image) # If has an alpha channel, remove it for consistency if image.shape[-1] == 4: image = image[..., :3] return image def load_mask(self, image_id): """Load instance masks for the given image. Different datasets use different ways to store masks. Override this method to load instance masks and return them in the form of am array of binary masks of shape [height, width, instances]. Returns: masks: A bool array of shape [height, width, instance count] with a binary mask per instance. class_ids: a 1D array of class IDs of the instance masks. """ # Override this function to load a mask from your dataset. # Otherwise, it returns an empty mask. logging.warning("You are using the default load_mask(), maybe you need to define your own one.") mask = np.empty([0, 0, 0]) class_ids = np.empty([0], np.int32) return mask, class_ids def resize_image(image, min_dim=None, max_dim=None, min_scale=None, mode="square"): """Resizes an image keeping the aspect ratio unchanged. min_dim: if provided, resizes the image such that it's smaller dimension == min_dim max_dim: if provided, ensures that the image longest side doesn't exceed this value. min_scale: if provided, ensure that the image is scaled up by at least this percent even if min_dim doesn't require it. mode: Resizing mode. none: No resizing. Return the image unchanged. square: Resize and pad with zeros to get a square image of size [max_dim, max_dim]. pad64: Pads width and height with zeros to make them multiples of 64. If min_dim or min_scale are provided, it scales the image up before padding. max_dim is ignored in this mode. The multiple of 64 is needed to ensure smooth scaling of feature maps up and down the 6 levels of the FPN pyramid (2**6=64). crop: Picks random crops from the image. First, scales the image based on min_dim and min_scale, then picks a random crop of size min_dim x min_dim. Can be used in training only. max_dim is not used in this mode. Returns: image: the resized image window: (y1, x1, y2, x2). If max_dim is provided, padding might be inserted in the returned image. If so, this window is the coordinates of the image part of the full image (excluding the padding). The x2, y2 pixels are not included. scale: The scale factor used to resize the image padding: Padding added to the image [(top, bottom), (left, right), (0, 0)] """ # Keep track of image dtype and return results in the same dtype image_dtype = image.dtype # Default window (y1, x1, y2, x2) and default scale == 1. h, w = image.shape[:2] window = (0, 0, h, w) scale = 1 padding = [(0, 0), (0, 0), (0, 0)] crop = None if mode == "none": return image, window, scale, padding, crop # Scale? 
if min_dim: # Scale up but not down scale = max(1, min_dim / min(h, w)) if min_scale and scale < min_scale: scale = min_scale # Does it exceed max dim? if max_dim and mode == "square": image_max = max(h, w) if round(image_max * scale) > max_dim: scale = max_dim / image_max # Resize image using bilinear interpolation if scale != 1: image = resize(image, (round(h * scale), round(w * scale)), preserve_range=True) # Need padding or cropping? if mode == "square": # Get new height and width h, w = image.shape[:2] top_pad = (max_dim - h) // 2 bottom_pad = max_dim - h - top_pad left_pad = (max_dim - w) // 2 right_pad = max_dim - w - left_pad padding = [(top_pad, bottom_pad), (left_pad, right_pad), (0, 0)] image = np.pad(image, padding, mode='constant', constant_values=0) window = (top_pad, left_pad, h + top_pad, w + left_pad) elif mode == "pad64": h, w = image.shape[:2] # Both sides must be divisible by 64 assert min_dim % 64 == 0, "Minimum dimension must be a multiple of 64" # Height if h % 64 > 0: max_h = h - (h % 64) + 64 top_pad = (max_h - h) // 2 bottom_pad = max_h - h - top_pad else: top_pad = bottom_pad = 0 # Width if w % 64 > 0: max_w = w - (w % 64) + 64 left_pad = (max_w - w) // 2 right_pad = max_w - w - left_pad else: left_pad = right_pad = 0 padding = [(top_pad, bottom_pad), (left_pad, right_pad), (0, 0)] image = np.pad(image, padding, mode='constant', constant_values=0) window = (top_pad, left_pad, h + top_pad, w + left_pad) elif mode == "crop": # Pick a random crop h, w = image.shape[:2] y = random.randint(0, (h - min_dim)) x = random.randint(0, (w - min_dim)) crop = (y, x, min_dim, min_dim) image = image[y:y + min_dim, x:x + min_dim] window = (0, 0, min_dim, min_dim) else: raise Exception("Mode {} not supported".format(mode)) return image.astype(image_dtype), window, scale, padding, crop def resize_mask(mask, scale, padding, crop=None): """Resizes a mask using the given scale and padding. Typically, you get the scale and padding from resize_image() to ensure both, the image and the mask, are resized consistently. scale: mask scaling factor padding: Padding to add to the mask in the form [(top, bottom), (left, right), (0, 0)] """ # Suppress warning from scipy 0.13.0, the output shape of zoom() is # calculated with round() instead of int() with warnings.catch_warnings(): warnings.simplefilter("ignore") mask = scipy.ndimage.zoom(mask, zoom=[scale, scale, 1], order=0) if crop is not None: y, x, h, w = crop mask = mask[y:y + h, x:x + w] else: mask = np.pad(mask, padding, mode='constant', constant_values=0) return mask def minimize_mask(bbox, mask, mini_shape): """Resize masks to a smaller version to reduce memory load. Mini-masks can be resized back to image scale using expand_masks() See inspect_data.ipynb notebook for more details. 
""" soleMask = False if len(bbox.shape) != 2 and len(mask.shape) != 3: soleMask = True _bbox = np.expand_dims(bbox, 0) _mask = np.expand_dims(mask, 2) else: _bbox = bbox _mask = mask mini_mask = np.zeros(mini_shape + (_mask.shape[-1],), dtype=bool) for i in range(_mask.shape[-1]): # Pick slice and cast to bool in case load_mask() returned wrong dtype m = _mask[:, :, i].astype(bool).astype(np.uint8) * 255 y1, x1, y2, x2 = _bbox[i][:4] m = m[y1:y2, x1:x2] if m.size == 0: raise Exception("Invalid bounding box with area of zero") # Resize with bilinear interpolation m = resize(m, mini_shape) mini_mask[:, :, i] = np.around(m).astype(np.bool) return mini_mask[:, :, 0] if soleMask else mini_mask def expand_mask(bbox, mini_mask, image_shape): """Resizes mini masks back to image size. Reverses the change of minimize_mask(). See inspect_data.ipynb notebook for more details. """ if type(image_shape) is not tuple: image_shape = tuple(image_shape) soleMask = False if len(bbox.shape) != 2 and len(mini_mask.shape) != 3: soleMask = True _bbox = np.expand_dims(bbox, 0) _mini_mask = np.expand_dims(mini_mask, 2) else: _bbox = bbox _mini_mask = mini_mask mask = np.zeros(image_shape[:2] + (_mini_mask.shape[-1],), dtype=bool) for i in range(mask.shape[-1]): m = _mini_mask[:, :, i].astype(bool).astype(np.uint8) * 255 y1, x1, y2, x2 = _bbox[i][:4] h = y2 - y1 w = x2 - x1 # Resize with bilinear interpolation m = resize(m, (h, w)) mask[y1:y2, x1:x2, i] = np.around(m).astype(np.bool) return mask[:, :, 0] if soleMask else mask def minimize_mask_float(mask, bbox, output_shape=(28, 28), offset=32): """ Minimizes given mask(s) to floating point masks of the given shape :param mask: mask as a 2-D uint8 ndarray of shape (H, W) or masks as a 3-D uint8 ndarray of shape (H, W, N) :param bbox: bbox as a 1-D uint8 ndarray of shape (4) or masks as a 2-D uint8 ndarray of shape (N, 4) :param output_shape: shape of the output mini-mask(s) :param offset: the offset on each side of the image part that will be resized (used to avoid :return: Minimized mask(s) in the same ndarray format as input ones but with output_shape as (H, W) and with float64 dtype """ soleMask = False if len(bbox.shape) != 2 and len(mask.shape) != 3: soleMask = True _bbox = np.expand_dims(bbox, 0) _mask = np.expand_dims(mask, 2) else: _bbox = bbox _mask = mask mini_masks = np.zeros(output_shape + (_mask.shape[-1],), dtype=np.float64) for i in range(_mask.shape[-1]): # Computing mask shape with offset on all sides mask_shape = tuple(shift_bbox(_bbox[i][:4])[2:] + np.array([offset * 2] * 2)) temp_mask = np.zeros(mask_shape, dtype=np.uint8) # Empty mask y1, x1, y2, x2 = _bbox[i][:4] temp_mask[offset:-offset, offset:-offset] = _mask[y1:y2, x1:x2, i] # Filling it with mask # Resizing to output shape mini_masks[:, :, i] = resize(temp_mask.astype(bool).astype(np.float64), output_shape) return mini_masks[:, :, 0] if soleMask else mini_masks def expand_mask_float(mini_mask, bbox, output_shape=(1024, 1024), offset=32): """ Expands given floating point mini-mask(s) back to binary mask(s) with the same shape as the image :param mini_mask: mini-mask as a 2-D uint8 ndarray of shape (H, W) or mini-masks as a 3-D uint8 ndarray of shape (H, W, N) :param bbox: bbox as a 1-D uint8 ndarray of shape (4) or masks as a 2-D uint8 ndarray of shape (N, 4) :param output_shape: shape of the output mask(s) :param offset: the offset on each side of the image part that will be resized (used to avoid :return: Expanded mask(s) in the same ndarray format as input ones but with output_shape as 
(H, W) and with uint8 dtype """ if type(output_shape) is not tuple: output_shape = tuple(output_shape) soleMask = False if len(bbox.shape) != 2 and len(mini_mask.shape) != 3: soleMask = True _bbox = np.expand_dims(bbox, 0) _mini_mask = np.expand_dims(mini_mask, 2) else: _bbox = bbox _mini_mask = mini_mask masks = np.zeros(output_shape[:2] + (_mini_mask.shape[-1],), dtype=np.uint8) for i in range(_mini_mask.shape[-1]): mask_shape = tuple(shift_bbox(_bbox[i][:4])[2:] + np.array([offset * 2] * 2)) resized_mask = resize(_mini_mask[:, :, i], mask_shape) y1, x1, y2, x2 = _bbox[i][:4] masks[y1:y2, x1:x2, i] = np.where(resized_mask[offset:-offset, offset:-offset] >= 0.5, 255, 0).astype(np.uint8) return masks[:, :, 0] if soleMask else masks def unmold_mask(mask, bbox, image_shape): """Converts a mask generated by the neural network to a format similar to its original shape. mask: [height, width] of type float. A small, typically 28x28 mask. bbox: [y1, x1, y2, x2]. The box to fit the mask in. Returns a binary mask with the same size as the original image. """ threshold = 0.5 y1, x1, y2, x2 = bbox mask = resize(mask, (y2 - y1, x2 - x1)) mask = np.where(mask >= threshold, 1, 0).astype(np.bool) # Put the mask in the right location. full_mask = np.zeros(image_shape[:2], dtype=np.bool) full_mask[y1:y2, x1:x2] = mask return full_mask ############################################################ # Miscellaneous ############################################################ def export_results(output_path: str, class_ids, boxes=None, masks=None, scores=None, bbox_areas=None, mask_areas=None): """ Exports result dictionary to a JSON file for debug :param output_path: path to the output JSON file :param class_ids: value of the 'class_ids' key of results dictionary :param boxes: value of the 'class_ids' key of results dictionary :param masks: value of the 'class_ids' key of results dictionary :param scores: value of the 'class_ids' key of results dictionary :param bbox_areas: value of the 'bbox_areas' key of results dictionary :param mask_areas: value of the 'masks_areas' key of results dictionary :return: None """ if type(class_ids) is dict: if 'rois' in class_ids: boxes = class_ids['rois'] if 'masks' in class_ids: masks = class_ids['masks'] if 'scores' in class_ids: scores = class_ids['scores'] if 'bbox_areas' in class_ids: bbox_areas = class_ids['bbox_areas'] if 'mask_areas' in class_ids: mask_areas = class_ids['mask_areas'] class_ids = class_ids['class_ids'] oneDArrays = [ (class_ids, "class_ids", int), (scores, "scores", float), (bbox_areas, "bbox_areas", float), (mask_areas, "mask_areas", float), ] data = {key: [arrayType(v) for v in array] for array, key, arrayType in oneDArrays if array is not None} if boxes is not None: data["rois"] = [[int(v) for v in bbox] for bbox in boxes] if masks is not None: data["masks"] = [[[int(bool(v)) * 255 for v in row] for row in mask] for mask in masks] with open(output_path, 'w') as output: json.dump(data, output) def import_results(input_path: str): """ Imports result dictionary from JSON file for debug :param input_path: path to the input JSON file :return: results dictionary """ with open(input_path, 'r') as inputFile: data = json.load(inputFile) keyType = {'rois': np.int32, 'masks': np.uint8, 'class_ids': int, 'scores': float, 'bbox_areas': float, 'mask_areas': float} for key in data.keys(): data[key] = np.array(data[key]).astype(keyType[key]) return data def classes_level(classes_hierarchy): """ Return each level of the given class hierarchy with its classes :param 
classes_hierarchy: a structure made of list, int for classes of the same lvl, and dict to describe "key class contains value class(es)". ex : [1, {2: [3, 4]}, {5: 6}] -> [[1, 2, 5], [3, 4, 6]] :return: list containing each classes of a level as a list : [[ lvl0 ], [ lvl1 ], ...] """ if type(classes_hierarchy) is int: return [[classes_hierarchy]] # Return a hierarchy with only one level containing the value elif type(classes_hierarchy) is list: res = [] for element in classes_hierarchy: # For each element of the list temp = classes_level(element) for lvl, indices in enumerate(temp): # For each hierarchy level of the current element if len(indices) > 0: if len(res) < lvl + 1: # Adding a new level if needed res.append([]) res[lvl].extend(indices) # Fusing the current hierarchy level to list hierarchy one return res elif type(classes_hierarchy) is dict: res = [[]] for key in classes_hierarchy: res[0].append(key) # Append key to lvl 0 classes if classes_hierarchy[key] is not None: temp = classes_level(classes_hierarchy[key]) for lvl, indices in enumerate(temp): # For each lvl of class inside the value of key element if len(res) < lvl + 2: # Adding a new level if needed res.append([]) res[lvl + 1].extend(indices) # Offsetting each level of the child to be relative to parent class return res def remove_redundant_classes(classes_lvl, keepFirst=True): """ Remove classes that appears more than once in the classes' levels :param classes_lvl: list of each level of classes as list : [[ lvl 0 ], [ lvl 1 ], ...] :param keepFirst: if True, class will be kept in the min level in which it is present, else in the max/last level. :return: [[ lvl 0 ], [ lvl 1 ], ...] with classes only appearing once """ res = [[] for _ in classes_lvl] seenClass = [] for lvlID, lvl in enumerate(classes_lvl[::1 if keepFirst else -1]): # For each lvl in normal or reverse order for classID in lvl: if classID not in seenClass: # Checking if the class ID has already been added or not seenClass.append(classID) # Adding the class ID to the added ones res[lvlID if keepFirst else (-1 - lvlID)].append(classID) # Adding the class to its level for lvl in res: # Removing empty levels if len(lvl) == 0: res.remove(lvl) return res def compute_confusion_matrix(image_shape: iter, expectedResults: dict, predictedResults: dict, num_classes: int, config: Config = None): """ Computes confusion matrix at pixel precision :param image_shape: the initial image shape :param expectedResults: the expected results dict :param predictedResults: the predicted results dict :param num_classes: number of classes (max class ID) :param config: the config object of the AI :return: confusion matrix as a ndarray of shape (num_classes + 1, num_classes + 1), 0 being background class """ expectedImg = create_multiclass_mask(image_shape, expectedResults, config) predictedImg = create_multiclass_mask(image_shape, predictedResults, config) confusion_matrix = np.zeros((num_classes + 1, num_classes + 1), dtype=np.int64) for y in range(image_shape[0]): for x in range(image_shape[1]): confusion_matrix[expectedImg[y, x]][predictedImg[y, x]] += 1 return confusion_matrix def trim_zeros(x): """It's common to have tensors larger than the available data and pad with zeros. This function removes rows that are all zeros. x: [rows, columns]. 
""" assert len(x.shape) == 2 return x[~np.all(x == 0, axis=1)] def compute_matches(gt_boxes, gt_class_ids, gt_masks, pred_boxes, pred_class_ids, pred_scores, pred_masks, ap_iou_threshold=0.5, min_iou_to_count=0.0, nb_class=-1, confusion_iou_threshold=0.1, classes_hierarchy=None, confusion_background_class=True, confusion_only_best_match=True): """Finds matches between prediction and ground truth instances. Returns: gt_match: 1-D array. For each GT box it has the index of the matched predicted box. pred_match: 1-D array. For each predicted box, it has the index of the matched ground truth box. overlaps: [pred_boxes, gt_boxes] IoU overlaps. """ if nb_class > 0: bg = 1 if confusion_background_class else 0 confusion_matrix = np.zeros((nb_class + bg, nb_class + bg), dtype=np.int64) else: confusion_matrix = None confusion_iou_threshold = 1. classes_hierarchy_ = None if classes_hierarchy is not None and type(classes_hierarchy) is list: classes_hierarchy_ = {list(c.keys())[0]: c[list(c.keys())[0]] for c in classes_hierarchy if type(c) is dict} elif classes_hierarchy is not None and type(classes_hierarchy) is dict: classes_hierarchy_ = classes_hierarchy # Trim zero padding # TODO: cleaner to do zero unpadding upstream gt_boxes = trim_zeros(gt_boxes) gt_masks = gt_masks[..., :gt_boxes.shape[0]] pred_boxes = trim_zeros(pred_boxes) pred_scores = pred_scores[:pred_boxes.shape[0]] # Sort predictions by score from high to low indices = np.argsort(pred_scores)[::-1] pred_boxes = pred_boxes[indices] pred_class_ids = pred_class_ids[indices] pred_scores = pred_scores[indices] pred_masks = pred_masks[..., indices] # Compute IoU overlaps [pred_masks, gt_masks] overlaps = compute_overlaps_masks(pred_masks, pred_boxes, gt_masks, gt_boxes) # Loop through predictions and find matching ground truth boxes pred_match = -1 * np.ones([pred_boxes.shape[0]]) gt_match = -1 * np.ones([gt_boxes.shape[0]]) for pred_idx in range(len(pred_boxes)): # Find best matching ground truth box # 1. Sort matches by score sorted_ixs = np.argsort(overlaps[pred_idx])[::-1] # 2. Remove low scores low_score_idx = np.where(overlaps[pred_idx, sorted_ixs] < min_iou_to_count)[0] if low_score_idx.size > 0: sorted_ixs = sorted_ixs[:low_score_idx[0]] # 3. Find the match match = False pred_class = pred_class_ids[pred_idx] for gt_idx in sorted_ixs: gt_class = gt_class_ids[gt_idx] # If classes_hierarchy is provided and (gt_class, pred_class) are parent/child classes we skip if classes_hierarchy_ is not None and ( ( gt_class in classes_hierarchy_ and pred_class in classes_hierarchy_[gt_class] ) or ( pred_class in classes_hierarchy_ and gt_class in classes_hierarchy_[pred_class] ) ): continue # If we reach IoU smaller than the threshold, end the loop (list is sorted so all the followings will be # smaller too) iou = overlaps[pred_idx, gt_idx] breakAP = iou < ap_iou_threshold breakConfusion = iou < confusion_iou_threshold if breakAP and breakConfusion: break if not breakConfusion and confusion_matrix is not None and (not confusion_only_best_match or not match): match = True if confusion_background_class: confusion_matrix[gt_class][pred_class] += 1 else: confusion_matrix[gt_class - 1][pred_class - 1] += 1 # If ground truth box is already matched, go to next one # TODO : Rework that part, specially for confusion matrix, we are counting positive predictions for each # match with a gt_mask not only the first time if gt_match[gt_idx] > -1: continue if not breakAP: # Do we have a match? 
if pred_class == gt_class: gt_match[gt_idx] = pred_idx pred_match[pred_idx] = gt_idx # Something has been predicted but no ground truth annotation if confusion_matrix is not None and confusion_background_class and not match: confusion_matrix[0][pred_class] += 1 # Looking for a ground truth box without overlapping prediction if confusion_matrix is not None and confusion_background_class: for gt_idx in range(len(gt_match)): if gt_match[gt_idx] == -1: if gt_class_ids[gt_idx] > nb_class: print(f"Error : got class id = {gt_class_ids[gt_idx]} while max class id = {nb_class}") else: confusion_matrix[gt_class_ids[gt_idx]][0] += 1 return gt_match, pred_match, overlaps, confusion_matrix def compute_ap(gt_boxes, gt_class_ids, gt_masks, pred_boxes, pred_class_ids, pred_scores, pred_masks, iou_threshold=0.5, score_threshold=0.3, nb_class=-1, confusion_iou_threshold=0.3, classes_hierarchy=None, confusion_background_class=True, confusion_only_best_match=True): """Compute Average Precision at a set IoU threshold (default 0.5). Returns: mAP: Mean Average Precision precisions: List of precisions at different class score thresholds. recalls: List of recall values at different class score thresholds. overlaps: [pred_boxes, gt_boxes] IoU overlaps. """ # Get matches and overlaps gt_match, pred_match, overlaps, confusion_matrix = compute_matches( gt_boxes=gt_boxes, gt_class_ids=gt_class_ids, gt_masks=gt_masks, min_iou_to_count=score_threshold, pred_boxes=pred_boxes, pred_class_ids=pred_class_ids, pred_masks=pred_masks, pred_scores=pred_scores, nb_class=nb_class, ap_iou_threshold=iou_threshold, confusion_iou_threshold=confusion_iou_threshold, classes_hierarchy=classes_hierarchy, confusion_background_class=confusion_background_class, confusion_only_best_match=confusion_only_best_match ) if len(gt_class_ids) == len(pred_class_ids) == 0: return 1., 1., 1., overlaps, confusion_matrix # Compute precision and recall at each prediction box step precisions = np.cumsum(pred_match > -1) / (np.arange(len(pred_match)) + 1) recalls = np.cumsum(pred_match > -1).astype(np.float32) / len(gt_match) for i in range(len(recalls)): if
np.isnan(recalls[i])
numpy.isnan
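# A minimal, runnable sketch of the numpy.isnan guard exercised by the completion
# above: NaN recall values (e.g. from a 0/0 division when an image has no ground
# truth boxes) are replaced by 0 so the precision/recall curve stays well defined.
# The array values below are illustrative, not taken from the record.
import numpy as np

recalls = np.array([0.5, np.nan, 1.0], dtype=np.float32)
for i in range(len(recalls)):
    if np.isnan(recalls[i]):
        recalls[i] = 0.0
# recalls is now [0.5, 0.0, 1.0]; np.nan_to_num(recalls) is a vectorised equivalent.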
u""" Calculates water and steam properties. Classes: Water: water object. References: [1] The International Association for the Properties of Water and Steam. Revised Release on the IAPWS Industrial Formulation 1997 for the Thermodynamic Properties of Water and Steam. August 2007. """ from thermopy.units import Pressure, Temperature, Enthalpy from numpy import array, sum, sqrt, log from thermopy.constants import ideal_gas_constant, \ ideal_gas_constant_massic_basis import scipy.optimize class Water(object): """ Taken from The International Association for the Properties of Water and Steam. Lucerne, Switzerland. August 2007. Revised Release on the IAPWS Industrial Formulation 1997 for the Thermodynamic Properties of Water and Steam. Reference document: IAPWS-IF97. All units in SI and default is molar basis.""" def __init__(self, p, T, massic_basis=False): u"""Initializes a Water object.""" self.p = Pressure(p) self.T = Temperature(T) # check if water is specified by IAPWS-IF97 for these values if self.T < Temperature(273.15) or self.T > Temperature(2273.15): raise ValueError('Temperature ' + str(T) + ' out of range (273.15 - 2273.15K)') if self.p > Pressure(100).unit('MPa') or self.p < 0: raise ValueError('Pressure ' + str(p) + ' out of range (0 - 100 MPa)') if self.T > 1073.15 and self.p > Pressure(50).unit('MPa'): raise ValueError('p or T value out of range') # adjust R to mass or molar basis if massic_basis is True: self.R = ideal_gas_constant_massic_basis[0] # kJ/(kg K); elif massic_basis is False: self.R = ideal_gas_constant[0] # constants # ideal gas constant was already instantiated Tc = 647.096 # Critical point temperature (K) pc = Pressure(22.064).unit('MPa') # Critical point pressure (MPa) rhoc = 322 # Critical point density kg/m3 Tt = 273.16 # Triple point temperature (K) pt = 611.657 # Triple point pressure (Pa) ht = 0.611783 # Enthalpy at triple point (J/kg) def temperature_saturation(self, p=None): """Yields Tsat given a pressure p.""" # module deals with pressure in MPa if p is None: p = self.p.MPa else: p = Pressure(p).MPa if p < Pressure(611.213).unit('Pa').MPa or p > self.pc: raise ValueError('Pressure out of range.') # table 34 ni = array([1167.0521452767, -724213.16703206, -17.073846940092, 12020.82470247, -3232555.0322333, 14.91510861353, -4823.2657361591, 405113.40542057, -0.23855557567849, 650.17534844798], dtype='d') beta = p ** 0.25 E = 1 * beta ** 2 + ni[2] * beta + ni[5] F = ni[0] * beta ** 2 + ni[3] * beta + ni[6] G = ni[1] * beta ** 2 + ni[4] * beta + ni[7] D = 2 * G / (-F - (F**2 - 4 * E * G)**0.5) return Temperature((ni[9] + D - ((ni[9] + D) ** 2 - 4 * (ni[8] + ni[9] * D)) ** 0.5) * 0.5) def pressure_saturation(self, T=None): """Yields Psat given a temperature T""" if T is None: T = self.T else: T = Temperature(T) if T < 273.15 or T > self.Tc: raise ValueError('Temperature out of range.') # table 34 ni = array([1167.0521452767, -724213.16703206, -17.073846940092, 12020.82470247, -3232555.0322333, 14.91510861353, -4823.2657361591, 405113.40542057, -0.23855557567849, 650.17534844798], dtype='d') v = T + ni[8] / (T - ni[9]) A = 1 * v ** 2 + ni[0] * v + ni[1] B = ni[2] * v ** 2 + ni[3] * v + ni[4] C = ni[5] * v ** 2 + ni[6] * v + ni[7] return Pressure((2 * C / (-B + (B ** 2 - 4 * A * C) ** 0.5)) ** 4 ).unit('MPa') def _is_in_region(self): """Finds a region for the (T, p) point (see IAPWS-IF97 for details). 
The usefulness of these regions are to divide the physical properties of water into different sets of coefficients and equations.""" # for the 2 - 3 boundary line ni = array([348.05185628969, -1.1671859879975, 0.0010192970039326, 572.54459862746, 13.91883977887], dtype='d') theta = self.T pressure23 = Pressure(ni[0] + ni[1] * theta + ni[2] * theta ** 2).unit('MPa') # exceptional cases if self.T == self.Tt and self.p == self.pt: return 1 # regular cases if self.T >= Temperature(273.15) and self.T <= Temperature(623.15): if self.T < self.temperature_saturation(self.p): return 1 else: return 2 elif self.T > Temperature(623.15) and self.T <= Temperature(1073.15): if self.p > pressure23: return 3 else: return 2 elif self.T >= Temperature(1073.15) and self.T <= Temperature(2273.15): return 5 else: raise Exception('Cannot assign region to the parameters p = ' + str(self.p) + 'T = ' + str(self.T) + 'given.') def _basic_equation1(self, value='gamma'): """Returns basic equation 1 and its derivatives, ex: 'gamma', 'gamma_tau', 'gamma_tau_pi', etc.""" # region1 Ii = array([0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 8, 8, 21, 23, 29, 30, 31, 32], dtype='int') Ji = array([-2, -1, 0, 1, 2, 3, 4, 5, -9, -7, -1, 0, 1, 3, -3, 0, 1, 3, 17, -4, 0, 6, -5, -2, 10, -8, -11, -6, -29, -31, -38, -39, -40, -41], dtype='int') ni = array([0.14632971213167, -0.84548187169114, -3.756360367204, 3.3855169168385, -0.95791963387872, 0.15772038513228, -0.016616417199501, 0.00081214629983568, 0.00028319080123804, -0.00060706301565874, -0.018990068218419, -0.032529748770505, -0.021841717175414, -5.283835796993e-05, -0.00047184321073267, -0.00030001780793026, 4.7661393906987e-05, -4.4141845330846e-06, -7.2694996297594e-16, -3.1679644845054e-05, -2.8270797985312e-06, -8.5205128120103e-10, -2.2425281908e-06, -6.5171222895601e-07, -1.4341729937924e-13, -4.0516996860117e-07, -1.2734301741641e-09, -1.7424871230634e-10, -6.8762131295531e-19, 1.4478307828521e-20, 2.6335781662795e-23, -1.1947622640071e-23, 1.8228094581404e-24, -9.3537087292458e-26], dtype='d') pi = self.p / Pressure(16.53).unit('MPa') tau = Temperature(1386) / self.T if value == 'gamma': return sum(ni * ((7.1 - pi) ** Ii) * ((tau - 1.222) ** Ji)) elif value == 'gamma_tau': return sum(ni * ((7.1 - pi) ** Ii) * Ji * ((tau - 1.222) ** (Ji-1))) elif value == 'gamma_tau_tau': return sum(ni * ((7.1 - pi) ** Ii) * Ji * (Ji - 1) * ((tau - 1.222) ** (Ji - 2))) elif value == 'gamma_pi': return -1 * sum(ni * Ii * ((7.1 - pi) ** (Ii - 1)) * (tau - 1.222) ** Ji) elif value == 'gamma_pi_pi': return sum(ni * Ii * (Ii - 1) * (7.1 - pi) ** (Ii - 2) * (tau - 1.222) ** Ji) elif value == 'gamma_pi_tau' or value == 'gamma_tau_pi': return -1 * sum(ni * Ii * (7.1 - pi) ** (Ii - 1) * Ji * (tau - 1.222) ** (Ji - 1)) else: raise Exception('Function not assigned in _basic_equation1()') def _basic_equation2(self, value='gamma'): """Returns equation 1 and its derivatives, ex: 'gamma', 'gamma_tau', 'gamma_tau_pi', etc.""" # region2; ideal part J0 = array([0, 1, -5, -4, -3, -2, -1, 2, 3], dtype='d') n0 = array([-9.6927686500217, 10.086655968018, -0.005608791128302, 0.071452738081455, -0.40710498223928, 1.4240819171444, -4.383951131945, -0.28408632460772, 0.021268463753307], dtype='d') # region2; real part Ii = array([1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 4, 4, 4, 5, 6, 6, 6, 7, 7, 7, 8, 8, 9, 10, 10, 10, 16, 16, 18, 20, 20, 20, 21, 22, 23, 24, 24, 24], dtype='d') Ji = array([0, 1, 2, 3, 6, 1, 2, 4, 7, 36, 0, 1, 3, 6, 35, 1, 2, 3, 7, 3, 16, 35, 0, 11, 25, 8, 
36, 13, 4, 10, 14, 29, 50, 57, 20, 35, 48, 21, 53, 39, 26, 40, 58], dtype='d') ni = array([-0.0017731742473213, -0.017834862292358, -0.045996013696365, -0.057581259083432, -0.05032527872793, -3.3032641670203e-05, -0.00018948987516315, -0.0039392777243355, -0.043797295650573, -2.6674547914087e-05, 2.0481737692309e-08, 4.3870667284435e-07, -3.227767723857e-05, -0.0015033924542148, -0.040668253562649, -7.8847309559367e-10, 1.2790717852285e-08, 4.8225372718507e-07, 2.2922076337661e-06, -1.6714766451061e-11, -0.0021171472321355, -23.895741934104, -5.905956432427e-18, -1.2621808899101e-06, -0.038946842435739, 1.1256211360459e-11, -8.2311340897998, 1.9809712802088e-08, 1.0406965210174e-19, -1.0234747095929e-13, -1.0018179379511e-09, -8.0882908646985e-11, 0.10693031879409, -0.33662250574171, 8.9185845355421e-25, 3.0629316876232e-13, -4.2002467698208e-06, -5.9056029685639e-26, 3.7826947613457e-06, -1.2768608934681e-15, 7.3087610595061e-29, 5.5414715350778e-17, -9.436970724121e-07], dtype='d') pi = self.p / Pressure(1).unit('MPa') tau = 540 / self.T # arrays if value == 'gamma': return (self._basic_equation2('gamma_0') + self._basic_equation2('gamma_r')) elif value == 'gamma_0': return log(pi) + sum(n0 * (tau ** J0)) elif value == 'gamma_r': return sum(ni * (pi ** Ii) * (tau - 0.5) ** Ji) elif value == 'gamma_0_pi': return 1/pi elif value == 'gamme_0_pi_pi': return -1/(pi ** 2) elif value == 'gamma_0_tau': return 0 + sum(n0 * J0 * (tau ** (J0 - 1))) elif value == 'gamma_0_tau_tau': return 0 +
sum(n0 * J0 * (J0 - 1) * tau ** (J0 - 2))
numpy.sum
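# A minimal sketch of the numpy.sum pattern used by the completion above: the
# second tau-derivative of the ideal-gas Gibbs term is an elementwise product of
# coefficient arrays reduced with sum (imported from numpy in that module). The
# small arrays and tau value here are illustrative, not IAPWS-IF97 coefficients.
from numpy import array, sum

n0 = array([1.0, -2.0, 0.5])
J0 = array([0.0, 1.0, 2.0])
tau = 540.0 / 450.0
gamma_0_tau_tau = sum(n0 * J0 * (J0 - 1) * tau ** (J0 - 2))
# same value as the explicit loop over i of n0[i]*J0[i]*(J0[i]-1)*tau**(J0[i]-2)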
# coding=utf-8 # Copyright 2018 The DisentanglementLib Authors. All rights reserved. # Copyright 2021 <NAME>. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # This file was modified by <NAME> in 2021 """Tests for utils.py.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from absl.testing import absltest from disentanglement_lib.evaluation.metrics import utils from disentanglement_lib.data.ground_truth import dummy_data import numpy as np class UtilsTest(absltest.TestCase): def test_histogram_discretizer(self): # Input of 2D samples. target = np.array([[0.1, 0.2, 0.3, 0.4, 0.5, 0.6], [0.6, .5, .4, .3, .2, .1]]) result = utils._histogram_discretize(target, num_bins=3) shouldbe = np.array([[1, 1, 2, 2, 3, 3], [3, 3, 2, 2, 1, 1]]) np.testing.assert_array_equal(result, shouldbe) def test_discrete_entropy(self): target = np.array([[1, 1, 2, 2, 3, 3], [3, 3, 2, 2, 1, 1]]) result = utils.discrete_entropy(target) shouldbe = np.log(3) np.testing.assert_allclose(result, [shouldbe, shouldbe]) def test_discrete_mutual_info(self): xs = np.array([[1, 2, 1, 2], [1, 1, 2, 2]]) ys = np.array([[1, 2, 1, 2], [2, 2, 1, 1]]) result = utils.discrete_mutual_info(xs, ys) shouldbe = np.array([[np.log(2), 0.], [0., np.log(2)]]) np.testing.assert_allclose(result, shouldbe) def test_split_train_test(self): xs = np.zeros([10, 100]) xs_train, xs_test = utils.split_train_test(xs, 0.9) shouldbe_train = np.zeros([10, 90]) shouldbe_test = np.zeros([10, 10]) np.testing.assert_allclose(xs_train, shouldbe_train) np.testing.assert_allclose(xs_test, shouldbe_test) def test_local_sample_factors(self): random_state = np.random.RandomState(3) # sample range of 10% of num_factors factor_num_values = [1, 9, 10, 11, 100, 101] factor_centroid = np.array([0, 4, 9, 3, 10, 10]) samps = utils.local_sample_factors(1000, 0.1, factor_num_values, factor_centroid, 0, random_state) np.testing.assert_equal(samps.shape, (1000, 6)) self.assertTrue(np.all(samps[:,0] == 0)) # should all have the same value, since 0.1 * 9 < 1 self.assertTrue(np.max(samps[:,1]) - np.min(samps[:,1]) == 0) # should have diameter of 2 for both these for inx in [2,3]: assert_correct_radius(self, samps[:,inx], 1, 0, factor_num_values[inx]-1) # should have diameter of 20 for both these for inx in [4,5]: assert_correct_radius(self, samps[:,inx], 10, 0, factor_num_values[inx]-1) # same experiment, but now we don't consider any factor # with numfactors less than 11 to count as continuous (so 10 should now also # return all same values) # sample range of 10% of num_factors factor_num_values = [1, 9, 10, 11, 100, 110] samps = utils.local_sample_factors(1000, 0.15, factor_num_values, factor_centroid, 11, random_state) np.testing.assert_equal(samps.shape, (1000, 6)) self.assertTrue(np.all(samps[:,0] == 0)) # should all have the same value for inx in [1,2]: self.assertTrue(np.max(samps[:,inx]) - np.min(samps[:,inx]) == 0) # should have radius 1 for this, since floor(0.15 * 11) = 1 for inx in [3]: 
assert_correct_radius(self, samps[:,inx], 1, 0, factor_num_values[inx]-1) # should have diameter of 20 for both these for inx in [4]: assert_correct_radius(self, samps[:,inx], 15, 0, factor_num_values[inx]-1) for inx in [5]: assert_correct_radius(self, samps[:,inx], 16, 0, factor_num_values[inx]-1) def test_sample_integers_around_center(self): random_state = np.random.RandomState(3) for i in range(20): sample = utils.sample_integers_around_center(5, 3, 0, 10, 100, random_state) self.assertTrue(np.all(sample <= 8)) self.assertTrue(np.all(sample >= 2)) self.assertTrue(np.any(sample > 6)) self.assertTrue(np.any(sample < 4)) for i in range(20): sample = utils.sample_integers_around_center(5, 3, 4, 6, 100, random_state) self.assertTrue(np.all(sample <= 6)) self.assertTrue(
np.all(sample >= 4)
numpy.all
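# A minimal sketch of the numpy.all assertion pattern from the completion above:
# comparing an integer sample against a bound yields a boolean array, and np.all
# reduces it to a single bool so a unit test can check every value stays in range.
# The sample values are illustrative.
import numpy as np

sample = np.array([4, 5, 6, 5, 4])
assert np.all(sample >= 4) and np.all(sample <= 6)   # every element within [4, 6]
assert not np.all(sample > 4)                        # False: some elements equal 4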
# BSD 3-Clause License; see https://github.com/scikit-hep/uproot4/blob/main/LICENSE import numpy as np import skhep_testdata from numpy.testing import assert_array_equal import uproot def test_read_TProfile2D(): file = skhep_testdata.data_path("uproot-issue-227a.root") with uproot.open(file) as h: T = h["hprof2d"] assert T.kind == "MEAN" assert_array_equal(T.axis("x").edges(), np.array([1.0, 2.0, 3.0])) assert_array_equal(T.axis("y").edges(),
np.array([1.0, 2.0, 3.0, 4.0])
numpy.array
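# A minimal sketch of the numpy.array / assert_array_equal pairing used by the
# completion above: the expected histogram bin edges are written out as an explicit
# array and compared elementwise with the computed ones. The edge values are illustrative.
import numpy as np
from numpy.testing import assert_array_equal

computed_edges = np.linspace(1.0, 4.0, 4)     # stand-in for the axis("y").edges() call
assert_array_equal(computed_edges, np.array([1.0, 2.0, 3.0, 4.0]))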
#!/usr/bin/env python # -*- coding: utf-8 -*- """Testing suite for BEKK. """ from __future__ import print_function, division import unittest as ut import numpy as np import numpy.testing as npt from bekk import BEKK, ParamStandard, ParamSpatial, simulate_bekk from bekk import filter_var_python, likelihood_python from bekk.recursion import filter_var from bekk.likelihood import likelihood_gauss class BEKKTestCase(ut.TestCase): """Test BEKK.""" def test_simulation(self): """Test simulation.""" nstocks = 6 nobs = 10 # A, B, C - n x n matrices amat = np.eye(nstocks) * .09**.5 bmat = np.eye(nstocks) * .9**.5 target = np.eye(nstocks) param = ParamStandard.from_target(amat=amat, bmat=bmat, target=target) for distr in ['normal', 'student', 'skewt']: innov, hvar = simulate_bekk(param, nobs=nobs, distr=distr) self.assertEqual(innov.shape, (nobs, nstocks)) self.assertEqual(hvar.shape, (nobs, nstocks, nstocks)) def test_simulation_spatial(self): """Test simulation spatial.""" nobs = 10 nstocks = 4 groups = [[(0, 1), (2, 3)]] ncat = 1 alpha, beta, gamma = .01, .16, .09 # A, B, C - n x n matrices avecs = np.ones((ncat+1, nstocks)) * alpha**.5 bvecs = np.ones((ncat+1, nstocks)) * beta**.5 dvecs = np.vstack([np.ones((1, nstocks)), np.ones((ncat, nstocks)) * gamma**.5]) param = ParamSpatial.from_abdv(avecs=avecs, bvecs=bvecs, dvecs=dvecs, groups=groups) for distr in ['normal', 'student', 'skewt']: innov, hvar = simulate_bekk(param, nobs=nobs, distr=distr) self.assertEqual(innov.shape, (nobs, nstocks)) self.assertEqual(hvar.shape, (nobs, nstocks, nstocks)) def test_filter_var(self): """Test recursions.""" nstocks = 2 nobs = 2000 # A, B, C - n x n matrices amat = np.eye(nstocks) * .09**.5 bmat = np.eye(nstocks) * .9**.5 target =
np.eye(nstocks)
numpy.eye
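# A minimal sketch of the numpy.eye usage in the completion above: the BEKK test
# builds scaled identity matrices for the A and B parameters and an identity
# variance target. The value of nstocks here is illustrative.
import numpy as np

nstocks = 2
amat = np.eye(nstocks) * .09**.5     # diagonal ARCH loadings
bmat = np.eye(nstocks) * .9**.5      # diagonal GARCH loadings
target = np.eye(nstocks)             # unconditional covariance target (identity)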
"""The module for detecting axes""" import traceback import math import cv2 import numpy as np from pytesseract import pytesseract as pt from sklearn.cluster import KMeans from .__settings__ import TESTING from .image_processing import CV2PIL, contrast_enhance GRAY_SCALE_LEVEL = 64 GRAY_SCALE_BINARY = 128 GRAY_RANGE = 10 GAP_PERSENTAGE = 0.04 SCALE_DEGREE = 2 MINI_TICK_EXTRA = 2 THRES_PERSENTAGE = 0 MARGIN = 3 def entropy(labels, base=None): """ Computes entropy of label distribution. """ n_labels = len(labels) if n_labels <= 1: return 0 value, counts = np.unique(labels, return_counts=True) probs = counts / n_labels n_classes = np.count_nonzero(probs) if n_classes <= 1: return 0 ent = 0. # Compute entropy base = math.e if base is None else base for i in probs: ent -= i * math.log(i, base) return ent def understand_data(data): """Parse the pytesseract data""" lines = data.split("\n") items = [] head = lines[0].split("\t") for k, line in enumerate(lines): if k == 0: continue item = {} attributes = line.split("\t") if len(head) != len(attributes): continue # assert len(head) == len(attributes), "Oops, the head number is not equal to the attributes number of this item" for i, attr in enumerate(attributes): item[head[i]] = attr if item["text"] != " " and item["text"] != "": item["width"] = int(item['width']) item["top"] = int(item['top']) item["left"] = int(item['left']) item["height"] = int(item['height']) item["conf"] = int(item["conf"]) items.append(item) return items def classify_texts(direction, f_items, ticks, labels): """The function for classifying ticks and labels""" proj_positions = [] vertical_direction = (direction + 90) / 180 * math.pi vec_len = 10 vertical_vector = { "x": vec_len * math.cos(vertical_direction), "y": vec_len * math.sin(vertical_direction) } # Project to the vertical direction for f_item in f_items: pos = f_item.get("position") proj_pos = [pos.get("x") * vertical_vector.get("x") + pos.get("y") * vertical_vector.get("y")] proj_positions.append(proj_pos) proj_data = np.array(proj_positions) # Classify based on the projected positions estimator = KMeans(n_clusters=2) estimator.fit(proj_data) label_pred = estimator.labels_ classes = {} for i in range(len(f_items)): label_i = label_pred[i] class_i = classes.get(label_i) if class_i is None: classes[label_i] = [i] else: class_i.append(i) # Assume that there are more ticks texts than the label texts tick_class = None label_class = None if len(classes.get(0)) > len(classes.get(1)): tick_class = classes.get(0) label_class = classes.get(1) else: tick_class = classes.get(1) label_class = classes.get(0) # Pack the results for tick_i in tick_class: ticks.append(f_items[tick_i]) for label_i in label_class: labels.append(f_items[label_i]["text"]) def get_format_axis(ticks_data, label_texts, ticks_bbox, img_shape, axis_bbox, axis_direction, axis_score): """Pack the textual information of the axis""" axis = {} axis_range = { "x": [axis_bbox.get("x"), axis_bbox.get("x") + axis_bbox.get("width")], "y": [axis_bbox.get("y"), axis_bbox.get("y") + axis_bbox.get("height")] } ticks = [] tick_range = { "x": 0, "y": 0, "width": 0, "height": 0 } if ticks_bbox is not None: tick_range = ticks_bbox.copy() # format_items = [] for tick_data in ticks_data: item_text = tick_data.get("text") tick = {} tick["text"] = item_text if TESTING["axis"]["sign"]: print("---------------------------------------") print("tick_data: ", tick_data) print("axis_bbox: ", axis_bbox) print("tick_range: ", tick_range) print("---------------------------------------") 
tick_bbox = { "x": round(tick_data["left"] / SCALE_DEGREE) + tick_range.get("x") + axis_bbox.get("x"), "y": round(tick_data["top"] / SCALE_DEGREE) + tick_range.get("y") + axis_bbox.get("y"), "width": round(tick_data.get("width") / SCALE_DEGREE), "height": round(tick_data.get("height") / SCALE_DEGREE) } if (tick_bbox["x"] > MINI_TICK_EXTRA) and\ (tick_bbox["x"] + tick_bbox["width"] + MINI_TICK_EXTRA < img_shape["width"]): tick_bbox["x"] = tick_bbox["x"] - MINI_TICK_EXTRA tick_bbox["width"] = tick_bbox["width"] + 2 * MINI_TICK_EXTRA if (tick_bbox["y"] > MINI_TICK_EXTRA) and\ (tick_bbox["y"] + tick_bbox["height"] + MINI_TICK_EXTRA < img_shape["height"]): tick_bbox["y"] = tick_bbox["y"] - MINI_TICK_EXTRA tick_bbox["height"] = tick_bbox["height"] + 2 * MINI_TICK_EXTRA tick["bbox"] = tick_bbox position = { "x": tick_bbox["x"] + tick_bbox["width"] / 2, "y": tick_bbox["y"] + tick_bbox["height"] / 2 } tick["position"] = position if tick["text"] != "": ticks.append(tick) # format_items.append(tick) # Classify if the texts belong to the ticks or the label # classify_texts(axis_direction, format_items, ticks, labels) axis_label = label_texts if isinstance(axis_label, list): axis_label = " ".join(axis_label) axis["label"] = axis_label axis["axis_data"] = { "ticks": ticks, "direction": axis_direction } # make up the common object-detection data axis["class"] = "axis" axis["score"] = axis_score axis["color"] = None axis["bbox"] = axis_bbox axis["mask"] = [[ [axis_range["x"][0], axis_range["y"][0]], [axis_range["x"][0], axis_range["y"][1]], [axis_range["x"][1], axis_range["y"][1]], [axis_range["x"][1], axis_range["y"][0]] ]] axis["position"] = { "x": (axis_range["x"][0] + axis_range["x"][1]) / 2, "y": (axis_range["y"][0] + axis_range["y"][1]) / 2 } axis["size"] = { "area": axis_bbox.get("width") * axis_bbox.get("height"), "x_range": axis_range.get("x"), "y_range": axis_range.get("y") } return axis def divide_by_threshold(array): """Find ranges with the value larger than the given threshold""" # Step 0: decide the threshold and the min_count min_count = 3 # if array.size > 0: # min_count = round(float(array.size) * GAP_PERSENTAGE) threshold = 0 if array.size > 0: threshold = 0 #array.max() * THRES_PERSENTAGE # Step 1: divide the array by the given threshold empty_ranges = {} temp_range = {} # Step 1-1: find the empty ranges for _id, _value in enumerate(array): if _value <= threshold: if temp_range: temp_range["end"] = _id temp_range["length"] = temp_range["length"] + 1 else: temp_range["start"] = _id temp_range["end"] = _id temp_range["length"] = 1 elif temp_range: temp_range = {} if temp_range and temp_range["length"] >= min_count: range_head = temp_range["start"] if empty_ranges.get(range_head) is None: empty_ranges[range_head] = temp_range # Step 1-2: find the non-empty ranges solid_ranges = {} temp_range = {} for _id, _value in enumerate(array): is_empty = False for empty_range in empty_ranges.values(): if empty_range["start"] <= _id <= empty_range["end"]: is_empty = True break if not is_empty: if temp_range: temp_range["end"] = _id temp_range["length"] = temp_range["length"] + 1 else: temp_range["start"] = _id temp_range["end"] = _id temp_range["length"] = 1 else: temp_range = {} if temp_range and temp_range["length"] > 1: range_head = temp_range["start"] if solid_ranges.get(range_head) is None: solid_ranges[range_head] = temp_range # Step 2: Decide which range belongs to which category line_range = None tick_range = None title_range = None if not (empty_ranges and solid_ranges): return line_range, 
tick_range, title_range range_values = list(solid_ranges.values()) range_count = len(range_values) # Case 1: only the ticks if range_count == 1: tick_range = range_values[0] # Case 2: line + tick or line + tick + title elif range_count > 0: min_range_len = float('Inf') min_range_id = -1 # Assume the line to be the smallest range for range_id, range_value in enumerate(range_values): if range_value["length"] < min_range_len: min_range_len = range_value["length"] min_range_id = range_id if min_range_id >= 0: line_range = range_values.pop(min_range_id) # Find the range closest to the line if line_range: min_dist = float('Inf') min_dist_id = -1 for range_id, range_value in enumerate(range_values): dist = min(\ abs(range_value["start"] - line_range["start"]), \ abs(range_value["start"] - line_range["end"]), \ abs(range_value["end"] - line_range["start"]), \ abs(range_value["end"] - line_range["end"])) if dist < min_dist: min_dist = dist min_dist_id = range_id if min_dist_id >= 0: tick_range = range_values.pop(min_dist_id) # If there are still ranges left, it must be the title if range_values: title_range = range_values[0] return line_range, tick_range, title_range def partition_axis(axis_img, axis_id, axis_direction): """Partition the axis image into three parts: line, tick_texts and title""" line_array = None tick_array = None title_array = None ticks_bbox = None # Initialize axis_array = cv2.cvtColor(axis_img, cv2.COLOR_BGR2GRAY).astype(np.uint8) row_num = axis_array.shape[0] col_num = axis_array.shape[1] # Denoising: bilateral filtering axis_array_smooth = cv2.bilateralFilter(axis_array, 4, 50, 50) # Simplify the gray scales axis_array_simp = (axis_array_smooth / GRAY_SCALE_LEVEL).astype(np.uint8) axis_array_simp = axis_array_simp * GRAY_SCALE_LEVEL # axis_array_simp[(axis_array > bg_gray - GRAY_RANGE) \ # & (axis_array < bg_gray + GRAY_RANGE)] = bg_gray # Calculate the entropy of the simplified image row_ent = np.zeros(row_num) col_ent =
np.zeros(col_num)
numpy.zeros
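# A minimal sketch of the numpy.zeros allocation in the completion above: one
# entropy value is accumulated per image row and per image column, so two
# zero-initialised 1-D arrays are created first. The sizes are illustrative.
import numpy as np

row_num, col_num = 32, 128
row_ent = np.zeros(row_num)    # filled row by row in a later loop
col_ent = np.zeros(col_num)    # filled column by column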
import torch import matplotlib.pyplot as plt import numpy as np import time import floris.tools as wfct from superposition import super_position from optimisation import FLORIS_wake_steering, CNNwake_wake_steering from superposition import FLORIS_farm_power, CNNWake_farm_power __author__ = "<NAME>" __copyright__ = "Copyright 2021, CNNwake" __credits__ = ["<NAME>"] __license__ = "MIT" __version__ = "1.0" __email__ = "<EMAIL>" __status__ = "Development" def visualize_turbine(plane, domain_size, nr_points, title="", ax=None): """ Function to plot the flow field around a single turbine Args: plane (2d numpy array): Flow field around turbine domain_size (list or numpy array): x and y limits of the domain, the first two values correspond to min and max of x and similar for the y values [x_min, x_max, y_min, y_max] nr_points (list or numpy array): Nr. of points in the array title (str, optional): Title of the graph. Defaults to "". ax (ax.pcolormesh, optional): Pyplot subplot class, adds the plot to this location. Returns: ax.pcolormesh: Image of the flow field """ # create mesh grid for plotting x = np.linspace(domain_size[0], domain_size[1], nr_points[0]) y = np.linspace(domain_size[2], domain_size[3], nr_points[1]) x_mesh, y_mesh = np.meshgrid(x, y) # Plot the cut-through im = ax.pcolormesh(x_mesh, y_mesh, plane, shading='auto', cmap="coolwarm") ax.set_title(title) # Make equal axis ax.set_aspect("equal") return im def visualize_farm( plane, nr_points, size_x, size_y, title="", ax=None, vmax=False): """ Function to plot flow-field around a wind farm. Args: plane (2d numpy array): Flow field of wind farm nr_points (list or np array): List of nr of points in x and y size_x (int): Size of domain in x direction (km) size_y (int): Size of domain in y direction (km) title (str, optional): Title of the plot. Defaults to "". ax (ax.pcolormesh, optional): Pyplot subplot class, adds the plot to this location. vmax (bool, optional): Maximum value to plot. If false, the max value of the plane is used a vmax Returns: ax.pcolormesh: Image of the flow field around the wind farm """ x =
np.linspace(0, size_x, nr_points[0])
numpy.linspace
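# A minimal sketch of the numpy.linspace / meshgrid pattern in the completion
# above: evenly spaced x and y coordinates spanning the farm domain are combined
# into a mesh suitable for pcolormesh. Domain size and point counts are illustrative.
import numpy as np

size_x, size_y = 3.0, 2.0          # domain extent, e.g. in km
nr_points = (300, 200)             # number of grid points in x and y
x = np.linspace(0, size_x, nr_points[0])
y = np.linspace(0, size_y, nr_points[1])
x_mesh, y_mesh = np.meshgrid(x, y) # both meshes have shape (200, 300)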
import numpy as np class BetaRadomization(): def __init__(self, beta): """ Do initliatization """ self.mhf = 2 # maximal horzontal frequency self.mvf = 5 # maximal vertical frequency self.height_max = 5 self.offset = [] self.beta = beta # sample number of furier components, sample random offsets to one another, # Independence Height and angle self.number_height = np.random.randint(3,5) self.number_angle = np.random.randint(6,10) # sample frequencies self.frequencies_angle = np.random.randint(1, self.mhf, size=self.number_angle) self.frequencies_height = np.random.randint(0, self.mvf, size=self.number_angle) # sample frequencies self.offseta = np.random.uniform(0, 2*np.pi, size=self.number_angle) self.offseth = np.random.uniform(0, 2*np.pi, size=self.number_angle) self.intensitya = np.random.uniform(0, 0.1/self.number_angle/2, size=self.number_angle) self.intensityh = np.random.uniform(0, 0.1/self.number_angle/2, size=self.number_angle) pass def propagate_in_time(self, timestep): self.offseta += self.frequencies_angle * timestep/10 self.offseth += self.frequencies_height * timestep / 10 pass def setup(self, beta): pass def _function(self, angle_h=None, height=None): was_None = False if height is None: height = np.linspace(0, self.height_max, 200)/self.height_max*2*np.pi was_None = True if angle_h is None: angle_h =
np.linspace(0, 2*np.pi, 200)
numpy.linspace
import os import cv2 import math import numpy as np import tensorflow as tf from tensorflow.contrib.framework.python.ops import add_arg_scope from tensorflow.python.training import moving_averages from PIL import Image, ImageDraw ################### Mask ################### def random_bbox(img_height=256, img_width=256, margins=0, mask_size=128, random_mask=True): """Generate a random tlhw with configuration. Args: img_height: height of image. img_width: width of image. margins: margins of mask and image border. mask_size: size of mask. random_mask: if True, random location. if False, central location. Returns: tuple: (top, left, height, width) """ if random_mask is True: maxt = img_height - margins - mask_size maxl = img_width - margins - mask_size t = tf.random_uniform( [], minval=margins, maxval=maxt, dtype=tf.int32) l = tf.random_uniform( [], minval=margins, maxval=maxl, dtype=tf.int32) else: t = (img_height - mask_size)//2 l = (img_width - mask_size)//2 h = tf.constant(mask_size) w = tf.constant(mask_size) return (t, l, h, w) def bbox2mask(bbox, img_height=256, img_width=256, max_delta=32, name='mask'): """Generate mask tensor from bbox. Args: bbox: configuration tuple, (top, left, height, width) img_height: height of image. img_width: width of image. max_delta: max delta of masks. name: name of variable scope. Returns: tf.Tensor: output with shape [1, H, W, 1] """ def npmask(bbox, height, width, delta): mask = np.zeros((1, height, width, 1), np.float32) h = np.random.randint(delta//2+1) w = np.random.randint(delta//2+1) mask[:, bbox[0]+h:bbox[0]+bbox[2]-h, bbox[1]+w:bbox[1]+bbox[3]-w, :] = 1. return mask with tf.variable_scope(name), tf.device('/cpu:0'): mask = tf.py_func( npmask, [bbox, img_height, img_width, max_delta], tf.float32, stateful=False) mask.set_shape([1] + [img_height, img_width] + [1]) return mask def brush_stroke_mask(img_height=256, img_width=256, name='mask'): """Generate free form mask tensor. Returns: tf.Tensor: output with shape [1, H, W, 1] """ min_num_vertex = 4 max_num_vertex = 12 mean_angle = 2*math.pi / 5 angle_range = 2*math.pi / 15 min_width = 12 max_width = 40 def generate_mask(H, W): average_radius = math.sqrt(H*H+W*W) / 8 mask = Image.new('L', (W, H), 0) for _ in range(np.random.randint(1, 4)): num_vertex = np.random.randint(min_num_vertex, max_num_vertex) angle_min = mean_angle - np.random.uniform(0, angle_range) angle_max = mean_angle + np.random.uniform(0, angle_range) angles = [] vertex = [] for i in range(num_vertex): if i % 2 == 0: angles.append(2*math.pi - np.random.uniform(angle_min, angle_max)) else: angles.append(np.random.uniform(angle_min, angle_max)) h, w = mask.size vertex.append((int(np.random.randint(0, w)), int(
np.random.randint(0, h)
numpy.random.randint
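# A minimal sketch of the numpy.random.randint usage in the completion above: the
# vertices of a brush stroke are sampled uniformly inside a canvas, remembering
# that randint's upper bound is exclusive. The canvas size is illustrative.
import numpy as np

H, W = 256, 256
vertex = (int(np.random.randint(0, W)), int(np.random.randint(0, H)))   # (x, y)
assert 0 <= vertex[0] < W and 0 <= vertex[1] < H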
'''
This module downloads the original CIFAR10 data set from the official
web site and converts it into numpy/sklearn format.

Author: <NAME>, M.O.Franz
'''
import os, struct
import io
from array import array
import numpy as np
import urllib.request
import tarfile
import pickle

def read(path = "."):
    """
    Python function for importing the CIFAR10 data set::

        X0,y0,X1,y1,X2,y2 = load_cifar10.read(path)

    returns the 3 x 32 x 32 input images as rows of the int32 data matrix X
    with format n x 3072 and the labels as an n-vector of integer class labels
    (n = 45,500 for training, n = 4,500 for validation and testing).
    X0,y0 is the training set, X1,y1 the test set and X2,y2 the validation set.

    Parameters:

    * 'path' is the path where the dataset is stored or downloaded.

    Calls loader() to download CIFAR10 if necessary.
    """
    # download if necessary
    if not (os.path.exists(os.path.join(path, 'data_batch_1'))):
        path = loader()

    # training dataset
    X0 = []
    y0 = []
    for i in range(1,6,1):
        fo = open(str(path + 'data_batch_' + str(i)), 'rb')
        dict = pickle.load(fo, encoding="latin1")
        fo.close()

        arr = np.asarray(dict['labels'])
        arr = arr.reshape((arr.shape[0],1))
        if len(X0) == 0:
            X0 = dict['data']
            y0 = arr
        else:
            X0 = np.concatenate((X0, dict['data']), axis=0)
            y0 = np.concatenate((y0, arr), axis=0)

    # convert to int32
    X0 =
np.array(X0, dtype="int32")
numpy.array
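# A minimal sketch of the numpy.array dtype cast in the completion above: the
# concatenated CIFAR batches (uint8 pixel rows) are copied and recast to int32.
# The toy shapes below stand in for the full stacked training matrix.
import numpy as np

X0 = np.concatenate((np.zeros((2, 3072), dtype=np.uint8),
                     np.ones((2, 3072), dtype=np.uint8)), axis=0)
X0 = np.array(X0, dtype="int32")    # copy + cast; X0.dtype is now int32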
import unittest import numpy as np import nibabel as nib from meta_analysis import Maps from globals_test import affine class NPeaksTestCase(unittest.TestCase): def setUp(self): self.array = np.array([[[1, 2, 3], [4, 5, 6], [7, 8, 9]]]) self.array2 = np.array([[[[1, 9], [2, 8], [3, 7]], [[4, 6], [5, 5], [6, 4]], [[7, 3], [8, 2], [9, 1]]]]) self.mask_data = np.array([[[0, 0, 1], [0, 1, 0], [0, 0, 0]]]) self.mask = nib.Nifti1Image(self.mask_data, affine) self.expected = np.array([45]) self.expected2 = np.array([45, 45]) self.expected_masked = np.array([8]) self.expected_masked2 = np.array([8, 12]) self.Ni, self.Nj, self.Nk = self.mask_data.shape def test_one_map(self): maps = Maps(self.array, Ni=self.Ni, Nj=self.Nj, Nk=self.Nk) self.assertTrue(np.array_equal(maps.n_peaks(), self.expected)) def test_two_maps(self): maps2 = Maps(self.array2, Ni=self.Ni, Nj=self.Nj, Nk=self.Nk) self.assertTrue(np.array_equal(maps2.n_peaks(), self.expected2)) def test_one_map_masked(self): maps = Maps(self.array, Ni=self.Ni, Nj=self.Nj, Nk=self.Nk, mask=self.mask) self.assertTrue(np.array_equal(maps.n_peaks(), self.expected_masked)) def test_two_maps_masked(self): maps2 = Maps(self.array2, Ni=self.Ni, Nj=self.Nj, Nk=self.Nk, mask=self.mask) self.assertTrue(np.array_equal(maps2.n_peaks(), self.expected_masked2)) class MaxTestCase(unittest.TestCase): def setUp(self): self.array = np.array([[[1, 2, 3], [4, 5, 6], [7, 8, 9]]]) self.array2 = np.array([[[[1, 9], [2, 8], [3, 7]], [[4, 6], [5, 5], [6, 4]], [[7, 3], [8, 2], [9, 1]]]]) self.mask_data = np.array([[[0, 0, 1], [0, 1, 0], [0, 0, 0]]]) self.mask = nib.Nifti1Image(self.mask_data, affine) self.expected0 = 9. self.expected1 = np.array([[9]]) self.expected2 = np.array([[9, 9]]) self.expected_masked1 = np.array([[5]]) self.expected_masked2 =
np.array([[5, 7]])
numpy.array
# Python 3.7.2 version of the ODELAY Image Pipeline import cv2 from fast_histogram import histogram1d import h5py import math import matplotlib.pyplot as plt import numpy as np import os import pandas as pd import sqlalchemy as db import pathlib import re import scipy.io as sio from scipy.sparse import csr_matrix from scipy.optimize import minimize import time # internal libraries import tools.fileio as fio def readImage(fileName): imageData = fio.loadmatlab(fileName) # fhandle = h5py.File(fileName, 'r') return imageData def readExcelSheetDisc(fileName): if fileName == None: fileName = fio.openFileDialog() df = pd.read_excel('fileName', sheetname='Sheet1') print("Column headings:") print(df.columns) def readExpDisc(fileName): # Reture ExpObj if fileName ==None: fileName = fio.openFileDialog() expData = fio.loadData(fileName) return CurrExp def roiLoadState(monitorDataFile, roiFile): return None def initializeExperiment(imagePath, dataPath): ''' Write ODELAY Index File to initialize experiment and provide a list of roi to process as well as experiment variables. Critical variables: starting time--must be before all file time points magnification pixel size sensor size Future versions of the microscope control software will write this data into the images. 1. Make ROI Dict that includes Paths to files and number of images in each file. 2. Make Dict of microscope parameters magnification and pixel size and sensor data 3. Write those variables to a hdf5 file for retrival by workers processing each ROI individually. ''' # Parse argument and check to see if it is a path file. if isinstance(imagePath, str): imagePath = pathlib.Path(imagePath) if isinstance(dataPath, str): dataPath = pathlib.Path(dataPath) stageFile = imagePath / 'ODELAY_StageData.mat' expName = imagePath.parts[-1] stageData = fio.loadData(stageFile) roiIndex = stageData['mP']['wellIdx']-1 roiList = list(stageData['mP']['wellID'][roiIndex]) roiList.sort() # Read in which folders are there and check roiFiles = getRoiFileList(imagePath, roiList) backgroundImage = generateBackground(imagePath, roiList[:5]) # TODO: These need to be defined by the mocroscope # Magnificaton, pixel size, camera dimensions, image state, # and image orientation, stage direction possibly pass in image files. if backgroundImage.shape[0] == 2048: magnification = 10 pixSize = 6.5 else: magnification = 20 pixSize = 6.45 odelayDataPath = dataPath / 'ODELAY Roi Data' if not odelayDataPath.exists(): odelayDataPath.mkdir() initFileName = expName + '_Index_ODELAYData.hdf5' expInitFilePath = dataPath / initFileName expDictionary = { 'backgroundImage': backgroundImage, 'defaultFitRanges': np.array([0,0]), 'maxObj': 5000, 'numTimePoints': 320, # number of timeponts 'timerIncrement': 1800, # timer increment in seconds 'threshold_offset': 1, 'pixSize': pixSize, 'sensorSize': np.array(backgroundImage.shape,dtype='int32'), 'magnification': magnification, 'coarseness': 25, 'kernalerode': 3, 'kernalopen': 8, 'roiFiles': roiFiles, 'experiment_name': expName, 'odelayDataPath': str(odelayDataPath), 'expInitFilePath': str(expInitFilePath) } fio.saveDict(expInitFilePath, expDictionary) return expDictionary def generateBackground(imagePath, roiList): ''' Generate sensor background by averaging a number of initial images given by the length of the roiList. 
''' # ToDo: add in multicolor support for fluorescent images numImage = len(roiList) roiPath = pathlib.Path('./'+ roiList[0]) imageFileName = pathlib.Path('./'+ roiList[0] + '_1.mat') imageFilePath = imagePath / roiPath / imageFileName imageData = fio.loadData(imageFilePath) imageDim = imageData['rawImage'].shape accumeImage = np.zeros(imageDim[0:2], dtype= 'float') imageDevisor = float(numImage * imageDim[2]) for im in range(numImage): roiPath = pathlib.Path('./'+ roiList[im]) imageFileName = pathlib.Path('./'+ roiList[im] + '_1.mat') imageFilePath = imagePath / roiPath / imageFileName imageData = fio.loadData(imageFilePath) for tile in range(imageDim[2]): floatImage = (1/imageDevisor) * imageData['rawImage'][:,:,tile].astype('float') accumeImage += floatImage accumeImage-= np.min(accumeImage) return accumeImage.astype('uint16') def roiProcess(imagepath, datapath, roiID, verbos = False): ''' Data from Experiment Dictionary or Object ''' if isinstance(imagepath, str): imagePath = pathlib.Path(imagepath) else: imagePath = imagepath if isinstance(datapath, str): dataPath = pathlib.Path(datapath) else: dataPath = datapath indexList = [k for k in dataPath.glob('*Index_ODELAYData.*')] if len(indexList)==1: expIndexPath = dataPath / indexList[0] else: print('Could not find the correct index file or there were more than one in the diretory') expData = fio.loadData(expIndexPath) ##################################### # Load Dictionary variables There has to be a way to dynamically add these ##################################### background = expData['backgroundImage'] defaultFitRanges = expData['defaultFitRanges'] maxObj = expData['maxObj'] numTimePoints = expData['numTimePoints'] # number of timeponts timerIncrement = expData['timerIncrement'] # timer increment in seconds threshold_offset = expData['threshold_offset'] pixSize = expData['pixSize'] sensorSize = expData['sensorSize'] magnification = expData['magnification'] coarseness = expData['coarseness'] kernalerode = expData['kernalerode'] kernalopen = expData['kernalopen'] roiFiles = expData['roiFiles'] experiment_name = expData['experiment_name'] odelayDataPath = dataPath / 'ODELAY Roi Data' ############################ # expData dictionary is a hdf5 file that will contain the correct information # initialize the experiment. Perhaps it should be an ini file but at the momement its not # defaultFitRanges = None # maxObj = 5000 # numTimePoints = 320 # number of timeponts # timerIncrement = 1800 # timer increment in seconds # threshold_offset = 1 # pixSize = 6.45 # magnification = 20 # courseness = 25 # kernalerode = 3 # kernalopen = 8 ############################ # monitorData = fio.loadmat(monitorDataFile) # % Load Well Data # TODO: loadWell State for cronjob or monitor data files # Load state from Database or create one if it doesn't exist # Check number of images analyzed and number not analyzed # NewLoadImage + # LoadOldImage + # ThresholdOldImage + # ThresholdNewImage + # PhaseCorrelate Old New Evaluate SampleDrift + # BlobAnalysis + # Object Track -+ # EnterData into ObjectNext and ObjectTrack Data -+ # Estimate Growth curves -+ # Save Low Bit Depth Image for display # Update well analysis # Shut down workers once caught up. 
''' The following code is to initialize data for all wells ''' if isinstance(roiID, str): roiLabel = roiID elif isinstance(roiID, int): roiList = [*roiFiles] roiLabel = roiList[roiID] # Else this will crash roiPath = imagePath / roiLabel imageFileList = os.listdir(roiPath) # Understand this gem of a regular expression sort. imageFileList.sort(key=lambda var:[int(x) if x.isdigit() else x for x in re.findall(r'[^0-9]|[0-9]+', var)]) numImages = len(imageFileList) if numTimePoints<numImages: numTimePoints = numImages threshold = np.zeros(numTimePoints, dtype='uint16') # Array 1 x numTimePoints uint16 # imageFileList = []# List of strings stitchMeta = {} # Dictionary or list for image stitching data xyzTime = np.zeros((numTimePoints, 4), dtype ='float64') timePoints = np.full( numTimePoints, 'nan', dtype='float64') # Array dbl 1 x numTimePoints double objectNext = np.zeros((maxObj, numTimePoints), dtype='uint16') # Array maxObj x numTimePoints uint16 objectTrack = np.zeros((maxObj, numTimePoints), dtype='uint16') # Array maxObj x numTimePoints uint16 objectArea = np.zeros((maxObj, numTimePoints), dtype='uint32') # Array maxObj x numTimePoints double objectCentX = np.zeros((maxObj, numTimePoints), dtype='float64') # Array maxObj x numTimePoints double objectCentY = np.zeros((maxObj, numTimePoints), dtype='float64') # Array maxObj x numTimePoints double numObj = np.zeros(numTimePoints, dtype = 'float64') sumArea = np.zeros( numTimePoints, dtype = 'float64') fitData = np.zeros((maxObj, 17), dtype='float64') # Dictionary array maxObj x 17 double imageHist = np.zeros((numTimePoints, 2**16), dtype = 'uint32') analyzeIndex = np.zeros(numTimePoints, dtype = 'bool') xyDisp = np.zeros((numTimePoints, 4), dtype = 'int32') prImage ={} # End Initialization # processTime = np.zeros() tstart = time.time() print(f'The ROI is {roiID}') # Start Processing Data Here for aI in range(numImages): t0 = time.time() # load New Image imageFilePath = roiPath / imageFileList[aI] anImage = stitchImage(imageFilePath, pixSize, magnification, background) #TODO: Generate a thumbnail of the stitched image for use in the GUI later stitchMeta.update({f'{aI:03d}': anImage['stitchMeta']}) xyzTime[aI,:] = anImage['stitchMeta']['xyzTime'][0:4] xyDim = anImage['Bf'].shape sobelBf = SobelGradient(anImage['Bf']) sobelCent = SobelGradient(anImage['centIm']) threshold[aI] = thresholdImage(sobelBf, threshold_offset, coarseness) imageHist[aI,:] = histogram1d(sobelBf.ravel(), 2**16, [0,2**16], weights = None).astype('uint32') bwBf = np.greater(sobelBf, threshold[aI]).astype('uint8') akernel = np.array([[0, 0, 1, 0, 0], [0, 1, 1, 1, 0], [1, 1, 1, 1, 1], [0, 1, 1, 1, 0], [0, 0, 1, 0, 0]], dtype='uint8') # dilate # fill # erode # open # bwBf = cv2.dilate(bwBf, akernel, iterations = 1) # okernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (kernalopen , kernalopen)) # bwBf = cv2.morphologyEx(bwBf, cv2.MORPH_CLOSE,okernel) # bwBf = cv2.erode( bwBf, akernel, iterations = 1) # bwBf = cv2.morphologyEx(bwBf, cv2.MORPH_OPEN, okernel) ####### # Python Implementation ekernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (kernalerode, kernalerode)) okernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (kernalopen , kernalopen)) bwBf = cv2.dilate(bwBf, ekernel, iterations = 1) bwBf = cv2.erode( bwBf, ekernel, iterations = 1) bwBf = cv2.morphologyEx(bwBf, cv2.MORPH_OPEN, okernel) bwBf = cv2.morphologyEx(bwBf, cv2.MORPH_CLOSE,okernel) bwBf[1, :] = 1 bwBf[:, 1] = 1 bwBf[:,-1] = 1 bwBf[-1,:] = 1 sumArea[aI] = np.sum(bwBf) anImage['sobelBf'] = sobelBf 
anImage['bwBf'] = bwBf imageStats = cv2.connectedComponentsWithStats(bwBf, 8, cv2.CV_32S) # imageStats[0] is the number of objects detected # imageStats[1] is the labeled image uint32 # imageStats[2] is a number of objects x 5 List that are object stats # imageStats[3] is object centroids # TODO: Extract Fluorescence data from Fluoresences image # This will be done either using the threshold areas in the # labeled Image to extract corresponding areas in the # fluoresence image and then summing those areas if aI != 0: # Centroid Association # Figure out what the image shift is from the previous Images bw1 = np.greater(sobelCent, threshold[aI]).astype('uint8') bw2 = np.greater(prImage['sobelCent'], threshold[aI]).astype('uint8') # Use FFT phase corelation to determin the offet fT = np.multiply(anImage['fTrans'], prImage['fTrans'].conj()) fTabs = np.divide(fT,abs(fT)) fmag1 = np.fft.ifft2(fTabs) fmag1[0,0] = 0 # The first index of fmag is always 1 so ignor it. r, c = np.where(fmag1 == fmag1.max()) xyDim = anImage['centIm'].shape row = [xyDim[0]-r[0], r[0]] col = [xyDim[1]-c[0], c[0]] rDisp = np.zeros((16,3), dtype = 'int32') cDisp = np.zeros((16,3), dtype = 'int32') cnt = 0 for r in row: for c in col: rDisp[cnt,:] = [r,0,r] cDisp[cnt,:] = [c,0,c] cnt += 1 rDisp[cnt,:] = [0,r,r] cDisp[cnt,:] = [0,c,c] cnt += 1 rDisp[cnt,:] = [r,0,r] cDisp[cnt,:] = [0,c,c] cnt += 1 rDisp[cnt,:] = [0,r,r] cDisp[cnt,:] = [c,0,c] cnt += 1 cond = np.zeros(16,dtype = 'int32') for n in range(16): sw1 = np.zeros((xyDim[0] + rDisp[n,2] , xyDim[1] + cDisp[n,2]), dtype = 'uint8') sw2 = np.zeros((xyDim[0] + rDisp[n,2] , xyDim[1] + cDisp[n,2]), dtype = 'uint8') swT = np.zeros((xyDim[0] + rDisp[n,2] , xyDim[1] + cDisp[n,2]), dtype = 'uint8') rs1 = rDisp[n,0] re1 = rDisp[n,0] + xyDim[0] cs1 = cDisp[n,0] ce1 = cDisp[n,0] + xyDim[1] rs2= rDisp[n,1] re2= rDisp[n,1] + xyDim[0] cs2= cDisp[n,1] ce2= cDisp[n,1] + xyDim[1] sw1[rs1:re1, cs1:ce1] = bw1 sw2[rs2:re2, cs2:ce2] = bw2 swT = sw1*sw2 cond[n] = swT.sum(axis = None, dtype = 'float') ind = cond.argmax() xyDisp[aI,:] = np.array((rDisp[ind,0],cDisp[ind,0],rDisp[ind,1],cDisp[ind,1]), dtype = 'int32') # this gives the overlap vector for aligning the images # Set image Dimensions so they are identical. xyDim = bwBf.shape xyDimP = prImage['bwBf'].shape maxDim = np.max([xyDim, xyDimP],axis = 0) maxDisp = np.array((xyDisp[aI,[0,2]].max(), xyDisp[aI,[1,3]].max()),dtype = 'int32') # To do include translation from images earlier. alDim = np.floor((maxDim-xyDim)/2).astype('int') auDim = maxDim-np.ceil((maxDim-xyDim)/2).astype('int') blDim = np.floor((maxDim-xyDimP)/2).astype('int') buDim = maxDim-np.ceil((maxDim-xyDimP)/2).astype('int') arsV = alDim[0] + xyDisp[aI,0] areV = auDim[0] + xyDisp[aI,0] acsV = alDim[1] + xyDisp[aI,1] aceV = auDim[1] + xyDisp[aI,1] brsV = blDim[0] + xyDisp[aI,2] breV = buDim[0] + xyDisp[aI,2] bcsV = blDim[1] + xyDisp[aI,3] bceV = buDim[1] + xyDisp[aI,3] A = np.zeros((maxDim + maxDisp),dtype = 'uint8') B = np.zeros((maxDim + maxDisp),dtype = 'uint8') aLbl = np.zeros((maxDim + maxDisp),dtype = 'uint16') bLbl = np.zeros((maxDim + maxDisp),dtype = 'uint16') A[arsV:areV,acsV:aceV] = bwBf B[brsV:breV,bcsV:bceV] = prImage['bwBf'] aLbl[arsV:areV,acsV:aceV] = imageStats[1] bLbl[brsV:breV,bcsV:bceV] = prevImStats[1] # % Multiply black and white Images together. This makes a mask # % where colonies overlap. 
M = A*B ALbl = aLbl*M # Current Labeled Image BLbl = bLbl*M # Prev Labeled Image ccM = cv2.connectedComponents(M, 8, cv2.CV_32S) numObj[aI] = ccM[0] if ccM[0] >5000: print('Number of objects in ', aI, ' greater than 5000') # ccM is the total number of objects returned in the image ARvl = ALbl.ravel() BRvl = BLbl.ravel() MRvl = ccM[1].ravel() # Create a sparse matrix of the labeled connected component image smM = csr_matrix((MRvl, [MRvl, np.arange(MRvl.shape[0])] ), shape=(ccM[0],MRvl.shape[0])) # Get the indices of the non-zero elements of the connected # components. Use a list comprehension and # np.split to find the indices of each labeled area in the ccM # matrix. Then make sure that the labels of ALbl and BLbl are # unique by taking the absolute value of the difference between # all the labeled pixels and summing them. If all pixels # are identical then the function diffsum should return zero. # If both labels in each image are unique then no merging of # overlapping objects has occurred. trkInds = np.array(([ [ARvl[inds[0]], BRvl[inds[0]]] for inds in np.split(smM.indices, smM.indptr[1:-1]) if diffsum(ARvl[inds])==0 and diffsum(BRvl[inds])==0 ]), dtype = 'int') # Place objects that were linked in the Object Next list into an easier to # address Object Track List. if np.max(trkInds)>=5000: tempInds = trkInds>4999 trkInds[tempInds] = 0 objectNext[trkInds[:,1],aI-1] = trkInds[:,0] rc = objectNext.shape nextHist = histogram1d(objectNext[:,aI-1],rc[0],[0,rc[0]],weights = None).astype('int') discard = np.where(nextHist>1) for val in discard[0]: inds = np.where(objectNext[:,aI-1]==val) objectNext[inds,aI-1] = 0 curInds = np.arange(maxObj, dtype = 'int') curVec = curInds[objectTrack[:,aI-1]!=0] nextVec = objectTrack[curVec,aI-1] if nextVec.shape != 0: objectTrack[curVec,aI] = objectNext[nextVec,aI-1] curVec = curInds[objectTrack[:,aI]!=0] objVec = objectTrack[curVec,aI] objectArea[ curVec, aI] = imageStats[2][objVec,4] objectCentX[curVec, aI] = imageStats[3][objVec,0] objectCentY[curVec, aI] = imageStats[3][objVec,1] # Generate Timepoints for this Data-Set timePoints[aI] = (xyzTime[aI,3]-xyzTime[0,3])*1440 # Matlab stores time as a fractional number of days; multiply by 1440 (the number of minutes in a day) to convert to minutes.
elif aI == 0: curVec = np.arange(imageStats[0], dtype = 'int') timePoints[aI] = 0 objectTrack[0:imageStats[0],0] = np.arange(imageStats[0], dtype = 'uint16') objectArea[ curVec, aI] = imageStats[2][curVec,4] objectCentX[curVec, aI] = imageStats[3][curVec,0] objectCentY[curVec, aI] = imageStats[3][curVec,1] # set up for next Image by replacing the previous image information prImage = anImage prImage['sobelCent'] = sobelCent prevImStats = imageStats t1 = time.time() print('Image ', aI, ' took ', t1-t0, ' seconds') print((t1-tstart)/60, ' minutes have elapsed') # breakpoint() # This is a filter to get rid of very big steps in the objectArea that # may be due to either loss of focus or other imaging problems log2Area = np.log2(objectArea.astype('float')) diffArea = np.diff(log2Area,axis=1,n=1, append=0) diffAreaAbs = np.abs( diffArea) dbInds = diffAreaAbs>1 bgSteps = np.cumsum(dbInds,axis=1)==0 objectArea[~bgSteps]= 0 indVec = np.arange(maxObj) numObs = np.sum(objectArea!=0, axis = 1) fitVec = indVec[numObs>5] for m in fitVec: (fitCols, fitData[m,0:16]) = fitGrowthCurves(timePoints, objectArea[m,:],defaultFitRanges) if len(fitVec)==0: fitCols = {'No Data Fit':1} # returnDict = {'anImage': anImage, # 'prImage': prImage, # 'background': background, # 'stitchMeta': stitchMeta, # 'imageHist': imageHist, # 'timePoints': timePoints, # 'objectArea': objectArea, # 'objectTrack': objectTrack, # 'objectCentX': objectCentX, # 'objectCentY': objectCentY, # 'objectNext': objectNext, # 'threshold': threshold, # 'numObj': numObj, # 'sumArea': sumArea, # 'xyDisp': xyDisp, # 'xyzTime': xyzTime, # 'fitData': fitData, # 'roiLabel': roiLabel # } returnDict = {'stitchMeta': stitchMeta, 'imageHist': imageHist, 'timePoints': timePoints, 'objectArea': objectArea, 'objectTrack': objectTrack, 'objectCentX': objectCentX, 'objectCentY': objectCentY, 'objectNext': objectNext, 'threshold': threshold, 'sumArea': sumArea, 'numObj': numObj, 'xyDisp': xyDisp, 'xyzTime': xyzTime, 'fitData': fitData, 'fitDataCols': fitCols, 'roiLabel': roiLabel } fio.saveROI(odelayDataPath, returnDict) return returnDict def roiMacInfo(imagepath, datapath, roiID, verbos = False): ''' Data from Experiment Dictionary or Object ''' if isinstance(imagepath, str): imagePath = pathlib.Path(imagepath) else: imagePath = imagepath if isinstance(datapath, str): dataPath = pathlib.Path(datapath) else: dataPath = datapath indexList = [k for k in dataPath.glob('*Index_ODELAYData.*')] if len(indexList)==1: expIndexPath = dataPath / indexList[0] else: print('Could not find the correct index file or there was more than one in the directory') expData = fio.loadData(expIndexPath) ##################################### # Load Dictionary variables There has to be a way to dynamically add these ##################################### background = expData['backgroundImage'] defaultFitRanges = expData['defaultFitRanges'] maxObj = expData['maxObj'] numTimePoints = expData['numTimePoints'] # number of timepoints timerIncrement = expData['timerIncrement'] # timer increment in seconds threshold_offset = expData['threshold_offset'] pixSize = expData['pixSize'] sensorSize = expData['sensorSize'] magnification = expData['magnification'] coarseness = expData['coarseness'] kernalerode = expData['kernalerode'] kernalopen = expData['kernalopen'] roiFiles = expData['roiFiles'] experiment_name = expData['experiment_name'] roiSavePath = dataPath / 'ODELAY Roi Data' / f'{roiID}.hdf5' ''' The following code is to initialize data for all wells
''' roiPath = imagePath / roiID fileList = os.listdir(roiPath) imageFileList = [fileName for fileName in fileList if '.mat' in fileName] # Understand this gem of a regular expression sort. imageFileList.sort(key=lambda var:[int(x) if x.isdigit() else x for x in re.findall(r'[^0-9]|[0-9]+', var)]) numImages = len(imageFileList) if numTimePoints<numImages: numTimePoints = numImages imageInfo = {} # Start Processing Data Here for aI in range(numImages): # # load New Image imageFilePath = roiPath / imageFileList[aI] anImage = stitchImage(imageFilePath, pixSize, magnification, background) # TODO: Generate a thumbnail of the stitched image for use in the GUI later imageInfo[f'{aI:03d}'] = {} imageInfo[f'{aI:03d}']['stitchMeta'] = anImage['stitchMeta'] imageInfo[f'{aI:03d}']['index'] = aI+1 # for imType in anImage['imageLabels'].keys() # flourImageDict = {colList[val] : val for val in range(len(colList))} fluorImageList = [Lbl for Lbl in [*anImage['imageLabels']] if not Lbl=='Bf'] flourDict ={fluorImageList[im]: im for im in range(len(fluorImageList))} for flourIm in fluorImageList: threshold = thresholdImage(anImage[flourIm], threshold_offset, coarseness) flourBw = morphImage(anImage[flourIm], kernalerode, kernalopen, threshold) imageStats = cv2.connectedComponentsWithStats(flourBw, 8, cv2.CV_32S) FRvl = anImage[flourIm].ravel() MRvl = imageStats[1].ravel() # Create a sparce matrix of the labeled connected component image smM = csr_matrix((MRvl, [MRvl, np.arange(MRvl.shape[0])]), shape=(imageStats[0],MRvl.shape[0])) objIntensity = np.array(([ np.sum(FRvl[inds]) for inds in np.split(smM.indices, smM.indptr[1:-1]) ]), dtype = 'uint32') imageInfo[f'{aI:03d}'][flourIm] = {} imageInfo[f'{aI:03d}'][flourIm]['threshold'] = threshold imageInfo[f'{aI:03d}'][flourIm]['boundBox'] = imageStats[2] imageInfo[f'{aI:03d}'][flourIm]['centroids'] = imageStats[3] imageInfo[f'{aI:03d}'][flourIm]['objIntensity'] = objIntensity fio.saveDict(roiSavePath, imageInfo) return imageInfo def roiMacSeg(imagepath, datapath, roiID, verbos = False): ''' Data from Experiment Dictionary or Object ''' if isinstance(imagepath, str): imagePath = pathlib.Path(imagepath) else: imagePath = imagepath if isinstance(datapath, str): dataPath = pathlib.Path(datapath) else: dataPath = datapath indexList = [k for k in dataPath.glob('*Index_ODELAYData.*')] if len(indexList)==1: expIndexPath = dataPath / indexList[0] else: print('Could not find the correct index file or there were more than one in the diretory') deadDirPath = dataPath / 'DeadCells' if not deadDirPath.exists(): deadDirPath.mkdir() liveDirPath = dataPath / 'LiveCells' if not liveDirPath.exists(): liveDirPath.mkdir() expData = fio.loadData(expIndexPath) ##################################### # Load Dictionary variables There has to be a way to dynamically add these ##################################### background = expData['backgroundImage'] defaultFitRanges = expData['defaultFitRanges'] maxObj = expData['maxObj'] numTimePoints = expData['numTimePoints'] # number of timeponts timerIncrement = expData['timerIncrement'] # timer increment in seconds threshold_offset = expData['threshold_offset'] pixSize = expData['pixSize'] sensorSize = expData['sensorSize'] magnification = expData['magnification'] coarseness = expData['coarseness'] kernalerode = expData['kernalerode'] kernalopen = expData['kernalopen'] roiFiles = expData['roiFiles'] experiment_name = expData['experiment_name'] roiSavePath = dataPath / 'ODELAY Roi Data' / f'{roiID}.hdf5' if isinstance(roiID, str): roiLabel = roiID 
elif isinstance(roiID, int): roiList = [*roiFiles] roiLabel = roiList[roiID] # Else this will crash roiPath = imagePath / roiLabel imageFileList = os.listdir(roiPath) # Understand this gem of a regular expression sort. imageFileList.sort(key=lambda var:[int(x) if x.isdigit() else x for x in re.findall(r'[^0-9]|[0-9]+', var)]) numImages = len(imageFileList) if numTimePoints<numImages: numTimePoints = numImages threshold = np.zeros(numTimePoints, dtype='uint16') # Array 1 x numTimePoints uint16 # imageFileList = []# List of strings stitchMeta = {} # Dictionary or list for image stitching data xyzTime = np.zeros((numTimePoints, 4), dtype ='float64') timePoints = np.full( numTimePoints, 'nan', dtype='float64') # Array dbl 1 x numTimePoints double numObj = np.zeros(numTimePoints, dtype = 'float64') sumArea = np.zeros( numTimePoints, dtype = 'float64') fitData = np.zeros((maxObj, 17), dtype='float64') # Dictionary array maxObj x 17 double imageHist = np.zeros((numTimePoints, 2**16), dtype = 'uint32') analyzeIndex = np.zeros(numTimePoints, dtype = 'bool') xyDisp = np.zeros((numTimePoints, 4), dtype = 'int32') prImage ={} ''' The following code is to initialize data for all wells ''' roiPath = imagePath / roiID fileList = os.listdir(roiPath) imageFileList = [fileName for fileName in fileList if '.mat' in fileName] # Understand this gem of a regular expression sort. imageFileList.sort(key=lambda var:[int(x) if x.isdigit() else x for x in re.findall(r'[^0-9]|[0-9]+', var)]) numImages = len(imageFileList) if numTimePoints>numImages: numTimePoints = numImages imageInfo = {} liveCnt = 0 deadCnt = 0 # Start Processing Data Here for aI in range(numTimePoints): # # load New Image imageFilePath = roiPath / imageFileList[aI] anImage = opl.stitchImage(imageFilePath, pixSize, magnification, background) # TODO: Generate a thumbnail of the stitched image for use in the GUI later imageInfo[f'{aI:03d}'] = {} imageInfo[f'{aI:03d}']['stitchMeta'] = anImage['stitchMeta'] imageInfo[f'{aI:03d}']['index'] = aI+1 sobelBf = opl.SobelGradient(anImage['Bf']) threshold = opl.thresholdImage(sobelBf, 1.2, coarseness) imageHist[aI,:] = histogram1d(sobelBf.ravel(), 2**16, [0,2**16], weights = None).astype('uint32') bwBf1 = np.greater(sobelBf, threshold).astype('uint8') akernel = np.array([[0, 0, 1, 0, 0], [0, 1, 1, 1, 0], [1, 1, 1, 1, 1], [0, 1, 1, 1, 0], [0, 0, 1, 0, 0]], dtype='uint8') ####### # Python Implementation kernalerode = 4 kernalopen = 3 ekernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (kernalerode, kernalerode)) okernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (kernalopen , kernalopen)) bwBf2 = cv2.dilate(bwBf1, ekernel, iterations = 2) bwBf3 = cv2.erode( bwBf2, ekernel, iterations = 2) bwBf3[1, :] = 1 bwBf3[:, 1] = 1 bwBf3[:,-1] = 1 bwBf3[-1,:] = 1 sumArea[aI] = np.sum(bwBf3) anImage['bwBf'] = bwBf2 bfImageStats = cv2.connectedComponentsWithStats(bwBf2, 8, cv2.CV_32S) scaledBf = scaleImage(anImage['Bf'], lowCut = 0.00001, highcut = 0.9995, scaleImage = 1) scaledSB = scaleImage(sobelBf, lowCut = 0.00001, highcut = 0.9995, scaleImage = 1) scaledCy5 = scaleImage(anImage['Cy5'], lowCut = 0.00001, highcut = 1, scaleImage = 1) scaledHst = scaleImage(anImage['DAPI'], lowCut = 0.00001, highcut = 1, scaleImage = 1) images = [scaledBf,scaledSB, anImage['bwBf'], scaledCy5, scaledHst] titles = ['scaledBF','scaledSB', "anImage['bwBf']", 'scaledCy5', 'scaledHst'] # for i in range(5): # plt.subplot(2,3,i+1),plt.imshow(images[i],'gray') # plt.title(titles[i]) # plt.xticks([]),plt.yticks([]) # plt.show() 
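# For each fluorescence channel the stitched image is thresholded, morphologically
# cleaned, and labeled; per-object integrated intensities are accumulated from the raw
# channel image through a sparse label matrix. Fluorescent centroids are then matched
# to brightfield bounding boxes (checkCentroid / checkBoundBox with a 40-500 pixel size
# filter). Objects with a single DAPI centroid and a single Cy5 centroid are called
# dead, objects with a single DAPI centroid but no single Cy5 centroid are called live,
# and cropped brightfield thumbnails are written to the DeadCells / LiveCells directories.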
imageInfo[f'{aI:03d}']['Bf'] = {} imageInfo[f'{aI:03d}']['Bf']['threshold'] = threshold imageInfo[f'{aI:03d}']['Bf']['boundBox'] = bfImageStats[2] # upper left xy lower right xy imageInfo[f'{aI:03d}']['Bf']['centroids'] = bfImageStats[3] fluorImageList = [Lbl for Lbl in [*anImage['imageLabels']] if not Lbl=='Bf'] flourDict ={fluorImageList[im]: im for im in range(len(fluorImageList))} for flourIm in fluorImageList: threshold = opl.thresholdImage(anImage[flourIm], 1.3, coarseness) flourBw = opl.morphImage(anImage[flourIm], kernalerode, kernalopen, threshold) flImageStats = cv2.connectedComponentsWithStats(flourBw, 8, cv2.CV_32S) FRvl = anImage[flourIm].ravel() MRvl = flImageStats[1].ravel() # Create a sparse matrix of the labeled connected component image fluorPix = csr_matrix((MRvl, [MRvl, np.arange(MRvl.shape[0])]), shape=(flImageStats[0],MRvl.shape[0])) objIntensity = np.array(([ np.sum(FRvl[inds]) for inds in np.split(fluorPix.indices, fluorPix.indptr[1:-1]) ]), dtype = 'uint32') imageInfo[f'{aI:03d}'][flourIm] = {} imageInfo[f'{aI:03d}'][flourIm]['threshold'] = threshold imageInfo[f'{aI:03d}'][flourIm]['boundBox'] = flImageStats[2] imageInfo[f'{aI:03d}'][flourIm]['centroids'] = flImageStats[3] imageInfo[f'{aI:03d}'][flourIm]['objIntensity'] = objIntensity # figure out if the image has a fluorescent centroid in it. imToCheck = flourIm flCents = imageInfo[f'{aI:03d}'][flourIm]['centroids'] cellBounds = imageInfo[f'{aI:03d}']['Bf']['boundBox'] centIn = np.zeros((flCents.shape[0], cellBounds.shape[0]), dtype = 'bool') boundIn= np.zeros((flCents.shape[0], cellBounds.shape[0]), dtype = 'bool') for row in range(flCents.shape[0]): centIn[row,:] = checkCentroid(flCents[row,:], cellBounds, 40, 500) for col in range(cellBounds.shape[0]): boundIn[:,col] = checkBoundBox(flCents, cellBounds[col,:], 40, 500) imageInfo[f'{aI:03d}'][flourIm]['centIn'] = centIn imageInfo[f'{aI:03d}'][flourIm]['boundIn'] = boundIn dapiCents = np.sum(imageInfo[f'{aI:03d}']['DAPI']['centIn'], axis=0) cy5Cents = np.sum(imageInfo[f'{aI:03d}']['Cy5']['centIn'], axis=0) singleDapi = dapiCents == 1 singleCy5 = cy5Cents == 1 deadCell = singleDapi & singleCy5 liveCell = singleDapi & ~singleCy5 deadInds = np.where(deadCell==True) liveInds = np.where(liveCell==True) if type(deadInds[0]) is not tuple and type(liveInds[0]) is not tuple: imageInfo[f'{aI:03d}']['deadCellInds'] = deadInds[0] imageInfo[f'{aI:03d}']['liveCellInds'] = liveInds[0] deadCnt += deadInds[0].shape[0] liveCnt += liveInds[0].shape[0] uniqueDead = np.unique(deadInds[0]) for ind in np.unique(deadInds[0]): deadImagePath = deadDirPath / f'{roiID}_{aI:03d}_{ind}.tiff' bBox = cellBounds[ind,:] xi = bBox[0] xe = bBox[0]+bBox[2] yi = bBox[1] ye = bBox[1]+bBox[3] saveIm = anImage['Bf'][yi:ye, xi:xe] retVal = cv2.imwrite(str(deadImagePath), saveIm) uniqueLive = np.unique(liveInds[0]) for ind in np.unique(liveInds[0]): liveImagePath = liveDirPath / f'{roiID}_{aI:03d}_{ind}.tiff' bBox = cellBounds[ind,:] xi = bBox[0] xe = bBox[0]+bBox[2] yi = bBox[1] ye = bBox[1]+bBox[3] saveIm = anImage['Bf'][yi:ye, xi:xe] retVal = cv2.imwrite(str(liveImagePath), saveIm) fio.saveDict(roiSavePath, imageInfo) return imageInfo def checkCentroid(cent, bB, minDim, maxDim): # check if centroid is within all bounding boxes. # Returns the logical index of which bounding box the centroid is in.
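# cent is a single (x, y) centroid; bB is the cv2.connectedComponentsWithStats stats
# array whose first four columns are [left, top, width, height]; minDim / maxDim reject
# boxes whose width or height fall outside that size range.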
x1 = bB[:,0] y1 = bB[:,1] x2 = bB[:,0]+bB[:,2] y2 = bB[:,1]+bB[:,3] test1 = x1<=cent[0] test2 = x2>=cent[0] test3 = y1<=cent[1] test4 = y2>=cent[1] test5 = bB[:,2]>=minDim test6 = bB[:,3]>=minDim test7 = bB[:,2]<=maxDim test8 = bB[:,3]<=maxDim return test1 & test2 & test3 & test4 & test5 & test6 & test7 & test8 def checkBoundBox(cent, bB, minDim, maxDim): # check which centroids fall within this bounding box. # Returns a logical index of the centroids that lie inside the box. x1 = bB[0] y1 = bB[1] x2 = bB[0]+bB[2] y2 = bB[1]+bB[3] test1 = x1<=cent[:,0] test2 = x2>=cent[:,0] test3 = y1<=cent[:,1] test4 = y2>=cent[:,1] test5 = bB[2]>=minDim test6 = bB[3]>=minDim test7 = bB[2]<=maxDim test8 = bB[3]<=maxDim return test1 & test2 & test3 & test4 & test5 & test6 & test7 & test8 def refitGCs(imagepath, datapath, roiID): return None def gompMinBDt(x, tdata, idata): ''' ''' Klag = np.log((3+5**0.5)/2) a = x[0] b = x[1] tlag = x[2] dT = x[3] yn=a + b*np.exp(-np.exp((Klag/dT)*(dT+tlag-tdata))) vals = np.nansum((yn-idata)**2) return vals def gompBDt(x, tdata): ''' ''' Klag = np.log((3+5**0.5)/2) a = x[0] b = x[1] tlag = x[2] dT = x[3] vals=a + b*np.exp(-np.exp((Klag/dT)*(dT+tlag-tdata))) return vals def findPrmsGompBDt(vecB, vecTlag, vecDT, tdata, adata): ''' Coarse-grid search for parameters of the Parameterized Gompertz function ------- Input Parameters vecB: array of B parameters to search vecTlag: array of lag times to search vecDT: array of DT times to search tData: ordered array of timepoints aData: corresponding area data Returns array of estimated parameters estVec[0] = a estimate estVec[1] = B estimate estVec[2] = lag time estimate estVec[3] = dT or time between max velocity and lag time ''' flag=False estVec = np.zeros(4, dtype = 'float') estVec[0] = np.nanmean(adata[0:5]) K = np.log((3+5**0.5)/2) tVec = np.arange(vecTlag.shape[0]) for B in vecB: for tp in tVec[:-1]: tlag = vecTlag[tp] vecDt = vecTlag[tp+1:]-vecTlag[tp] for dT in vecDt: yn=estVec[0]+B*np.exp(-np.exp((K/dT)*(dT+tlag-tdata))) ifunc = np.sum((adata-yn)**2) if (not flag) or (flag and ifunc < fmin): fmin = ifunc estVec[1] = B estVec[2] = tlag estVec[3] = dT flag = True return estVec def fitGrowthCurves(timeVec, areaData, defaultRanges): numTPs = np.sum(areaData!=0) aData = np.log2(areaData[:numTPs]) tData = timeVec[:numTPs] Nsteps = 40 areaMax = np.max(aData) factor=1.05 cumsum = np.cumsum(np.insert(aData, 0, 0)) smthArea = (cumsum[5:] - cumsum[:-5]) / 5 x = tData[:-4] y = smthArea m = np.diff(y[[0,-1]])/np.diff(x[[0,-1]]) yVals = m*x + y[0]-m*x[0] diffVals = smthArea-yVals cumVals = np.cumsum(diffVals) lagInd = np.argmin(diffVals) texInd = np.argmax(diffVals) vmxInd = np.argmin(cumVals) numPos = np.sum(cumVals[vmxInd:]>0) estVec = np.zeros(4, dtype = 'float') meanArea = np.mean(aData[0:5]) stdArea = np.std(aData[0:5]) estVec[0] = meanArea if lagInd < vmxInd and vmxInd < texInd: estVec[2] = tData[lagInd] estVec[3] = tData[vmxInd] - tData[lagInd] estVec[1] = aData[vmxInd] - meanArea elif lagInd < vmxInd and (texInd<lagInd or texInd<vmxInd): estVec[2] = tData[lagInd] estVec[1] = aData[vmxInd] - meanArea estVec[3] = tData[vmxInd] - tData[lagInd] elif lagInd < texInd and (vmxInd < lagInd or vmxInd < texInd): estVec[2] = tData[lagInd] estVec[1] = aData[texInd] - meanArea estVec[3] = (tData[texInd] - tData[lagInd])/2 else: # Use the coarse-grid optimization function findPrmsGompBDt to find # a local minimum for the parameter estimates vecDT = np.linspace(1,2*tData[-1],Nsteps) bmin = 0 bmax = 16 vecTlag = np.linspace(1,tData[-1],Nsteps) vecB =
np.linspace(bmin,bmax,Nsteps) estVec= findPrmsGompBDt(vecB, vecTlag, vecDT, tData, aData) Klag = np.log((3+5**0.5)/2) aLow = meanArea-3*stdArea aUp = meanArea+3*stdArea dTLow = 1 dTUp = np.max(tData) bLow = 0.001 bUp = 16 lagLow = 0 lagUp = np.max(tData) bnds = [(aLow, aUp), (bLow,bUp), (lagLow, lagUp), (dTLow, dTUp)] minFit = minimize(gompMinBDt, estVec, args = (tData, aData), bounds = bnds) a = minFit.x[0] b = minFit.x[1] Tlag = minFit.x[2] dT = minFit.x[3] Klag =
np.log((3+5**0.5)/2)
numpy.log
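# --- Illustrative sketch (not part of the original sample): the parameterized Gompertz
# model fitted above. gompBDt evaluates
#     y(t) = a + b * exp(-exp((Klag / dT) * (dT + tlag - t))),  Klag = ln((3 + sqrt(5)) / 2),
# where a is the baseline log2 area, b the total rise, tlag the lag time, and dT the time
# from lag to maximum growth velocity. A self-contained version with explicit arguments;
# the parameter values below are made up, purely for illustration:
import numpy as np

def gompertz_bdt(t, a, b, tlag, dT):
    """Parameterized Gompertz curve, mirroring gompBDt(x, tdata) with x = [a, b, tlag, dT]."""
    Klag = np.log((3 + 5**0.5) / 2)
    return a + b * np.exp(-np.exp((Klag / dT) * (dT + tlag - t)))

t = np.linspace(0, 600, 61)                                  # time points in minutes
y = gompertz_bdt(t, a=10.0, b=6.0, tlag=120.0, dT=90.0)      # hypothetical parameters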
#MIT License # #Copyright (c) 2020 standupmaths # #Permission is hereby granted, free of charge, to any person obtaining a copy #of this software and associated documentation files (the "Software"), to deal #in the Software without restriction, including without limitation the rights #to use, copy, modify, merge, publish, distribute, sublicense, and/or sell #copies of the Software, and to permit persons to whom the Software is #furnished to do so, subject to the following conditions: # #The above copyright notice and this permission notice shall be included in all #copies or substantial portions of the Software. # #THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR #IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, #FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE #AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER #LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, #OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE #SOFTWARE. def xmaslight(): # This is the code from my #NOTE THE LEDS ARE GRB COLOUR (NOT RGB) # Here are the libraries I am currently using: import time import board import neopixel import re import math # FOR DEBUGGING PURPOSE #import matplotlib.pyplot as plt #import matplotlib.animation as animation # You are welcome to add any of these: # import random import numpy # import scipy import sys # If you want to have user changable values, they need to be entered from the command line # so import sys sys and use sys.argv[0] etc # some_value = int(sys.argv[0]) # IMPORT THE COORDINATES (please don't break this bit) coordfilename = "Python/coords.txt" # FOR DEBUGGING PURPOSE #coordfilename = "xmastree2020/coords.txt" fin = open(coordfilename,'r') coords_raw = fin.readlines() coords_bits = [i.split(",") for i in coords_raw] coords = [] for slab in coords_bits: new_coord = [] for i in slab: new_coord.append(int(re.sub(r'[^-\d]','', i))) coords.append(new_coord) #set up the pixels (AKA 'LEDs') PIXEL_COUNT = len(coords) # this should be 500 pixels = neopixel.NeoPixel(board.D18, PIXEL_COUNT, auto_write=False) # FOR DEBUGGING PURPOSE #pixels = [ 0 for i in range(PIXEL_COUNT) ] # YOU CAN EDIT FROM HERE DOWN # This program is intended to make a neuronal network out of the tree's LEDs. # # By neuronal network I mean: # the light of each LED will be set according to a dynamic variable V # that stands for a model of the electric potential in the membrane of a real neuron. # And these 'neurons' (i.e., LEDs) will have connections between them # that obey the dynamics of chemical synapses in the brain represented by the variable S. # The network is built according to a 'cubic-like' lattice: i.e., # a given LED receives input from the closest LEDs in each of the 6 spatial directions. 
# Thus, the 'synapse' is represented by a 'virtual' connection, and not a physical one (i.e., the LED wire) # I implemented other 2 types of networks: # a surface networks (only LEDs in the surface of the tree cone 'talk' to each other) # a proximity network (only LEDs within a radius R of each other are connected) # # to visualize the network generated by this algorithm, please run # python view_tree_network.py # first we need to define (a lot of) functions def memb_potential_to_01(V): # V -> dynamic variable (defined in [-1,1]) # the formula below is just a smart way to map # [-1,1] to [0,1], emphasizing bright colors (i.e., colors close to 1) # [0,1] is then mapped on the color_arr below if type(V) is numpy.ndarray: return ((V[:,0]+1.0)*0.5)**4 # raising to 4 is just to emphasize bright colors else: return ((V+1.0)*0.5)**4 # raising to 4 is just to emphasize bright colors def memb_potential_to_coloridx(V,n_colors): # V -> dynamic variable # n_colors -> total number of colors return numpy.floor(n_colors*memb_potential_to_01(V)).astype(int) def create_input_lists(neigh): # given a list of neighbors, where neigh[i] is a list of inputs to node i # generate the list of inputs to be used in the simulation presyn_neuron_list = [n for sublist in neigh for n in sublist] cs = numpy.insert(numpy.cumsum([ n.size for n in neigh ]),0,0) input_list = [ numpy.arange(a,b) for a,b in zip(cs[:-1],cs[1:]) ] return input_list,presyn_neuron_list def generate_list_of_neighbors(r,R=0.0,on_conic_surface_only=False): # generates a network of "pixels" # each pixel in position r[i,:] identifies its 6 closest neighbors and should receive a connection from it # if R is given, includes all pixels within a radius R of r[i,:] as a neighbor # the 6 neighbors are chosen such that each one is positioned to the left, right, top, bottom, front or back of each pixel (i.e., a simple attempt of a cubic lattice) # # r -> position vector (each line is the position of each pixel) # R -> neighborhood ball around each pixel # on_conic_surface_only -> if true, only links pixels that are on the conic shell of the tree # # returns: # list of neighbors # neigh[i] -> list of 6 "pixels" closest to i def is_left_neigh(u,v): # u and v are two vectors on the x,y plane # u may be a list of vectors (one vector per row) return numpy.dot(u,[-v[1],v[0]])>0.0 # # the vector [-v[1],v[0]] is the 90-deg CCW rotated version of v def get_first_val_not_in_list(v,l): # auxiliary function # returns first value in v that is not in l if v.size == 0: return None n = len(v) i = 0 while i < n: if not (v[i] in l): return v[i] i+=1 if on_conic_surface_only: # only adds 4 neighbors (top, bottom, left, right) that are outside of the cone defined by the estimated tree cone parameters # cone equation (x**2 + y**2)/c**2 = (z-z0)**2 z0 = numpy.max(r[:,2]) # cone height above the z=0 plane h = z0 + numpy.abs(numpy.min(r[:,2])) # cone total height base_r = (numpy.max( (numpy.max(r[:,1]),numpy.max(r[:,0])) ) + numpy.abs(numpy.min( ( numpy.min(r[:,1]),numpy.min(r[:,0]) ) )))/2.0 # cone base radius c = base_r / h # cone opening radius (defined by wolfram https://mathworld.wolfram.com/Cone.html ) #z_cone = lambda x,y,z0,c,s: z0+s*numpy.sqrt((x**2+y**2)/(c**2)) # s is the concavity of the cone: -1 turned down, +1 turned up cone_r_sqr = lambda z,z0,c: (c*(z-z0))**2 outside_cone = (r[:,0]**2+r[:,1]**2) > cone_r_sqr(r[:,2],z0,c) pixel_list =
numpy.nonzero(outside_cone)
numpy.nonzero
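# --- Illustrative sketch (not part of the original sample): the cone-shell test used by
# generate_list_of_neighbors when on_conic_surface_only is True. The tree is modelled as
# a cone with apex at height z0 and opening ratio c = base_r / h, and an LED at (x, y, z)
# counts as outside the cone when x**2 + y**2 > (c * (z - z0))**2.
import numpy

def pixels_outside_cone(r):
    """Return indices of coordinates r (N x 3 array) lying outside the fitted tree cone."""
    z0 = numpy.max(r[:, 2])                                   # apex height above the z = 0 plane
    h = z0 + numpy.abs(numpy.min(r[:, 2]))                    # total cone height
    base_r = (numpy.max(r[:, 0:2]) + numpy.abs(numpy.min(r[:, 0:2]))) / 2.0  # base radius
    c = base_r / h                                            # cone opening ratio
    outside = (r[:, 0]**2 + r[:, 1]**2) > (c * (r[:, 2] - z0))**2
    return numpy.nonzero(outside)[0]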
import os import tempfile import numpy as np import scipy.ndimage.measurements as meas from functools import reduce import warnings import sys sys.path.append(os.path.abspath(r'../lib')) import NumCppPy as NumCpp # noqa E402 #################################################################################### def factors(n): return set(reduce(list.__add__, ([i, n//i] for i in range(1, int(n**0.5) + 1) if n % i == 0))) #################################################################################### def test_seed(): np.random.seed(1) #################################################################################### def test_abs(): randValue = np.random.randint(-100, -1, [1, ]).astype(np.double).item() assert NumCpp.absScaler(randValue) == np.abs(randValue) components = np.random.randint(-100, -1, [2, ]).astype(np.double) value = complex(components[0], components[1]) assert np.round(NumCpp.absScaler(value), 9) == np.round(np.abs(value), 9) shapeInput = np.random.randint(20, 100, [2, ]) shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item()) cArray = NumCpp.NdArray(shape) data = np.random.randint(-100, 100, [shape.rows, shape.cols]) cArray.setArray(data) assert np.array_equal(NumCpp.absArray(cArray), np.abs(data)) shapeInput = np.random.randint(20, 100, [2, ]) shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item()) cArray = NumCpp.NdArrayComplexDouble(shape) data = np.random.randint(-100, 100, [shape.rows, shape.cols]) + \ 1j * np.random.randint(-100, 100, [shape.rows, shape.cols]) cArray.setArray(data) assert np.array_equal(np.round(NumCpp.absArray(cArray), 9), np.round(np.abs(data), 9)) #################################################################################### def test_add(): shapeInput = np.random.randint(20, 100, [2, ]) shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item()) cArray1 = NumCpp.NdArray(shape) cArray2 = NumCpp.NdArray(shape) data1 = np.random.randint(-100, 100, [shape.rows, shape.cols]) data2 = np.random.randint(-100, 100, [shape.rows, shape.cols]) cArray1.setArray(data1) cArray2.setArray(data2) assert np.array_equal(NumCpp.add(cArray1, cArray2), data1 + data2) shapeInput = np.random.randint(20, 100, [2, ]) shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item()) cArray = NumCpp.NdArray(shape) data = np.random.randint(-100, 100, [shape.rows, shape.cols]) cArray.setArray(data) value = np.random.randint(-100, 100) assert np.array_equal(NumCpp.add(cArray, value), data + value) shapeInput = np.random.randint(20, 100, [2, ]) shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item()) cArray = NumCpp.NdArray(shape) data = np.random.randint(-100, 100, [shape.rows, shape.cols]) cArray.setArray(data) value = np.random.randint(-100, 100) assert np.array_equal(NumCpp.add(value, cArray), data + value) shapeInput = np.random.randint(20, 100, [2, ]) shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item()) cArray1 = NumCpp.NdArrayComplexDouble(shape) cArray2 = NumCpp.NdArrayComplexDouble(shape) real1 = np.random.randint(1, 100, [shape.rows, shape.cols]) imag1 = np.random.randint(1, 100, [shape.rows, shape.cols]) data1 = real1 + 1j * imag1 real2 = np.random.randint(1, 100, [shape.rows, shape.cols]) imag2 = np.random.randint(1, 100, [shape.rows, shape.cols]) data2 = real2 + 1j * imag2 cArray1.setArray(data1) cArray2.setArray(data2) assert np.array_equal(NumCpp.add(cArray1, cArray2), data1 + data2) shapeInput = np.random.randint(20, 100, [2, ]) shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item()) cArray = 
NumCpp.NdArrayComplexDouble(shape) real = np.random.randint(1, 100, [shape.rows, shape.cols]) imag = np.random.randint(1, 100, [shape.rows, shape.cols]) data = real + 1j * imag cArray.setArray(data) value = np.random.randint(-100, 100) + 1j * np.random.randint(-100, 100) assert np.array_equal(NumCpp.add(cArray, value), data + value) shapeInput = np.random.randint(20, 100, [2, ]) shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item()) cArray = NumCpp.NdArrayComplexDouble(shape) real = np.random.randint(1, 100, [shape.rows, shape.cols]) imag = np.random.randint(1, 100, [shape.rows, shape.cols]) data = real + 1j * imag cArray.setArray(data) value = np.random.randint(-100, 100) + 1j * np.random.randint(-100, 100) assert np.array_equal(NumCpp.add(value, cArray), data + value) shapeInput = np.random.randint(20, 100, [2, ]) shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item()) cArray1 = NumCpp.NdArrayComplexDouble(shape) cArray2 = NumCpp.NdArray(shape) real1 = np.random.randint(1, 100, [shape.rows, shape.cols]) imag1 = np.random.randint(1, 100, [shape.rows, shape.cols]) data1 = real1 + 1j * imag1 data2 = np.random.randint(1, 100, [shape.rows, shape.cols]) cArray1.setArray(data1) cArray2.setArray(data2) assert np.array_equal(NumCpp.add(cArray1, cArray2), data1 + data2) shapeInput = np.random.randint(20, 100, [2, ]) shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item()) cArray1 = NumCpp.NdArray(shape) cArray2 = NumCpp.NdArrayComplexDouble(shape) data1 = np.random.randint(1, 100, [shape.rows, shape.cols]) real2 = np.random.randint(1, 100, [shape.rows, shape.cols]) imag2 = np.random.randint(1, 100, [shape.rows, shape.cols]) data2 = real2 + 1j * imag2 cArray1.setArray(data1) cArray2.setArray(data2) assert np.array_equal(NumCpp.add(cArray1, cArray2), data1 + data2) shapeInput = np.random.randint(20, 100, [2, ]) shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item()) cArray = NumCpp.NdArray(shape) data = np.random.randint(-100, 100, [shape.rows, shape.cols]) cArray.setArray(data) value = np.random.randint(-100, 100) + 1j * np.random.randint(-100, 100) assert np.array_equal(NumCpp.add(cArray, value), data + value) shapeInput = np.random.randint(20, 100, [2, ]) shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item()) cArray = NumCpp.NdArray(shape) data = np.random.randint(-100, 100, [shape.rows, shape.cols]) cArray.setArray(data) value = np.random.randint(-100, 100) + 1j * np.random.randint(-100, 100) assert np.array_equal(NumCpp.add(value, cArray), data + value) shapeInput = np.random.randint(20, 100, [2, ]) shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item()) cArray = NumCpp.NdArrayComplexDouble(shape) real = np.random.randint(1, 100, [shape.rows, shape.cols]) imag = np.random.randint(1, 100, [shape.rows, shape.cols]) data = real + 1j * imag cArray.setArray(data) value = np.random.randint(-100, 100) assert np.array_equal(NumCpp.add(cArray, value), data + value) shapeInput = np.random.randint(20, 100, [2, ]) shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item()) cArray = NumCpp.NdArrayComplexDouble(shape) real = np.random.randint(1, 100, [shape.rows, shape.cols]) imag = np.random.randint(1, 100, [shape.rows, shape.cols]) data = real + 1j * imag cArray.setArray(data) value = np.random.randint(-100, 100) assert np.array_equal(NumCpp.add(value, cArray), data + value) shapeInput = np.random.randint(20, 100, [2, ]) shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item()) cArray1 = NumCpp.NdArrayComplexDouble(shape) cArray2 = 
NumCpp.NdArrayComplexDouble(shape) real1 = np.random.randint(1, 100, [shape.rows, shape.cols]) imag1 = np.random.randint(1, 100, [shape.rows, shape.cols]) data1 = real1 + 1j * imag1 real2 = np.random.randint(1, 100, [shape.rows, shape.cols]) imag2 = np.random.randint(1, 100, [shape.rows, shape.cols]) data2 = real2 + 1j * imag2 cArray1.setArray(data1) cArray2.setArray(data2) assert np.array_equal(NumCpp.add(cArray1, cArray2), data1 + data2) #################################################################################### def test_alen(): shapeInput = np.random.randint(20, 100, [2, ]) shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item()) cArray = NumCpp.NdArray(shape) data = np.random.randint(-100, 100, [shape.rows, shape.cols]) cArray.setArray(data) assert NumCpp.alen(cArray) == shape.rows #################################################################################### def test_all(): shapeInput = np.random.randint(20, 100, [2, ]) shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item()) cArray = NumCpp.NdArray(shape) data = np.random.randint(0, 100, [shape.rows, shape.cols]) cArray.setArray(data) assert NumCpp.all(cArray, NumCpp.Axis.NONE).astype(bool).item() == np.all(data).item() shapeInput = np.random.randint(20, 100, [2, ]) shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item()) cArray = NumCpp.NdArrayComplexDouble(shape) real = np.random.randint(1, 100, [shape.rows, shape.cols]) imag = np.random.randint(1, 100, [shape.rows, shape.cols]) data = real + 1j * imag cArray.setArray(data) assert NumCpp.all(cArray, NumCpp.Axis.NONE).astype(bool).item() == np.all(data).item() shapeInput = np.random.randint(20, 100, [2, ]) shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item()) cArray = NumCpp.NdArray(shape) data = np.random.randint(0, 100, [shape.rows, shape.cols]) cArray.setArray(data) assert np.array_equal(NumCpp.all(cArray, NumCpp.Axis.ROW).flatten().astype(bool), np.all(data, axis=0)) shapeInput = np.random.randint(20, 100, [2, ]) shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item()) cArray = NumCpp.NdArrayComplexDouble(shape) real = np.random.randint(1, 100, [shape.rows, shape.cols]) imag = np.random.randint(1, 100, [shape.rows, shape.cols]) data = real + 1j * imag cArray.setArray(data) assert np.array_equal(NumCpp.all(cArray, NumCpp.Axis.ROW).flatten().astype(bool), np.all(data, axis=0)) shapeInput = np.random.randint(20, 100, [2, ]) shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item()) cArray = NumCpp.NdArray(shape) data = np.random.randint(0, 100, [shape.rows, shape.cols]) cArray.setArray(data) assert np.array_equal(NumCpp.all(cArray, NumCpp.Axis.COL).flatten().astype(bool), np.all(data, axis=1)) shapeInput = np.random.randint(20, 100, [2, ]) shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item()) cArray = NumCpp.NdArrayComplexDouble(shape) real = np.random.randint(1, 100, [shape.rows, shape.cols]) imag = np.random.randint(1, 100, [shape.rows, shape.cols]) data = real + 1j * imag cArray.setArray(data) assert np.array_equal(NumCpp.all(cArray, NumCpp.Axis.COL).flatten().astype(bool), np.all(data, axis=1)) #################################################################################### def test_allclose(): shapeInput = np.random.randint(20, 100, [2, ]) shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item()) cArray1 = NumCpp.NdArray(shape) cArray2 = NumCpp.NdArray(shape) cArray3 = NumCpp.NdArray(shape) tolerance = 1e-5 data1 = np.random.randn(shape.rows, shape.cols) data2 = data1 + tolerance / 10 data3 = 
data1 + 1 cArray1.setArray(data1) cArray2.setArray(data2) cArray3.setArray(data3) assert NumCpp.allclose(cArray1, cArray2, tolerance) and not NumCpp.allclose(cArray1, cArray3, tolerance) #################################################################################### def test_amax(): shapeInput = np.random.randint(20, 100, [2, ]) shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item()) cArray = NumCpp.NdArray(shape) data = np.random.randint(0, 100, [shape.rows, shape.cols]) cArray.setArray(data) assert NumCpp.amax(cArray, NumCpp.Axis.NONE).item() == np.max(data) shapeInput = np.random.randint(20, 100, [2, ]) shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item()) cArray = NumCpp.NdArrayComplexDouble(shape) real = np.random.randint(1, 100, [shape.rows, shape.cols]) imag = np.random.randint(1, 100, [shape.rows, shape.cols]) data = real + 1j * imag cArray.setArray(data) assert NumCpp.amax(cArray, NumCpp.Axis.NONE).item() == np.max(data) shapeInput = np.random.randint(20, 100, [2, ]) shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item()) cArray = NumCpp.NdArray(shape) data = np.random.randint(0, 100, [shape.rows, shape.cols]) cArray.setArray(data) assert np.array_equal(NumCpp.amax(cArray, NumCpp.Axis.ROW).flatten(), np.max(data, axis=0)) shapeInput = np.random.randint(20, 100, [2, ]) shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item()) cArray = NumCpp.NdArrayComplexDouble(shape) real = np.random.randint(1, 100, [shape.rows, shape.cols]) imag = np.random.randint(1, 100, [shape.rows, shape.cols]) data = real + 1j * imag cArray.setArray(data) assert np.array_equal(NumCpp.amax(cArray, NumCpp.Axis.ROW).flatten(), np.max(data, axis=0)) shapeInput = np.random.randint(20, 100, [2, ]) shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item()) cArray = NumCpp.NdArray(shape) data = np.random.randint(0, 100, [shape.rows, shape.cols]) cArray.setArray(data) assert np.array_equal(NumCpp.amax(cArray, NumCpp.Axis.COL).flatten(), np.max(data, axis=1)) shapeInput = np.random.randint(20, 100, [2, ]) shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item()) cArray = NumCpp.NdArrayComplexDouble(shape) real = np.random.randint(1, 100, [shape.rows, shape.cols]) imag = np.random.randint(1, 100, [shape.rows, shape.cols]) data = real + 1j * imag cArray.setArray(data) assert np.array_equal(NumCpp.amax(cArray, NumCpp.Axis.COL).flatten(), np.max(data, axis=1)) #################################################################################### def test_amin(): shapeInput = np.random.randint(20, 100, [2, ]) shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item()) cArray = NumCpp.NdArray(shape) data = np.random.randint(0, 100, [shape.rows, shape.cols]) cArray.setArray(data) assert NumCpp.amin(cArray, NumCpp.Axis.NONE).item() == np.min(data) shapeInput = np.random.randint(20, 100, [2, ]) shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item()) cArray = NumCpp.NdArrayComplexDouble(shape) real = np.random.randint(1, 100, [shape.rows, shape.cols]) imag = np.random.randint(1, 100, [shape.rows, shape.cols]) data = real + 1j * imag cArray.setArray(data) assert NumCpp.amin(cArray, NumCpp.Axis.NONE).item() == np.min(data) shapeInput = np.random.randint(20, 100, [2, ]) shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item()) cArray = NumCpp.NdArray(shape) data = np.random.randint(0, 100, [shape.rows, shape.cols]) cArray.setArray(data) assert np.array_equal(NumCpp.amin(cArray, NumCpp.Axis.ROW).flatten(), np.min(data, axis=0)) shapeInput = 
np.random.randint(20, 100, [2, ]) shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item()) cArray = NumCpp.NdArrayComplexDouble(shape) real = np.random.randint(1, 100, [shape.rows, shape.cols]) imag = np.random.randint(1, 100, [shape.rows, shape.cols]) data = real + 1j * imag cArray.setArray(data) assert np.array_equal(NumCpp.amin(cArray, NumCpp.Axis.ROW).flatten(), np.min(data, axis=0)) shapeInput = np.random.randint(20, 100, [2, ]) shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item()) cArray = NumCpp.NdArray(shape) data = np.random.randint(0, 100, [shape.rows, shape.cols]) cArray.setArray(data) assert np.array_equal(NumCpp.amin(cArray, NumCpp.Axis.COL).flatten(), np.min(data, axis=1)) shapeInput = np.random.randint(20, 100, [2, ]) shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item()) cArray = NumCpp.NdArrayComplexDouble(shape) real = np.random.randint(1, 100, [shape.rows, shape.cols]) imag = np.random.randint(1, 100, [shape.rows, shape.cols]) data = real + 1j * imag cArray.setArray(data) assert np.array_equal(NumCpp.amin(cArray, NumCpp.Axis.COL).flatten(), np.min(data, axis=1)) #################################################################################### def test_angle(): components = np.random.randint(-100, -1, [2, ]).astype(np.double) value = complex(components[0], components[1]) assert np.round(NumCpp.angleScaler(value), 9) == np.round(np.angle(value), 9) # noqa shapeInput = np.random.randint(20, 100, [2, ]) shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item()) cArray = NumCpp.NdArrayComplexDouble(shape) data = np.random.randint(-100, 100, [shape.rows, shape.cols]) + \ 1j * np.random.randint(-100, 100, [shape.rows, shape.cols]) cArray.setArray(data) assert np.array_equal(np.round(NumCpp.angleArray(cArray), 9), np.round(np.angle(data), 9)) #################################################################################### def test_any(): shapeInput = np.random.randint(20, 100, [2, ]) shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item()) cArray = NumCpp.NdArray(shape) data = np.random.randint(0, 100, [shape.rows, shape.cols]) cArray.setArray(data) assert NumCpp.any(cArray, NumCpp.Axis.NONE).astype(bool).item() == np.any(data).item() shapeInput = np.random.randint(20, 100, [2, ]) shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item()) cArray = NumCpp.NdArrayComplexDouble(shape) real = np.random.randint(1, 100, [shape.rows, shape.cols]) imag = np.random.randint(1, 100, [shape.rows, shape.cols]) data = real + 1j * imag cArray.setArray(data) assert NumCpp.any(cArray, NumCpp.Axis.NONE).astype(bool).item() == np.any(data).item() shapeInput = np.random.randint(20, 100, [2, ]) shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item()) cArray = NumCpp.NdArray(shape) data = np.random.randint(0, 100, [shape.rows, shape.cols]) cArray.setArray(data) assert np.array_equal(NumCpp.any(cArray, NumCpp.Axis.ROW).flatten().astype(bool), np.any(data, axis=0)) shapeInput = np.random.randint(20, 100, [2, ]) shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item()) cArray = NumCpp.NdArrayComplexDouble(shape) real = np.random.randint(1, 100, [shape.rows, shape.cols]) imag = np.random.randint(1, 100, [shape.rows, shape.cols]) data = real + 1j * imag cArray.setArray(data) assert np.array_equal(NumCpp.any(cArray, NumCpp.Axis.ROW).flatten().astype(bool), np.any(data, axis=0)) shapeInput = np.random.randint(20, 100, [2, ]) shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item()) cArray = NumCpp.NdArray(shape) data = 
np.random.randint(0, 100, [shape.rows, shape.cols]) cArray.setArray(data) assert np.array_equal(NumCpp.any(cArray, NumCpp.Axis.COL).flatten().astype(bool), np.any(data, axis=1)) shapeInput = np.random.randint(20, 100, [2, ]) shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item()) cArray = NumCpp.NdArrayComplexDouble(shape) real = np.random.randint(1, 100, [shape.rows, shape.cols]) imag = np.random.randint(1, 100, [shape.rows, shape.cols]) data = real + 1j * imag cArray.setArray(data) assert np.array_equal(NumCpp.any(cArray, NumCpp.Axis.COL).flatten().astype(bool), np.any(data, axis=1)) #################################################################################### def test_append(): shapeInput = np.random.randint(20, 100, [2, ]) shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item()) cArray1 = NumCpp.NdArray(shape) cArray2 = NumCpp.NdArray(shape) data1 = np.random.randint(0, 100, [shape.rows, shape.cols]) data2 = np.random.randint(0, 100, [shape.rows, shape.cols]) cArray1.setArray(data1) cArray2.setArray(data2) assert np.array_equal(NumCpp.append(cArray1, cArray2, NumCpp.Axis.NONE).getNumpyArray().flatten(), np.append(data1, data2)) shapeInput = np.random.randint(20, 100, [2, ]) numRows = np.random.randint(1, 100, [1, ]).item() shape1 = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item()) shape2 = NumCpp.Shape(shapeInput[0].item() + numRows, shapeInput[1].item()) cArray1 = NumCpp.NdArray(shape1) cArray2 = NumCpp.NdArray(shape2) data1 = np.random.randint(0, 100, [shape1.rows, shape1.cols]) data2 = np.random.randint(0, 100, [shape2.rows, shape2.cols]) cArray1.setArray(data1) cArray2.setArray(data2) assert np.array_equal(NumCpp.append(cArray1, cArray2, NumCpp.Axis.ROW).getNumpyArray(), np.append(data1, data2, axis=0)) shapeInput = np.random.randint(20, 100, [2, ]) NumCppols = np.random.randint(1, 100, [1, ]).item() shape1 = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item()) shape2 = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item() + NumCppols) cArray1 = NumCpp.NdArray(shape1) cArray2 = NumCpp.NdArray(shape2) data1 = np.random.randint(0, 100, [shape1.rows, shape1.cols]) data2 = np.random.randint(0, 100, [shape2.rows, shape2.cols]) cArray1.setArray(data1) cArray2.setArray(data2) assert np.array_equal(NumCpp.append(cArray1, cArray2, NumCpp.Axis.COL).getNumpyArray(), np.append(data1, data2, axis=1)) #################################################################################### def test_arange(): start = np.random.randn(1).item() stop = np.random.randn(1).item() * 100 step = np.abs(np.random.randn(1).item()) if stop < start: step *= -1 data = np.arange(start, stop, step) assert np.array_equal(np.round(NumCpp.arange(start, stop, step).flatten(), 9), np.round(data, 9)) #################################################################################### def test_arccos(): value = np.abs(np.random.rand(1).item()) assert np.round(NumCpp.arccosScaler(value), 9) == np.round(np.arccos(value), 9) components = np.random.rand(2).astype(np.double) value = complex(components[0], components[1]) assert np.round(NumCpp.arccosScaler(value), 9) == np.round(np.arccos(value), 9) shapeInput = np.random.randint(20, 100, [2, ]) shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item()) cArray = NumCpp.NdArray(shape) data = np.random.rand(shape.rows, shape.cols) cArray.setArray(data) assert np.array_equal(np.round(NumCpp.arccosArray(cArray), 9), np.round(np.arccos(data), 9)) shapeInput = np.random.randint(20, 100, [2, ]) shape = NumCpp.Shape(shapeInput[0].item(), 
shapeInput[1].item()) cArray = NumCpp.NdArrayComplexDouble(shape) data = np.random.rand(shape.rows, shape.cols) + 1j * np.random.rand(shape.rows, shape.cols) cArray.setArray(data) assert np.array_equal(np.round(NumCpp.arccosArray(cArray), 9), np.round(np.arccos(data), 9)) #################################################################################### def test_arccosh(): value = np.abs(np.random.rand(1).item()) + 1 assert np.round(NumCpp.arccoshScaler(value), 9) == np.round(np.arccosh(value), 9) components = np.random.rand(2).astype(np.double) value = complex(components[0], components[1]) assert np.round(NumCpp.arccoshScaler(value), 9) == np.round(np.arccosh(value), 9) shapeInput = np.random.randint(20, 100, [2, ]) shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item()) cArray = NumCpp.NdArray(shape) data = np.random.rand(shape.rows, shape.cols) + 1 cArray.setArray(data) assert np.array_equal(np.round(NumCpp.arccoshArray(cArray), 9), np.round(np.arccosh(data), 9)) shapeInput = np.random.randint(20, 100, [2, ]) shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item()) cArray = NumCpp.NdArrayComplexDouble(shape) data = np.random.rand(shape.rows, shape.cols) + 1j * np.random.rand(shape.rows, shape.cols) cArray.setArray(data) assert np.array_equal(np.round(NumCpp.arccoshArray(cArray), 9), np.round(np.arccosh(data), 9)) #################################################################################### def test_arcsin(): value = np.abs(np.random.rand(1).item()) assert np.round(NumCpp.arcsinScaler(value), 9) == np.round(np.arcsin(value), 9) components = np.random.rand(2).astype(np.double) value = complex(components[0], components[1]) assert np.round(NumCpp.arcsinScaler(value), 9) == np.round(np.arcsin(value), 9) shapeInput = np.random.randint(20, 100, [2, ]) shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item()) cArray = NumCpp.NdArray(shape) data = np.random.rand(shape.rows, shape.cols) cArray.setArray(data) assert np.array_equal(np.round(NumCpp.arcsinArray(cArray), 9), np.round(np.arcsin(data), 9)) shapeInput = np.random.randint(20, 100, [2, ]) shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item()) cArray = NumCpp.NdArrayComplexDouble(shape) data = np.random.rand(shape.rows, shape.cols) + 1j * np.random.rand(shape.rows, shape.cols) cArray.setArray(data) np.array_equal(np.round(NumCpp.arcsinArray(cArray), 9), np.round(np.arcsin(data), 9)) #################################################################################### def test_arcsinh(): value = np.abs(np.random.rand(1).item()) assert np.round(NumCpp.arcsinhScaler(value), 9) == np.round(np.arcsinh(value), 9) components = np.random.rand(2).astype(np.double) value = complex(components[0], components[1]) assert np.round(NumCpp.arcsinhScaler(value), 9) == np.round(np.arcsinh(value), 9) shapeInput = np.random.randint(20, 100, [2, ]) shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item()) cArray = NumCpp.NdArray(shape) data = np.random.rand(shape.rows, shape.cols) cArray.setArray(data) assert np.array_equal(np.round(NumCpp.arcsinhArray(cArray), 9), np.round(np.arcsinh(data), 9)) shapeInput = np.random.randint(20, 100, [2, ]) shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item()) cArray = NumCpp.NdArrayComplexDouble(shape) data = np.random.rand(shape.rows, shape.cols) + 1j * np.random.rand(shape.rows, shape.cols) cArray.setArray(data) np.array_equal(np.round(NumCpp.arcsinhArray(cArray), 9), np.round(np.arcsinh(data), 9)) 
#################################################################################### def test_arctan(): value = np.abs(np.random.rand(1).item()) assert np.round(NumCpp.arctanScaler(value), 9) == np.round(np.arctan(value), 9) components = np.random.rand(2).astype(np.double) value = complex(components[0], components[1]) assert np.round(NumCpp.arctanScaler(value), 9) == np.round(np.arctan(value), 9) shapeInput = np.random.randint(20, 100, [2, ]) shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item()) cArray = NumCpp.NdArray(shape) data = np.random.rand(shape.rows, shape.cols) cArray.setArray(data) assert np.array_equal(np.round(NumCpp.arctanArray(cArray), 9), np.round(np.arctan(data), 9)) shapeInput = np.random.randint(20, 100, [2, ]) shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item()) cArray = NumCpp.NdArrayComplexDouble(shape) data = np.random.rand(shape.rows, shape.cols) + 1j * np.random.rand(shape.rows, shape.cols) cArray.setArray(data) np.array_equal(np.round(NumCpp.arctanArray(cArray), 9), np.round(np.arctan(data), 9)) #################################################################################### def test_arctan2(): xy = np.random.rand(2) * 2 - 1 assert np.round(NumCpp.arctan2Scaler(xy[1], xy[0]), 9) == np.round(np.arctan2(xy[1], xy[0]), 9) shapeInput = np.random.randint(20, 100, [2, ]) shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item()) cArrayX = NumCpp.NdArray(shape) cArrayY = NumCpp.NdArray(shape) xy = np.random.rand(*shapeInput, 2) * 2 - 1 xData = xy[:, :, 0].reshape(shapeInput) yData = xy[:, :, 1].reshape(shapeInput) cArrayX.setArray(xData) cArrayY.setArray(yData) assert np.array_equal(np.round(NumCpp.arctan2Array(cArrayY, cArrayX), 9), np.round(np.arctan2(yData, xData), 9)) #################################################################################### def test_arctanh(): value = np.abs(np.random.rand(1).item()) assert np.round(NumCpp.arctanhScaler(value), 9) == np.round(np.arctanh(value), 9) components = np.random.rand(2).astype(np.double) value = complex(components[0], components[1]) assert np.round(NumCpp.arctanhScaler(value), 9) == np.round(np.arctanh(value), 9) shapeInput = np.random.randint(20, 100, [2, ]) shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item()) cArray = NumCpp.NdArray(shape) data = np.random.rand(shape.rows, shape.cols) cArray.setArray(data) assert np.array_equal(np.round(NumCpp.arctanhArray(cArray), 9), np.round(np.arctanh(data), 9)) shapeInput = np.random.randint(20, 100, [2, ]) shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item()) cArray = NumCpp.NdArrayComplexDouble(shape) data = np.random.rand(shape.rows, shape.cols) + 1j * np.random.rand(shape.rows, shape.cols) cArray.setArray(data) np.array_equal(np.round(NumCpp.arctanhArray(cArray), 9), np.round(np.arctanh(data), 9)) #################################################################################### def test_argmax(): shapeInput = np.random.randint(20, 100, [2, ]) shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item()) cArray = NumCpp.NdArray(shape) data = np.random.randint(0, 100, [shape.rows, shape.cols]) cArray.setArray(data) assert np.array_equal(NumCpp.argmax(cArray, NumCpp.Axis.NONE).item(), np.argmax(data)) shapeInput = np.random.randint(20, 100, [2, ]) shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item()) cArray = NumCpp.NdArrayComplexDouble(shape) real = np.random.randint(1, 100, [shape.rows, shape.cols]) imag = np.random.randint(1, 100, [shape.rows, shape.cols]) data = real + 1j * imag 
cArray.setArray(data) assert np.array_equal(NumCpp.argmax(cArray, NumCpp.Axis.NONE).item(), np.argmax(data)) shapeInput = np.random.randint(20, 100, [2, ]) shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item()) cArray = NumCpp.NdArray(shape) data = np.random.randint(0, 100, [shape.rows, shape.cols]) cArray.setArray(data) assert np.array_equal(NumCpp.argmax(cArray, NumCpp.Axis.ROW).flatten(), np.argmax(data, axis=0)) shapeInput = np.random.randint(20, 100, [2, ]) shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item()) cArray = NumCpp.NdArrayComplexDouble(shape) real = np.random.randint(1, 100, [shape.rows, shape.cols]) imag = np.random.randint(1, 100, [shape.rows, shape.cols]) data = real + 1j * imag cArray.setArray(data) assert np.array_equal(NumCpp.argmax(cArray, NumCpp.Axis.ROW).flatten(), np.argmax(data, axis=0)) shapeInput = np.random.randint(20, 100, [2, ]) shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item()) cArray = NumCpp.NdArray(shape) data = np.random.randint(0, 100, [shape.rows, shape.cols]) cArray.setArray(data) assert np.array_equal(NumCpp.argmax(cArray, NumCpp.Axis.COL).flatten(), np.argmax(data, axis=1)) shapeInput = np.random.randint(20, 100, [2, ]) shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item()) cArray = NumCpp.NdArrayComplexDouble(shape) real = np.random.randint(1, 100, [shape.rows, shape.cols]) imag = np.random.randint(1, 100, [shape.rows, shape.cols]) data = real + 1j * imag cArray.setArray(data) assert np.array_equal(NumCpp.argmax(cArray, NumCpp.Axis.COL).flatten(), np.argmax(data, axis=1)) #################################################################################### def test_argmin(): shapeInput = np.random.randint(20, 100, [2, ]) shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item()) cArray = NumCpp.NdArray(shape) data = np.random.randint(0, 100, [shape.rows, shape.cols]) cArray.setArray(data) assert np.array_equal(NumCpp.argmin(cArray, NumCpp.Axis.NONE).item(), np.argmin(data)) shapeInput = np.random.randint(20, 100, [2, ]) shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item()) cArray = NumCpp.NdArrayComplexDouble(shape) real = np.random.randint(1, 100, [shape.rows, shape.cols]) imag = np.random.randint(1, 100, [shape.rows, shape.cols]) data = real + 1j * imag cArray.setArray(data) assert np.array_equal(NumCpp.argmin(cArray, NumCpp.Axis.NONE).item(), np.argmin(data)) shapeInput = np.random.randint(20, 100, [2, ]) shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item()) cArray = NumCpp.NdArray(shape) data = np.random.randint(0, 100, [shape.rows, shape.cols]) cArray.setArray(data) assert np.array_equal(NumCpp.argmin(cArray, NumCpp.Axis.ROW).flatten(), np.argmin(data, axis=0)) shapeInput = np.random.randint(20, 100, [2, ]) shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item()) cArray = NumCpp.NdArrayComplexDouble(shape) real = np.random.randint(1, 100, [shape.rows, shape.cols]) imag = np.random.randint(1, 100, [shape.rows, shape.cols]) data = real + 1j * imag cArray.setArray(data) assert np.array_equal(NumCpp.argmin(cArray, NumCpp.Axis.ROW).flatten(), np.argmin(data, axis=0)) shapeInput = np.random.randint(20, 100, [2, ]) shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item()) cArray = NumCpp.NdArray(shape) data = np.random.randint(0, 100, [shape.rows, shape.cols]) cArray.setArray(data) assert np.array_equal(NumCpp.argmin(cArray, NumCpp.Axis.COL).flatten(), np.argmin(data, axis=1)) shapeInput = np.random.randint(20, 100, [2, ]) shape = 
NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item()) cArray = NumCpp.NdArrayComplexDouble(shape) real = np.random.randint(1, 100, [shape.rows, shape.cols]) imag = np.random.randint(1, 100, [shape.rows, shape.cols]) data = real + 1j * imag cArray.setArray(data) assert np.array_equal(NumCpp.argmin(cArray, NumCpp.Axis.COL).flatten(), np.argmin(data, axis=1)) #################################################################################### def test_argsort(): shapeInput = np.random.randint(20, 100, [2, ]) shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item()) cArray = NumCpp.NdArray(shape) data = np.random.randint(0, 100, [shape.rows, shape.cols]) cArray.setArray(data) dataFlat = data.flatten() assert np.array_equal(dataFlat[NumCpp.argsort(cArray, NumCpp.Axis.NONE).flatten().astype(np.uint32)], dataFlat[np.argsort(data, axis=None)]) shapeInput = np.random.randint(20, 100, [2, ]) shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item()) cArray = NumCpp.NdArrayComplexDouble(shape) real = np.random.randint(1, 100, [shape.rows, shape.cols]) imag = np.random.randint(1, 100, [shape.rows, shape.cols]) data = real + 1j * imag cArray.setArray(data) dataFlat = data.flatten() assert np.array_equal(dataFlat[NumCpp.argsort(cArray, NumCpp.Axis.NONE).flatten().astype(np.uint32)], dataFlat[np.argsort(data, axis=None)]) shapeInput = np.random.randint(20, 100, [2, ]) shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item()) cArray = NumCpp.NdArray(shape) data = np.random.randint(0, 100, [shape.rows, shape.cols]) cArray.setArray(data) pIdx = np.argsort(data, axis=0) cIdx = NumCpp.argsort(cArray, NumCpp.Axis.ROW).astype(np.uint16) allPass = True for idx, row in enumerate(data.T): if not np.array_equal(row[cIdx[:, idx]], row[pIdx[:, idx]]): allPass = False break assert allPass shapeInput = np.random.randint(20, 100, [2, ]) shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item()) cArray = NumCpp.NdArrayComplexDouble(shape) real = np.random.randint(1, 100, [shape.rows, shape.cols]) imag = np.random.randint(1, 100, [shape.rows, shape.cols]) data = real + 1j * imag cArray.setArray(data) pIdx = np.argsort(data, axis=0) cIdx = NumCpp.argsort(cArray, NumCpp.Axis.ROW).astype(np.uint16) allPass = True for idx, row in enumerate(data.T): if not np.array_equal(row[cIdx[:, idx]], row[pIdx[:, idx]]): allPass = False break assert allPass shapeInput = np.random.randint(20, 100, [2, ]) shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item()) cArray = NumCpp.NdArray(shape) data = np.random.randint(0, 100, [shape.rows, shape.cols]) cArray.setArray(data) pIdx = np.argsort(data, axis=1) cIdx = NumCpp.argsort(cArray, NumCpp.Axis.COL).astype(np.uint16) allPass = True for idx, row in enumerate(data): if not np.array_equal(row[cIdx[idx, :]], row[pIdx[idx, :]]): # noqa allPass = False break assert allPass shapeInput = np.random.randint(20, 100, [2, ]) shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item()) cArray = NumCpp.NdArrayComplexDouble(shape) real = np.random.randint(1, 100, [shape.rows, shape.cols]) imag = np.random.randint(1, 100, [shape.rows, shape.cols]) data = real + 1j * imag cArray.setArray(data) pIdx = np.argsort(data, axis=1) cIdx = NumCpp.argsort(cArray, NumCpp.Axis.COL).astype(np.uint16) allPass = True for idx, row in enumerate(data): if not np.array_equal(row[cIdx[idx, :]], row[pIdx[idx, :]]): allPass = False break assert allPass #################################################################################### def test_argwhere(): shapeInput = 
np.random.randint(20, 100, [2, ]) shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item()) cArray = NumCpp.NdArray(shape) data = np.random.randint(0, 100, [shape.rows, shape.cols]) randValue = np.random.randint(0, 100, [1, ]).item() data2 = data > randValue cArray.setArray(data2) assert np.array_equal(NumCpp.argwhere(cArray).flatten(), np.argwhere(data.flatten() > randValue).flatten()) shapeInput = np.random.randint(20, 100, [2, ]) shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item()) cArray = NumCpp.NdArrayComplexDouble(shape) real = np.random.randint(1, 100, [shape.rows, shape.cols]) imag = np.random.randint(1, 100, [shape.rows, shape.cols]) data = real + 1j * imag randValue = np.random.randint(0, 100, [1, ]).item() data2 = data > randValue cArray.setArray(data2) assert np.array_equal(NumCpp.argwhere(cArray).flatten(), np.argwhere(data.flatten() > randValue).flatten()) #################################################################################### def test_around(): value = np.abs(np.random.rand(1).item()) * np.random.randint(1, 10, [1, ]).item() numDecimalsRound = np.random.randint(0, 10, [1, ]).astype(np.uint8).item() assert NumCpp.aroundScaler(value, numDecimalsRound) == np.round(value, numDecimalsRound) shapeInput = np.random.randint(20, 100, [2, ]) shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item()) cArray = NumCpp.NdArray(shape) data = np.random.rand(shape.rows, shape.cols) * np.random.randint(1, 10, [1, ]).item() cArray.setArray(data) numDecimalsRound = np.random.randint(0, 10, [1, ]).astype(np.uint8).item() assert np.array_equal(NumCpp.aroundArray(cArray, numDecimalsRound), np.round(data, numDecimalsRound)) #################################################################################### def test_array_equal(): shapeInput = np.random.randint(20, 100, [2, ]) shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item()) cArray1 = NumCpp.NdArray(shape) cArray2 = NumCpp.NdArray(shape) cArray3 = NumCpp.NdArray(shape) data1 = np.random.randint(1, 100, shapeInput) data2 = np.random.randint(1, 100, shapeInput) cArray1.setArray(data1) cArray2.setArray(data1) cArray3.setArray(data2) assert NumCpp.array_equal(cArray1, cArray2) and not NumCpp.array_equal(cArray1, cArray3) shapeInput = np.random.randint(20, 100, [2, ]) shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item()) cArray1 = NumCpp.NdArrayComplexDouble(shape) cArray2 = NumCpp.NdArrayComplexDouble(shape) cArray3 = NumCpp.NdArrayComplexDouble(shape) real1 = np.random.randint(1, 100, [shape.rows, shape.cols]) imag1 = np.random.randint(1, 100, [shape.rows, shape.cols]) data1 = real1 + 1j * imag1 real2 = np.random.randint(1, 100, [shape.rows, shape.cols]) imag2 = np.random.randint(1, 100, [shape.rows, shape.cols]) data2 = real2 + 1j * imag2 cArray1.setArray(data1) cArray2.setArray(data1) cArray3.setArray(data2) assert NumCpp.array_equal(cArray1, cArray2) and not NumCpp.array_equal(cArray1, cArray3) #################################################################################### def test_array_equiv(): shapeInput1 = np.random.randint(1, 100, [2, ]) shapeInput3 = np.random.randint(1, 100, [2, ]) shape1 = NumCpp.Shape(shapeInput1[0].item(), shapeInput1[1].item()) shape2 = NumCpp.Shape(shapeInput1[1].item(), shapeInput1[0].item()) shape3 = NumCpp.Shape(shapeInput3[0].item(), shapeInput3[1].item()) cArray1 = NumCpp.NdArray(shape1) cArray2 = NumCpp.NdArray(shape2) cArray3 = NumCpp.NdArray(shape3) data1 = np.random.randint(1, 100, shapeInput1) data3 = np.random.randint(1, 100, shapeInput3) 
cArray1.setArray(data1) cArray2.setArray(data1.reshape([shapeInput1[1].item(), shapeInput1[0].item()])) cArray3.setArray(data3) assert NumCpp.array_equiv(cArray1, cArray2) and not NumCpp.array_equiv(cArray1, cArray3) shapeInput1 = np.random.randint(1, 100, [2, ]) shapeInput3 = np.random.randint(1, 100, [2, ]) shape1 = NumCpp.Shape(shapeInput1[0].item(), shapeInput1[1].item()) shape2 = NumCpp.Shape(shapeInput1[1].item(), shapeInput1[0].item()) shape3 = NumCpp.Shape(shapeInput3[0].item(), shapeInput3[1].item()) cArray1 = NumCpp.NdArrayComplexDouble(shape1) cArray2 = NumCpp.NdArrayComplexDouble(shape2) cArray3 = NumCpp.NdArrayComplexDouble(shape3) real1 = np.random.randint(1, 100, [shape1.rows, shape1.cols]) imag1 = np.random.randint(1, 100, [shape1.rows, shape1.cols]) data1 = real1 + 1j * imag1 real3 = np.random.randint(1, 100, [shape3.rows, shape3.cols]) imag3 = np.random.randint(1, 100, [shape3.rows, shape3.cols]) data3 = real3 + 1j * imag3 cArray1.setArray(data1) cArray2.setArray(data1.reshape([shapeInput1[1].item(), shapeInput1[0].item()])) cArray3.setArray(data3) assert NumCpp.array_equiv(cArray1, cArray2) and not NumCpp.array_equiv(cArray1, cArray3) #################################################################################### def test_asarray(): values = np.random.randint(0, 100, [2, ]).astype(np.double) assert np.array_equal(NumCpp.asarrayArray1D(*values).flatten(), values) real = np.random.randint(0, 100, [2, ]).astype(np.double) imag = np.random.randint(0, 100, [2, ]).astype(np.double) values = real + 1j * imag assert np.array_equal(NumCpp.asarrayArray1D(*values).flatten(), values) values = np.random.randint(0, 100, [2, ]).astype(np.double) assert np.array_equal(NumCpp.asarrayArray1DCopy(*values).flatten(), values) real = np.random.randint(0, 100, [2, ]).astype(np.double) imag = np.random.randint(0, 100, [2, ]).astype(np.double) values = real + 1j * imag assert np.array_equal(NumCpp.asarrayArray1DCopy(*values).flatten(), values) values = np.random.randint(0, 100, [2, ]).astype(np.double) data = np.vstack([values, values]) assert np.array_equal(NumCpp.asarrayArray2D(*values), data) real = np.random.randint(0, 100, [2, ]).astype(np.double) imag = np.random.randint(0, 100, [2, ]).astype(np.double) values = real + 1j * imag data = np.vstack([values, values]) assert np.array_equal(NumCpp.asarrayArray2D(*values), data) values = np.random.randint(0, 100, [2, ]).astype(np.double) data = np.vstack([values, values]) assert np.array_equal(NumCpp.asarrayArray2DCopy(*values), data) real = np.random.randint(0, 100, [2, ]).astype(np.double) imag = np.random.randint(0, 100, [2, ]).astype(np.double) values = real + 1j * imag data = np.vstack([values, values]) assert np.array_equal(NumCpp.asarrayArray2DCopy(*values), data) values = np.random.randint(0, 100, [2, ]).astype(np.double) assert np.array_equal(NumCpp.asarrayVector1D(*values).flatten(), values) real = np.random.randint(0, 100, [2, ]).astype(np.double) imag = np.random.randint(0, 100, [2, ]).astype(np.double) values = real + 1j * imag assert np.array_equal(NumCpp.asarrayVector1D(*values).flatten(), values) values = np.random.randint(0, 100, [2, ]).astype(np.double) assert np.array_equal(NumCpp.asarrayVector1DCopy(*values).flatten(), values) real = np.random.randint(0, 100, [2, ]).astype(np.double) imag = np.random.randint(0, 100, [2, ]).astype(np.double) values = real + 1j * imag assert np.array_equal(NumCpp.asarrayVector1DCopy(*values).flatten(), values) values = np.random.randint(0, 100, [2, ]).astype(np.double) data = 
np.vstack([values, values]) assert np.array_equal(NumCpp.asarrayVector2D(*values), data) real = np.random.randint(0, 100, [2, ]).astype(np.double) imag = np.random.randint(0, 100, [2, ]).astype(np.double) values = real + 1j * imag data = np.vstack([values, values]) assert np.array_equal(NumCpp.asarrayVector2D(*values), data) values = np.random.randint(0, 100, [2, ]).astype(np.double) data = np.vstack([values, values]) assert np.array_equal(NumCpp.asarrayVectorArray2D(*values), data) real = np.random.randint(0, 100, [2, ]).astype(np.double) imag = np.random.randint(0, 100, [2, ]).astype(np.double) values = real + 1j * imag data = np.vstack([values, values]) assert np.array_equal(NumCpp.asarrayVectorArray2D(*values), data) values = np.random.randint(0, 100, [2, ]).astype(np.double) data = np.vstack([values, values]) assert np.array_equal(NumCpp.asarrayVectorArray2DCopy(*values), data) real = np.random.randint(0, 100, [2, ]).astype(np.double) imag = np.random.randint(0, 100, [2, ]).astype(np.double) values = real + 1j * imag data = np.vstack([values, values]) assert np.array_equal(NumCpp.asarrayVectorArray2DCopy(*values), data) values = np.random.randint(0, 100, [2, ]).astype(np.double) assert np.array_equal(NumCpp.asarrayDeque1D(*values).flatten(), values) real = np.random.randint(0, 100, [2, ]).astype(np.double) imag = np.random.randint(0, 100, [2, ]).astype(np.double) values = real + 1j * imag assert np.array_equal(NumCpp.asarrayDeque1D(*values).flatten(), values) values = np.random.randint(0, 100, [2, ]).astype(np.double) data = np.vstack([values, values]) assert np.array_equal(NumCpp.asarrayDeque2D(*values), data) real = np.random.randint(0, 100, [2, ]).astype(np.double) imag = np.random.randint(0, 100, [2, ]).astype(np.double) values = real + 1j * imag data = np.vstack([values, values]) assert np.array_equal(NumCpp.asarrayDeque2D(*values), data) values = np.random.randint(0, 100, [2, ]).astype(np.double) assert np.array_equal(NumCpp.asarrayList(*values).flatten(), values) real = np.random.randint(0, 100, [2, ]).astype(np.double) imag = np.random.randint(0, 100, [2, ]).astype(np.double) values = real + 1j * imag assert np.array_equal(NumCpp.asarrayList(*values).flatten(), values) values = np.random.randint(0, 100, [2, ]).astype(np.double) assert np.array_equal(NumCpp.asarrayIterators(*values).flatten(), values) real = np.random.randint(0, 100, [2, ]).astype(np.double) imag = np.random.randint(0, 100, [2, ]).astype(np.double) values = real + 1j * imag assert np.array_equal(NumCpp.asarrayIterators(*values).flatten(), values) values = np.random.randint(0, 100, [2, ]).astype(np.double) assert np.array_equal(NumCpp.asarrayPointerIterators(*values).flatten(), values) real = np.random.randint(0, 100, [2, ]).astype(np.double) imag = np.random.randint(0, 100, [2, ]).astype(np.double) values = real + 1j * imag assert np.array_equal(NumCpp.asarrayPointerIterators(*values).flatten(), values) values = np.random.randint(0, 100, [2, ]).astype(np.double) assert np.array_equal(NumCpp.asarrayPointer(*values).flatten(), values) real = np.random.randint(0, 100, [2, ]).astype(np.double) imag = np.random.randint(0, 100, [2, ]).astype(np.double) values = real + 1j * imag assert np.array_equal(NumCpp.asarrayPointer(*values).flatten(), values) values = np.random.randint(0, 100, [2, ]).astype(np.double) data = np.vstack([values, values]) assert np.array_equal(NumCpp.asarrayPointer2D(*values), data) real = np.random.randint(0, 100, [2, ]).astype(np.double) imag = np.random.randint(0, 100, [2, ]).astype(np.double) 
values = real + 1j * imag data = np.vstack([values, values]) assert np.array_equal(NumCpp.asarrayPointer2D(*values), data) values = np.random.randint(0, 100, [2, ]).astype(np.double) assert np.array_equal(NumCpp.asarrayPointerShell(*values).flatten(), values) real = np.random.randint(0, 100, [2, ]).astype(np.double) imag = np.random.randint(0, 100, [2, ]).astype(np.double) values = real + 1j * imag assert np.array_equal(NumCpp.asarrayPointerShell(*values).flatten(), values) values = np.random.randint(0, 100, [2, ]).astype(np.double) data = np.vstack([values, values]) assert np.array_equal(NumCpp.asarrayPointerShell2D(*values), data) real = np.random.randint(0, 100, [2, ]).astype(np.double) imag = np.random.randint(0, 100, [2, ]).astype(np.double) values = real + 1j * imag data = np.vstack([values, values]) assert np.array_equal(NumCpp.asarrayPointerShell2D(*values), data) values = np.random.randint(0, 100, [2, ]).astype(np.double) assert np.array_equal(NumCpp.asarrayPointerShellTakeOwnership(*values).flatten(), values) real = np.random.randint(0, 100, [2, ]).astype(np.double) imag = np.random.randint(0, 100, [2, ]).astype(np.double) values = real + 1j * imag assert np.array_equal(NumCpp.asarrayPointerShellTakeOwnership(*values).flatten(), values) values = np.random.randint(0, 100, [2, ]).astype(np.double) data = np.vstack([values, values]) assert np.array_equal(NumCpp.asarrayPointerShell2DTakeOwnership(*values), data) real = np.random.randint(0, 100, [2, ]).astype(np.double) imag = np.random.randint(0, 100, [2, ]).astype(np.double) values = real + 1j * imag data = np.vstack([values, values]) assert np.array_equal(NumCpp.asarrayPointerShell2DTakeOwnership(*values), data) shapeInput = np.random.randint(20, 100, [2, ]) shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item()) cArray = NumCpp.NdArray(shape) data = np.random.randint(0, 100, [shape.rows, shape.cols]) cArray.setArray(data) cArrayCast = NumCpp.astypeDoubleToUint32(cArray).getNumpyArray() assert np.array_equal(cArrayCast, data.astype(np.uint32)) assert cArrayCast.dtype == np.uint32 shapeInput = np.random.randint(20, 100, [2, ]) shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item()) cArray = NumCpp.NdArray(shape) data = np.random.randint(0, 100, [shape.rows, shape.cols]) cArray.setArray(data) cArrayCast = NumCpp.astypeDoubleToComplex(cArray).getNumpyArray() assert np.array_equal(cArrayCast, data.astype(np.complex128)) assert cArrayCast.dtype == np.complex128 shapeInput = np.random.randint(20, 100, [2, ]) shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item()) cArray = NumCpp.NdArrayComplexDouble(shape) real = np.random.randint(1, 100, [shape.rows, shape.cols]) imag = np.random.randint(1, 100, [shape.rows, shape.cols]) data = real + 1j * imag cArray.setArray(data) cArrayCast = NumCpp.astypeComplexToComplex(cArray).getNumpyArray() assert np.array_equal(cArrayCast, data.astype(np.complex64)) assert cArrayCast.dtype == np.complex64 shapeInput = np.random.randint(20, 100, [2, ]) shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item()) cArray = NumCpp.NdArrayComplexDouble(shape) real = np.random.randint(1, 100, [shape.rows, shape.cols]) imag = np.random.randint(1, 100, [shape.rows, shape.cols]) data = real + 1j * imag cArray.setArray(data) cArrayCast = NumCpp.astypeComplexToDouble(cArray).getNumpyArray() warnings.filterwarnings('ignore', category=np.ComplexWarning) assert np.array_equal(cArrayCast, data.astype(np.double)) warnings.filters.pop() # noqa assert cArrayCast.dtype == np.double 
#################################################################################### def test_average(): shapeInput = np.random.randint(20, 100, [2, ]) shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item()) cArray = NumCpp.NdArray(shape) data = np.random.randint(0, 100, [shape.rows, shape.cols]) cArray.setArray(data) assert np.round(NumCpp.average(cArray, NumCpp.Axis.NONE).item(), 9) == np.round(np.average(data), 9) shapeInput = np.random.randint(20, 100, [2, ]) shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item()) cArray = NumCpp.NdArrayComplexDouble(shape) real = np.random.randint(1, 100, [shape.rows, shape.cols]) imag = np.random.randint(1, 100, [shape.rows, shape.cols]) data = real + 1j * imag cArray.setArray(data) assert np.round(NumCpp.average(cArray, NumCpp.Axis.NONE).item(), 9) == np.round(np.average(data), 9) shapeInput = np.random.randint(20, 100, [2, ]) shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item()) cArray = NumCpp.NdArray(shape) data = np.random.randint(0, 100, [shape.rows, shape.cols]) cArray.setArray(data) assert np.array_equal(np.round(NumCpp.average(cArray, NumCpp.Axis.ROW).flatten(), 9), np.round(np.average(data, axis=0), 9)) shapeInput = np.random.randint(20, 100, [2, ]) shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item()) cArray = NumCpp.NdArrayComplexDouble(shape) real = np.random.randint(1, 100, [shape.rows, shape.cols]) imag = np.random.randint(1, 100, [shape.rows, shape.cols]) data = real + 1j * imag cArray.setArray(data) assert np.array_equal(np.round(NumCpp.average(cArray, NumCpp.Axis.ROW).flatten(), 9), np.round(np.average(data, axis=0), 9)) shapeInput = np.random.randint(20, 100, [2, ]) shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item()) cArray = NumCpp.NdArray(shape) data = np.random.randint(0, 100, [shape.rows, shape.cols]) cArray.setArray(data) assert np.array_equal(np.round(NumCpp.average(cArray, NumCpp.Axis.COL).flatten(), 9), np.round(np.average(data, axis=1), 9)) shapeInput = np.random.randint(20, 100, [2, ]) shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item()) cArray = NumCpp.NdArrayComplexDouble(shape) real = np.random.randint(1, 100, [shape.rows, shape.cols]) imag = np.random.randint(1, 100, [shape.rows, shape.cols]) data = real + 1j * imag cArray.setArray(data) assert np.array_equal(np.round(NumCpp.average(cArray, NumCpp.Axis.COL).flatten(), 9), np.round(np.average(data, axis=1), 9)) #################################################################################### def test_averageWeighted(): shapeInput = np.random.randint(20, 100, [2, ]) shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item()) cArray = NumCpp.NdArray(shape) cWeights = NumCpp.NdArray(shape) data = np.random.randint(0, 100, [shape.rows, shape.cols]) weights = np.random.randint(1, 5, [shape.rows, shape.cols]) cArray.setArray(data) cWeights.setArray(weights) assert np.round(NumCpp.averageWeighted(cArray, cWeights, NumCpp.Axis.NONE).item(), 9) == \ np.round(np.average(data, weights=weights), 9) shapeInput = np.random.randint(20, 100, [2, ]) shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item()) cArray = NumCpp.NdArrayComplexDouble(shape) cWeights = NumCpp.NdArray(shape) real = np.random.randint(1, 100, [shape.rows, shape.cols]) imag = np.random.randint(1, 100, [shape.rows, shape.cols]) data = real + 1j * imag weights = np.random.randint(1, 5, [shape.rows, shape.cols]) cArray.setArray(data) cWeights.setArray(weights) assert np.round(NumCpp.averageWeighted(cArray, cWeights, 
NumCpp.Axis.NONE).item(), 9) == \ np.round(np.average(data, weights=weights), 9) shapeInput = np.random.randint(20, 100, [2, ]) shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item()) cArray = NumCpp.NdArray(shape) cWeights = NumCpp.NdArray(1, shape.cols) data = np.random.randint(0, 100, [shape.rows, shape.cols]) weights = np.random.randint(1, 5, [1, shape.rows]) cArray.setArray(data) cWeights.setArray(weights) assert np.array_equal(np.round(NumCpp.averageWeighted(cArray, cWeights, NumCpp.Axis.ROW).flatten(), 9), np.round(np.average(data, weights=weights.flatten(), axis=0), 9)) shapeInput = np.random.randint(20, 100, [2, ]) shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item()) cArray = NumCpp.NdArrayComplexDouble(shape) cWeights = NumCpp.NdArray(1, shape.cols) real = np.random.randint(1, 100, [shape.rows, shape.cols]) imag = np.random.randint(1, 100, [shape.rows, shape.cols]) data = real + 1j * imag weights = np.random.randint(1, 5, [1, shape.rows]) cArray.setArray(data) cWeights.setArray(weights) assert np.array_equal(np.round(NumCpp.averageWeighted(cArray, cWeights, NumCpp.Axis.ROW).flatten(), 9), np.round(np.average(data, weights=weights.flatten(), axis=0), 9)) shapeInput = np.random.randint(20, 100, [2, ]) shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item()) cArray = NumCpp.NdArray(shape) cWeights = NumCpp.NdArray(1, shape.rows) data = np.random.randint(0, 100, [shape.rows, shape.cols]) weights = np.random.randint(1, 5, [1, shape.cols]) cWeights.setArray(weights) cArray.setArray(data) assert np.array_equal(np.round(NumCpp.averageWeighted(cArray, cWeights, NumCpp.Axis.COL).flatten(), 9), np.round(np.average(data, weights=weights.flatten(), axis=1), 9)) shapeInput = np.random.randint(20, 100, [2, ]) shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item()) cArray = NumCpp.NdArrayComplexDouble(shape) cWeights = NumCpp.NdArray(1, shape.rows) real = np.random.randint(1, 100, [shape.rows, shape.cols]) imag = np.random.randint(1, 100, [shape.rows, shape.cols]) data = real + 1j * imag weights = np.random.randint(1, 5, [1, shape.cols]) cWeights.setArray(weights) cArray.setArray(data) assert np.array_equal(np.round(NumCpp.averageWeighted(cArray, cWeights, NumCpp.Axis.COL).flatten(), 9), np.round(np.average(data, weights=weights.flatten(), axis=1), 9)) #################################################################################### def test_binaryRepr(): value = np.random.randint(0, np.iinfo(np.uint64).max, [1, ], dtype=np.uint64).item() assert NumCpp.binaryRepr(np.uint64(value)) == np.binary_repr(value, np.iinfo(np.uint64).bits) #################################################################################### def test_bincount(): shapeInput = np.random.randint(20, 100, [2, ]) shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item()) cArray = NumCpp.NdArrayUInt32(shape) data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.uint16) cArray.setArray(data) assert np.array_equal(NumCpp.bincount(cArray, 0).flatten(), np.bincount(data.flatten(), minlength=0)) shapeInput = np.random.randint(20, 100, [2, ]) shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item()) cArray = NumCpp.NdArrayUInt32(shape) data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.uint16) cArray.setArray(data) minLength = int(np.max(data) + 10) assert np.array_equal(NumCpp.bincount(cArray, minLength).flatten(), np.bincount(data.flatten(), minlength=minLength)) shapeInput = np.random.randint(20, 100, [2, ]) shape = 
NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item()) cArray = NumCpp.NdArrayUInt32(shape) cWeights = NumCpp.NdArrayUInt32(shape) data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.uint16) weights = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.uint16) cArray.setArray(data) cWeights.setArray(weights) assert np.array_equal(NumCpp.bincountWeighted(cArray, cWeights, 0).flatten(), np.bincount(data.flatten(), minlength=0, weights=weights.flatten())) shapeInput = np.random.randint(20, 100, [2, ]) shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item()) cArray = NumCpp.NdArrayUInt32(shape) cWeights = NumCpp.NdArrayUInt32(shape) data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.uint16) weights = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.uint16) cArray.setArray(data) cWeights.setArray(weights) minLength = int(np.max(data) + 10) assert np.array_equal(NumCpp.bincountWeighted(cArray, cWeights, minLength).flatten(), np.bincount(data.flatten(), minlength=minLength, weights=weights.flatten())) #################################################################################### def test_bitwise_and(): shapeInput = np.random.randint(20, 100, [2, ]) shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item()) cArray1 = NumCpp.NdArrayUInt64(shape) cArray2 = NumCpp.NdArrayUInt64(shape) data1 = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.uint64) data2 = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.uint64) cArray1.setArray(data1) cArray2.setArray(data2) assert np.array_equal(NumCpp.bitwise_and(cArray1, cArray2), np.bitwise_and(data1, data2)) #################################################################################### def test_bitwise_not(): shapeInput = np.random.randint(20, 100, [2, ]) shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item()) cArray = NumCpp.NdArrayUInt64(shape) data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.uint64) cArray.setArray(data) assert np.array_equal(NumCpp.bitwise_not(cArray), np.bitwise_not(data)) #################################################################################### def test_bitwise_or(): shapeInput = np.random.randint(20, 100, [2, ]) shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item()) cArray1 = NumCpp.NdArrayUInt64(shape) cArray2 = NumCpp.NdArrayUInt64(shape) data1 = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.uint64) data2 = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.uint64) cArray1.setArray(data1) cArray2.setArray(data2) assert np.array_equal(NumCpp.bitwise_or(cArray1, cArray2), np.bitwise_or(data1, data2)) #################################################################################### def test_bitwise_xor(): shapeInput = np.random.randint(20, 100, [2, ]) shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item()) cArray1 = NumCpp.NdArrayUInt64(shape) cArray2 = NumCpp.NdArrayUInt64(shape) data1 = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.uint64) data2 = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.uint64) cArray1.setArray(data1) cArray2.setArray(data2) assert np.array_equal(NumCpp.bitwise_xor(cArray1, cArray2), np.bitwise_xor(data1, data2)) #################################################################################### def test_byteswap(): shapeInput = np.random.randint(20, 100, [2, ]) shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item()) cArray = NumCpp.NdArrayUInt64(shape) data = 
np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.uint64) cArray.setArray(data) assert np.array_equal(NumCpp.byteswap(cArray).shape, shapeInput) #################################################################################### def test_cbrt(): shapeInput = np.random.randint(20, 100, [2, ]) shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item()) cArray = NumCpp.NdArray(shape) data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double) cArray.setArray(data) assert np.array_equal(np.round(NumCpp.cbrtArray(cArray), 9), np.round(np.cbrt(data), 9)) #################################################################################### def test_ceil(): shapeInput = np.random.randint(20, 100, [2, ]) shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item()) cArray = NumCpp.NdArray(shape) data = np.random.randn(shape.rows, shape.cols).astype(np.double) * 1000 cArray.setArray(data) assert np.array_equal(np.round(NumCpp.ceilArray(cArray), 9), np.round(np.ceil(data), 9)) #################################################################################### def test_center_of_mass(): shapeInput = np.random.randint(20, 100, [2, ]) shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item()) cArray = NumCpp.NdArray(shape) data = np.random.randn(shape.rows, shape.cols).astype(np.double) * 1000 cArray.setArray(data) assert np.array_equal(np.round(NumCpp.centerOfMass(cArray, NumCpp.Axis.NONE).flatten(), 9), np.round(meas.center_of_mass(data), 9)) shapeInput = np.random.randint(20, 100, [2, ]) shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item()) cArray = NumCpp.NdArray(shape) data = np.random.randn(shape.rows, shape.cols).astype(np.double) * 1000 cArray.setArray(data) coms = list() for col in range(data.shape[1]): coms.append(np.round(meas.center_of_mass(data[:, col])[0], 9)) assert np.array_equal(np.round(NumCpp.centerOfMass(cArray, NumCpp.Axis.ROW).flatten(), 9), np.round(coms, 9)) shapeInput = np.random.randint(20, 100, [2, ]) shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item()) cArray = NumCpp.NdArray(shape) data = np.random.randn(shape.rows, shape.cols).astype(np.double) * 1000 cArray.setArray(data) coms = list() for row in range(data.shape[0]): coms.append(np.round(meas.center_of_mass(data[row, :])[0], 9)) assert np.array_equal(np.round(NumCpp.centerOfMass(cArray, NumCpp.Axis.COL).flatten(), 9), np.round(coms, 9)) #################################################################################### def test_clip(): value = np.random.randint(0, 100, [1, ]).item() minValue = np.random.randint(0, 10, [1, ]).item() maxValue = np.random.randint(90, 100, [1, ]).item() assert NumCpp.clipScaler(value, minValue, maxValue) == np.clip(value, minValue, maxValue) value = np.random.randint(0, 100, [1, ]).item() + 1j * np.random.randint(0, 100, [1, ]).item() minValue = np.random.randint(0, 10, [1, ]).item() + 1j * np.random.randint(0, 10, [1, ]).item() maxValue = np.random.randint(90, 100, [1, ]).item() + 1j * np.random.randint(0, 100, [1, ]).item() assert NumCpp.clipScaler(value, minValue, maxValue) == np.clip(value, minValue, maxValue) # noqa shapeInput = np.random.randint(20, 100, [2, ]) shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item()) cArray = NumCpp.NdArray(shape) data = np.random.randint(0, 100, [shape.rows, shape.cols]) cArray.setArray(data) minValue = np.random.randint(0, 10, [1, ]).item() maxValue = np.random.randint(90, 100, [1, ]).item() assert np.array_equal(NumCpp.clipArray(cArray, minValue, maxValue), np.clip(data, 
minValue, maxValue)) shapeInput = np.random.randint(20, 100, [2, ]) shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item()) cArray = NumCpp.NdArrayComplexDouble(shape) real = np.random.randint(1, 100, [shape.rows, shape.cols]) imag = np.random.randint(1, 100, [shape.rows, shape.cols]) data = real + 1j * imag cArray.setArray(data) minValue = np.random.randint(0, 10, [1, ]).item() + 1j * np.random.randint(0, 10, [1, ]).item() maxValue = np.random.randint(90, 100, [1, ]).item() + 1j * np.random.randint(0, 100, [1, ]).item() assert np.array_equal(NumCpp.clipArray(cArray, minValue, maxValue), np.clip(data, minValue, maxValue)) # noqa #################################################################################### def test_column_stack(): shapeInput = np.random.randint(20, 100, [2, ]) shape1 = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item()) shape2 = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item() + np.random.randint(1, 10, [1, ]).item()) shape3 = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item() + np.random.randint(1, 10, [1, ]).item()) shape4 = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item() + np.random.randint(1, 10, [1, ]).item()) cArray1 = NumCpp.NdArray(shape1) cArray2 = NumCpp.NdArray(shape2) cArray3 = NumCpp.NdArray(shape3) cArray4 = NumCpp.NdArray(shape4) data1 = np.random.randint(1, 100, [shape1.rows, shape1.cols]) data2 = np.random.randint(1, 100, [shape2.rows, shape2.cols]) data3 = np.random.randint(1, 100, [shape3.rows, shape3.cols]) data4 = np.random.randint(1, 100, [shape4.rows, shape4.cols]) cArray1.setArray(data1) cArray2.setArray(data2) cArray3.setArray(data3) cArray4.setArray(data4) assert np.array_equal(NumCpp.column_stack(cArray1, cArray2, cArray3, cArray4), np.column_stack([data1, data2, data3, data4])) #################################################################################### def test_complex(): real = np.random.rand(1).astype(np.double).item() value = complex(real) assert np.round(NumCpp.complexScaler(real), 9) == np.round(value, 9) components = np.random.rand(2).astype(np.double) value = complex(components[0], components[1]) assert np.round(NumCpp.complexScaler(components[0], components[1]), 9) == np.round(value, 9) shapeInput = np.random.randint(20, 100, [2, ]) shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item()) realArray = NumCpp.NdArray(shape) real = np.random.rand(shape.rows, shape.cols) realArray.setArray(real) assert np.array_equal(np.round(NumCpp.complexArray(realArray), 9), np.round(real + 1j * np.zeros_like(real), 9)) shapeInput = np.random.randint(20, 100, [2, ]) shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item()) realArray = NumCpp.NdArray(shape) imagArray = NumCpp.NdArray(shape) real = np.random.rand(shape.rows, shape.cols) imag = np.random.rand(shape.rows, shape.cols) realArray.setArray(real) imagArray.setArray(imag) assert np.array_equal(np.round(NumCpp.complexArray(realArray, imagArray), 9), np.round(real + 1j * imag, 9)) #################################################################################### def test_concatenate(): shapeInput = np.random.randint(20, 100, [2, ]) shape1 = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item()) shape2 = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item() + np.random.randint(1, 10, [1, ]).item()) shape3 = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item() + np.random.randint(1, 10, [1, ]).item()) shape4 = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item() + np.random.randint(1, 10, [1, ]).item()) cArray1 = 
NumCpp.NdArray(shape1) cArray2 = NumCpp.NdArray(shape2) cArray3 = NumCpp.NdArray(shape3) cArray4 = NumCpp.NdArray(shape4) data1 = np.random.randint(1, 100, [shape1.rows, shape1.cols]) data2 = np.random.randint(1, 100, [shape2.rows, shape2.cols]) data3 = np.random.randint(1, 100, [shape3.rows, shape3.cols]) data4 = np.random.randint(1, 100, [shape4.rows, shape4.cols]) cArray1.setArray(data1) cArray2.setArray(data2) cArray3.setArray(data3) cArray4.setArray(data4) assert np.array_equal(NumCpp.concatenate(cArray1, cArray2, cArray3, cArray4, NumCpp.Axis.NONE).flatten(), np.concatenate([data1.flatten(), data2.flatten(), data3.flatten(), data4.flatten()])) shapeInput = np.random.randint(20, 100, [2, ]) shape1 = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item()) shape2 = NumCpp.Shape(shapeInput[0].item() + np.random.randint(1, 10, [1, ]).item(), shapeInput[1].item()) shape3 = NumCpp.Shape(shapeInput[0].item() + np.random.randint(1, 10, [1, ]).item(), shapeInput[1].item()) shape4 = NumCpp.Shape(shapeInput[0].item() + np.random.randint(1, 10, [1, ]).item(), shapeInput[1].item()) cArray1 = NumCpp.NdArray(shape1) cArray2 = NumCpp.NdArray(shape2) cArray3 = NumCpp.NdArray(shape3) cArray4 = NumCpp.NdArray(shape4) data1 = np.random.randint(1, 100, [shape1.rows, shape1.cols]) data2 = np.random.randint(1, 100, [shape2.rows, shape2.cols]) data3 = np.random.randint(1, 100, [shape3.rows, shape3.cols]) data4 = np.random.randint(1, 100, [shape4.rows, shape4.cols]) cArray1.setArray(data1) cArray2.setArray(data2) cArray3.setArray(data3) cArray4.setArray(data4) assert np.array_equal(NumCpp.concatenate(cArray1, cArray2, cArray3, cArray4, NumCpp.Axis.ROW), np.concatenate([data1, data2, data3, data4], axis=0)) shapeInput = np.random.randint(20, 100, [2, ]) shape1 = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item()) shape2 = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item() + np.random.randint(1, 10, [1, ]).item()) shape3 = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item() + np.random.randint(1, 10, [1, ]).item()) shape4 = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item() + np.random.randint(1, 10, [1, ]).item()) cArray1 = NumCpp.NdArray(shape1) cArray2 = NumCpp.NdArray(shape2) cArray3 = NumCpp.NdArray(shape3) cArray4 = NumCpp.NdArray(shape4) data1 = np.random.randint(1, 100, [shape1.rows, shape1.cols]) data2 = np.random.randint(1, 100, [shape2.rows, shape2.cols]) data3 = np.random.randint(1, 100, [shape3.rows, shape3.cols]) data4 = np.random.randint(1, 100, [shape4.rows, shape4.cols]) cArray1.setArray(data1) cArray2.setArray(data2) cArray3.setArray(data3) cArray4.setArray(data4) assert np.array_equal(NumCpp.concatenate(cArray1, cArray2, cArray3, cArray4, NumCpp.Axis.COL), np.concatenate([data1, data2, data3, data4], axis=1)) #################################################################################### def test_conj(): components = np.random.rand(2).astype(np.double) value = complex(components[0], components[1]) assert np.round(NumCpp.conjScaler(value), 9) == np.round(np.conj(value), 9) shapeInput = np.random.randint(20, 100, [2, ]) shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item()) cArray = NumCpp.NdArrayComplexDouble(shape) data = np.random.rand(shape.rows, shape.cols) + 1j * np.random.rand(shape.rows, shape.cols) cArray.setArray(data) assert np.array_equal(np.round(NumCpp.conjArray(cArray), 9), np.round(np.conj(data), 9)) #################################################################################### def test_contains(): shapeInput = np.random.randint(20, 100, 
[2, ]) shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item()) cArray = NumCpp.NdArray(shape) data = np.random.randint(0, 100, [shape.rows, shape.cols]) value = np.random.randint(0, 100, [1, ]).item() cArray.setArray(data) assert NumCpp.contains(cArray, value, NumCpp.Axis.NONE).getNumpyArray().item() == (value in data) shapeInput = np.random.randint(20, 100, [2, ]) shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item()) cArray = NumCpp.NdArrayComplexDouble(shape) real = np.random.randint(1, 100, [shape.rows, shape.cols]) imag = np.random.randint(1, 100, [shape.rows, shape.cols]) data = real + 1j * imag value = np.random.randint(0, 100, [1, ]).item() + 1j * np.random.randint(0, 100, [1, ]).item() cArray.setArray(data) assert NumCpp.contains(cArray, value, NumCpp.Axis.NONE).getNumpyArray().item() == (value in data) shapeInput = np.random.randint(20, 100, [2, ]) shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item()) cArray = NumCpp.NdArray(shape) data = np.random.randint(0, 100, [shape.rows, shape.cols]) value = np.random.randint(0, 100, [1, ]).item() cArray.setArray(data) truth = list() for row in data: truth.append(value in row) assert np.array_equal(NumCpp.contains(cArray, value, NumCpp.Axis.COL).getNumpyArray().flatten(), np.asarray(truth)) shapeInput = np.random.randint(20, 100, [2, ]) shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item()) cArray = NumCpp.NdArrayComplexDouble(shape) real = np.random.randint(1, 100, [shape.rows, shape.cols]) imag = np.random.randint(1, 100, [shape.rows, shape.cols]) data = real + 1j * imag value = np.random.randint(0, 100, [1, ]).item() + 1j * np.random.randint(0, 100, [1, ]).item() cArray.setArray(data) truth = list() for row in data: truth.append(value in row) assert np.array_equal(NumCpp.contains(cArray, value, NumCpp.Axis.COL).getNumpyArray().flatten(), np.asarray(truth)) shapeInput = np.random.randint(20, 100, [2, ]) shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item()) cArray = NumCpp.NdArray(shape) data = np.random.randint(0, 100, [shape.rows, shape.cols]) value = np.random.randint(0, 100, [1, ]).item() cArray.setArray(data) truth = list() for row in data.T: truth.append(value in row) assert np.array_equal(NumCpp.contains(cArray, value, NumCpp.Axis.ROW).getNumpyArray().flatten(), np.asarray(truth)) shapeInput = np.random.randint(20, 100, [2, ]) shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item()) cArray = NumCpp.NdArrayComplexDouble(shape) real = np.random.randint(1, 100, [shape.rows, shape.cols]) imag = np.random.randint(1, 100, [shape.rows, shape.cols]) data = real + 1j * imag value = np.random.randint(0, 100, [1, ]).item() + 1j * np.random.randint(0, 100, [1, ]).item() cArray.setArray(data) truth = list() for row in data.T: truth.append(value in row) assert np.array_equal(NumCpp.contains(cArray, value, NumCpp.Axis.ROW).getNumpyArray().flatten(), np.asarray(truth)) #################################################################################### def test_copy(): shapeInput = np.random.randint(20, 100, [2, ]) shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item()) cArray = NumCpp.NdArray(shape) data = np.random.randint(0, 100, [shape.rows, shape.cols]) cArray.setArray(data) assert np.array_equal(NumCpp.copy(cArray), data) #################################################################################### def test_copysign(): shapeInput = np.random.randint(20, 100, [2, ]) shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item()) cArray1 = NumCpp.NdArray(shape) cArray2 
= NumCpp.NdArray(shape) data1 = np.random.randint(-100, 100, [shape.rows, shape.cols]) data2 = np.random.randint(-100, 100, [shape.rows, shape.cols]) cArray1.setArray(data1) cArray2.setArray(data2) assert np.array_equal(NumCpp.copysign(cArray1, cArray2), np.copysign(data1, data2)) #################################################################################### def test_copyto(): shapeInput = np.random.randint(20, 100, [2, ]) shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item()) cArray1 = NumCpp.NdArray(shape) cArray2 = NumCpp.NdArray() data1 = np.random.randint(-100, 100, [shape.rows, shape.cols]) cArray1.setArray(data1) assert np.array_equal(NumCpp.copyto(cArray2, cArray1), data1) #################################################################################### def test_cos(): value = np.abs(np.random.rand(1).item()) assert np.round(NumCpp.cosScaler(value), 9) == np.round(np.cos(value), 9) components = np.random.rand(2).astype(np.double) value = complex(components[0], components[1]) assert np.round(NumCpp.cosScaler(value), 9) == np.round(np.cos(value), 9) shapeInput = np.random.randint(20, 100, [2, ]) shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item()) cArray = NumCpp.NdArray(shape) data = np.random.rand(shape.rows, shape.cols) cArray.setArray(data) assert np.array_equal(np.round(NumCpp.cosArray(cArray), 9), np.round(np.cos(data), 9)) shapeInput = np.random.randint(20, 100, [2, ]) shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item()) cArray = NumCpp.NdArrayComplexDouble(shape) data = np.random.rand(shape.rows, shape.cols) + 1j * np.random.rand(shape.rows, shape.cols) cArray.setArray(data) assert np.array_equal(np.round(NumCpp.cosArray(cArray), 9), np.round(np.cos(data), 9)) #################################################################################### def test_cosh(): value = np.abs(np.random.rand(1).item()) assert np.round(NumCpp.coshScaler(value), 9) == np.round(np.cosh(value), 9) components = np.random.rand(2).astype(np.double) value = complex(components[0], components[1]) assert np.round(NumCpp.coshScaler(value), 9) == np.round(np.cosh(value), 9) shapeInput = np.random.randint(20, 100, [2, ]) shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item()) cArray = NumCpp.NdArray(shape) data = np.random.rand(shape.rows, shape.cols) cArray.setArray(data) assert np.array_equal(np.round(NumCpp.coshArray(cArray), 9), np.round(np.cosh(data), 9)) shapeInput = np.random.randint(20, 100, [2, ]) shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item()) cArray = NumCpp.NdArrayComplexDouble(shape) data = np.random.rand(shape.rows, shape.cols) + 1j * np.random.rand(shape.rows, shape.cols) cArray.setArray(data) assert np.array_equal(np.round(NumCpp.coshArray(cArray), 9), np.round(np.cosh(data), 9)) #################################################################################### def test_count_nonzero(): shapeInput = np.random.randint(1, 50, [2, ]) shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item()) cArray = NumCpp.NdArray(shape) data = np.random.randint(0, 3, [shape.rows, shape.cols], dtype=np.uint32) cArray.setArray(data) assert NumCpp.count_nonzero(cArray, NumCpp.Axis.NONE) == np.count_nonzero(data) shapeInput = np.random.randint(1, 50, [2, ]) shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item()) cArray = NumCpp.NdArrayComplexDouble(shape) real = np.random.randint(1, 3, [shape.rows, shape.cols]) imag = np.random.randint(1, 3, [shape.rows, shape.cols]) data = real + 1j * imag cArray.setArray(data) assert 
NumCpp.count_nonzero(cArray, NumCpp.Axis.NONE) == np.count_nonzero(data) shapeInput = np.random.randint(1, 50, [2, ]) shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item()) cArray = NumCpp.NdArray(shape) data = np.random.randint(0, 3, [shape.rows, shape.cols], dtype=np.uint32) cArray.setArray(data) assert np.array_equal(NumCpp.count_nonzero(cArray, NumCpp.Axis.ROW).flatten(), np.count_nonzero(data, axis=0)) shapeInput = np.random.randint(1, 50, [2, ]) shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item()) cArray = NumCpp.NdArrayComplexDouble(shape) real = np.random.randint(1, 3, [shape.rows, shape.cols]) imag = np.random.randint(1, 3, [shape.rows, shape.cols]) data = real + 1j * imag cArray.setArray(data) assert np.array_equal(NumCpp.count_nonzero(cArray, NumCpp.Axis.ROW).flatten(), np.count_nonzero(data, axis=0)) shapeInput = np.random.randint(1, 50, [2, ]) shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item()) cArray = NumCpp.NdArray(shape) data = np.random.randint(0, 3, [shape.rows, shape.cols], dtype=np.uint32) cArray.setArray(data) assert np.array_equal(NumCpp.count_nonzero(cArray, NumCpp.Axis.COL).flatten(), np.count_nonzero(data, axis=1)) shapeInput = np.random.randint(1, 50, [2, ]) shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item()) cArray = NumCpp.NdArrayComplexDouble(shape) real = np.random.randint(1, 3, [shape.rows, shape.cols]) imag = np.random.randint(1, 3, [shape.rows, shape.cols]) data = real + 1j * imag cArray.setArray(data) assert np.array_equal(NumCpp.count_nonzero(cArray, NumCpp.Axis.COL).flatten(), np.count_nonzero(data, axis=1)) #################################################################################### def test_cross(): shape = NumCpp.Shape(1, 2) cArray1 = NumCpp.NdArray(shape) cArray2 = NumCpp.NdArray(shape) data1 = np.random.randint(1, 10, [shape.rows, shape.cols]).astype(np.double) data2 = np.random.randint(1, 10, [shape.rows, shape.cols]).astype(np.double) cArray1.setArray(data1) cArray2.setArray(data2) assert NumCpp.cross(cArray1, cArray2, NumCpp.Axis.NONE).item() ==
np.cross(data1, data2)
numpy.cross
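# The NumCpp tests above all repeat one pattern: build a NumCpp.Shape, mirror
# the same data into an NdArray with setArray, and compare the binding's output
# against the numpy reference rounded to 9 decimal places. A minimal sketch of
# that pattern as a reusable helper; it relies only on the NumCpp.Shape /
# NumCpp.NdArray / setArray bindings already used above, and the helper name
# and fixed default shape are illustrative, not part of the test suite.
def check_unary(nc_func, np_func, rows=5, cols=7, decimals=9):
    shape = NumCpp.Shape(rows, cols)
    cArray = NumCpp.NdArray(shape)
    data = np.random.rand(shape.rows, shape.cols)
    cArray.setArray(data)
    return np.array_equal(np.round(nc_func(cArray), decimals),
                          np.round(np_func(data), decimals))

# e.g. check_unary(NumCpp.arctanArray, np.arctan) mirrors the real-valued
# arctan check above.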
# -*- coding: utf-8 -*- ########### SVN repository information ################### # $Date: 2017-10-23 11:39:16 -0500 (Mon, 23 Oct 2017) $ # $Author: vondreele $ # $Revision: 3136 $ # $URL: https://subversion.xray.aps.anl.gov/pyGSAS/trunk/imports/G2sad_xye.py $ # $Id: G2sad_xye.py 3136 2017-10-23 16:39:16Z vondreele $ ########### SVN repository information ################### ''' *Module G2sad_xye: read small angle data* ------------------------------------------------ Routines to read in small angle data from an .xye type file, with two-theta or Q steps. ''' from __future__ import division, print_function import os.path as ospath import numpy as np import GSASIIobj as G2obj import GSASIIpath GSASIIpath.SetVersionNumber("$Revision: 3136 $") npasind = lambda x: 180.*np.arcsin(x)/np.pi class txt_XRayReaderClass(G2obj.ImportSmallAngleData): 'Routines to import X-ray q SAXD data from a .xsad or .xdat file' def __init__(self): super(self.__class__,self).__init__( # fancy way to self-reference extensionlist=('.xsad','.xdat'), strictExtension=False, formatName = 'q (A-1) step X-ray QIE data', longFormatName = 'q (A-1) stepped X-ray text data file in Q,I,E order; E optional' ) # Validate the contents -- make sure we only have valid lines def ContentsValidator(self, filename): 'Look through the file for expected types of lines in a valid q-step file' Ndata = 0 fp = open(filename,'r') for i,S in enumerate(fp): vals = S.split() if len(vals) >= 2: try: data = [float(val) for val in vals] Ndata += 1 except ValueError: pass fp.close() if not Ndata: self.errors = 'No 2 or more column numeric data found' return False return True # no errors encountered def Reader(self,filename, ParentFrame=None, **unused): print ('Read a q-step text file') x = [] y = [] w = [] wave = 1.5428 #Cuka default Temperature = 300 fp = open(filename,'r') for i,S in enumerate(fp): if len(S) == 1: #skip blank line continue if '=' in S: self.comments.append(S[:-1]) if 'wave' in S.split('=')[0].lower(): try: wave = float(S.split('=')[1]) except: pass continue vals = S.split() if len(vals) >= 2: try: data = [float(val) for val in vals] x.append(float(data[0])) f = float(data[1]) if f <= 0.0: del x[-1] continue elif len(vals) > 2: y.append(float(data[1])) w.append(1.0/float(data[2])**2) else: y.append(float(data[1])) w.append(1.0/float(data[1])) except ValueError: msg = 'Error in line '+str(i+1) print (msg) continue fp.close() N = len(x) for S in self.comments: if 'Temp' in S.split('=')[0]: try: Temperature = float(S.split('=')[1]) except: pass self.instdict['wave'] = wave self.instdict['type'] = 'LXC' x = np.array(x) self.smallangledata = [ x, # x-axis values q np.array(y), # small angle pattern intensities np.array(w), # 1/sig(intensity)^2 values (weights) np.zeros(N), # calc. 
intensities (zero) np.zeros(N), # obs-calc profiles np.zeros(N), # fix bkg ] self.smallangleentry[0] = filename self.smallangleentry[2] = 1 # xye file only has one bank self.idstring = ospath.basename(filename) # scan comments for temperature self.Sample['Temperature'] = Temperature return True class txt_nmXRayReaderClass(G2obj.ImportSmallAngleData): 'Routines to import X-ray q SAXD data from a .xsad or .xdat file, q in nm-1' def __init__(self): super(self.__class__,self).__init__( # fancy way to self-reference extensionlist=('.xsad','.xdat'), strictExtension=False, formatName = 'q (nm-1) step X-ray QIE data', longFormatName = 'q (nm-1) stepped X-ray text data file in Q,I,E order; E optional' ) # Validate the contents -- make sure we only have valid lines def ContentsValidator(self, filename): 'Look through the file for expected types of lines in a valid q-step file' Ndata = 0 fp = open(filename,'r') for i,S in enumerate(fp): vals = S.split() if len(vals) >= 2: try: data = [float(val) for val in vals] Ndata += 1 except ValueError: pass fp.close() if not Ndata: self.errors = 'No 2 or more column numeric data found' return False return True # no errors encountered def Reader(self,filename, ParentFrame=None, **unused): print ('Read a q-step text file') x = [] y = [] w = [] wave = 1.5428 #Cuka default Temperature = 300 fp = open(filename,'r') for i,S in enumerate(fp): if len(S) == 1: #skip blank line continue if '=' in S: self.comments.append(S[:-1]) if 'wave' in S.split('=')[0].lower(): try: wave = float(S.split('=')[1]) except: pass continue vals = S.split() if len(vals) >= 2: try: data = [float(val) for val in vals] x.append(float(data[0])/10.) #convert nm-1 to A-1 f = float(data[1]) if f <= 0.0: x.pop() continue elif len(vals) > 2: y.append(float(data[1])) w.append(1.0/float(data[2])**2) else: y.append(float(data[1])) w.append(1.0/float(data[1])) except ValueError: msg = 'Error in line '+str(i+1) print (msg) continue fp.close() N = len(x) for S in self.comments: if 'Temp' in S.split('=')[0]: try: Temperature = float(S.split('=')[1]) except: pass self.instdict['wave'] = wave self.instdict['type'] = 'LXC' x = np.array(x) self.smallangledata = [ x, # x-axis values q np.array(y), # small angle pattern intensities np.array(w), # 1/sig(intensity)^2 values (weights) np.zeros(N), # calc. 
intensities (zero) np.zeros(N), # obs-calc profiles np.zeros(N), # fix bkg ] self.smallangleentry[0] = filename self.smallangleentry[2] = 1 # xye file only has one bank self.idstring = ospath.basename(filename) # scan comments for temperature self.Sample['Temperature'] = Temperature return True class txt_CWNeutronReaderClass(G2obj.ImportSmallAngleData): 'Routines to import neutron CW q SAXD data from a .nsad or .ndat file' def __init__(self): super(self.__class__,self).__init__( # fancy way to self-reference extensionlist=('.nsad','.ndat'), strictExtension=False, formatName = 'q (A-1) step neutron CW QIE data', longFormatName = 'q (A-1) stepped neutron CW text data file in Q,I,E order; E optional' ) # Validate the contents -- make sure we only have valid lines def ContentsValidator(self, filename): 'Look through the file for expected types of lines in a valid q-step file' Ndata = 0 fp = open(filename,'r') for i,S in enumerate(fp): vals = S.split() if len(vals) >= 2: try: data = [float(val) for val in vals] Ndata += 1 except ValueError: pass fp.close() if not Ndata: self.errors = 'No 2 or more column numeric data found' return False return True # no errors encountered def Reader(self,filename, ParentFrame=None, **unused): print ('Read a q-step text file') x = [] y = [] w = [] wave = 1.5428 #Cuka default Temperature = 300 fp = open(filename,'r') for i,S in enumerate(fp): if len(S) == 1: #skip blank line continue if '=' in S: self.comments.append(S[:-1]) if 'wave' in S.split('=')[0].lower(): try: wave = float(S.split('=')[1]) except: pass continue vals = S.split() if len(vals) >= 2: try: data = [float(val) for val in vals] x.append(float(data[0])) f = float(data[1]) if f <= 0.0: y.append(0.0) w.append(1.0) elif len(vals) > 2: y.append(float(data[1])) w.append(1.0/float(data[2])**2) else: y.append(float(data[1])) w.append(1.0/float(data[1])) except ValueError: msg = 'Error in line '+str(i+1) print (msg) continue fp.close() N = len(x) for S in self.comments: if 'Temp' in S.split('=')[0]: try: Temperature = float(S.split('=')[1]) except: pass self.instdict['wave'] = wave self.instdict['type'] = 'LNC' x = np.array(x) if np.any(x > 2.): #q must be nm-1 x /= 10. self.smallangledata = [ x, # x-axis values q np.array(y), # small angle pattern intensities np.array(w), # 1/sig(intensity)^2 values (weights) np.zeros(N), # calc. 
intensities (zero) np.zeros(N), # obs-calc profiles np.zeros(N), # fix bkg ] self.smallangleentry[0] = filename self.smallangleentry[2] = 1 # xye file only has one bank self.idstring = ospath.basename(filename) # scan comments for temperature self.Sample['Temperature'] = Temperature return True class txt_nmCWNeutronReaderClass(G2obj.ImportSmallAngleData): 'Routines to import neutron CW q in nm-1 SAXD data from a .nsad or .ndat file' def __init__(self): super(self.__class__,self).__init__( # fancy way to self-reference extensionlist=('.nsad','.ndat'), strictExtension=False, formatName = 'q (nm-1) step neutron CW QIE data', longFormatName = 'q (nm-1) stepped neutron CW text data file in Q,I,E order; E optional' ) # Validate the contents -- make sure we only have valid lines def ContentsValidator(self, filename): 'Look through the file for expected types of lines in a valid q-step file' Ndata = 0 fp = open(filename,'r') for i,S in enumerate(fp): vals = S.split() if len(vals) >= 2: try: data = [float(val) for val in vals] Ndata += 1 except ValueError: pass fp.close() if not Ndata: self.errors = 'No 2 or more column numeric data found' return False return True # no errors encountered def Reader(self,filename, ParentFrame=None, **unused): print ('Read a q-step text file') x = [] y = [] w = [] wave = 1.5428 #Cuka default Temperature = 300 fp = open(filename,'r') for i,S in enumerate(fp): if len(S) == 1: #skip blank line continue if '=' in S: self.comments.append(S[:-1]) if 'wave' in S.split('=')[0].lower(): try: wave = float(S.split('=')[1]) except: pass continue vals = S.split() if len(vals) >= 2: try: data = [float(val) for val in vals] x.append(float(data[0])/10.) #convert to A-1 f = float(data[1]) if f <= 0.0: y.append(0.0) w.append(1.0) elif len(vals) > 2: y.append(float(data[1])) w.append(1.0/float(data[2])**2) else: y.append(float(data[1])) w.append(1.0/float(data[1])) except ValueError: msg = 'Error in line '+str(i+1) print (msg) continue fp.close() N = len(x) for S in self.comments: if 'Temp' in S.split('=')[0]: try: Temperature = float(S.split('=')[1]) except: pass self.instdict['wave'] = wave self.instdict['type'] = 'LNC' x = np.array(x) self.smallangledata = [ x, # x-axis values q np.array(y), # small angle pattern intensities np.array(w), # 1/sig(intensity)^2 values (weights) np.zeros(N), # calc. intensities (zero) np.zeros(N), # obs-calc profiles
np.zeros(N)
numpy.zeros
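# A minimal, self-contained sketch (not the GSAS-II importer API) of the parsing
# convention the two q-step reader classes above share: columns of Q, I and an
# optional esd, weights of 1/esd**2 when the esd column is present and 1/I
# otherwise, and q supplied in nm^-1 divided by 10 to give A^-1. The function
# name and the q_in_nm flag are illustrative assumptions, not part of the original.
import numpy as np

def read_qie_text(filename, q_in_nm=False):
    q, intens, wts = [], [], []
    with open(filename, 'r') as fp:
        for line in fp:
            if '=' in line:              # metadata such as 'wave=1.5428' or 'Temp=300'
                continue
            vals = line.split()
            if len(vals) < 2:            # blank or otherwise unusable line
                continue
            try:
                data = [float(v) for v in vals]
            except ValueError:
                continue
            q.append(data[0] / 10. if q_in_nm else data[0])
            if data[1] <= 0.:            # non-positive intensity: zeroed, unit weight
                intens.append(0.)
                wts.append(1.)
            elif len(data) > 2:          # esd column present
                intens.append(data[1])
                wts.append(1. / data[2] ** 2)
            else:                        # no esd: Poisson-like weight 1/I
                intens.append(data[1])
                wts.append(1. / data[1])
    N = len(q)
    # mirror the smallangledata layout: q, I, weights, plus a zeroed calc. array
    return np.array(q), np.array(intens), np.array(wts), np.zeros(N)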
#!/usr/bin/env python # coding: utf-8 # # # LMC 3D structure Final Version with Systematics # # np.random.choice([Roger,Hector, Alfred,Luis,Angel,Xavi]) # In[ ]: ####################### #### Load packages #### ####################### from scipy import stats import matplotlib.pyplot as plt from mpl_toolkits.axes_grid1 import make_axes_locatable from matplotlib.colors import LogNorm # import warnings import sys import numpy as np import pandas as pd import time import numpy as np import pandas as pd import matplotlib.pyplot as plt from sklearn.mixture import GaussianMixture from sklearn.gaussian_process import GaussianProcessRegressor from sklearn.gaussian_process.kernels import ConstantKernel from scipy.optimize import curve_fit from scipy.stats import gaussian_kde from scipy.interpolate import Rbf from scipy.stats import multivariate_normal from scipy.linalg import pinv def rbf(X,y,k): idx = np.random.randint(np.size(X,axis = 0),size = k) centroids = X[idx,:] xcross = np.dot(X,X.T) xnorms = np.repeat(np.diag(np.dot(X,X.T)).reshape(1,-1),np.size(X,axis=0),axis=0) sigma = np.median(xnorms-2.*xcross+xnorms.T) n = X.shape[0] values = [] for x in X: for c in centroids: values.append(np.exp(-np.sum((x-c)**2.)/sigma)) phiX = np.reshape(values,(n,k)) psinv = pinv(np.dot(phiX.T,phiX)) w = np.dot(psinv,np.dot(phiX.T,y)) return w,centroids,sigma def rbf_predict(Xhat,w,centroids,sigma): n = Xhat.shape[0] k = centroids.shape[0] values = [] for x in Xhat: for c in centroids: values.append(np.exp(-np.sum((x-c)**2.)/sigma)) phi_Xhat = np.reshape(values,(n,k)) return np.dot(phi_Xhat,w) def proper2geo_fn(xyz,distCenterLMC,alphaCenterLMC,deltaCenterLMC, posAngleLMC,inclAngleLMC): # Transform samples of location coordinates in the proper frame of the LMC # to the rectangular heliocentric frame # # References: # <NAME> & Cioni (2001) # Weinberg and Nikolaev (2001) # # Parameters: # -xyz A tensor of shape=(N, 3) containing N samples in the # proper LMC frame # -N No of samples # -distCenterLMC Distance to the LMC centre (kpc) # -alphaCenterLMC RA of the LMC centre (rad) # -deltaCenterLMC Dec of the LMC centre (rad) # -posAngleLMC Position angle of the LON measured w.r.t. the North (rad) # -inclAngleLMC Inclination angle (rad) # # Return: A tensor of shape=(N, 3) containing N samples of rectangular # coordinates in the heliocentric frame # Affine transformation from local LMC frame to heliocentric frame s11 = np.sin(alphaCenterLMC) s12 = -np.cos(alphaCenterLMC) * np.sin(deltaCenterLMC) s13 = -np.cos(alphaCenterLMC) * np.cos(deltaCenterLMC) s21 = -np.cos(alphaCenterLMC) s22 = -np.sin(alphaCenterLMC) *
np.sin(deltaCenterLMC)
numpy.sin
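# Hedged usage sketch for the rbf/rbf_predict helpers defined in the script above:
# fit k Gaussian basis functions to noisy toy 1-D data via the pseudo-inverse
# solution and evaluate on a test grid. The toy data and k=20 are illustrative
# assumptions; the snippet assumes rbf and rbf_predict (and their numpy/scipy
# imports) from the code above are in scope.
import numpy as np

np.random.seed(0)
X_train = np.linspace(-3., 3., 200).reshape(-1, 1)                  # (n, d) design matrix
y_train = np.sin(2. * X_train[:, 0]) + 0.1 * np.random.randn(200)   # noisy target

w, centroids, sigma = rbf(X_train, y_train, 20)                     # weights via pinv(phi.T phi)
X_test = np.linspace(-3., 3., 50).reshape(-1, 1)
y_pred = rbf_predict(X_test, w, centroids, sigma)                   # (50,) predictions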
# -*- coding: utf-8 -*-
"""
Defines unit tests for :mod:`colour.algebra.matrix` module.
"""

from __future__ import division, unicode_literals

import numpy as np
import unittest

from colour.algebra import is_identity

__author__ = '<NAME>'
__copyright__ = 'Copyright (C) 2013-2019 - Colour Developers'
__license__ = 'New BSD License - https://opensource.org/licenses/BSD-3-Clause'
__maintainer__ = 'Colour Developers'
__email__ = '<EMAIL>'
__status__ = 'Production'

__all__ = ['TestIsIdentity']


class TestIsIdentity(unittest.TestCase):
    """
    Defines :func:`colour.algebra.matrix.is_identity` definition unit tests
    methods.
    """

    def test_is_identity(self):
        """
        Tests :func:`colour.algebra.matrix.is_identity` definition.
        """

        self.assertTrue(
            is_identity(np.array([1, 0, 0, 0, 1, 0, 0, 0, 1]).reshape([3, 3])))

        self.assertFalse(
            is_identity(
np.array([1, 2, 0, 0, 1, 0, 0, 0, 1])
numpy.array
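# A minimal numpy-only sketch (an assumption for illustration, not the colour
# library's actual is_identity implementation) of the property the test above
# exercises: a square matrix is the identity iff it matches np.identity of the
# same size. The helper name looks_like_identity is hypothetical.
import numpy as np

def looks_like_identity(M, atol=1e-8):
    M = np.asarray(M)
    return (M.ndim == 2 and M.shape[0] == M.shape[1]
            and np.allclose(M, np.identity(M.shape[0]), atol=atol))

assert looks_like_identity(np.array([1, 0, 0, 0, 1, 0, 0, 0, 1]).reshape([3, 3]))
assert not looks_like_identity(np.array([1, 2, 0, 0, 1, 0, 0, 0, 1]).reshape([3, 3]))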
from __future__ import print_function ''' generally for reading db's having bb's or pixlevel pascal voc kitti mapillary http://host.robots.ox.ac.uk/pascal/VOC/databases.html#VOC2005_2 ''' __author__ = 'jeremy' import os import cv2 import sys import re import pdb import csv import xml.etree.ElementTree as ET import pickle import os from os import listdir, getcwd from os.path import join import json import random import logging logging.basicConfig(level=logging.DEBUG) from multiprocessing import Pool from functools import partial from itertools import repeat import copy import numpy as np import time import random #for mapillary, got lazy and not using cv2 instead of original PIL import json import numpy as np import matplotlib.pyplot as plt from PIL import Image from trendi import Utils from trendi.classifier_stuff.caffe_nns import create_nn_imagelsts from trendi.utils import imutils from trendi import constants from trendi import kassper from trendi import background_removal #from trendi.utils import augment_images def kitti_to_tgdict(label_dir='/data/jeremy/image_dbs/hls/kitti/training/label_2', image_dir = '/data/jeremy/image_dbs/hls/kitti/training/image_2',visual_output=True, write_json=True,jsonfile=None,img_suffix='.png',label_suffix='.txt'): ''' reads data at http://www.vision.caltech.edu/Image_Datasets/CaltechPedestrians/datasets/USA/ which has a file for each image, filenames 000000.txt, 000001.txt etc, each file has a line like: Pedestrian 0.00 0 -0.20 712.40 143.00 810.73 307.92 1.89 0.48 1.20 1.84 1.47 8.41 0.01 in format: 1 type Describes the type of object: 'Car', 'Van', 'Truck', 'Pedestrian', 'Person_sitting', 'Cyclist', 'Tram', 'Misc' or 'DontCare' 1 truncated Float from 0 (non-truncated) to 1 (truncated), where truncated refers to the object leaving image boundaries 1 occluded Integer (0,1,2,3) indicating occlusion state: 0 = fully visible, 1 = partly occluded 2 = largely occluded, 3 = unknown 1 alpha Observation angle of object, ranging [-pi..pi] 4 bbox 2D bounding box of object in the image (0-based index): contains left, top, right, bottom pixel coordinates 3 dimensions 3D object dimensions: height, width, length (in meters) 3 location 3D object location x,y,z in camera coordinates (in meters) 1 rotation_y Rotation ry around Y-axis in camera coordinates [-pi..pi] 1 score Only for results: Float, indicating confidence in detection, needed for p/r curves, higher is better. :param dir: :return: ''' #todo - use perspective transform (useful for hls...) 
along the lines of below, maybe use semirandom trapezoid for 4 points # pts1 = np.float32([[56,65],[368,52],[28,387],[389,390]]) # pts2 = np.float32([[0,0],[300,0],[0,300],[300,300]]) # M = cv2.getPerspectiveTransform(pts1,pts2) # dst = cv2.warpPerspective(img,M,(300,300)) files = [os.path.join(label_dir,f) for f in os.listdir(label_dir)] files.sort() types=[] all_annotations = [] n=0 n_tot = len(files) for f in files: # filename = os.path.join(dir,'%06d.txt'%i) n=n+1 print('{}/{} {}'.format(n,n_tot,f)) if not os.path.exists(f): print('{} not found'.format(f)) continue result_dict = {} # result_dict['data']=[] f_dir = os.path.dirname(f) par_dir = Utils.parent_dir(f_dir) f_base = os.path.basename(f) img_base = f_base.replace(label_suffix,img_suffix) img_file = os.path.join(image_dir,img_base) result_dict['filename']=img_file result_dict['annotations']=[] img_arr = cv2.imread(img_file) if img_arr is None: logging.warning('could not get img arr for {}'.format(img_file)) h,w=10000,10000 else: result_dict['dimensions_h_w_c'] = img_arr.shape h,w=img_arr.shape[0:2] print('got image h{} x w{} '.format(h,w)) with open(f,'r' ) as fp: lines = fp.readlines() n_line=0 n_lines=len(lines) for line in lines: n_line=n_line+1 print('{}/{} '.format(n_line,n_lines)+ line) try: elements = line.split() type=elements[0] truncated=elements[1] occluded=elements[2] alpha=elements[3] x1=int(float(elements[4])) y1=int(float(elements[5])) x2=int(float(elements[6])) y2=int(float(elements[7])) except: print("error getting elements from line:", sys.exc_info()[0]) print('{} {} x1 {} y1 {} x2 {} y2 {}'.format(f,type,x1,y1,x2,y2)) x1=max(0,x1) y1=max(0,y1) x2=min(w,x2) y2=min(h,y2) tg_type = constants.kitti_to_hls_map[type] print('converted: {} x1 {} y1 {} x2 {} y2 {}'.format(tg_type,x1,y1,x2,y2)) if tg_type is None: logging.info('tgtype for {} is None, moving on'.format(type)) continue bb_xywh = [x1,y1,(x2-x1),(y2-y1)] if not type in types: #this is keeping track of all types seen in case above list is incomplete types.append(type) print('types:'+str(types)) object_dict={} object_dict['bbox_xywh'] = bb_xywh object_dict['object']= tg_type object_dict['original_object'] = type result_dict['annotations'].append(object_dict) if visual_output: print('drawing bb') img_arr=imutils.bb_with_text(img_arr,bb_xywh,tg_type) if visual_output: cv2.imshow('kitti2tgdict',img_arr) cv2.waitKey(0) all_annotations.append(result_dict) if write_json: print('writing json') if jsonfile == None: labeldir_alone = label_dir.split('/')[-1] par_dir = Utils.parent_dir(label_dir) jsonfile = os.path.join(par_dir,labeldir_alone+'.json') print('jsonfile:'+str(jsonfile)) Utils.ensure_file(jsonfile) with open(jsonfile,'w ') as fp: json.dump(all_annotations,fp,indent=4) fp.close() def read_rmptfmp_write_yolo(images_dir='/data/jeremy/image_dbs/hls/data.vision.ee.ethz.ch',gt_file='refined.idl',class_no=0,visual_output=False,label_destination='labels'): ''' reads from gt for dataset from https://data.vision.ee.ethz.ch/cvl/aess/dataset/ (pedestrians only) '"left/image_00000001.png": (212, 204, 232, 261):-1, (223, 181, 259, 285):-1, (293, 151, 354, 325):-1, (452, 208, 479, 276):-1, (255, 219, 268, 249):-1, (280, 219, 291, 249):-1, (267, 246, 279, 216):-1, (600, 247, 584, 210):-1;' writes to yolo format ''' # Define the codec and create VideoWriter object # not necessary fot function , just wanted to track boxes # fourcc = cv2.VideoWriter_fourcc(*'XVID') # out = cv2.VideoWriter('output.avi',fourcc, 20.0, (640,480)) # pdb.set_trace() with 
open(os.path.join(images_dir,gt_file),'r') as fp: lines = fp.readlines() for line in lines: print(line) elements = re.findall(r"[-\w']+",line) print(elements) # elements = line.split imgname = line.split()[0].replace('"','').replace(':','').replace('\n','')#.replace('.png','_0.png') # print('img name '+str(imgname)) imgname = os.path.basename(imgname) #ignore dir referred to in gt file and use mine if imgname[-6:] != '_0.png': print('imgname {} has no _0 at end'.format(imgname)) imgname = imgname.replace('.png','_0.png') fullpath=os.path.join(images_dir,imgname) if not os.path.isfile(fullpath): print('couldnt find {}'.format(fullpath)) continue print('reading {}'.format(fullpath)) img_arr = cv2.imread(fullpath) img_dims = (img_arr.shape[1],img_arr.shape[0]) #widthxheight png_element_index = elements.index('png') bb_list_xywh = [] ind = png_element_index+1 n_bb=0 while ind<len(elements): x1=int(elements[ind]) if x1 == -1: ind=ind+1 x1=int(elements[ind]) y1=int(elements[ind+1]) x2=int(elements[ind+2]) y2=int(elements[ind+3]) ind = ind+4 if y2 == -1: print('XXX warning, got a -1 XXX') n_bb += 1 bb = Utils.fix_bb_x1y1x2y2([x1,y1,x2,y2]) bb_xywh = [bb[0],bb[1],bb[2]-bb[0],bb[3]-bb[1]] bb_list_xywh.append(bb_xywh) print('ind {} x1 {} y1 {} x2 {} y2 {} bbxywh {}'.format(ind,x1,y1,x2,y2,bb_xywh)) if visual_output: cv2.rectangle(img_arr,(x1,y1),(x2,y2),color=[100,255,100],thickness=2) print('{} bounding boxes for this image (png {} len {} '.format(n_bb,png_element_index,len(elements))) print('sending {} for writing'.format(bb_list_xywh)) write_yolo_labels(fullpath,bb_list_xywh,class_no,img_dims) if visual_output: cv2.imshow('img',img_arr) cv2.waitKey(0) # out.write(img_arr) # out.release() if visual_output: cv2.destroyAllWindows() def write_yolo_labels(img_path,bb_list_xywh,class_number,image_dims,destination_dir=None,overwrite=True): ''' output : for yolo - https://pjreddie.com/darknet/yolo/ Darknet wants a .txt file for each image with a line for each ground truth object in the image that looks like: <object-class> <x> <y> <width> <height> where those are percentages... it looks like yolo makes an assumption abt where images and label files are, namely in parallel dirs. 
named: JPEGImages labels and a train.txt file pointing to just the images - and the label files are same names with .txt instead of .jpg :param img_path: :param bb_xywh: :param class_number: :param destination_dir: :return: ''' if destination_dir is None: destination_dir = Utils.parent_dir(os.path.basename(img_path)) destination_dir = os.path.join(destination_dir,'labels') Utils.ensure_dir(destination_dir) img_basename = os.path.basename(img_path) img_basename = img_basename.replace('.jpg','.txt').replace('.png','.txt').replace('.bmp','.txt') destination_path=os.path.join(destination_dir,img_basename) if overwrite: write_mode = 'w' else: write_mode = 'a' with open(destination_path,write_mode) as fp: for bb_xywh in bb_list_xywh: x_center = bb_xywh[0]+bb_xywh[2]/2.0 y_center = bb_xywh[1]+bb_xywh[3]/2.0 x_p = float(x_center)/image_dims[0] y_p = float(y_center)/image_dims[1] w_p = float(bb_xywh[2])/image_dims[0] h_p = float(bb_xywh[3])/image_dims[1] line = str(class_number)+' '+str(round(x_p,4))+' '+str(round(y_p,4))+' '+str(round(w_p,4))+' '+str(round(h_p,4))+'\n' print('writing "{}" to {}'.format(line[:-1],destination_path)) fp.write(line) fp.close() # if not os.exists(destination_path): # Utils.ensure_file(destination_path) def write_yolo_trainfile(image_dir,trainfile='train.txt',filter='.png',split_to_test_and_train=0.05,check_for_bbfiles=True,bb_dir=None,labels_dir=None): ''' this is just a list of full paths to the training images. the labels apparently need to be in parallel dir(s) called 'labels' note this appends to trainfile , doesnt overwrite , to facilitate building up from multiple sources :param dir: :param trainfile: :return: ''' if filter: files = [os.path.join(image_dir,f) for f in os.listdir(image_dir) if filter in f] else: files = [os.path.join(image_dir,f) for f in os.listdir(image_dir)] print('{} files w filter {} in {}'.format(len(files),filter,image_dir)) if check_for_bbfiles: if bb_dir == None: if labels_dir: labeldir = os.path.basename(image_dir)+labels_dir else: labeldir = os.path.basename(image_dir) bb_dir = os.path.join(Utils.parent_dir(image_dir),labeldir) print('checking for bbs in '+bb_dir) if len(files) == 0: print('no files fitting {} in {}, stopping'.format(filter,image_dir)) return count = 0 with open(trainfile,'a+') as fp: for f in files: if check_for_bbfiles: if filter: bbfile = os.path.basename(f).replace(filter,'.txt') else: bbfile = os.path.basename(f)[:-4]+'.txt' bbpath = os.path.join(bb_dir,bbfile) if os.path.exists(bbpath): fp.write(f+'\n') count +=1 else: print('bbfile {} describing {} not found'.format(bbpath,f)) else: fp.write(f+'\n') count += 1 print('wrote {} files to {}'.format(count,trainfile)) if split_to_test_and_train is not None: create_nn_imagelsts.split_to_trainfile_and_testfile(trainfile,fraction=split_to_test_and_train) def yolo_to_tgdict(txt_file=None,img_file=None,visual_output=False,img_suffix='.jpg',classlabels=constants.hls_yolo_categories,labels_dir_suffix=None,dont_write_blank=True): ''' format is <object-class> <x> <y> <width> <height> where x,y,w,h are relative to image width, height. It looks like x,y are bb center, not topleft corner - see voc_label.py in .convert(size,box) func :param txt_file: :return: a 'tgdict' which looks like { "dimensions_h_w_c": [360,640,3], "filename": "/data/olympics/olympics/9908661.jpg", "annotations": [ { "bbox_xywh": [89, 118, 64,44 ], "object": "car" } ... 
] } using convention that label dir is at same level as image dir and has 'labels' tacked on to end of dirname ''' # img_file = txt_file.replace('.txt','.png') logging.debug('yolo to tgdict {} {} '.format(txt_file,img_file)) if txt_file is None and img_file is None: logging.warning('yolo to tfdict got no txtfile nor imgfile') return if txt_file is not None and img_file is None: txt_dir = os.path.dirname(txt_file) par_dir = Utils.parent_dir(txt_file) if 'labels' in par_dir: img_dir = par_dir.replace('labels','') img_name = os.path.basename(txt_file).replace('.txt',img_suffix) img_file = os.path.join(img_dir,img_name) logging.debug('looking for image file '+img_file) elif img_file is not None and txt_file is None: img_dir = os.path.dirname(img_file) img_base = os.path.basename(img_file) par_dir = Utils.parent_dir(img_dir) logging.debug('pardir {} imgdir {}'.format(par_dir,img_dir)) if labels_dir_suffix: labels_dir = img_dir+labels_dir_suffix else: labels_dir = img_dir lbl_name = os.path.basename(img_file).replace('.jpg','.txt').replace('.png','.txt').replace('.jpeg','.txt') txt_file = os.path.join(labels_dir,lbl_name) elif img_file is not None and txt_file is not None: pass logging.info('lblfile {} imgfile {}'.format(txt_file,img_file)) img_arr = cv2.imread(img_file) if img_arr is None: logging.warning('problem reading {}, returning'.format(img_file)) return None image_h, image_w = img_arr.shape[0:2] result_dict = {} result_dict['filename']=img_file result_dict['dimensions_h_w_c']=img_arr.shape result_dict['annotations']=[] if not os.path.exists(txt_file): logging.warning('yolo2tgdict could not find {}, trying replacing "images" with "labels" '.format(txt_file)) #try alternate path replacing 'images' with 'labels' if 'images' in img_file: img_dir = os.path.dirname(img_file) img_base = os.path.basename(img_file) labels_dir = img_dir.replace('images','labels') lbl_name = os.path.basename(img_file).replace('.jpg','.txt').replace('.png','.txt') txt_file = os.path.join(labels_dir,lbl_name) if not os.path.exists(txt_file): logging.warning('yolo2tgdict could not find {}, returning '.format(txt_file)) return else: return with open(txt_file,'r') as fp: lines = fp.readlines() logging.debug('{} bbs found'.format(len(lines))) if lines == []: logging.warning('no lines in {}'.format(txt_file)) for line in lines: if line.strip()[0]=='#': logging.debug('got comment line') continue class_index,x,y,w,h = line.split() x_p=float(x) y_p=float(y) w_p=float(w) h_p=float(h) class_index = int(class_index) class_label = classlabels[class_index] x_center = int(x_p*image_w) y_center = int(y_p*image_h) w = int(w_p*image_w) h = int(h_p*image_h) x1 = x_center-w/2 x2 = x_center+w/2 y1 = y_center-h/2 y2 = y_center+h/2 logging.info('class {} x_c {} y_c {} w {} h {} x x1 {} y1 {} x2 {} y2 {}'.format(class_index,x_center,y_center,w,h,x1,y1,x2,y2)) if visual_output: cv2.rectangle(img_arr,(x1,y1),(x2,y2),color=[100,255,100],thickness=2) object_dict={} object_dict['bbox_xywh'] = [x1,y1,w,h] object_dict['object']=class_label result_dict['annotations'].append(object_dict) if visual_output: cv2.imshow('yolo2tgdict',img_arr) cv2.waitKey(0) return result_dict def tgdict_to_yolo(tg_dict,label_dir=None,classes=constants.hls_yolo_categories,yolo_trainfile='yolo_train.txt'): ''' changing save dir to be same as img dir input- dict in 'tg format' which is like this {'filename':'image423.jpg','annotations':[{'object':'person','bbox_xywh':[x,y,w,h]},{'object':'person','bbox_xywh':[x,y,w,h],'sId':104}], 
{'filename':'image423.jpg','annotations':[{'object':'person','bbox_xywh':[x,y,w,h]},{'object':'person','bbox_xywh':[x,y,w,h],'sId',105} That json can then be used to generate yolo or frcnn training files output : for yolo - https://pjreddie.com/darknet/yolo/ Darknet wants a .txt file for each image with a line for each ground truth object in the image that looks like: <object-class> <x> <y> <width> <height> where those are percentages... it looks like yolo makes an assumption abt where images and label files are, namely in parallel dirs named [whatever]images and [whatever]labels: e.g. JPEGImages labels and a train.txt file pointing to just the images - the label files are same names with .txt instead of .jpg also writes a line in the yolo_trainfile . This is all getting called by json_to_yolo :param img_path: :param bb_xywh: :param class_number: :param destination_dir: :return: ''' img_filename = tg_dict['filename'] annotations = tg_dict['annotations'] sid = None if 'sid' in tg_dict: sid = tg_dict['sId'] dims = tg_dict['dimensions_h_w_c'] im_h,im_w=(dims[0],dims[1]) logging.debug('writing yolo for file {}\nannotations {}'.format(img_filename,annotations)) if label_dir is None: label_dir = os.path.dirname(img_filename) label_name = os.path.basename(img_filename).replace('.png','.txt').replace('.jpg','.txt').replace('.jpeg','.txt') if label_name[-4:]!='.txt': logging.warning('did not replace suffix of {} with .txt'.format(img_filename)) label_path = os.path.join(label_dir,label_name) print('writing yolo to '+str(label_path)) with open(label_path,'w') as fp: for annotation in annotations: bb_xywh = annotation['bbox_xywh'] bb_yolo = imutils.xywh_to_yolo(bb_xywh,(im_h,im_w)) logging.info('dims {} bbxywh {} bbyolo {}'.format((im_w,im_h),bb_xywh,bb_yolo)) object = annotation['object'] class_number = classes.index(object) line = str(class_number)+' '+str(bb_yolo[0])+' '+str(bb_yolo[1])+' '+str(bb_yolo[2])+' '+str(bb_yolo[3])+'\n' fp.write(line) fp.close() Utils.ensure_file(yolo_trainfile) with open(yolo_trainfile,'a') as fp2: fp2.write(img_filename+'\n') fp2.close() def json_vietnam_to_yolo(jsonfile,split_to_test_and_train=True,label_dir=None,classes=constants.hls_yolo_categories,yolo_trainfile=None,check_dims=True,visual_output=True): ''' input- json dicts in 'vietname rmat' which is like this {"objects":[{"label":"Private Car","x_y_w_h":[1160,223,65,59]},{"label":"Private Car","x_y_w_h":[971,354,127,85]}],"image_path":"2017-07-06_09-24-24-995.jpeg","image_w_h":[1600,900]} output : for yolo - https://pjreddie.com/darknet/yolo/ looking like <object-class> <x> <y> <width> <height> where x,y,width,height are percentages... it looks like yolo makes an assumption abt where images and label files are, namely in parallel dirs named [whatever]images and [whatever]labels: e.g. 
JPEGImages labels and a train.txt file pointing to just the images - the label files are same names with .txt instead of .jpg :param img_path: :param bb_xywh: :param class_number: :param destination_dir: :return: ''' print('converting json annotations in '+jsonfile+' to yolo') with open(jsonfile,'r') as fp: vietnam_dict = json.load(fp) img_filename = vietnam_dict['image_path'] annotations = vietnam_dict['objects'] dims = vietnam_dict['image_w_h'] im_h,im_w=(dims[1],dims[0]) logging.debug('writing yolo for image {} hxw {}x{}\nannotations {} '.format(img_filename,im_h,im_w,annotations)) if check_dims or visual_output: if not os.path.isabs(img_filename): file_path = os.path.join(os.path.dirname(jsonfile),img_filename) else: file_path = img_filename if not os.path.exists(file_path): logging.warning('{} does not exist'.format(file_path)) img_arr = cv2.imread(file_path) if img_arr is None: logging.warning('could not find {}'.format(file_path)) return actual_h,actual_w = img_arr.shape[0:2] if actual_h!=im_h or actual_w != im_w: logging.warning('image dims hw {} {} dont match json {}'.format(actual_h,actual_w,im_h,im_w)) return if label_dir is None: img_parent = Utils.parent_dir(os.path.dirname(img_filename)) img_diralone = os.path.dirname(img_filename).split('/')[-1] label_diralone = img_diralone+'labels' # label_dir= os.path.join(img_parent,label_diralone) label_dir = os.path.dirname(img_filename) #keep labels and imgs in same dir, yolo is apparently ok with that print('using label dir {}'.format(label_dir)) Utils.ensure_dir(label_dir) # label_dir = os.path.join(img_parent,label_ext) logging.debug('yolo img parent {} labeldir {} imgalone {} lblalone {} '.format(img_parent,label_dir,img_diralone,label_diralone)) label_name = os.path.basename(img_filename).replace('.png','.txt').replace('.jpg','.txt').replace('.jpeg','.txt') if label_name[-4:]!='.txt': logging.warning('did not replace image suffix of {} with .txt'.format(img_filename)) return label_path = os.path.join(label_dir,label_name) print('writing label to '+str(label_path)) with open(label_path,'w') as fp: for annotation in annotations: bb_xywh = annotation['x_y_w_h'] bb_yolo = imutils.xywh_to_yolo(bb_xywh,(im_h,im_w)) object = annotation['label'] if not object in constants.vietnam_to_hls_map: logging.warning('{} not found in constants.vietname to hls map'.format(object)) raw_input('ret to cont') continue tg_object = constants.vietnam_to_hls_map[object] class_number = classes.index(tg_object) logging.debug('wxh {} bbxywh {} bbyolo {}\norigobj {} tgobj {} ind {}'.format((im_w,im_h),bb_xywh,bb_yolo,object,tg_object,class_number)) line = str(class_number)+' '+str(bb_yolo[0])+' '+str(bb_yolo[1])+' '+str(bb_yolo[2])+' '+str(bb_yolo[3])+'\n' fp.write(line) if visual_output: img_arr = imutils.bb_with_text(img_arr,bb_xywh,tg_object) if visual_output: cv2.imshow('image',img_arr) cv2.waitKey(0) cv2.destroyAllWindows() fp.close() if yolo_trainfile is None: return with open(yolo_trainfile,'a') as fp2: fp2.write(file_path+'\n') fp2.close() def vietnam_dir_to_yolo(dir,visual_output=False): json_files = [os.path.join(dir,f) for f in os.listdir(dir) if '.json' in f] yolo_trainfile = dir+'filelist.txt' Utils.ensure_file(yolo_trainfile) print('{} .json files in {}'.format(len(json_files),dir)) label_dir = dir for json_file in json_files: json_vietnam_to_yolo(json_file,yolo_trainfile=yolo_trainfile,label_dir=label_dir,visual_output=visual_output) create_nn_imagelsts.split_to_trainfile_and_testfile(yolo_trainfile) return yolo_trainfile def 
read_many_yolo_bbs(imagedir='/data/jeremy/image_dbs/hls/data.vision.ee.ethz.ch/left/',labeldir=None,img_filter='.png'): if labeldir is None: labeldir = os.path.join(Utils.parent_dir(imagedir),'labels') imgfiles = [f for f in os.listdir(imagedir) if img_filter in f] imgfiles = sorted(imgfiles) print('found {} files in {}, label dir {}'.format(len(imgfiles),imagedir,labeldir)) for f in imgfiles: bb_path = os.path.join(labeldir,f).replace(img_filter,'.txt') if not os.path.isfile(bb_path): print('{} not found '.format(bb_path)) continue image_path = os.path.join(imagedir,f) read_yolo_bbs(bb_path,image_path) def read_pascal_xml_write_yolo(dir='/media/jeremy/9FBD-1B00/hls_potential/voc2007/VOCdevkit/VOC2007',annotation_folder='Annotations',img_folder='JPEGImages', annotation_filter='.xml'): ''' nondestructive - if there are already label files these get added to not overwritten :param dir: :param annotation_folder: :param img_folder: :param annotation_filter: :return: ''' # classes = [ 'person','hat','backpack','bag','person_wearing_red_shirt','person_wearing_blue_shirt', # 'car','bus','truck','unattended_bag', 'bicycle', 'motorbike'] classes = constants.hls_yolo_categories annotation_dir = os.path.join(dir,annotation_folder) img_dir = os.path.join(dir,img_folder) annotation_files = [os.path.join(annotation_dir,f) for f in os.listdir(annotation_dir) if annotation_filter in f] listfilename = os.path.join(dir,'filelist.txt') list_file = open(listfilename, 'w') for annotation_file in annotation_files: success = convert_pascal_xml_annotation(annotation_file,classes) if success: print('found relevant class(es)') filenumber = os.path.basename(annotation_file).replace('.xml','') jpgpath = os.path.join(img_dir,str(filenumber)+'.jpg') list_file.write(jpgpath+'\n') def convert_pascal_xml_annotation(in_file,classes,labeldir=None): filenumber = os.path.basename(in_file).replace('.xml','') # in_file = open('VOCdevkit/VOC%s/Annotations/%s.xml'%(year, image_id)) if labeldir==None: parent_dir = Utils.parent_dir(os.path.dirname(in_file)) labeldir = os.path.join(parent_dir,'labels') Utils.ensure_dir(labeldir) out_filename = os.path.join(labeldir, filenumber+'.txt') print('in {} out {}'.format(in_file,out_filename)) tree=ET.parse(in_file) root = tree.getroot() size = root.find('size') w = int(size.find('width').text) h = int(size.find('height').text) success=False for obj in root.iter('object'): difficult = obj.find('difficult').text cls = obj.find('name').text if cls not in classes or int(difficult)==1: continue cls_id = classes.index(cls) xmlbox = obj.find('bndbox') b = (float(xmlbox.find('xmin').text), float(xmlbox.find('xmax').text), float(xmlbox.find('ymin').text), float(xmlbox.find('ymax').text)) bb = convert_x1x2y1y2_to_yolo((w,h), b) out_file = open(out_filename, 'a+') os.chmod(out_filename, 0o666) out_file.write(str(cls_id) + " " + " ".join([str(round(a,4)) for a in bb]) + '\n') # os.chmod(out_filename, 0o777) success = True return(success) def read_pascal_txt_write_yolo(dir='/media/jeremy/9FBD-1B00/hls_potential/voc2005_1/', annotation_folder='all_relevant_annotations',img_folder='all_relevant_images', annotation_filter='.txt',image_filter='.png',yolo_annotation_dir='labels'): ''' nondestructive - if there are already label files these get added to not overwritten :param dir: :param annotation_folder: :param img_folder: :param annotation_filter: :return: ''' # classes = [ 'person','hat','backpack','bag','person_wearing_red_shirt','person_wearing_blue_shirt', # 'car','bus','truck','unattended_bag', 
'bicycle', 'motorbike'] classes = constants.hls_yolo_categories annotation_dir = os.path.join(dir,annotation_folder) img_dir = os.path.join(dir,img_folder) annotation_files = [os.path.join(annotation_dir,f) for f in os.listdir(annotation_dir) if annotation_filter in f] listfilename = os.path.join(dir,'filelist.txt') list_file = open(listfilename, 'w') yolo_annotation_path = os.path.join(dir,yolo_annotation_dir) Utils.ensure_dir(yolo_annotation_path) for annotation_file in annotation_files: out_filename=os.path.join(yolo_annotation_path,os.path.basename(annotation_file)) print('outfile'+out_filename) success = convert_pascal_txt_annotation(annotation_file,classes,out_filename) if success: print('found relevant class(es)') filename = os.path.basename(annotation_file).replace(annotation_filter,'') img_dir = os.path.join(dir,img_folder) imgpath = os.path.join(img_dir,str(filename)+image_filter) list_file.write(imgpath+'\n') def convert_pascal_txt_annotation(in_file,classes,out_filename): print('in {} out {}'.format(in_file,out_filename)) with open(in_file,'r') as fp: lines = fp.readlines() for i in range(len(lines)): if 'Image filename' in lines[i]: imfile=lines[i].split()[3] print('imfile:'+imfile) # path = Utils.parent_dir(os.path.basename(in_file)) # if path.split('/')[-1] != 'Annotations': # path = Utils.parent_dir(path) # print('path to annotation:'+str(path)) # img_path = os.path.join(path,imfile) # print('path to img:'+str(img_path)) # img_arr = cv2.imread(img_path) if 'Image size' in lines[i]: nums = re.findall('\d+', lines[i]) print(lines[i]) print('nums'+str(nums)) w = int(nums[0]) h = int(nums[1]) print('h {} w {}'.format(h,w)) if '# Details' in lines[i] : object = lines[i].split()[5].replace('(','').replace(')','').replace('"','') nums = re.findall('\d+', lines[i+2]) print('obj {} nums {}'.format(object,nums)) success=False cls_id = tg_class_from_pascal_class(object,classes) if cls_id is not None: print('class index '+str(cls_id)+' '+classes[cls_id]) success=True if not success: print('NO RELEVANT CLASS FOUND') continue b = (int(nums[1]), int(nums[3]), int(nums[2]), int(nums[4])) #file has xmin ymin xmax ymax print('bb_x1x2y1y2:'+str(b)) bb = convert_x1x2y1y2_to_yolo((w,h), b) print('bb_yolo'+str(bb)) if os.path.exists(out_filename): append_write = 'a' # append if already exists else: append_write = 'w' # make a new file if not out_file = open(out_filename, append_write) # os.chmod(out_filename, 0o666) # out_file.write(str(cls_id) + " " + " ".join([str(round(a,4)) for a in bb]) + '\n') # os.chmod(out_filename, 0o777) success = True return(success) def tgdict_to_api_dict(tgdict): ''' convert a tgdict in format { "dimensions_h_w_c": [360,640,3], "filename": "/data/olympics/olympics/9908661.jpg", "annotations": [ { "bbox_xywh": [89, 118, 64,44 ], "object": "car" } ... 
] } to an api dict (returned by our api ) in format {"data": [{"confidence": 0.366, "object": "car", "bbox": [394, 49, 486, 82]}, {"confidence": 0.2606, "object": "car", "bbox": [0, 116, 571, 462]}, where bbox is [xmin,ymin,xmax,ymax] aka [x1,y1,x2,y2] :param tgdict: :return: ''' apidict={} apidict['data'] = [] for annotation in tgdict['annotations']: bb=annotation['bbox_xywh'] object=annotation['object'] api_entry={} api_entry['confidence']=None #tgdict doesnt have this, generally its a gt so its 100% api_entry['object']=object api_entry['bbox']=[bb[0],bb[1],bb[0]+bb[2],bb[1]+bb[3]] #api bbox is [xmin,ymin,xmax,ymax] aka [x1,y1,x2,y2] apidict['data'].append(api_entry) return apidict def tg_class_from_pascal_class(pascal_class,tg_classes): #hls_yolo_categories = [ 'person','hat','backpack','bag','person_wearing_red_shirt','person_wearing_blue_shirt', # 'car','bus','truck','unattended_bag', 'bicycle', 'motorbike'] conversions = {'bike':'bicycle', 'motorcycle':'motorbike'} #things that have names different than tg names #(forced to do this since e.g. bike and bicycle are both used in VOC) for tg_class in tg_classes: if tg_class in pascal_class: tg_ind = tg_classes.index(tg_class) return tg_ind for pascal,tg in conversions.iteritems(): if pascal in pascal_class: tg_ind = tg_classes.index(tg) return tg_ind return None def json_to_yolo(jsonfile,split_to_test_and_train=True): ''' input- json arr of dicts in 'tg format' which is like this {'filename':'image423.jpg','annotations':[{'object':'person','bbox_xywh':[x,y,w,h]},{'object':'person','bbox_xywh':[x,y,w,h]}], output : for yolo - https://pjreddie.com/darknet/yolo/ looking lie <object-class> <x> <y> <width> <height> where x,y,width,height are percentages... it looks like yolo makes an assumption abt where images and label files are, namely in parallel dirs named [whatever]images and [whatever]labels: e.g. 
JPEGImages labels and a train.txt file pointing to just the images - the label files are same names with .txt instead of .jpg :param img_path: :param bb_xywh: :param class_number: :param destination_dir: :return: ''' print('converting json annotations in '+jsonfile+' to yolo') trainfile = 'yolo_train.txt' with open(jsonfile,'r') as fp: annotation_list = json.load(fp) for tg_dict in annotation_list: tgdict_to_yolo(tg_dict,yolo_trainfile=trainfile) create_nn_imagelsts.split_to_trainfile_and_testfile(trainfile) def autti_txt_to_yolo(autti_txt='/media/jeremy/9FBD-1B00/image_dbs/hls/object-dataset/labels.csv'): #to deal with driving file from autti # wget http://bit.ly/udacity-annotations-autti all_annotations = txt_to_tgdict(txtfile=autti_txt,image_dir=None,parsemethod=parse_autti) for tg_dict in all_annotations: tgdict_to_yolo(tg_dict) json_name = autti_txt.replace('.csv','.json') inspect_json(json_name) def udacity_csv_to_yolo(udacity_csv='/media/jeremy/9FBD-1B00/image_dbs/hls/object-detection-crowdai/labels.csv'): # to deal with driving file from udacity - # wget http://bit.ly/udacity-annoations-crowdai all_annotations = csv_to_tgdict(udacity_csv=udacity_csv,parsemethod=parse_udacity) for tg_dict in all_annotations: tgdict_to_yolo(tg_dict) json_name = udacity_csv.replace('.csv','.json') inspect_json(json_name) def parse_udacity(row): xmin=int(row['xmin']) xmax=int(row['ymin']) ymin=int(row['xmax']) ymax=int(row['ymax']) frame=row['Frame'] #aka filename label=row['Label'] label=label.lower() preview_url=row['Preview URL'] tg_object=convert_udacity_label_to_tg(label) if tg_object is None: #label didnt get xlated so its something we dont care about e.g streetlight print('object {} is not of interest'.format(label)) return xmin,xmax,ymin,ymax,frame,tg_object def parse_autti(row,delimiter=' '): #these parse guys should also have the translator (whatever classes into tg classes #autti looks like this # 178019968680240537.jpg 888 498 910 532 0 "trafficLight" "Red" # 1478019969186707568.jpg 404 560 540 650 0 "car" elements = row.split(delimiter) filename=elements[0] xmin=int(elements[1]) ymin=int(elements[2]) xmax=int(elements[3]) ymax=int(elements[4]) #something i'm ignoring in row[5] label=elements[6].replace('"','').replace("'","").replace('\n','').replace('\t','') label=label.lower() assert(xmin<xmax) assert(ymin<ymax) tg_object=convert_udacity_label_to_tg(label) if tg_object is None: #label didnt get xlated so its something we dont care about e.g streetlight print('object {} is not of interest'.format(label)) return xmin,xmax,ymin,ymax,filename,tg_object def convert_kyle(dir='/home/jeremy/Dropbox/tg/hls_tagging/person_wearing_backpack/annotations',filter='.txt'): ''' run yolo on a dir having gt from kyle or elsewhere, get yolo and compare :param dir: :return: ''' gts = [os.path.join(dir,f) for f in dir if filter in f] for gt_file in gts: yolodict = read_various_training_formats.kyle_dicts_to_yolo() def kyle_dicts_to_yolo(dir='/data/jeremy/image_dbs/hls/kyle/person_wearing_hat/annotations_hat',visual_output=True): ''' convert from kyles mac itunes-app generated dict which looks like { "objects" : [ { "label" : "person", "x_y_w_h" : [ 29.75364, 16.1669, 161.5282, 236.6785 ] }, { "label" : "hat", "x_y_w_h" : [ 58.17136, 16.62691, 83.0643, 59.15696 ] } ], "image_path" : "\/Users\/kylegiddens\/Desktop\/ELBIT\/person_wearing_hat\/images1.jpg", "image_w_h" : [ 202, 250 ] } to tgformat (while at it write to json) which looks like [ { "dimensions_h_w_c": [360,640,3], "filename": 
"/data/olympics/olympics/9908661.jpg" "annotations": [ { "bbox_xywh": [89, 118, 64,44 ], "object": "car" } ], }, ... and use tgdict_to_yolo(tg_dict,label_dir=None,classes=constants.hls_yolo_categories) to finally write yolo trainfiles :param jsonfile: :return: ''' jsonfiles = [os.path.join(dir,f) for f in os.listdir(dir) if '.json' in f] all_tgdicts = [] images_dir = Utils.parent_dir(dir) for jsonfile in jsonfiles: with open(jsonfile,'r') as fp: kyledict = json.load(fp) print(kyledict) tgdict = {} basefile = os.path.basename(kyledict['image_path']) tgdict['filename'] = os.path.join(images_dir,basefile) print('path {} base {} new {}'.format(kyledict['image_path'],basefile,tgdict['filename'])) img_arr=cv2.imread(tgdict['filename']) if img_arr is None: print('COULDNT GET IMAGE '+tgdict['filename']) # tgdict['dimensions_h_w_c']=kyledict['image_w_h'] # tgdict['dimensions_h_w_c'].append(3) #add 3 chans to tgdict tgdict['dimensions_h_w_c'] = img_arr.shape print('tg dims {} kyle dims {}'.format(tgdict['dimensions_h_w_c'],kyledict['image_w_h'])) tgdict['annotations']=[] for kyle_object in kyledict['objects']: tg_annotation_dict={} tg_annotation_dict['object']=kyle_object['label'] tg_annotation_dict['bbox_xywh']=[int(round(x)) for x in kyle_object['x_y_w_h']] tgdict['annotations'].append(tg_annotation_dict) if visual_output: imutils.bb_with_text(img_arr,tg_annotation_dict['bbox_xywh'],tg_annotation_dict['object']) print(tgdict) if visual_output: cv2.imshow('bboxes',img_arr) cv2.waitKey(0) all_tgdicts.append(tgdict) tgdict_to_yolo(tgdict,label_dir=None,classes=constants.hls_yolo_categories) json_out = os.path.join(images_dir,'annotations.json') with open(json_out,'w') as fp: json.dump(all_tgdicts,fp,indent=4) fp.close() def csv_to_tgdict(udacity_csv='/media/jeremy/9FBD-1B00/image_dbs/hls/object-dataset/labels.csv',image_dir=None,classes=constants.hls_yolo_categories,visual_output=False,manual_verification=False,jsonfile=None,parsemethod=parse_udacity,delimiter='\t',readmode='r'): ''' read udaicty csv to grab files here https://github.com/udacity/self-driving-car/tree/master/annotations pedestrians, cars, trucks (and trafficlights in second one) udacity file looks like: xmin,ymin,xmax,ymax,Frame,Label,Preview URL 785,533,905,644,1479498371963069978.jpg,Car,http://crowdai.com/images/Wwj-gorOCisE7uxA/visualize create the 'usual' tg dict for bb's , also write to json while we're at it [ { "dimensions_h_w_c": [360,640,3], "filename": "/data/olympics/olympics/9908661.jpg" "annotations": [ { "bbox_xywh": [89, 118, 64,44 ], "object": "car" } ], }, ... :param udacity_csv: :param label_dir: :param classes: :return: ''' #todo this can be combined with the txt_to_tgdict probably, maybe usin csv.reader instead of csv.dictread # spamreader = csv.reader(csvfile, delimiter=' ', quotechar='|') #... for row in spamreader: #... 
print ', '.join(row) all_annotations = [] if image_dir is None: image_dir = os.path.dirname(udacity_csv) print('opening udacity csv file {} '.format(udacity_csv)) # with open(udacity_csv, newline='') as file: with open(udacity_csv,readmode) as file: # with open('eggs.csv', newline='') as csvfile: ## reader = csv.DictReader(file,delimiter=delimiter, quotechar='|') reader = csv.DictReader(file) n_rows = 0 max_annotations=10**10 for row in reader: n_rows += 1 print('row'+str(row)) try: xmin,xmax,ymin,ymax,filename,label=parsemethod(row) print('file {} xmin {} ymin {} xmax {} ymax {} object {}'.format(filename,xmin,ymin,xmax,ymax,label)) except: print('trouble getting row '+str(row)) continue try: assert(xmax>xmin) assert(ymax>ymin) except: print('problem with order of x/y min/max') print('xmin {} ymin {} xmax {} ymax {} '.format(xmin,ymin,xmax,ymax)) xmint=min(xmin,xmax) xmax=max(xmin,xmax) xmin=xmint ymint=min(ymin,ymax) ymax=max(ymin,ymax) ymin=ymint bb = [xmin,ymin,xmax-xmin,ymax-ymin] #xywh if image_dir is not None: full_name = os.path.join(image_dir,filename) else: full_name = filename im = cv2.imread(full_name) if im is None: print('couldnt open '+full_name) continue im_h,im_w=im.shape[0:2] annotation_dict = {} annotation_dict['filename']=full_name annotation_dict['annotations']=[] annotation_dict['dimensions_h_w_c'] = im.shape #check if file has already been seen and a dict started, if so use that instead file_already_in_json = False #this is prob a stupid slow way to check for a in all_annotations: if a['filename'] == full_name: annotation_dict=a file_already_in_json = True break # print('im_w {} im_h {} bb {} label {}'.format(im_w,im_h,bb,label)) object_dict={} object_dict['bbox_xywh'] = bb object_dict['object']=label if visual_output or manual_verification: im = imutils.bb_with_text(im,bb,label) magnify = 1 im = cv2.resize(im,(int(magnify*im_w),int(magnify*im_h))) cv2.imshow('full',im) if not manual_verification: cv2.waitKey(5) else: print('(a)ccept , any other key to not accept') k=cv2.waitKey(0) if k == ord('a'): annotation_dict['annotations'].append(object_dict) else: continue #dont add bb to list, go to next csv line if not manual_verification: annotation_dict['annotations'].append(object_dict) # print('annotation dict:'+str(annotation_dict)) if not file_already_in_json: #add new file to all_annotations all_annotations.append(annotation_dict) else: #update current annotation with new bb for a in all_annotations: if a['filename'] == full_name: a=annotation_dict # print('annotation dict:'+str(annotation_dict)) print('# files:'+str(len(all_annotations))) if len(all_annotations)>max_annotations: break # for debugging, these files are ginormous # raw_input('ret to cont') if jsonfile == None: jsonfile = udacity_csv.replace('.csv','.json') with open(jsonfile,'w') as fp: json.dump(all_annotations,fp,indent=4) fp.close() return all_annotations def txt_to_tgdict(txtfile='/media/jeremy/9FBD-1B00/image_dbs/hls/object-dataset/labels.csv',image_dir=None,classes=constants.hls_yolo_categories,visual_output=False,manual_verification=False,jsonfile=None,parsemethod=parse_autti,wait=1): ''' read udaicty csv to grab files here https://github.com/udacity/self-driving-car/tree/master/annotations pedestrians, cars, trucks (and trafficlights in second one) udacity file looks like: xmin,ymin,xmax,ymax,Frame,Label,Preview URL 785,533,905,644,1479498371963069978.jpg,Car,http://crowdai.com/images/Wwj-gorOCisE7uxA/visualize create the 'usual' tg dict for bb's , also write to json while we're at it [ { 
"dimensions_h_w_c": [360,640,3], "filename": "/data/olympics/olympics/9908661.jpg" "annotations": [ { "bbox_xywh": [89, 118, 64,44 ], "object": "car" } ], }, ... :param udacity_csv: :param label_dir: :param classes: :return: ''' all_annotations = [] if image_dir is None: image_dir = os.path.dirname(txtfile) print('opening udacity csv file {} '.format(txtfile)) with open(txtfile, "r") as file: lines = file.readlines() for row in lines: # print(row) try: xmin,xmax,ymin,ymax,filename,label=parsemethod(row) print('file {} xmin {} ymin {} xmax {} ymax {} object {}'.format(filename,xmin,ymin,xmax,ymax,label)) if label is None: continue except: print('trouble getting row '+str(row)) continue try: assert(xmax>xmin) assert(ymax>ymin) except: print('problem with order of x/y min/max') print('xmin {} ymin {} xmax {} ymax {} '.format(xmin,ymin,xmax,ymax)) xmint=min(xmin,xmax) xmax=max(xmin,xmax) xmin=xmint ymint=min(ymin,ymax) ymax=max(ymin,ymax) ymin=ymint if image_dir is not None: full_name = os.path.join(image_dir,filename) else: full_name = filename im = cv2.imread(full_name) if im is None: print('couldnt open '+full_name) continue im_h,im_w=im.shape[0:2] annotation_dict = {} bb = [xmin,ymin,xmax-xmin,ymax-ymin] #xywh annotation_dict['filename']=full_name annotation_dict['annotations']=[] annotation_dict['dimensions_h_w_c'] = im.shape #check if file has already been seen and a dict started, if so use that instead file_already_in_json = False #this is prob a stupid slow way to check for a in all_annotations: if a['filename'] == full_name: annotation_dict=a file_already_in_json = True break object_dict={} object_dict['bbox_xywh'] = bb object_dict['object']=label if visual_output or manual_verification: im = imutils.bb_with_text(im,bb,label) magnify = 1 im = cv2.resize(im,(int(magnify*im_w),int(magnify*im_h))) cv2.imshow('full',im) if not manual_verification: cv2.waitKey(wait) else: print('(a)ccept , any other key to not accept') k=cv2.waitKey(0) if k == ord('a'): annotation_dict['annotations'].append(object_dict) else: continue #dont add bb to list, go to next csv line if not manual_verification: annotation_dict['annotations'].append(object_dict) # print('annotation dict:'+str(annotation_dict)) if not file_already_in_json: #add new file to all_annotations all_annotations.append(annotation_dict) else: #update current annotation with new bb for a in all_annotations: if a['filename'] == full_name: a=annotation_dict # print('annotation dict:'+str(annotation_dict)) print('# files:'+str(len(all_annotations))) # raw_input('ret to cont') if jsonfile == None: jsonfile = txtfile.replace('.csv','.json').replace('.txt','.json') with open(jsonfile,'w') as fp: json.dump(all_annotations,fp,indent=4) fp.close() return all_annotations def convert_udacity_label_to_tg(udacity_label): # hls_yolo_categories = ['person','person_wearing_hat','person_wearing_backpack','person_holding_bag', # 'man_with_red_shirt','man_with_blue_shirt', # 'car','van','truck','unattended_bag'] #udacity: Car Truck Pedestrian conversions = {'pedestrian':'person', 'car':'car', 'truck':'truck'} if not udacity_label in conversions: print('!!!!!!!!!! 
did not find {} in conversions from udacity to tg cats !!!!!!!!'.format(udacity_label)) # raw_input('!!') return(None) tg_description = conversions[udacity_label] return(tg_description) def convert_x1x2y1y2_to_yolo(size, box): dw = 1./(size[0]) dh = 1./(size[1]) x = (box[0] + box[1])/2.0 - 1 y = (box[2] + box[3])/2.0 - 1 w = box[1] - box[0] h = box[3] - box[2] x = x*dw w = w*dw y = y*dh h = h*dh return (x,y,w,h) def convert_deepfashion_helper(line,labelfile,dir_to_catlist,visual_output,pardir): global frequencies if not '.jpg' in line: return #first and second lines are metadata with open(labelfile,'a+') as fp2: image_name,x1,y1,x2,y2 = line.split() x1=int(x1) x2=int(x2) y1=int(y1) y2=int(y2) # print('file {} x1 {} y1 {} x2 {} y2 {}'.format(image_name,x1,y2,x2,y2)) image_dir = Utils.parent_dir(image_name) image_dir = image_dir.split('/')[-1] tgcat = create_nn_imagelsts.deepfashion_folder_to_cat(dir_to_catlist,image_dir) if tgcat is None: print('got no tg cat fr '+str(image_dir)) return if not tgcat in constants.trendi_to_pixlevel_v3_map: print('didnt get cat for {} {}'.format(tgcat,line)) return # if not(tgcat is 'lower_cover_long_items' or tgcat is 'lower_cover_short_items' or tgcat is 'bag' or tgcat is 'belt'): # return pixlevel_v3_cat = constants.trendi_to_pixlevel_v3_map[tgcat] pixlevel_v3_index = constants.pixlevel_categories_v3.index(pixlevel_v3_cat) frequencies[pixlevel_v3_index]+=1 print('freq '+str(frequencies)) print('tgcat {} v3cat {} index {}'.format(tgcat,pixlevel_v3_cat,pixlevel_v3_index)) image_path = os.path.join(pardir,image_name) img_arr=cv2.imread(image_path) mask,img_arr2 = grabcut_bb(img_arr,[x1,y1,x2,y2]) # make new img with extraneous removed if(visual_output): cv2.imshow('after gc',img_arr2) # cv2.rectangle(img_arr,(x1,y1),(x2,y2),color=[100,255,100],thickness=2) cv2.imshow('orig',img_arr) cv2.waitKey(0) mask = np.where((mask!=0),1,0).astype('uint8') * pixlevel_v3_index #mask should be from (0,1) but just in case... skin_index = constants.pixlevel_categories_v3.index('skin') skin_mask = kassper.skin_detection_fast(img_arr) * skin_index mask2 =
np.where(skin_mask!=0,skin_mask,mask)
numpy.where
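# Standalone hedged sketch of the YOLO label convention described repeatedly in the
# docstrings above ("<object-class> <x> <y> <width> <height>", center-based and given
# as fractions of the image size), not the trendi.utils.imutils implementation. The
# function names, the class index 0 and the sample bbox are illustrative assumptions;
# the bbox and image size are taken from the tgdict example in the docstrings.

def xywh_to_yolo_line(class_idx, bbox_xywh, image_h, image_w):
    # pixel [x_topleft, y_topleft, w, h] -> "class x_center y_center w h" in fractions
    x, y, w, h = [float(v) for v in bbox_xywh]
    x_c = (x + w / 2.0) / image_w
    y_c = (y + h / 2.0) / image_h
    return '{} {:.4f} {:.4f} {:.4f} {:.4f}'.format(
        class_idx, x_c, y_c, w / image_w, h / image_h)

def yolo_line_to_xywh(line, image_h, image_w):
    # inverse mapping back to pixel [x_topleft, y_topleft, w, h]
    cls, x_c, y_c, w, h = line.split()
    w_px, h_px = float(w) * image_w, float(h) * image_h
    x1 = float(x_c) * image_w - w_px / 2.0
    y1 = float(y_c) * image_h - h_px / 2.0
    return int(cls), [int(round(x1)), int(round(y1)), int(round(w_px)), int(round(h_px))]

# round trip on the example bbox [89, 118, 64, 44] in a 640x360 image
line = xywh_to_yolo_line(0, [89, 118, 64, 44], image_h=360, image_w=640)
print(line, yolo_line_to_xywh(line, 360, 640))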
# -*- coding: utf-8 -*- """ Created on Wed Oct 22 11:35:00 2014 @author: <NAME> """ import os import inspect import warnings import sympy as sp from sympy import sin, cos, exp import numpy as np import scipy as sc import scipy.integrate import symbtools as st from symbtools import lzip try: import control except ImportError: control = None from symbtools.test import unittesthelper as uth import unittest from symbtools.test import test_core1 from symbtools.test import test_time_deriv from symbtools.test import test_pickle_tools uth.inject_tests_into_namespace(globals(), test_time_deriv) uth.inject_tests_into_namespace(globals(), test_core1) def make_abspath(*args): """ returns new absolute path, basing on the path of this module """ current_dir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe()))) return os.path.join(current_dir, *args) # Avoid warnings of undefined symbols from the IDE, # but still make use of st.make_global x1 = x2 = x3 = x4 = None y1 = y2 = y3 = None a1 = z4 = z7 = z10 = None # noinspection PyShadowingNames,PyPep8Naming,PySetFunctionToLiteral class InteractiveConvenienceTest(unittest.TestCase): def setUp(self): pass def test_no_IPS_call(self): """ test whether there is some call to interactive IPython (legacy from debugging) """ srclines = inspect.getsourcelines(st)[0] def filter_func(tup): idx, line = tup return 'IPS()' in line and not line.strip()[0] == '#' res = list(filter(filter_func, enumerate(srclines, 1))) self.assertEqual(res, []) def test_symbol_atoms(self): a, b, t = sp.symbols("a, b, t") x1 = a + b x2 = a + b - 3 + sp.pi M1 = sp.Matrix([x2, t, a**2]) M2 = sp.ImmutableDenseMatrix(M1) self.assertEqual(set([a]), a.s) self.assertEqual(x1.atoms(), x1.s) self.assertEqual(x2.atoms(sp.Symbol), x2.s) self.assertEqual(set([a, b, t]), M1.s) self.assertEqual(set([a, b, t]), M2.s) def test_count_ops(self): a, b, t = sp.symbols("a, b, t") x1 = a + b x2 = a + b - 3 + sp.pi M1 = sp.Matrix([x2, t, a**2]) M2 = sp.ImmutableDenseMatrix(M1) self.assertEqual(st.count_ops(a), a.co) self.assertEqual(st.count_ops(x1), x1.co) self.assertEqual(st.count_ops(x2), x2.co) self.assertEqual(st.count_ops(M1), M1.co) self.assertEqual(st.count_ops(M2), M2.co) def test_count_ops2(self): a, b, t = sp.symbols("a, b, t") x1 = a + b x2 = a + b - 3 + sp.pi M1 = sp.Matrix([x2, t, a**2, 0, 1]) M2 = sp.ImmutableDenseMatrix(M1) self.assertEqual(st.count_ops(0), 0) self.assertEqual(st.count_ops(a), 1) self.assertEqual(st.count_ops(1.3), 1) self.assertEqual(st.count_ops(x1), 2) self.assertEqual(st.count_ops(x2), 4) self.assertEqual(st.count_ops(M1), sp.Matrix([4, 1, 2, 0, 1])) self.assertEqual(st.count_ops(M2), sp.Matrix([4, 1, 2, 0, 1])) def test_srn(self): x, y, z = xyz = st.symb_vector('x, y, z') st.random.seed(3319) self.assertAlmostEqual(x.srn01, 0.843044195656457) st.random.seed(3319) x_srn = x.srn self.assertNotAlmostEqual(x_srn, 8.59) self.assertAlmostEqual(x_srn, 8.58739776090811) # now apply round st.random.seed(3319) self.assertAlmostEqual(x.srnr, 8.59) # test compatibility with sp.Matrix # the order might depend on the platform (due to dict ordering) expected_res = [5.667115517927374668261109036393463611602783203125, 7.76957198624519962404377793063758872449398040771484375, 8.58739776090810946751474830307415686547756195068359375] st.random.seed(3319) xyz_srn = list(xyz.srn) xyz_srn.sort() for a, b in zip(xyz_srn, expected_res): self.assertAlmostEqual(a, b) # should live in a separate test !! 
st.random.seed(3319) # ensure that application to matrix does raise exception _ = xyz.srnr test_matrix = sp.Matrix(expected_res) rounded_res = sp.Matrix([[5.667], [ 7.77], [8.587]]) self.assertNotEqual(test_matrix, rounded_res) self.assertEqual(test_matrix.ar, rounded_res) def test_subz(self): x1, x2, x3 = xx = sp.Matrix(sp.symbols("x1, x2, x3")) y1, y2, y3 = yy = sp.symbols("y1, y2, y3") a = x1 + 7*x2*x3 M1 = sp.Matrix([x2, x1*x2, x3**2]) M2 = sp.ImmutableDenseMatrix(M1) self.assertEqual(x1.subs(lzip(xx, yy)), x1.subz(xx, yy)) self.assertEqual(a.subs(lzip(xx, yy)), a.subz(xx, yy)) self.assertEqual(M1.subs(lzip(xx, yy)), M1.subz(xx, yy)) self.assertEqual(M2.subs(lzip(xx, yy)), M2.subz(xx, yy)) def test_smplf(self): x1, x2, x3 = xx = sp.Matrix(sp.symbols("x1, x2, x3")) y1, y2, y3 = yy = sp.symbols("y1, y2, y3") a = x1**2*(x2/x1 + 7) - x1*x2 M1 = sp.Matrix([sin(x1)**2 + cos(x1)**2, a, x3]) self.assertEqual(M1.smplf, sp.simplify(M1)) self.assertEqual(a.smplf, sp.simplify(a)) def test_subz0(self): x1, x2, x3 = xx = st.symb_vector("x1, x2, x3") y1, y2, y3 = yy = st.symb_vector("y1, y2, y3") XX = (x1, x2) a = x1 + 7*x2*x3 M1 = sp.Matrix([x2, x1*x2, x3**2]) M2 = sp.ImmutableDenseMatrix(M1) self.assertEqual(x1.subs(st.zip0(XX)), x1.subz0(XX)) self.assertEqual(a.subs(st.zip0(XX)), a.subz0(XX)) self.assertEqual(M1.subs(st.zip0(XX)), M1.subz0(XX)) self.assertEqual(M2.subs(st.zip0(XX)), M2.subz0(XX)) konst = sp.Matrix([1,2,3]) zz = konst + xx + 5*yy self.assertEqual(zz.subz0(xx, yy), konst) # noinspection PyShadowingNames,PyPep8Naming,PySetFunctionToLiteral class LieToolsTest(unittest.TestCase): def setUp(self): pass def test_involutivity_test(self): x1, x2, x3 = xx = st.symb_vector('x1:4') st.make_global(xx) # not involutive f1 = sp.Matrix([x2*x3 + x1**2, 3*x1, 4 + x2*x3]) f2 = sp.Matrix([x3 - 2*x1*x3, x2 - 5, 3 + x1*x2]) dist1 = st.col_stack(f1, f2) # involutive f3 = sp.Matrix([-x2, x1, 0]) f4 = sp.Matrix([0, -x3, x2]) dist2 = st.col_stack(f3, f4) res, fail = st.involutivity_test(dist1, xx) self.assertFalse(res) self.assertEqual(fail, (0, 1)) res2, fail2 = st.involutivity_test(dist2, xx) self.assertTrue(res2) self.assertEqual(fail2, []) def test_lie_deriv_cartan(self): x1, x2, x3 = xx = sp.symbols('x1:4') u1, u2 = uu = sp.Matrix(sp.symbols('u1:3')) # ordinary lie_derivative # source: inspired by the script of Prof. 
Kugi (TU-Wien) f = sp.Matrix([-x1**3, cos(x1)*cos(x2), x2]) g = sp.Matrix([cos(x2), 1, exp(x1)]) h = x3 Lfh = x2 Lf2h = f[1] Lgh = exp(x1) res1 = st.lie_deriv_cartan(h, f, xx) res2 = st.lie_deriv_cartan(h, f, xx, order=2) self.assertEqual(res1, Lfh) self.assertEqual(res2, Lf2h) # incorporating the input h2 = u1 udot1, udot2 = uudot = st.time_deriv(uu, uu, order=1) uddot1, uddot2 = st.time_deriv(uu, uu, order=2) res_a1 = st.lie_deriv_cartan(h2, f, xx, uu, order=1) res_a2 = st.lie_deriv_cartan(h2, f, xx, uu, order=2) self.assertEqual(res_a1, udot1) self.assertEqual(res_a2, uddot1) res_a3 = st.lie_deriv_cartan(udot1, f, xx, [uu, uudot], order=1) self.assertEqual(res_a3, uddot1) # more complex examples h3 = x3 + u1 fg = f + g * u2 res_b1 = st.lie_deriv_cartan(h3, fg, xx, uu, order=1) res_b2 = st.lie_deriv_cartan(h3, fg, xx, uu, order=2) res_b3 = st.lie_deriv_cartan(res_b1, fg, xx, [uu, uudot], order=1) self.assertEqual(res_b1, Lfh + Lgh*u2 + udot1) self.assertEqual(sp.expand(res_b2 - res_b3), 0) h4 = x3 * sin(x2) fg = f + g * u2 res_c1 = st.lie_deriv_cartan(h4, fg, xx, uu, order=1) res_c2 = st.lie_deriv_cartan(res_c1, fg, xx, uu, order=1) res_c3 = st.lie_deriv_cartan(h4, fg, xx, uu, order=2) self.assertEqual(sp.expand(res_c2 - res_c3), 0) def test_lie_deriv(self): xx = st.symb_vector('x1:4') st.make_global(xx) f = sp.Matrix([x1 + x3*x2, 7*exp(x1), cos(x2)]) h1 = x1**2 + sin(x3)*x2 res1 = st.lie_deriv(h1, f, xx) eres1 = 2*x1**2 + 2*x1*x2*x3 + 7*exp(x1)*sin(x3) + x2*cos(x2)*cos(x3) self.assertEqual(res1.expand(), eres1) res2a = st.lie_deriv(h1, f, xx, order=2).expand() res2b = st.lie_deriv(h1, f, xx, 2).expand() eres2 = st.lie_deriv(eres1, f, xx).expand() self.assertEqual(res2a, eres2) self.assertEqual(res2b, eres2) res2c = st.lie_deriv(h1, f, f, xx).expand() res2d = st.lie_deriv(h1, f, f, xx=xx).expand() self.assertEqual(res2c, eres2) self.assertEqual(res2d, eres2) F = f[:-1, :] with self.assertRaises(ValueError) as cm: # different lengths of vectorfields: res1 = st.lie_deriv(h1, F, f, xx) # noinspection PyTypeChecker def test_lie_bracket(self): xx = st.symb_vector('x1:4') st.make_global(xx) fx = sp.Matrix([[(x2 - 1)**2 + 1/x3], [x1 + 7], [-x3**2*(x2 - 1)]]) v = sp.Matrix([[0], [0], [-x3**2]]) dist = st.col_stack(v, st.lie_bracket(-fx, v, xx), st.lie_bracket(-fx, v, xx, order=2)) v0, v1, v2 = st.col_split(dist) self.assertEqual(v1, sp.Matrix([1, 0, 0])) self.assertEqual(v2, sp.Matrix([0, 1, 0])) self.assertEqual(st.lie_bracket(fx, fx, xx), sp.Matrix([0, 0, 0])) def test_lie_deriv_covf(self): xx = st.symb_vector('x1:4') st.make_global(xx) # we test this by building the observability matrix with two different but equivalent approaches f = sp.Matrix([x1 + x3*x2, 7*exp(x1), cos(x2)]) y = x1**2 + sin(x3)*x2 ydot = st.lie_deriv(y, f, xx) yddot = st.lie_deriv(ydot, f, xx) cvf1 = st.gradient(y, xx) cvf2 = st.gradient(ydot, xx) cvf3 = st.gradient(yddot, xx) # these are the rows of the observability matrix # second approach dh0 = cvf1 dh1 = st.lie_deriv_covf(dh0, f, xx) dh2a = st.lie_deriv_covf(dh1, f, xx) dh2b = st.lie_deriv_covf(dh0, f, xx, order=2) zero = dh0*0 self.assertEqual((dh1 - cvf2).expand(), zero) self.assertEqual((dh2a - cvf3).expand(), zero) self.assertEqual((dh2b - cvf3).expand(), zero) # noinspection PyShadowingNames,PyPep8Naming,PySetFunctionToLiteral class TestSupportFunctions(unittest.TestCase): """ Test functionality which is used indirectly by other functions """ def setUp(self): pass def test_recursive_function_decorator(self): @st.recursive_function def myfactorial(thisfunc, x): if 
x == 0: return 1 else: return x*thisfunc(x-1) nn = [0, 1, 3, 5, 10] res1 = [sp.factorial(x) for x in nn] res2 = [myfactorial(x) for x in nn] self.assertEqual(res1, res2) def test_get_custom_attr_map(self): t = st.t x1, x2 = xx = st.symb_vector("x1, x2") xdot1, xdot2 = xxd = st.time_deriv(xx, xx) xddot1, xddot2 = xxdd = st.time_deriv(xx, xx, order=2) m1 = st.get_custom_attr_map("ddt_child") em1 = [(x1, xdot1), (x2, xdot2), (xdot1, xddot1), (xdot2, xddot2)] # convert to set because sorting might depend on plattform self.assertEqual(set(m1), set(em1)) m2 = st.get_custom_attr_map("ddt_parent") em2 = [(xdot1, x1), (xdot2, x2), (xddot1, xdot1), (xddot2, xdot2)] self.assertEqual(set(m2), set(em2)) m3 = st.get_custom_attr_map("ddt_func") # ensure unique sorting m3.sort(key=lambda x: "{}_{}".format(x[0].difforder, str(x[0]))) self.assertEqual(len(m3), 6) x2_func = sp.Function(x2.name)(t) self.assertEqual(type(type(m3[0][1])), sp.function.UndefinedFunction) self.assertEqual(m3[-1][1], x2_func.diff(t, t)) # noinspection PyShadowingNames,PyPep8Naming,PySetFunctionToLiteral class SymbToolsTest2(unittest.TestCase): def setUp(self): pass def test_solve_scalar_ode_1sto(self): a, b = sp.symbols("a, b", nonzero=True) t, x1, x2 = sp.symbols("t, x1, x2") # x1_dot = <rhs> rhs1 = sp.S(0) rhs2 = sp.S(2.5) rhs3 = x1 rhs5 = x1*(3-t) rhs6 = cos(b*t) # coeff must be nonzero to prevent case distinction res1 = st.solve_scalar_ode_1sto(rhs1, x1, t) self.assertEqual(res1.diff(t), rhs1.subs(x1, res1)) res2 = st.solve_scalar_ode_1sto(rhs2, x1, t) self.assertEqual(res2.diff(t), rhs2.subs(x1, res2)) res3, iv3 = st.solve_scalar_ode_1sto(rhs3, x1, t, return_iv=True) self.assertEqual(res3.diff(t), rhs3.subs(x1, res3)) self.assertEqual(res3, iv3*exp(t)) res5 = st.solve_scalar_ode_1sto(rhs5, x1, t) test_difference5 = res5.diff(t) - rhs5.subs(x1, res5) self.assertEqual(test_difference5.expand(), 0) res6 = st.solve_scalar_ode_1sto(rhs6, x1, t) self.assertEqual(res6.diff(t), rhs6.subs(x1, res6).expand()) @uth.skip_slow def test_solve_scalar_ode_1sto_2(self): a, b = sp.symbols("a, b", nonzero=True) t, x1, x2 = sp.symbols("t, x1, x2") rhs4 = sin(a*x1) # this test works but is slow with st.warnings.catch_warnings(record=True) as cm: res4 = st.solve_scalar_ode_1sto(rhs4, x1, t) self.assertEqual(len(cm), 1) self.assertTrue('multiple solutions' in str(cm[0].message)) test_difference4 = res4.diff(t) - rhs4.subs(x1, res4) self.assertEqual(test_difference4.simplify(), 0) def test_calc_flow_from_vectorfield(self): a, b = sp.symbols("a, b", nonzero=True) t, x1, x2, x3, x4 = sp.symbols("t, x1, x2, x3, x4") xx = x1, x2, x3, x4 vf1 = sp.Matrix([0, 1, x3]) vf2 = sp.Matrix([0, 1, x3, sin(a*x2)]) res1, fp, iv1 = st.calc_flow_from_vectorfield(vf1, xx[:-1], flow_parameter=t) vf1_sol = vf1.subs(lzip(xx[:-1], res1)) self.assertEqual(fp, t) self.assertEqual(res1.diff(t), vf1_sol) res2, fp, iv2 = st.calc_flow_from_vectorfield(vf2, xx, flow_parameter=t) vf2_sol = vf2.subs(lzip(xx[:-1], res2)) self.assertEqual(fp, t) self.assertEqual(res2.diff(t), vf2_sol) res3, fp, iv3 = st.calc_flow_from_vectorfield(sp.Matrix([x1, 1, x1]), xx[:-1]) t = fp x1_0, x2_0, x3_0 = iv3 ref3 = sp.Matrix([[x1_0*sp.exp(t)], [t + x2_0], [x1_0*sp.exp(t) - x1_0 + x3_0]]) self.assertEqual(res3, ref3) def test_create_simfunction(self): x1, x2, x3, x4 = xx = sp.Matrix(sp.symbols("x1, x2, x3, x4")) u1, u2 = uu = sp.Matrix(sp.symbols("u1, u2")) # inputs p1, p2, p3, p4 = pp = sp.Matrix(sp.symbols("p1, p2, p3, p4")) # parameter t = sp.Symbol('t') A = A0 = sp.randMatrix(len(xx), len(xx), -10, 
10, seed=704) B = B0 = sp.randMatrix(len(xx), len(uu), -10, 10, seed=705) v1 = A[0, 0] A[0, 0] = p1 v2 = A[2, -1] A[2, -1] = p2 v3 = B[3, 0] B[3, 0] = p3 v4 = B[2, 1] B[2, 1] = p4 par_vals = lzip(pp, [v1, v2, v3, v4]) f = A*xx G = B fxu = (f + G*uu).subs(par_vals) # some random initial values x0 = st.to_np( sp.randMatrix(len(xx), 1, -10, 10, seed=706) ).squeeze() # Test handling of unsubstituted parameters mod = st.SimulationModel(f, G, xx, model_parameters=par_vals[1:]) with self.assertRaises(ValueError) as cm: rhs0 = mod.create_simfunction() self.assertTrue("unexpected symbols" in cm.exception.args[0]) # create the model and the rhs-function mod = st.SimulationModel(f, G, xx, par_vals) rhs0 = mod.create_simfunction() self.assertFalse(mod.compiler_called) self.assertFalse(mod.use_sp2c) res0_1 = rhs0(x0, 0) dres0_1 = st.to_np(fxu.subs(lzip(xx, x0) + st.zip0(uu))).squeeze() bin_res01 = np.isclose(res0_1, dres0_1) # binary array self.assertTrue( np.all(bin_res01) ) # difference should be [0, 0, ..., 0] self.assertFalse( np.any(rhs0(x0, 0) - rhs0(x0, 3.7) ) ) # simulate tt = np.linspace(0, 0.5, 100) # simulation should be short due to instability res1 = sc.integrate.odeint(rhs0, x0, tt) # create and try sympy_to_c bridge (currently only works on linux # and if sympy_to_c is installed (e.g. with `pip install sympy_to_c`)) # until it is not available for windows we do not want it as a requirement # see also https://stackoverflow.com/a/10572833/333403 try: import sympy_to_c except ImportError: # noinspection PyUnusedLocal sympy_to_c = None sp2c_available = False else: sp2c_available = True if sp2c_available: rhs0_c = mod.create_simfunction(use_sp2c=True) self.assertTrue(mod.compiler_called) res1_c = sc.integrate.odeint(rhs0_c, x0, tt) self.assertTrue(np.all(np.isclose(res1_c, res1))) mod.compiler_called = None rhs0_c = mod.create_simfunction(use_sp2c=True) self.assertTrue(mod.compiler_called is None) # proof calculation # x(t) = x0*exp(A*t) Anum = st.to_np(A.subs(par_vals)) Bnum = st.to_np(G.subs(par_vals)) # noinspection PyUnresolvedReferences xt = [ np.dot( sc.linalg.expm(Anum*T), x0 ) for T in tt ] xt = np.array(xt) # test whether numeric results are close within given tolerance bin_res1 = np.isclose(res1, xt, rtol=2e-5) # binary array self.assertTrue( np.all(bin_res1) ) # test handling of parameter free models: mod2 = st.SimulationModel(Anum*xx, Bnum, xx) rhs2 = mod2.create_simfunction() res2 = sc.integrate.odeint(rhs2, x0, tt) self.assertTrue(np.allclose(res1, res2)) # test input functions des_input = st.piece_wise((0, t <= 1 ), (t, t < 2), (0.5, t < 3), (1, True)) des_input_func_scalar = st.expr_to_func(t, des_input) des_input_func_vec = st.expr_to_func(t, sp.Matrix([des_input, des_input]) ) # noinspection PyUnusedLocal with self.assertRaises(TypeError) as cm: mod2.create_simfunction(input_function=des_input_func_scalar) rhs3 = mod2.create_simfunction(input_function=des_input_func_vec) # noinspection PyUnusedLocal res3_0 = rhs3(x0, 0) rhs4 = mod2.create_simfunction(input_function=des_input_func_vec, time_direction=-1) res4_0 = rhs4(x0, 0) self.assertTrue(np.allclose(res3_0, np.array([119., -18., -36., -51.]))) self.assertTrue(np.allclose(res4_0, - res3_0)) def test_create_simfunction2(self): x1, x2, x3, x4 = xx = sp.Matrix(sp.symbols("x1, x2, x3, x4")) u1, u2 = uu = sp.Matrix(sp.symbols("u1, u2")) # inputs p1, p2, p3, p4 = pp = sp.Matrix(sp.symbols("p1, p2, p3, p4")) # parameter t = sp.Symbol('t') A = A0 = sp.randMatrix(len(xx), len(xx), -10, 10, seed=704) B = B0 = sp.randMatrix(len(xx), 
len(uu), -10, 10, seed=705) v1 = A[0, 0] A[0, 0] = p1 v2 = A[2, -1] A[2, -1] = p2 v3 = B[3, 0] B[3, 0] = p3 v4 = B[2, 1] B[2, 1] = p4 par_vals = lzip(pp, [v1, v2, v3, v4]) f = A*xx G = B fxu = (f + G*uu).subs(par_vals) # some random initial values x0 = st.to_np( sp.randMatrix(len(xx), 1, -10, 10, seed=706) ).squeeze() u0 = st.to_np( sp.randMatrix(len(uu), 1, -10, 10, seed=2257) ).squeeze() # create the model and the rhs-function mod = st.SimulationModel(f, G, xx, par_vals) rhs_xx_uu = mod.create_simfunction(free_input_args=True) res0_1 = rhs_xx_uu(x0, u0, 0) dres0_1 = st.to_np(fxu.subs(lzip(xx, x0) + lzip(uu, u0))).squeeze() bin_res01 = np.isclose(res0_1, dres0_1) # binary array self.assertTrue( np.all(bin_res01) ) def test_num_trajectory_compatibility_test(self): x1, x2, x3, x4 = xx = sp.Matrix(sp.symbols("x1, x2, x3, x4")) u1, u2 = uu = sp.Matrix(sp.symbols("u1, u2")) # inputs t = sp.Symbol('t') # we want to create a random but stable matrix np.random.seed(2805) diag = np.diag( np.random.random(len(xx))*-10 ) T = sp.randMatrix(len(xx), len(xx), -10, 10, seed=704) Tinv = T.inv() A = Tinv*diag*T B = B0 = sp.randMatrix(len(xx), len(uu), -10, 10, seed=705) x0 = st.to_np( sp.randMatrix(len(xx), 1, -10, 10, seed=706) ).squeeze() tt = np.linspace(0, 5, 2000) des_input = st.piece_wise((2-t, t <= 1 ), (t, t < 2), (2*t-2, t < 3), (4, True)) des_input_func_vec = st.expr_to_func(t, sp.Matrix([des_input, des_input]) ) mod2 = st.SimulationModel(A*xx, B, xx) rhs3 = mod2.create_simfunction(input_function=des_input_func_vec) XX = sc.integrate.odeint(rhs3, x0, tt) UU = des_input_func_vec(tt) res1 = mod2.num_trajectory_compatibility_test(tt, XX, UU) self.assertTrue(res1) # slightly different input signal -> other results res2 = mod2.num_trajectory_compatibility_test(tt, XX, UU*1.1) self.assertFalse(res2) def test_expr_to_func(self): x1, x2 = xx = sp.Matrix(sp.symbols("x1, x2")) t, = sp.symbols("t,") r_ = np.r_ f1 = st.expr_to_func(x1, 2*x1) self.assertEqual(f1(5.1), 10.2) XX1 = np.r_[1, 2, 3.7] res1 = f1(XX1) == 2*XX1 self.assertTrue(res1.all) f2 = st.expr_to_func(x1, sp.Matrix([x1*2, x1+5, 4])) res2 = f2(3) == r_[6, 8, 4] self.assertTrue(res2.all()) res2b = f2(r_[3, 10, 0]) == np.array([[6, 8, 4], [20, 15, 4], [0, 5, 4]]) self.assertTrue(res2b.all()) f3 = st.expr_to_func(xx, sp.Matrix([x1*2, x2+5, 4])) res3 = np.allclose(f3(-3.1, 4), r_[-6.2, 9, 4]) self.assertTrue(res3) # test compatibility with Piecewise Expressions des_input = st.piece_wise((0, t <= 1 ), (t, t < 2), (0.5, t < 3), (1, True)) f4s = st.expr_to_func(t, des_input) f4v = st.expr_to_func(t, sp.Matrix([des_input, des_input]) ) self.assertEqual(f4s(2.7), 0.5) sol = r_[0, 1.6, 0.5, 1, 1] res4a = f4s(r_[0.3, 1.6, 2.2, 3.1, 500]) == sol self.assertTrue(res4a.all()) res4b = f4v(r_[0.3, 1.6, 2.2, 3.1, 500]) col1, col2 = res4b.T self.assertTrue(np.array_equal(col1, sol)) self.assertTrue(np.array_equal(col2, sol)) spmatrix = sp.Matrix([[x1, x1*x2], [0, x2**2]]) fnc1 = st.expr_to_func(xx, spmatrix, keep_shape=False) fnc2 = st.expr_to_func(xx, spmatrix, keep_shape=True) res1 = fnc1(1.0, 2.0) res2 = fnc2(1.0, 2.0) self.assertEqual(res1.shape, (4, )) self.assertEqual(res2.shape, (2, 2)) # noinspection PyTypeChecker self.assertTrue(np.all(res1 == [1, 2, 0, 4])) # noinspection PyTypeChecker self.assertTrue(np.all(res1 == res2.flatten())) fnc = st.expr_to_func(xx, x1 + x2) self.assertEqual(fnc(1, 3), 4) xx_res = np.array([1, 3, 1.1, 3, 1.2, 3.0]).reshape(3, -1) self.assertTrue(np.allclose(fnc(*xx_res.T), np.array([4, 4.1, 4.2]))) fnc1 = st.expr_to_func(xx, 
3*xx) fnc2 = st.expr_to_func(xx, 3*xx, allow_kwargs=True) self.assertTrue(np.allclose(fnc1(10, 100), fnc2(x2=100, x1=10))) def test_reformulate_Integral(self): t = sp.Symbol('t') c = sp.Symbol('c') F = sp.Function('F') x = sp.Function('x')(t) a = sp.Function('a') i1 = sp.Integral(F(t), t) j1 = st.reformulate_integral_args(i1) self.assertEqual(j1.subs(t, 0).doit(), 0) ode = x.diff(t) + x -a(t)*x**c sol = sp.dsolve(ode, x).rhs # the solution contains an undetemined integral self.assertTrue( len(sol.atoms(sp.Integral)) == 1) # extract the integration constant (not necessary for test) # C1 = list(sol.atoms(sp.Symbol)-ode.atoms(sp.Symbol))[0] sol2 = st.reformulate_integral_args(sol) self.assertTrue( len(sol2.atoms(sp.Integral)) == 1) sol2_at_0 = sol2.subs(t, 0).doit() self.assertTrue( len(sol2_at_0.atoms(sp.Integral)) == 0) # noinspection PyShadowingNames,PyPep8Naming,PySetFunctionToLiteral class SymbToolsTest3(unittest.TestCase): def setUp(self): st.init_attribute_store(reinit=True) def test_get_symbols_by_name(self): c1, C1, x, a, t, Y = sp.symbols('c1, C1, x, a, t, Y') F = sp.Function('F') expr1 = c1*(C1+x**x)/(sp.sin(a*t)) expr2 = sp.Matrix([sp.Integral(F(x), x)*sp.sin(a*t) - \ 1/F(x).diff(x)*C1*Y]) res1 = st.get_symbols_by_name(expr1, 'c1') self.assertEqual(res1, c1) res2 = st.get_symbols_by_name(expr1, 'C1') self.assertEqual(res2, C1) res3 = st.get_symbols_by_name(expr1, *'c1 x a'.split()) self.assertEqual(res3, [c1, x, a]) with self.assertRaises(ValueError) as cm: st.get_symbols_by_name(expr1, 'Y') with self.assertRaises(ValueError) as cm: st.get_symbols_by_name(expr1, 'c1', 'Y') res4 = st.get_symbols_by_name(expr2, 'Y') self.assertEqual(res4, Y) res5 = st.get_symbols_by_name(expr2, 'C1') self.assertEqual(res5, C1) res6 = st.get_symbols_by_name(expr2, *'C1 x a'.split()) self.assertEqual(res6, [C1, x, a]) def test_general_attribute(self): st.register_new_attribute_for_sp_symbol("foo", save_setter=False) st.register_new_attribute_for_sp_symbol("bar", getter_default="__self__") x1 = sp.Symbol('x1') self.assertEqual(x1.foo, None) self.assertEqual(x1.bar, x1) x1.foo = 7 self.assertEqual(x1.foo, 7) x1.foo = "some string" self.assertEqual(x1.foo, "some string") x1.foo = x1 self.assertEqual(x1.foo, x1) x1.bar = 12 # noinspection PyUnusedLocal with self.assertRaises(ValueError) as cm: x1.bar = 13 def test_difforder_attribute(self): x1 = sp.Symbol('x1') self.assertEqual(x1.difforder, 0) xddddot1 = st.time_deriv(x1, [x1], order=4) self.assertEqual(xddddot1.difforder, 4) xx = sp.Matrix(sp.symbols("x1, x2, x3")) xxd = st.time_deriv(xx, xx) xxdd = st.time_deriv(xx, xx, order=2) for xdd in xxdd: self.assertEqual(xdd.difforder, 2) # once, this was a bug y = sp.Symbol('y') ydot = st.time_deriv(y, [y]) yddot = st.time_deriv(ydot, [y, ydot]) self.assertEqual(yddot.difforder, 2) z = sp.Symbol('z') zdot_false = sp.Symbol('zdot') st.global_data.attribute_store[(zdot_false, 'difforder')] = -7 with self.assertRaises(ValueError) as cm: st.time_deriv( z, [z]) # ensure that difforder is not changed after value_set z2 = sp.Symbol('z2') z2.difforder = 3 z2.difforder = 3 # same value is allowed with self.assertRaises(ValueError) as cm: z2.difforder = 4 # not allowed def test_introduce_abreviations(self): x1, x2, x3 = xx = st.symb_vector('x1:4') a1, a2, a3 = aa = st.symb_vector('a1:4') P1 = sp.eye(3) P2 = sp.Matrix([x1**2, a1+a2, a3*x2, 13.7, 1, 0]) res1 = st.introduce_abreviations(P1) res2 = st.introduce_abreviations(P1, time_dep_symbs=xx) res3 = st.introduce_abreviations(P2, time_dep_symbs=xx) 
self.assertEqual(res1[0], P1) self.assertEqual(res2[0], P1) # test subs_tuples self.assertNotEqual(res3[0], P2) self.assertEqual(res3[0].subs(res3[1]), P2) # time dependend symbols tds = res3[2] original_expressions = tds.subs(res3[1]) self.assertEqual(original_expressions, sp.Matrix([x1**2, a3*x2])) def _test_make_global(self): xx = st.symb_vector('x1:4') yy = st.symb_vector('y1:4') st.make_global(xx) self.assertEqual(x1 + x2, xx[0] + xx[1]) # test if set is accepted st.make_global(yy.atoms(sp.Symbol)) self.assertEqual(y1 + y2, yy[0] + yy[1]) with self.assertRaises(TypeError) as cm: st.make_global(dict()) def test_make_global(self): aa = tuple(st.symb_vector('a1:4')) xx = st.symb_vector('x1:4') yy = st.symb_vector('y1:4') zz = st.symb_vector('z1:11').reshape(2, 5) # tollerate if there are numbers in the sequences: zz[0] = 0 zz[1] = 10 st.make_global(xx, yy, zz, aa) res = a1 + x2 + y3 + z4 + z7 + z10 res2 = aa[0] + xx[1] + yy[2] + zz[3] + zz[6] + zz[9] self.assertEqual(res, res2) # noinspection PyShadowingNames,PyPep8Naming,PySetFunctionToLiteral class SymbToolsTest4(unittest.TestCase): def setUp(self): st.init_attribute_store(reinit=True) def test_re_im(self): x, y = sp.symbols('x, y', real=True) M1 = sp.Matrix([[x, 0], [sp.pi, 5*x**2]]) M2 = sp.Matrix([[y, 3], [sp.exp(1), 7/y]]) M = M1 + 1j*M2 R = st.re(M) I = st.im(M) self.assertEqual(R-M1, R*0) self.assertEqual(I-M2, R*0) def test_is_number(self): x1, x2, x3 = xx = st.symb_vector('x1:4') self.assertTrue(st.is_number(x1/x1)) self.assertTrue(st.is_number(5)) self.assertTrue(st.is_number(5.3)) self.assertTrue(st.is_number(sp.pi)) self.assertTrue(st.is_number(sp.Rational(2, 7))) self.assertTrue(st.is_number(sp.Rational(2, 7).evalf(30))) self.assertTrue(st.is_number(sin(7))) self.assertTrue(st.is_number(np.float(9000))) self.assertFalse(st.is_number(x1)) self.assertFalse(st.is_number(sin(x1))) with self.assertRaises(TypeError) as cm: st.is_number( sp.eye(3) ) with self.assertRaises(TypeError) as cm: st.is_number( "567" ) def test_is_scalar(self): x1, x2, x3 = xx = st.symb_vector('x1:4') self.assertTrue(st.is_scalar(x1/x1)) self.assertTrue(st.is_scalar(5)) self.assertTrue(st.is_scalar(5.3)) self.assertTrue(st.is_scalar(sp.pi)) self.assertTrue(st.is_scalar(sp.Rational(2, 7))) self.assertTrue(st.is_scalar(sp.Rational(2, 7).evalf(30))) self.assertTrue(st.is_scalar(sin(7))) self.assertTrue(st.is_scalar(np.float(9000))) self.assertTrue(st.is_scalar(x1**2 + x3)) self.assertFalse(st.is_scalar( sp.eye(3)*x2 )) self.assertFalse(st.is_scalar( sp.zeros(2, 4)*x2 )) self.assertFalse(st.is_scalar( sp.eye(0)*x2 )) def test_is_scalar2(self): x1, x2, x3 = xx = st.symb_vector('x1:4') a1, a2, a3 = aa = st.symb_vector('a1:4') M1 = sp.Matrix([[0, 0], [a1, a2], [0, a3]]) M2 = sp.ImmutableDenseMatrix(M1) iss = st.is_scalar self.assertTrue(iss(x1)) self.assertTrue(iss(x1 ** 2 + sp.sin(x2))) self.assertTrue(iss(0)) self.assertTrue(iss(0.1)) self.assertTrue(iss(7.5 - 23j)) self.assertTrue(iss(np.float64(0.1))) self.assertFalse(iss(M1)) self.assertFalse(iss(M2)) self.assertFalse(iss(M1[:1, :1])) self.assertFalse(iss(np.arange(5))) def test_sca_integrate(self): """ test special case aware integrate """ x1, x2, x3 = xx = st.symb_vector('x1:4') f = sp.log(cos(x1)) df = f.diff(x1) F = st.sca_integrate(df, x1) self.assertEqual(F, f) if 1: f = 5*x1 df = f.diff(x1) F = st.sca_integrate(df, x1) self.assertEqual(F, f) f = cos(x1)*x1 df = f.diff(x1) F = st.sca_integrate(df, x1) self.assertEqual(F, f) # noinspection PyShadowingNames,PyPep8Naming,PySetFunctionToLiteral class 
TestNumTools(unittest.TestCase): def setUp(self): n = 5 self.ev = sp.randMatrix(n, 1, seed=1631) d = sp.diag(*self.ev) self.T = T = sp.randMatrix(n, n, seed=1632) assert not T.det() == 0 self.M1 = T*d*T.inv() self.ev_sorted = list(self.ev) self.ev_sorted.sort(reverse=True) # # self.M2 = sp.Matrix([[0, 1], [-1, 0]]) def test_sorted_eigenvalues(self): res1 = st.sorted_eigenvalues(self.M1) self.assertEqual(res1, self.ev_sorted) # imaginary unit I = sp.I res2 = st.sorted_eigenvalues(self.M2) self.assertTrue(I in res2) self.assertTrue(-I in res2) self.assertEqual(2, len(res2)) def test_sorted_eigenvectors(self): V1 = st.sorted_eigenvector_matrix(self.M1) ev1 = st.sorted_eigenvalues(self.M1) self.assertEqual(len(ev1), V1.shape[1]) for val, vect in lzip(ev1, st.col_split(V1)): res_vect = self.M1*vect - val*vect res = (res_vect.T*res_vect)[0] self.assertTrue(res < 1e-15) self.assertAlmostEqual( (vect.T*vect)[0] - 1, 0) V2 = st.sorted_eigenvector_matrix(self.M1, numpy=True) V3 = st.sorted_eigenvector_matrix(self.M1, numpy=True, increase=True) # quotients should be +-1 res1 = np.abs( st.to_np(V1) / st.to_np(V2) ) - np.ones_like(V1) res2 = np.abs( st.to_np(V1) / st.to_np(V3[:, ::-1]) ) - np.ones_like(V1) self.assertTrue(np.max(np.abs(res1)) < 1e-5) self.assertTrue(np.max(np.abs(res2)) < 1e-5) # noinspection PyShadowingNames,PyPep8Naming,PySetFunctionToLiteral class RandNumberTest(unittest.TestCase): def setUp(self): pass def test_rnd_number_tuples(self): x1, x2, x3 = xx = sp.symbols('x1:4') s = sum(xx) res_a1 = st.rnd_number_subs_tuples(s) self.assertTrue(isinstance(res_a1, list)) self.assertEqual(len(res_a1), len(xx)) c1 = [len(e)==2 and e[0].is_Symbol and st.is_number(e[1]) for e in res_a1] self.assertTrue( all(c1) ) t = sp.Symbol('t') f = sp.Function('f')(t) fdot = f.diff(t) fddot = f.diff(t, 2) ff = sp.Matrix([f, fdot, fddot, x1*x2]) for i in range(100): res_b1 = st.rnd_number_subs_tuples(ff, seed=i) expct_b1_set = set([f, fdot, fddot, t, x1, x2]) res_b1_atom_set = set( lzip(*res_b1)[0] ) self.assertEqual(expct_b1_set, res_b1_atom_set) # highest order has to be returned first self.assertEqual(res_b1[0][0], fddot) self.assertEqual(res_b1[1][0], fdot) self.assertTrue( all( [st.is_number(e[1]) for e in res_b1] ) ) def test_rnd_number_tuples2(self): x1, x2, x3 = xx = st.symb_vector('x1:4') yy = st.symb_vector('y1:4') s = sum(xx) res_a1 = st.rnd_number_subs_tuples(s, seed=1) res_a2 = st.rnd_number_subs_tuples(s, seed=2) self.assertNotEqual(res_a1, res_a2) res_b1 = st.rnd_number_subs_tuples(s, seed=2) self.assertEqual(res_b1, res_a2) xxyy = xx + yy rnst1 = st.rnd_number_subs_tuples(xxyy) rnst2 = st.rnd_number_subs_tuples(xxyy, exclude=x1) rnst3 = st.rnd_number_subs_tuples(xxyy, exclude=[x1, x2]) rnst4 = st.rnd_number_subs_tuples(xxyy, exclude=xx) symbols1 = xxyy.subs(rnst1).atoms(sp.Symbol) symbols2 = xxyy.subs(rnst2).atoms(sp.Symbol) symbols3 = xxyy.subs(rnst3).atoms(sp.Symbol) symbols4 = xxyy.subs(rnst4).atoms(sp.Symbol) self.assertEqual(symbols1, set()) self.assertEqual(symbols2, set([x1])) self.assertEqual(symbols3, set([x1, x2])) self.assertEqual(symbols4, set([x1, x2, x3])) # this was a bug: rnst = st.rnd_number_subs_tuples(xxyy, prime=True, exclude=[x1, x2]) self.assertEqual(xxyy.subs(rnst).atoms(sp.Symbol), set([x1, x2])) def test_rnd_number_tuples3(self): a, b = sp.symbols('a, b', commutative=False) term1 = a*b - b*a st.warnings.simplefilter("always") with st.warnings.catch_warnings(record=True) as cm: st.rnd_number_subs_tuples(term1) self.assertEqual(len(cm), 1) self.assertTrue('not 
commutative' in str(cm[0].message)) with st.warnings.catch_warnings(record=True) as cm2: st.subs_random_numbers(term1) self.assertEqual(len(cm2), 1) self.assertTrue('not commutative' in str(cm2[0].message)) def test_generic_rank1(self): x1, x2, x3 = xx = st.symb_vector('x1:4') M1 = sp.Matrix([[x1, 0], [0, x2]]) M2 = sp.Matrix([[1, 0], [sin(x1)**2, sin(x1)**2 + cos(x1)**2 - 1]]) # singular M3 = sp.Matrix([[1, 0], [1, sin(x1)**50]]) # regular M4 = sp.Matrix([[1, 0, 0], [1, sin(x1)**50, 1], [0, 0, 0]]) # rank 2 M5 = sp.Matrix([[-x2, 0, -x3], [ x1, -x3, 0], [ 0, x2, x1]]) M6 = sp.Matrix([[1, 0, 0], [sin(x1)**2, sin(x1)**2 + cos(x1)**2 - 1, 0], [0, sp.pi, sin(-3)**50]]) # rank 2 M7 = st.row_stack(M6, [sp.sqrt(5)**-20, 2, 0]) # nonsquare, rank 3 M8 = sp.diag(1, sin(3)**2 + cos(3)**2 - 1, sin(3)**30, sin(3)**150) # test for a specific bug xxdd = st.symb_vector('xdot1, xdot2, xddot1, xddot2, xdddot1') xdot1, xdot2, xddot1, xddot2, xdddot1 = xxdd M9 = sp.Matrix([[1.00000000000000, 1.0*xdot1, 1.00000000000000, 1.0*x1, 1.00000000000000, 0, 0, 0, 0, 0], [1.0*x2, 1.0*x1*x2, 1.0*x2, 0, 0, 0, 0, 0, 0, 0], [0, 1.0*xddot1, 1.00000000000000, 2.0*xdot1, 1.00000000000000, 1.0*x1, 1.00000000000000, 0, 0, 0], [1.0*xdot2, 1.0*x1*xdot2 + 1.0*x2*xdot1, 1.0*x2 + 1.0*xdot2, 1.0*x1*x2, 1.0*x2, 0, 0, 0, 0, 0], [0, 1.0*xdddot1, 0, 3.0*xddot1, 1.00000000000000, 3.0*xdot1, 1.00000000000000, 1.0*x1, 1.00000000000000, 0], [1.0*xddot2, 1.0*x1*xddot2 + 1.0*x2*xddot1 + 2.0*xdot1*xdot2, 1.0*xddot2 + 2.0*xdot2, 2.0*x1*xdot2 + 2.0*x2*xdot1, 1.0*x2 + 2.0*xdot2, 1.0*x1*x2, 1.0*x2, 0, 0, 0], [1, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0, 0, 0, 0, 0]]) res1 = st.generic_rank(M1, seed=98682) self.assertEqual(res1, 2) res2 = st.generic_rank(M2) self.assertEqual(res2, 1) res3 = st.generic_rank(M3, seed=1814) self.assertEqual(res3, 2) self.assertEqual(st.generic_rank(M2, seed=1529), 1) self.assertEqual(st.generic_rank(M4, seed=1814), 2) self.assertEqual(st.generic_rank(M5, seed=1814), 2) self.assertEqual(st.generic_rank(M6, seed=1814), 2) self.assertEqual(st.generic_rank(M7, seed=1814), 3) self.assertEqual(st.generic_rank(M7.T, seed=1814), 3) self.assertEqual(st.generic_rank(M8, seed=1814), 3) # TODO: This should raise a warning with warnings.catch_warnings(record=True) as w: # Cause all warnings to always be triggered. 
warnings.simplefilter("always") res = st.generic_rank(M9, seed=2051) self.assertEqual(len(w), 1) self.assertTrue(issubclass(w[-1].category, UserWarning)) self.assertTrue("Float" in str(w[-1].message)) # nevertheless result should be correct self.assertEqual(res, 6) def test_rationalize_all_numbers(self): xxdd = st.symb_vector('x1, x2, xdot1, xdot2, xddot1, xddot2, xdddot1') x1, x2, xdot1, xdot2, xddot1, xddot2, xdddot1 = xxdd M1 = sp.Matrix([[1.00000000000000, 1.0*xdot1, 1.00000000000000, 1.0*x1, 1.00000000000000, 0, 0, 0, 0, 0], [1.0*x2, 1.0*x1*x2, 1.0*x2, 0, 0, 0, 0, 0, 0, 0], [0, 1.0*xddot1, 1.00000000000000, 2.0*xdot1, 1.00000000000000, 1.0*x1, 1.00000000000000, 0, 0, 0], [1.0*xdot2, 1.0*x1*xdot2 + 1.0*x2*xdot1, 1.0*x2 + 1.0*xdot2, 1.0*x1*x2, 1.0*x2, 0, 0, 0, 0, 0], [0, 1.0*xdddot1, 0, 3.0*xddot1, 1.00000000000000, 3.0*xdot1, 1.00000000000000, 1.0*x1, 1.00000000000000, 0], [1.0*xddot2, 1.0*x1*xddot2 + 1.0*x2*xddot1 + 2.0*xdot1*xdot2, 1.0*xddot2 + 2.0*xdot2, 2.0*x1*xdot2 + 2.0*x2*xdot1, 1.0*x2 + 2.0*xdot2, 1.0*x1*x2, 1.0*x2, 0, 0, 0], [1, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 1, 0, 0, np.pi, 0, 0, 0, 0, 0]]) types1 = [type(a) for a in M1.atoms(sp.Number)] self.assertTrue(sp.Float in types1) M2 = st.rationalize_all_numbers(M1) types2 = [type(a) for a in M2.atoms(sp.Number)] self.assertFalse(sp.Float in types2) @uth.skip_slow def test_generic_rank2(self): import pickle path = make_abspath('test_data', 'rank_test_matrices.pcl') with open(path, 'rb') as pfile: matrix_list = pickle.load(pfile) N = len(matrix_list) for i, m in enumerate(matrix_list): print("%i / %i" %(i, N)) r1 = m.srnp.rank() r2 = st.generic_rank(m) self.assertEqual(r1, r2) # noinspection PyShadowingNames,PyPep8Naming,PySetFunctionToLiteral class TestTrajectoryPlanning(unittest.TestCase): def setUp(self): pass def test_transpoly(self): x, y = sp.symbols("x, y") res1 = st.trans_poly(x, 0, (0, 0), (2, 1)) self.assertEqual(res1, x/2) res2 = st.trans_poly(x, 1, (0, 0, 1), (2, 1, 1)) self.assertEqual(res2, x**3/4 - 3*x**2/4 + x) def test_condition_poly(self): x, y = sp.symbols("x, y") res1 = st.condition_poly(x, (0, 0, 1), (2, 1, 1)) self.assertEqual(res1, x**3/4 - 3*x**2/4 + x) res2 = st.condition_poly(x, (0, 0), (2, -4, 0, 3)) self.assertEqual(res2.subs(x, 0), 0) self.assertEqual(res2.subs(x, 2), -4) self.assertEqual(res2.diff(x).subs(x, 2), 0) self.assertEqual(res2.diff(x, x).subs(x, 2), 3) # now only with one condition res3 = st.condition_poly(x, (0, 1.75)) self.assertEqual(res3.subs(x, 0), 1.75) def test_create_piecewise(self): t, x = sp.symbols('t, x') interface_points1 = [0, 4] expr1 = st.create_piecewise(t, interface_points1, [-1, x, -13]) self.assertEqual(expr1.subs(t, -3), -1) self.assertEqual(expr1.subs(t, 0), x) self.assertEqual(expr1.subs(t, 3), x) self.assertEqual(expr1.subs(t, 4), x) self.assertEqual(expr1.subs(t, 4.00000001), -13) self.assertEqual(expr1.subs(t, 10**100), -13) interface_points2 = [0, 4, 8, 12] expr1 = st.create_piecewise(t, interface_points2, [-1, x, x**2, x**3, -13]) self.assertEqual(expr1.subs(t, -2), -1) self.assertEqual(expr1.subs(t, 0), x) self.assertEqual(expr1.subs(t, 4), x**2) self.assertEqual(expr1.subs(t, 7), x**2) self.assertEqual(expr1.subs(t, 8), x**3) self.assertEqual(expr1.subs(t, 9), x**3) self.assertEqual(expr1.subs(t, 12), x**3) self.assertEqual(expr1.subs(t, 12.00000001), -13) self.assertEqual(expr1.subs(t, 1e50), -13) def test_create_piecewise_poly(self): x, t = sp.symbols("x, t") conditions = [(0, 0, 0), # t= 0: x=0, x_dot=0 (2, 1), # t= 2: x=1, x_dot=<not defined> (3, 1, 0, 0 ), # 
t= 2: x=1, x_dot=0, x_ddot=0 (5, 2, 0, 0 ), # t= 2: x=1, x_dot=0, x_ddot=0 # smooth curve finished ] res1 = st.create_piecewise_poly(t, *conditions) self.assertEqual(res1.func(0), 0) self.assertEqual(res1.func(2), 1) self.assertEqual(res1.func(3), 1) self.assertEqual(res1.func(5), 2) self.assertEqual(res1.expr.diff(t, 2).subs(t, 5), 0) def test_do_laplace_deriv(self): t, s = sp.symbols('t, s') x1, x2, x3 = xx = st.symb_vector('x1:4') x1dot, x2dot, x3dot = st.time_deriv(xx, xx) x1ddot, x2ddot, x3ddot = st.time_deriv(xx, xx, order=2) expr1 = 5 expr2 = 5*s*t**2 - 7*t + 2 expr3 = 1*s**2*x1 - 7*s*x2*t + 2 res = st.do_laplace_deriv(expr1, s, t) ex_res = 5 self.assertEqual(res, ex_res) res = st.do_laplace_deriv(expr2, s, t) ex_res = 10*t - 7*t + 2 self.assertEqual(res, ex_res) res = st.do_laplace_deriv(expr3, s, t) ex_res = -7 * x2 + 2 self.assertEqual(res, ex_res) res = st.do_laplace_deriv(expr3, s, t, tds=xx) ex_res = x1ddot - 7 * x2 + - 7*x2dot*t + 2 self.assertEqual(res, ex_res) # noinspection PyShadowingNames,PyPep8Naming,PySetFunctionToLiteral class TestControlMethods1(unittest.TestCase): def setUp(self): pass def test_kalman_matrix(self): k, J, R, L = sp.symbols('k, J, R, L') A = sp.Matrix([[0, 1, 0], [0, 0, k/J], [0, -k/L, -R/L]]) B = sp.Matrix([0, 0, 1/L]) Qref = sp.Matrix([[0, 0, k/L/J], [0, k/L/J, -k*R/J/L**2 ], [1/L, -R/L**2, -k**2/J/L**2 + R**2/L**3 ]]) Q = st.kalman_matrix(A, B) self.assertEqual(Q, Qref) def test_nl_cont_matrix(self): # for simplicity test with a linear example k, J, R, L = sp.symbols('k, J, R, L') A = sp.Matrix([[0, 1, 0], [0, 0, k/J], [0, -k/L, -R/L]]) B = sp.Matrix([0, 0, 1/L]) Qref = sp.Matrix([[0, 0, k/L/J], [0, k/L/J, -k*R/J/L**2 ], [1/L, -R/L**2, -k**2/J/L**2 + R**2/L**3 ]]) xx = st.symb_vector("x1:4") ff = A*xx gg = B Qnl = st.nl_cont_matrix(ff, gg, xx) self.assertEqual(Qnl, Qref) def test_siso_place(self): n = 6 A = sp.randMatrix(n, n, seed=1648, min=-10, max=10) b = sp.randMatrix(n, 1, seed=1649, min=-10, max=10) ev = np.sort(np.random.random(n) * 10) f = st.siso_place(A, b, ev) A2 = st.to_np(A + b*f.T) ev2 = np.sort( np.linalg.eigvals(A2) ) diff = np.sum(np.abs((ev - ev2)/ev)) self.assertTrue(diff < 1e-6) def test_siso_place2(self): n = 4 A = sp.randMatrix(n, n, seed=1648, min=-10, max=10) b = sp.randMatrix(n, 1, seed=1649, min=-10, max=10) omega = np.pi*2/2.0 ev = np.sort([1j*omega, -1j*omega, -2, -3]) f = st.siso_place(A, b, ev) A2 = st.to_np(A + b*f.T) ev2 = np.sort(
np.linalg.eigvals(A2)
numpy.linalg.eigvals
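# --- Added sketch (illustration only, not part of the test suite above) ---
# test_lie_deriv above checks st.lie_deriv against a hand-computed result; the Lie derivative
# of a scalar field h along a vector field f is simply <grad h, f>. The helper below
# (`lie_deriv_manual`, a hypothetical name introduced here) reproduces the expected value eres1.
import sympy as sp

x1, x2, x3 = xx = sp.symbols('x1:4')

def lie_deriv_manual(h, f, xx):
    # L_f h = sum_i (dh/dx_i) * f_i  -- directional derivative of h along f
    return sum(sp.diff(h, xi) * fi for xi, fi in zip(xx, f))

f = sp.Matrix([x1 + x3*x2, 7*sp.exp(x1), sp.cos(x2)])
h1 = x1**2 + sp.sin(x3)*x2

res = sp.expand(lie_deriv_manual(h1, f, xx))
eres1 = 2*x1**2 + 2*x1*x2*x3 + 7*sp.exp(x1)*sp.sin(x3) + x2*sp.cos(x2)*sp.cos(x3)
assert sp.simplify(res - eres1) == 0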
""" - Bootstrapping is a resampling method. - In statistics, resampling entails the use of many samples generated from an original sample. In machine learning terms, the sample is our training data. - The main idea is to use the original sample as the population (the whole domain of our problem) and the generated sub-samples as samples Creating bootstrap samples: - To create bootstrap samples, we resample with replacement (each instance may be selected multiple times) from our original sample. - This means that a single instance can be selected multiple times. - Suppose we have data for 100 individuals. The data contains the wieght and height of each individual. If we generate random numbers from 1 to 100 and add the corresponding data to a new dataset, we have essentially created a bootstrap sample """ # Step 1: Import libraries and load dataset import numpy as np import matplotlib.pyplot as plt from sklearn.datasets import load_diabetes diabetes= load_diabetes() # Step 2: Print the original sample's statistics target = diabetes.target print(np.mean(target)) print(np.std(target)) """ 152.13348416289594 77.00574586945044 - We then create the bootstrap samples and statistics and store them in bootstrap_stats. - We could store the whole bootstrap samples, but it is not memory efficient to do so. - Furthermore, we can only care about the statistics, so it makes sense only to store them. - Here we create 10000 bootstrap samples and statistics """ # Step 3: We need to create the bootstrap samples and statistics bootstrap_stats =[] for _ in range(10000): bootstrap_sample = np.random.choice(target, size=len(target)) print(bootstrap_sample) mean= np.mean(bootstrap_sample) std = np.std(bootstrap_sample) bootstrap_stats.append((mean, std)) bootstrap_stats = np.array(bootstrap_stats) bootstrap_sample """ array([[153.90045249, 76.42341004], [148.65384615, 76.83534194], [152.75791855, 78.10022418], ..., [154.10180995, 75.97916508], [152.64027149, 80.31653728], [155.76923077, 80.39673208]]) """ """ - We can plot the histogram of the mean and standard deviation, as well as calcualte the standard error (that is the standard deviation of the statistic's distributions) for each """ # Step 4: plot the histogram of the mean and standard deviation, as well as calcualte the standard error plt.figure() plt.subplot(2,1,1) std_err = np.std(bootstrap_stats[:,0]) plt.title('Mean, Std. Error: %.2f'%std_err) plt.hist(bootstrap_stats[:,0], bins=20) plt.subplot(2,1,2) std_err =
np.std(bootstrap_stats[:,1])
numpy.std
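# --- Step 5 (added sketch, not part of the original tutorial) ---
# The bootstrap distribution also yields interval estimates directly: a 95% percentile
# bootstrap confidence interval for the mean, reusing bootstrap_stats from Step 3.
lower, upper = np.percentile(bootstrap_stats[:, 0], [2.5, 97.5])
print('95%% bootstrap CI for the mean: [%.2f, %.2f]' % (lower, upper))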
from copy import deepcopy as pydeepcopy from typing import Type import numpy as np from os import path, scandir, makedirs from random import uniform from tempfile import gettempdir, tempdir def add(a, b): return a + b def almost_equal(mat1, mat2): return np.allclose(mat1, mat2, atol=0.000_01) def count_columns(matrix): if not is_matrix(matrix): _raise_not_a_matrix() if is_hvector(matrix): return len(matrix) return matrix.shape[1] def count_rows(matrix): if not is_matrix(matrix): _raise_not_a_matrix() return 1 if is_hvector(matrix) else matrix.shape[0] def create_augmented(matA, matb): """ A new matrix is returned and inputs are not modified. """ if not is_vector(matb): raise ValueError("Matrix b must be a vector.") return ( np.hstack((matA, matb)) if is_vvector(matb) else np.hstack((matA, reshape(matb, (-1, 1)))) ) def create_based_on_diagonal_terms(matrix): if not is_matrix(matrix): _raise_not_a_matrix() if not is_square(matrix): raise ValueError("Input matrix must be square.") diag = np.diag(matrix) size = matrix.shape[0] ret = create_zeros(size, size) np.fill_diagonal(ret, diag) return ret def create_based_on_l_component(matrix): if not is_square(matrix): raise ValueError("Input matrix must be square") return np.tril(matrix) def create_based_on_non_diagonal_terms(matrix): if not is_matrix(matrix): _raise_not_a_matrix() if not is_square(matrix): raise ValueError("Input matrix must be square") ret = deepcopy(matrix) np.fill_diagonal(ret, 0.0) return ret def create_based_on_u_component(matrix, diag_zero: bool = True): if not is_square(matrix): raise ValueError("Input matrix must be square") offset = 1 if diag_zero else 0 return np.triu(matrix, offset) def _create_fill_with_constant(const_val, rows: int = None, columns: int = None): if not rows or rows < 1: rows = 1 if columns and columns < 1: raise ValueError("Columns is {}, but expected at least 1.".format(columns)) inp = (rows, columns) if columns is not None and columns > 0 else rows if const_val == 1: return np.ones(inp) else: return np.zeros(inp) def create_identity(size): if not isinstance(size, int): raise TypeError("SIZE expected to be type in, but here is %s" % type(size)) if size < 1: raise ValueError("SIZE expected to be greater than 0, here is %i" % size) return np.identity(size) def create_inverted(matrix): if not is_matrix(matrix): _raise_not_a_matrix() if not is_square(matrix) or is_singular(matrix): raise ValueError("Only square, non-singular matrices have an inverse") # It seems that we don't need to care about creating diagonal only # inverse (jacobi) and an entire matrix inverse. This function seems # to handle them both. ret = np.linalg.inv(matrix) return ret def create_ones(rows: int = None, columns: int = None): return _create_fill_with_constant(1, rows, columns) def create_random(size, single_column=False): if size < 2 or size > 500: raise ValueError("size input must be between 2..500, here is %i" % size) columns = 1 if single_column else size b = 20 a = -b rng = np.random.default_rng() # from np.random.Generator.random docs.. 
# To sample from interval [a, b), b > a; multiply random by (b-a) and add a return (b - a) * rng.random((size, columns)) + a def create_random_diagonal_dominate(size): ABRITARY_SCALAR = 2 mat = create_random(size) sums = [np.sum(np.fabs(mat[r, :])) * ABRITARY_SCALAR for r in range(size)] for i in range(size): mat[i, i] = sums[i] return mat def create_zeros(rows: int = None, columns: int = None): return _create_fill_with_constant(0, rows, columns) def deepcopy(inp, do_it=True): """ do_it: (True) actually perform deepcopy; otherwise, return input. For cases when some other higher function might have already performed a deepcopy. """ if do_it: return pydeepcopy(inp) return inp def is_in_crout_l_form(matrix): if not is_square(matrix): return False # For every row, does every column to the right of diagonal have a value of 0.0? size = count_rows(matrix) temp = matrix[np.triu_indices(size, k=1)] zeros = create_zeros(temp.size) return np.allclose(temp, zeros, atol=0.00001) def is_in_reduced_row_echelon(matrix): rows, columns = matrix.shape if rows > columns: return False expect = np.ones((rows,), dtype=float) if not np.allclose(matrix.diagonal(), expect): return False for r in range(rows): if r == 0: continue indices = np.arange(r) row = np.take(matrix[r, :], indices) expect = np.zeros(indices.shape[0], dtype=float) if not np.allclose(row, expect): return False return True def is_augmented(matrix): rows, cols = matrix.shape return cols == rows + 1 def is_hvector(matrix): if not is_matrix(matrix): return False if len(matrix.shape) == 1: rows = 1 cols = matrix.shape[0] else: rows, cols = matrix.shape return rows == 1 and cols >= rows def is_matrix(matrix): # Have to confirm if matrix by checking shape length because "None" can # be considered a Matrix (for some reason) if isinstance(matrix, np.ndarray) and len(matrix.shape) > 0: return True return False def is_singular(matrix, allow_augmented=True): is_aug = is_augmented(matrix) is_sqr = is_square(matrix) if not (is_sqr ^ is_aug): return False temp = matrix[:, : matrix.shape[1] - 1] if is_aug else matrix sign, logdet = np.linalg.slogdet(temp) # print('sign={};logdet={}'.format(sign, logdet)) return sign == 0 and logdet == -np.inf def is_square(matrix): if not is_matrix(matrix): return False if is_vector(matrix) and len(matrix.shape) < 2: return matrix.shape[0] == 1 rows, cols = matrix.shape return rows > 0 and rows == cols def is_vector(matrix): return is_hvector(matrix) or is_vvector(matrix) def is_vvector(matrix): if not is_matrix(matrix): return False if is_hvector(matrix): return matrix.shape[0] == 1 rows, cols = matrix.shape return cols == 1 and rows >= cols def load_files(directory, do_reshape=None, omega_as_matrix: bool = False): """ Load data from text files ine given directory Parameters: directory (str): The path, relative to the current directory, for which to seach for files do_reshape (bool): False, do not perform implicit reshaping based on file name; otherwise, automatic reshaping is performed. 
Returns: (Matrix A, Matrix b, Matrix Soln) """ def _call_load(fp): return np.loadtxt(fp, dtype=float, delimiter=" ") ret = {} do_reshape = True if do_reshape is None else do_reshape with scandir(directory) as files: for file in files: fname, _ = path.splitext(path.basename(file)) with open(file, "r") as fp: if fname == "A": obj = _call_load(fp) fname = "mat" + fname elif fname == "b": obj = _call_load(fp) if do_reshape: obj = reshape(obj, (-1, 1)) fname = "mat" + fname elif fname == "omega": obj = _call_load(fp) if not omega_as_matrix: obj = obj.item(0) elif fname == "soln": obj = _call_load(fp) if do_reshape: obj = reshape(obj, (-1, 1)) fname = "mat" + fname ret[fname] = obj return ret def multiply(a, b): if not is_matrix(a): _raise_not_a_matrix() if is_matrix(b): return np.matmul(a, b) elif isinstance(b, (int, float)): return a * b else: raise TypeError( "Argument 2 expected to be matrix or scalar, but here is %s" % type(b).__name__ ) def multiply_row_by_scalar(matrix, row, scalar, inplace=True): """ inplace: (True) input matrix is modified as part of the operation; otherwise, input matrix is copied. """ if row >= matrix.shape[0]: return matrix ret = deepcopy(matrix, (not inplace)) # print('multiplying row %i by %.6f' % (row, scalar)) ret[row, :] = (ret[row, :] * scalar) + 0.0 return ret def reshape(matrix, newshape=None): if not is_matrix(matrix): _raise_not_a_matrix() if newshape is None: newshape = -1 elif isinstance(newshape, int): if not (newshape > 0 or newshape == -1): raise ValueError("Argument newshape value must be -1 or greater than 0.") # If newshape is just 1, caller only wants a single row, spare them the pain # and just give them one row. if newshape == 1: newshape = -1 elif isinstance(newshape, tuple): if not all([isinstance(v, int) for v in newshape]): raise TypeError("Argument newshape must be of type int or tuple of ints.") if not all([v > 0 or v == -1 for v in newshape]): raise ValueError("Argument newshape values must be -1 or greater than 0.") else: raise TypeError("Argument newshape must be of type int or tuple of ints.") return matrix.reshape(newshape) def set_rows_below_to_zero(matrix, base_row, inplace=True): """ inplace: (True) input matrix is modified as part of the operation; otherwise, input matrix is copied. """ if base_row + 1 >= matrix.shape[0]: return matrix ret = deepcopy(matrix, (not inplace)) # print('set rows below %i to 0 [in]' % base_row) # print(ret) for row in range(base_row + 1, ret.shape[0]): ret = subtract_scalar_row_from_row(ret, base_row, row) # print('set rows below %i to 0 [out]' % base_row) # print(ret) return ret def set_row_diagonal_to_one(matrix, row, inplace=True): """ inplace: (True) input matrix is modified as part of the operation; otherwise, input matrix is copied. """ if row >= matrix.shape[0]: return matrix ret = deepcopy(matrix, (not inplace)) # print('set row %i diagonal to 1 [in]' % row) # print(ret) diag = ret[row, row] ret = multiply_row_by_scalar(ret, row, (1.0 / diag)) if ret[row, row] < 0: ret = multiply_row_by_scalar(ret, row, -1.0) # print('set row %i diagonal to 1 [out]' % row) # print(ret) return ret def subtract(mat1, mat2): return np.subtract(mat1, mat2) def subtract_scalar_row_from_row(matrix, src_row, aff_row, inplace=True): """ inplace: (True) input matrix is modified as part of the operation; otherwise, input matrix is copied. 
""" if aff_row >= matrix.shape[0]: return matrix ret = deepcopy(matrix, (not inplace)) # print('subtract scalar row of %i from row %i [in]' % (src_row, aff_row)) # print(ret) # what will it take to make the row leading value 0? # 1. take the leading value, # 2. multiply source row by leading value to get scalar row # 3. subtract scalar row from current row, # 4. multiply source row by inverse of leading value. lead = ret[aff_row, src_row] ret[aff_row, src_row:] = ret[aff_row, src_row:] - (ret[src_row, src_row:] * lead) # print('subtract scalar row of %i from row %i [out]' % (src_row, aff_row)) # print(ret) return ret def swap_largest_pivot_to_top(matrix, pivot, inplace=True): """ inplace: (True) input matrix is modified as part of the operation; otherwise, input matrix is copied. """ if pivot >= matrix.shape[0]: return matrix ret = deepcopy(matrix, (not inplace)) # print('swap rows [in]') # print(ret) cols = np.fabs(np.reshape(ret[pivot:, pivot], (-1, 1))) big, _ = np.where(cols == np.max(cols)) swap = int(big) + pivot if swap > pivot: # print('swaping row %i with %i' % (pivot, swap)) ret[[pivot, swap]] = ret[[swap, pivot]] # print('swap rows [out]') # print(ret) return ret def to_reduced_row_echelon(matrix): """ This implementation does not yield a true reduced row echelon form matrix. To get the actual result vector, you'll have to calculate using back solving. """ if is_in_reduced_row_echelon(matrix): return matrix # deepcopy because we don't want to muck with the original ret = deepcopy(matrix) for r in range(matrix.shape[0]): ret = swap_largest_pivot_to_top(ret, r) ret = set_row_diagonal_to_one(ret, r) ret = set_rows_below_to_zero(ret, r) if not is_in_reduced_row_echelon(ret): # print(ret) raise RuntimeError("Matrix not in reduced row echelon form when expected.") return ret def two_norm_of_error(matA, matb, matx): if matA is None or matb is None or matx is None: raise ValueError( "Either input MatrixA, MatrixB, or solution matrix MatrixX is None." ) x = matx if is_vvector(matx) else reshape(matx, (-1, 1)) ret = np.linalg.norm(subtract(multiply(matA, x), matb)) return ret def percent_error(vec_actual, vec_expected): top = np.sum(np.fabs(subtract(vec_expected, vec_actual))) bot = np.sum(vec_expected) try: with
np.errstate(divide="ignore")
numpy.errstate
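# --- Usage sketch (added illustration, not part of the module above) ---
# Assumes the module above is importable as `matrix_utils` (hypothetical name); it exercises
# the helpers defined above on a small linear system A x = b.
import numpy as np
import matrix_utils as mu

A = mu.create_random_diagonal_dominate(4)         # square, diagonally dominant -> non-singular
x_true = mu.create_random(4, single_column=True)  # 4x1 column vector
b = mu.multiply(A, x_true)

aug = mu.create_augmented(A, b)                   # build [A | b]
rre = mu.to_reduced_row_echelon(aug)              # forward elimination (back solving still needed)

x_ref = np.linalg.solve(A, b)                     # reference solution for comparison
print(mu.two_norm_of_error(A, b, x_ref))          # residual ~0 for the exact solution
print(mu.percent_error(x_ref, x_true))            # percent error ~0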
import numpy as np import pytest from pytest import approx from beliefs.inference.belief_propagation import BeliefPropagation, ConflictingEvidenceError from beliefs.factors.cpd import TabularCPD from beliefs.models.belief_update_node_model import ( BeliefUpdateNodeModel, BernoulliOrNode, BernoulliAndNode, Node ) @pytest.fixture(scope='module') def edges_five_nodes(): """Edges define a polytree with 5 nodes (connected in an X-shape with the node, 'x', at the center of the X.""" edges = [('u', 'x'), ('v', 'x'), ('x', 'y'), ('x', 'z')] return edges @pytest.fixture(scope='module') def simple_edges(): """Edges define a polytree with 15 nodes.""" edges = [('1', '3'), ('2', '3'), ('3', '5'), ('4', '5'), ('5', '10'), ('5', '9'), ('6', '8'), ('7', '8'), ('8', '9'), ('9', '11'), ('9', 'x'), ('14', 'x'), ('x', '12'), ('x', '13')] return edges @pytest.fixture(scope='module') def many_parents_edges(): """Node 62 has 18 parents and no children.""" edges = [('96', '62'), ('80', '62'), ('98', '62'), ('100', '62'), ('86', '62'), ('102', '62'), ('104', '62'), ('64', '62'), ('106', '62'), ('108', '62'), ('110', '62'), ('112', '62'), ('114', '62'), ('116', '62'), ('118', '62'), ('122', '62'), ('70', '62'), ('94', '62')] return edges @pytest.fixture(scope='function') def five_node_model(edges_five_nodes): return BeliefUpdateNodeModel.init_from_edges(edges_five_nodes, BernoulliOrNode) @pytest.fixture(scope='function') def simple_model(simple_edges): return BeliefUpdateNodeModel.init_from_edges(simple_edges, BernoulliOrNode) @pytest.fixture(scope='function') def many_parents_model(many_parents_edges): return BeliefUpdateNodeModel.init_from_edges(many_parents_edges, BernoulliOrNode) @pytest.fixture(scope='function') def many_parents_and_model(many_parents_edges): return BeliefUpdateNodeModel.init_from_edges(many_parents_edges, BernoulliAndNode) @pytest.fixture(scope='function') def one_node_model(): a_node = BernoulliOrNode(label_id='x', children=[], parents=[]) return BeliefUpdateNodeModel(nodes_dict={'x': a_node}) @pytest.fixture(scope='function') def five_node_and_model(edges_five_nodes): return BeliefUpdateNodeModel.init_from_edges(edges_five_nodes, BernoulliAndNode) @pytest.fixture(scope='function') def mixed_cpd_model(edges_five_nodes): """ X-shaped 5 node model plus one more node, 'w', with edge from 'w' to 'z'. 'z' is an AND node while all other nodes are OR nodes. """ u_node = BernoulliOrNode(label_id='u', children=['x'], parents=[]) v_node = BernoulliOrNode(label_id='v', children=['x'], parents=[]) x_node = BernoulliOrNode(label_id='x', children=['y', 'z'], parents=['u', 'v']) y_node = BernoulliOrNode(label_id='y', children=[], parents=['x']) z_node = BernoulliAndNode(label_id='z', children=[], parents=['x', 'w']) w_node = BernoulliOrNode(label_id='w', children=['z'], parents=[]) return BeliefUpdateNodeModel(nodes_dict={'u': u_node, 'v': v_node, 'x': x_node, 'y': y_node, 'z': z_node, 'w': w_node}) @pytest.fixture(scope='function') def custom_cpd_model(): """ Y-shaped model, with parents ,'u' and 'v' as Or-nodes, 'x' a node with cardinality 3 and custom CPD, 'y' a node with cardinality 2 and custom CPD. 
""" custom_cpd_x = TabularCPD(variable='x', variable_card=3, parents=['u', 'v'], parents_card=[2, 2], values=[[0.2, 0, 0.3, 0.1], [0.4, 1, 0.7, 0.2], [0.4, 0, 0, 0.7]], state_names={'x': ['lo', 'med', 'hi'], 'u': ['False', 'True'], 'v': ['False', 'True']}) custom_cpd_y = TabularCPD(variable='y', variable_card=2, parents=['x'], parents_card=[3], values=[[0.3, 0.1, 0], [0.7, 0.9, 1]], state_names={'x': ['lo', 'med', 'hi'], 'y': ['False', 'True']}) u_node = BernoulliOrNode(label_id='u', children=['x'], parents=[]) v_node = BernoulliOrNode(label_id='v', children=['x'], parents=[]) x_node = Node(children=['y'], cpd=custom_cpd_x) y_node = Node(children=[], cpd=custom_cpd_y) return BeliefUpdateNodeModel(nodes_dict={'u': u_node, 'v': v_node, 'x': x_node, 'y': y_node}) def get_label_mapped_to_positive_belief(query_result): """Return a dictionary mapping each label_id to the probability of the label being True.""" return {label_id: belief[1] for label_id, belief in query_result.items()} def compare_dictionaries(expected, observed): for key, expected_value in expected.items(): observed_value = observed.get(key) if observed_value is None: raise KeyError("Expected key {} not in observed.") assert observed_value == approx(expected_value), \ "Expected {} but got {}".format(expected_value, observed_value) #============================================================================================== # Tests of single Bernoulli node model def test_no_evidence_one_node_model(one_node_model): expected = {'x': 0.5} infer = BeliefPropagation(one_node_model) query_result = infer.query(evidence={}) result = get_label_mapped_to_positive_belief(query_result) compare_dictionaries(expected, result) def test_virtual_evidence_one_node_model(one_node_model): """Curator thinks YES is 10x more likely than NO based on virtual evidence.""" expected = {'x': 5/(0.5+5)} infer = BeliefPropagation(one_node_model) query_result = infer.query(evidence={'x': np.array([1, 10])}) result = get_label_mapped_to_positive_belief(query_result) compare_dictionaries(expected, result) def test_MAYBE_default_evidence_one_node_model(one_node_model): expected = {'x': 0.5} infer = BeliefPropagation(one_node_model) query_result = infer.query(evidence={'x': np.array([0.5, 0.5])}) result = get_label_mapped_to_positive_belief(query_result) compare_dictionaries(expected, result) def test_YES_evidence_one_node_model(one_node_model): expected = {'x': 1} infer = BeliefPropagation(one_node_model) query_result = infer.query(evidence={'x': np.array([0, 1])}) result = get_label_mapped_to_positive_belief(query_result) compare_dictionaries(expected, result) def test_NO_evidence_one_node_model(one_node_model): expected = {'x': 0} infer = BeliefPropagation(one_node_model) query_result = infer.query(evidence={'x': np.array([1, 0])}) result = get_label_mapped_to_positive_belief(query_result) compare_dictionaries(expected, result) #============================================================================================== # Tests of 5-node, 4-edge model def test_no_evidence_five_node_model(five_node_model): expected = {'x': 1-0.5**2} infer = BeliefPropagation(five_node_model) query_result = infer.query(evidence={}) result = get_label_mapped_to_positive_belief(query_result) compare_dictionaries(expected, result) def test_virtual_evidence_for_node_x_five_node_model(five_node_model): """Virtual evidence for node x.""" expected = {'x': 0.967741935483871, 'y': 0.967741935483871, 'z': 0.967741935483871, 'u': 0.6451612903225806, 'v': 0.6451612903225806} infer = 
BeliefPropagation(five_node_model) query_result = infer.query(evidence={'x': np.array([1, 10])}) result = get_label_mapped_to_positive_belief(query_result) compare_dictionaries(expected, result) #============================================================================================== # Tests of 5-node, 4-edge model with AND cpds def test_no_evidence_five_node_and_model(five_node_and_model): expected = {'x': 0.5**2} infer = BeliefPropagation(five_node_and_model) query_result = infer.query(evidence={}) result = get_label_mapped_to_positive_belief(query_result) compare_dictionaries(expected, result) def test_one_parent_false_five_node_and_model(five_node_and_model): expected = {'x': 0} infer = BeliefPropagation(five_node_and_model) query_result = infer.query(evidence={'u': np.array([1,0])}) result = get_label_mapped_to_positive_belief(query_result) compare_dictionaries(expected, result) def test_one_parent_true_five_node_and_model(five_node_and_model): expected = {'x': 0.5} infer = BeliefPropagation(five_node_and_model) query_result = infer.query(evidence={'u': np.array([0,1])}) result = get_label_mapped_to_positive_belief(query_result) compare_dictionaries(expected, result) def test_both_parents_true_five_node_and_model(five_node_and_model): expected = {'x': 1, 'y': 1, 'z': 1} infer = BeliefPropagation(five_node_and_model) query_result = infer.query(evidence={'u': np.array([0,1]), 'v': np.array([0,1])}) result = get_label_mapped_to_positive_belief(query_result) compare_dictionaries(expected, result) #============================================================================================== # Tests of mixed cpd model (all CPDs are OR, except for one AND node with 2 parents) def test_no_evidence_mixed_cpd_model(mixed_cpd_model): expected = {'x': 1-0.5**2, 'z': 0.5*(1-0.5**2)} infer = BeliefPropagation(mixed_cpd_model) query_result = infer.query(evidence={}) result = get_label_mapped_to_positive_belief(query_result) compare_dictionaries(expected, result) def test_x_false_w_true_mixed_cpd_model(mixed_cpd_model): expected = {'u': 0, 'v': 0, 'y': 0, 'z': 0} infer = BeliefPropagation(mixed_cpd_model) query_result = infer.query(evidence={'x':
np.array([1,0])
numpy.array
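# --- Usage sketch (added illustration; mirrors the fixtures above, not an extra test) ---
# Builds the X-shaped five-node model and queries it with one piece of hard evidence and
# one piece of virtual evidence, using the same calls as the tests above.
import numpy as np
from beliefs.inference.belief_propagation import BeliefPropagation
from beliefs.models.belief_update_node_model import BeliefUpdateNodeModel, BernoulliOrNode

edges = [('u', 'x'), ('v', 'x'), ('x', 'y'), ('x', 'z')]
model = BeliefUpdateNodeModel.init_from_edges(edges, BernoulliOrNode)
infer = BeliefPropagation(model)

# Hard evidence: 'u' observed True  -> [P(False), P(True)] = [0, 1].
# Virtual evidence: 'v' judged 10x more likely True -> unnormalized likelihood [1, 10].
query_result = infer.query(evidence={'u': np.array([0, 1]), 'v': np.array([1, 10])})
print({label: belief[1] for label, belief in query_result.items()})  # P(True) per node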
""" Module: LMR_verify_gridPRCP.py Purpose: Generates spatial verification statistics of LMR gridded precipitation against various gridded historical instrumental precipitation datasets and precipitation from reanalyses. Originator: <NAME>, U. of Washington, March 2016 Revisions: """ import matplotlib # need to do this backend when running remotely or to suppress figures interactively matplotlib.use('Agg') # generic imports import numpy as np import glob, os, sys, calendar from datetime import datetime, timedelta from netCDF4 import Dataset, date2num, num2date import mpl_toolkits.basemap as bm import matplotlib.pyplot as plt from matplotlib import ticker from spharm import Spharmt, getspecindx, regrid # LMR specific imports sys.path.append('../') from LMR_utils import global_hemispheric_means, assimilated_proxies, coefficient_efficiency from load_gridded_data import read_gridded_data_CMIP5_model from LMR_plot_support import * # change default value of latlon kwarg to True. bm.latlon_default = True ################################## # START: set user parameters here ################################## # option to suppress figures iplot = True iplot_individual_years = False # centered time mean (nya must be odd! 3 = 3 yr mean; 5 = 5 year mean; etc 0 = none) nya = 0 # option to print figures fsave = True #fsave = False # set paths, the filename for plots, and global plotting preferences # override datadir #datadir_output = './data/' #datadir_output = '/home/disk/kalman2/wperkins/LMR_output/archive' datadir_output = '/home/disk/kalman3/rtardif/LMR/output' #datadir_output = '/home/disk/ekman4/rtardif/LMR/output' #datadir_output = '/home/disk/kalman3/hakim/LMR' # Directories where precip and reanalysis data can be found datadir_precip = '/home/disk/kalman3/rtardif/LMR/data/verification' datadir_reanl = '/home/disk/kalman3/rtardif/LMR/data/model' # file specification # # current datasets # --- #nexp = 'production_gis_ccsm4_pagesall_0.75' #nexp = 'production_mlost_ccsm4_pagesall_0.75' #nexp = 'production_cru_ccsm4_pagesall_0.75' #nexp = 'production_mlost_era20c_pagesall_0.75' #nexp = 'production_mlost_era20cm_pagesall_0.75' # --- nexp = 'test' # --- # perform verification using all recon. MC realizations ( MCset = None ) # or over a custom selection ( MCset = (begin,end) ) # ex. MCset = (0,0) -> only the first MC run # MCset = (0,10) -> the first 11 MC runs (from 0 to 10 inclusively) # MCset = (80,100) -> the 80th to 100th MC runs (21 realizations) MCset = None #MCset = (0,10) # Definition of variables to verify # kind name variable long name bounds units mult. factor verif_dict = \ { 'pr_sfc_Amon' : ('anom', 'PRCP', 'Precipitation',-400.0,400.0,'(mm/yr)',1.0), \ } # time range for verification (in years CE) #trange = [1979,2000] #works for nya = 0 trange = [1880,2000] #works for nya = 0 #trange = [1900,2000] #works for nya = 0 #trange = [1885,1995] #works for nya = 5 #trange = [1890,1990] #works for nya = 10 # reference period over which mean is calculated & subtracted # from all datasets (in years CE) # NOTE: GPCP and CMAP data cover the 1979-2015 period ref_period = [1979, 1999] valid_frac = 0.0 # number of contours for plots nlevs = 21 # plot alpha transparency alpha = 0.5 # set the default size of the figure in inches. 
['figure.figsize'] = width, height; # aspect ratio appears preserved on smallest of the two plt.rcParams['figure.figsize'] = 10, 10 # that's default image size for this interactive session plt.rcParams['axes.linewidth'] = 2.0 # set the value globally plt.rcParams['font.weight'] = 'bold' # set the font weight globally plt.rcParams['font.size'] = 11 # set the font size globally #plt.rc('text', usetex=True) plt.rc('text', usetex=False) ################################## # END: set user parameters here ################################## verif_vars = list(verif_dict.keys()) workdir = datadir_output + '/' + nexp print('working directory = ' + workdir) print('\n getting file system information...\n') # get number of mc realizations from directory count # RT: modified way to determine list of directories with mc realizations # get a listing of the iteration directories dirs = glob.glob(workdir+"/r*") # selecting the MC iterations to keep if MCset: dirset = dirs[MCset[0]:MCset[1]+1] else: dirset = dirs mcdir = [item.split('/')[-1] for item in dirset] niters = len(mcdir) print('mcdir:' + str(mcdir)) print('niters = ' + str(niters)) # Loop over verif. variables for var in verif_vars: # read ensemble mean data print('\n reading LMR ensemble-mean data...\n') first = True k = -1 for dir in mcdir: k = k + 1 ensfiln = workdir + '/' + dir + '/ensemble_mean_'+var+'.npz' npzfile = np.load(ensfiln) print(dir, ':', npzfile.files) tmp = npzfile['xam'] print('shape of tmp: ' + str(np.shape(tmp))) if first: first = False recon_times = npzfile['years'] LMR_time = np.array(list(map(int,recon_times))) lat = npzfile['lat'] lon = npzfile['lon'] nlat = npzfile['nlat'] nlon = npzfile['nlon'] lat2 = np.reshape(lat,(nlat,nlon)) lon2 = np.reshape(lon,(nlat,nlon)) years = npzfile['years'] nyrs = len(years) xam = np.zeros([nyrs,np.shape(tmp)[1],np.shape(tmp)[2]]) xam_all = np.zeros([niters,nyrs,np.shape(tmp)[1],np.shape(tmp)[2]]) xam = xam + tmp xam_all[k,:,:,:] = tmp # this is the sample mean computed with low-memory accumulation xam = xam/len(mcdir) # this is the sample mean computed with numpy on all data xam_check = xam_all.mean(0) # check.. 
max_err = np.max(np.max(np.max(xam_check - xam))) if max_err > 1e-4: print('max error = ' + str(max_err)) raise Exception('sample mean does not match what is in the ensemble files!') # sample variance xam_var = xam_all.var(0) print(np.shape(xam_var)) print('\n shape of the ensemble array: ' + str(np.shape(xam_all)) +'\n') print('\n shape of the ensemble-mean array: ' + str(np.shape(xam)) +'\n') # Convert units to match verif dataset: from kg m-2 s-1 to mm (per year) rho = 1000.0 for y in range(nyrs): if calendar.isleap(int(years[y])): xam[y,:,:] = 1000.*xam[y,:,:]*366.*86400./rho else: xam[y,:,:] = 1000.*xam[y,:,:]*365.*86400./rho ################################################################# # BEGIN: load verification data # ################################################################# print('\nloading verification data...\n') # GPCP ---------------------------------------------------------- infile = datadir_precip+'/'+'GPCP/'+'GPCPv2.2_precip.mon.mean.nc' verif_data = Dataset(infile,'r') # Time time = verif_data.variables['time'] time_obj = num2date(time[:],units=time.units) time_yrs = np.asarray([time_obj[k].year for k in range(len(time_obj))]) yrs_range = list(set(time_yrs)) # lat/lon verif_lat = verif_data.variables['lat'][:] verif_lon = verif_data.variables['lon'][:] nlat_GPCP = len(verif_lat) nlon_GPCP = len(verif_lon) lon_GPCP, lat_GPCP = np.meshgrid(verif_lon, verif_lat) # Precip verif_precip_monthly = verif_data.variables['precip'][:] [ntime,nlon_v,nlat_v] = verif_precip_monthly.shape # convert mm/day monthly data to mm/year yearly data GPCP_time = np.zeros(shape=len(yrs_range),dtype=np.int) GPCP = np.zeros(shape=[len(yrs_range),nlat_GPCP,nlon_GPCP]) i = 0 for yr in yrs_range: GPCP_time[i] = int(yr) inds = np.where(time_yrs == yr)[0] if calendar.isleap(yr): nbdays = 366. else: nbdays = 365. accum = np.zeros(shape=[nlat_GPCP, nlon_GPCP]) for k in range(len(inds)): days_in_month = calendar.monthrange(time_obj[inds[k]].year, time_obj[inds[k]].month)[1] accum = accum + verif_precip_monthly[inds[k],:,:]*days_in_month GPCP[i,:,:] = accum # precip in mm i = i + 1 # CMAP ---------------------------------------------------------- infile = datadir_precip+'/'+'CMAP/'+'CMAP_enhanced_precip.mon.mean.nc' verif_data = Dataset(infile,'r') # Time time = verif_data.variables['time'] time_obj = num2date(time[:],units=time.units) time_yrs = np.asarray([time_obj[k].year for k in range(len(time_obj))]) yrs_range = list(set(time_yrs)) # lat/lon verif_lat = verif_data.variables['lat'][:] verif_lon = verif_data.variables['lon'][:] nlat_CMAP = len(verif_lat) nlon_CMAP = len(verif_lon) lon_CMAP, lat_CMAP = np.meshgrid(verif_lon, verif_lat) # Precip verif_precip_monthly = verif_data.variables['precip'][:] [ntime,nlon_v,nlat_v] = verif_precip_monthly.shape # convert mm/day monthly data to mm/year yearly data CMAP_time = np.zeros(shape=len(yrs_range),dtype=np.int) CMAP = np.zeros(shape=[len(yrs_range),nlat_CMAP,nlon_CMAP]) i = 0 for yr in yrs_range: CMAP_time[i] = int(yr) inds = np.where(time_yrs == yr)[0] if calendar.isleap(yr): nbdays = 366. else: nbdays = 365. 
accum = np.zeros(shape=[nlat_CMAP, nlon_CMAP]) for k in range(len(inds)): days_in_month = calendar.monthrange(time_obj[inds[k]].year, time_obj[inds[k]].month)[1] accum = accum + verif_precip_monthly[inds[k],:,:]*days_in_month CMAP[i,:,:] = accum # precip in mm i = i + 1 # ---------- # Reanalyses # ---------- # Define month sequence for the calendar year # (argument needed in upload of reanalysis data) annual = list(range(1,13)) # 20th Century reanalysis (TCR) --------------------------------- vardict = {var: verif_dict[var][0]} vardef = var datadir = datadir_reanl +'/20cr' datafile = vardef +'_20CR_185101-201112.nc' dd = read_gridded_data_CMIP5_model(datadir,datafile,vardict,outtimeavg=annual, anom_ref=ref_period) rtime = dd[vardef]['years'] TCR_time = np.array([d.year for d in rtime]) lats = dd[vardef]['lat'] lons = dd[vardef]['lon'] latshape = lats.shape lonshape = lons.shape if len(latshape) == 2 & len(lonshape) == 2: # stored in 2D arrays lat_TCR = np.unique(lats) lon_TCR = np.unique(lons) nlat_TCR, = lat_TCR.shape nlon_TCR, = lon_TCR.shape else: # stored in 1D arrays lon_TCR = lons lat_TCR = lats nlat_TCR = len(lat_TCR) nlon_TCR = len(lon_TCR) lon2_TCR, lat2_TCR = np.meshgrid(lon_TCR, lat_TCR) TCRfull = dd[vardef]['value'] + dd[vardef]['climo'] # Full field TCR = dd[vardef]['value'] # Anomalies # Conversion from kg m-2 s-1 rho = 1000.0 i = 0 for y in TCR_time: if calendar.isleap(y): TCRfull[i,:,:] = 1000.*TCRfull[i,:,:]*366.*86400./rho TCR[i,:,:] = 1000.*TCR[i,:,:]*366.*86400./rho else: TCRfull[i,:,:] = 1000.*TCRfull[i,:,:]*365.*86400./rho TCR[i,:,:] = 1000.*TCR[i,:,:]*365.*86400./rho i = i + 1 # ERA 20th Century reanalysis (ERA20C) --------------------------------- vardict = {var: verif_dict[var][0]} vardef = var datadir = datadir_reanl +'/era20c' datafile = vardef +'_ERA20C_190001-201012.nc' dd = read_gridded_data_CMIP5_model(datadir,datafile,vardict,outtimeavg=annual, anom_ref=ref_period) rtime = dd[vardef]['years'] ERA_time = np.array([d.year for d in rtime]) lats = dd[vardef]['lat'] lons = dd[vardef]['lon'] latshape = lats.shape lonshape = lons.shape if len(latshape) == 2 & len(lonshape) == 2: # stored in 2D arrays lat_ERA = np.unique(lats) lon_ERA = np.unique(lons) nlat_ERA, = lat_ERA.shape nlon_ERA, = lon_ERA.shape else: # stored in 1D arrays lon_ERA = lons lat_ERA = lats nlat_ERA = len(lat_ERA) nlon_ERA = len(lon_ERA) lon2_ERA, lat2_ERA = np.meshgrid(lon_ERA, lat_ERA) ERAfull = dd[vardef]['value'] + dd[vardef]['climo'] # Full field ERA = dd[vardef]['value'] # Anomalies # Conversion from kg m-2 s-1 rho = 1000.0 i = 0 for y in ERA_time: if calendar.isleap(y): ERAfull[i,:,:] = 1000.*ERAfull[i,:,:]*366.*86400./rho ERA[i,:,:] = 1000.*ERA[i,:,:]*366.*86400./rho else: ERAfull[i,:,:] = 1000.*ERAfull[i,:,:]*365.*86400./rho ERA[i,:,:] = 1000.*ERA[i,:,:]*365.*86400./rho i = i + 1 # Plots of precipitation climatologies --- # Climatology (annual accumulation) GPCP_climo = np.nanmean(GPCP, axis=0) CMAP_climo = np.nanmean(CMAP, axis=0) TCR_climo = np.nanmean(TCRfull, axis=0) ERA_climo = np.nanmean(ERAfull, axis=0) fig = plt.figure() ax = fig.add_subplot(2,2,1) fmin = 0; fmax = 4000; nflevs=41 LMR_plotter(GPCP_climo,lat_GPCP,lon_GPCP,'Reds',nflevs,vmin=fmin,vmax=fmax,extend='max') plt.title( 'GPCP '+'orig. grid'+' '+verif_dict[var][1]+' '+verif_dict[var][5]+' '+'climo.', fontweight='bold') plt.clim(fmin,fmax) ax = fig.add_subplot(2,2,2) fmin = 0; fmax = 4000; nflevs=41 LMR_plotter(CMAP_climo,lat_CMAP,lon_CMAP,'Reds',nflevs,vmin=fmin,vmax=fmax,extend='max') plt.title( 'CMAP '+'orig. 
grid'+' '+verif_dict[var][1]+' '+verif_dict[var][5]+' '+'climo.', fontweight='bold') plt.clim(fmin,fmax) ax = fig.add_subplot(2,2,3) fmin = 0; fmax = 4000; nflevs=41 LMR_plotter(TCR_climo,lat2_TCR,lon2_TCR,'Reds',nflevs,vmin=fmin,vmax=fmax,extend='max') plt.title( '20CR-V2 '+'orig. grid'+' '+verif_dict[var][1]+' '+verif_dict[var][5]+' '+'climo.', fontweight='bold') plt.clim(fmin,fmax) ax = fig.add_subplot(2,2,4) fmin = 0; fmax = 4000; nflevs=41 LMR_plotter(ERA_climo,lat2_ERA,lon2_ERA,'Reds',nflevs,vmin=fmin,vmax=fmax,extend='max') plt.title( 'ERA20C '+'orig. grid'+' '+verif_dict[var][1]+' '+verif_dict[var][5]+' '+'climo.', fontweight='bold') plt.clim(fmin,fmax) fig.tight_layout() plt.savefig('GPCP_CMAP_20CR_ERA_climo.png') plt.close() ############################################################### # END: load verification data # ############################################################### # ---------------------------------------------------------- # Adjust so that all anomaly data pertain to the mean over a # common user-defined reference period (e.g. 20th century) # ---------------------------------------------------------- print('Re-center on %s-%s period' % (str(ref_period[0]), str(ref_period[1]))) stime = ref_period[0] etime = ref_period[1] # LMR LMR = xam smatch, ematch = find_date_indices(LMR_time,stime,etime) LMR = LMR - np.mean(LMR[smatch:ematch,:,:],axis=0) # verif smatch, ematch = find_date_indices(GPCP_time,stime,etime) GPCP = GPCP - np.mean(GPCP[smatch:ematch,:,:],axis=0) smatch, ematch = find_date_indices(CMAP_time,stime,etime) CMAP = CMAP - np.mean(CMAP[smatch:ematch,:,:],axis=0) smatch, ematch = find_date_indices(TCR_time,stime,etime) TCR = TCR - np.mean(TCR[smatch:ematch,:,:],axis=0) smatch, ematch = find_date_indices(ERA_time,stime,etime) ERA = ERA - np.mean(ERA[smatch:ematch,:,:],axis=0) print('GPCP : Global: mean=', np.nanmean(GPCP), ' , std-dev=', np.nanstd(GPCP)) print('CMAP : Global: mean=', np.nanmean(CMAP), ' , std-dev=', np.nanstd(CMAP)) print('TCR : Global: mean=', np.nanmean(TCR), ' , std-dev=', np.nanstd(TCR)) print('ERA : Global: mean=', np.nanmean(ERA), ' , std-dev=', np.nanstd(ERA)) print('LMR : Global: mean=', np.nanmean(LMR), ' , std-dev=', np.nanstd(LMR)) # ----------------------------------- # Regridding the data for comparisons # ----------------------------------- print('\n regridding data to a common grid...\n') iplot_loc= False #iplot_loc= True # create instance of the spherical harmonics object for each grid specob_lmr = Spharmt(nlon,nlat,gridtype='regular',legfunc='computed') specob_gpcp = Spharmt(nlon_GPCP,nlat_GPCP,gridtype='regular',legfunc='computed') specob_cmap = Spharmt(nlon_CMAP,nlat_CMAP,gridtype='regular',legfunc='computed') specob_tcr = Spharmt(nlon_TCR,nlat_TCR,gridtype='regular',legfunc='computed') specob_era = Spharmt(nlon_ERA,nlat_ERA,gridtype='regular',legfunc='computed') # truncate to a lower resolution grid (common:21, 42, 62, 63, 85, 106, 255, 382, 799) ntrunc_new = 42 # T42 ifix = np.remainder(ntrunc_new,2.0).astype(int) nlat_new = ntrunc_new + ifix nlon_new = int(nlat_new*1.5) # lat, lon grid in the truncated space dlat = 90./((nlat_new-1)/2.) 
dlon = 360./nlon_new veclat = np.arange(-90.,90.+dlat,dlat) veclon = np.arange(0.,360.,dlon) blank = np.zeros([nlat_new,nlon_new]) lat2_new = (veclat + blank.T).T lon2_new = (veclon + blank) # create instance of the spherical harmonics object for the new grid specob_new = Spharmt(nlon_new,nlat_new,gridtype='regular',legfunc='computed') lmr_trunc = np.zeros([nyrs,nlat_new,nlon_new]) print('lmr_trunc shape: ' + str(np.shape(lmr_trunc))) # loop over years of interest and transform...specify trange at top of file iw = 0 if nya > 0: iw = (nya-1)/2 cyears = list(range(trange[0],trange[1])) lg_csave = np.zeros([len(cyears)]) lc_csave = np.zeros([len(cyears)]) lt_csave = np.zeros([len(cyears)]) le_csave = np.zeros([len(cyears)]) gc_csave = np.zeros([len(cyears)]) gt_csave = np.zeros([len(cyears)]) ge_csave = np.zeros([len(cyears)]) te_csave = np.zeros([len(cyears)]) lmr_allyears = np.zeros([len(cyears),nlat_new,nlon_new]) gpcp_allyears = np.zeros([len(cyears),nlat_new,nlon_new]) cmap_allyears = np.zeros([len(cyears),nlat_new,nlon_new]) tcr_allyears = np.zeros([len(cyears),nlat_new,nlon_new]) era_allyears = np.zeros([len(cyears),nlat_new,nlon_new]) lmr_zm = np.zeros([len(cyears),nlat_new]) gpcp_zm = np.zeros([len(cyears),nlat_new]) cmap_zm = np.zeros([len(cyears),nlat_new]) tcr_zm = np.zeros([len(cyears),nlat_new]) era_zm = np.zeros([len(cyears),nlat_new]) k = -1 for yr in cyears: k = k + 1 LMR_smatch, LMR_ematch = find_date_indices(LMR_time,yr-iw,yr+iw+1) GPCP_smatch, GPCP_ematch = find_date_indices(GPCP_time,yr-iw,yr+iw+1) CMAP_smatch, CMAP_ematch = find_date_indices(CMAP_time,yr-iw,yr+iw+1) TCR_smatch, TCR_ematch = find_date_indices(TCR_time,yr-iw,yr+iw+1) ERA_smatch, ERA_ematch = find_date_indices(ERA_time,yr-iw,yr+iw+1) print('------------------------------------------------------------------------') print('working on year... 
%5s' %(str(yr))) print(' %5s LMR index = %5s : LMR year = %5s' %(str(yr), str(LMR_smatch), str(LMR_time[LMR_smatch]))) if GPCP_smatch: print(' %5s GPCP index = %5s : GPCP year = %5s' %(str(yr), str(GPCP_smatch), str(GPCP_time[GPCP_smatch]))) if CMAP_smatch: print(' %5s CMAP index = %5s : CMAP year = %5s' %(str(yr), str(CMAP_smatch), str(CMAP_time[CMAP_smatch]))) if TCR_smatch: print(' %5s TCP index = %5s : TCR year = %5s' %(str(yr), str(TCR_smatch), str(TCR_time[TCR_smatch]))) if ERA_smatch: print(' %5s ERA index = %5s : ERA year = %5s' %(str(yr), str(ERA_smatch), str(ERA_time[ERA_smatch]))) # LMR pdata_lmr = np.mean(LMR[LMR_smatch:LMR_ematch,:,:],0) lmr_trunc = regrid(specob_lmr, specob_new, pdata_lmr, ntrunc=nlat_new-1, smooth=None) # GPCP if GPCP_smatch and GPCP_ematch: pdata_gpcp = np.mean(GPCP[GPCP_smatch:GPCP_ematch,:,:],0) else: pdata_gpcp = np.zeros(shape=[nlat_GPCP,nlon_GPCP]) pdata_gpcp.fill(np.nan) # regrid on LMR grid if np.isnan(pdata_gpcp).all(): gpcp_trunc = np.zeros(shape=[nlat_new,nlon_new]) gpcp_trunc.fill(np.nan) else: gpcp_trunc = regrid(specob_gpcp, specob_new, pdata_gpcp, ntrunc=nlat_new-1, smooth=None) # CMAP if CMAP_smatch and CMAP_ematch: pdata_cmap = np.mean(CMAP[CMAP_smatch:CMAP_ematch,:,:],0) else: pdata_cmap = np.zeros(shape=[nlat_CMAP,nlon_CMAP]) pdata_cmap.fill(np.nan) # regrid on LMR grid if np.isnan(pdata_cmap).all(): cmap_trunc = np.zeros(shape=[nlat_new,nlon_new]) cmap_trunc.fill(np.nan) else: cmap_trunc = regrid(specob_cmap, specob_new, pdata_cmap, ntrunc=nlat_new-1, smooth=None) # TCR if TCR_smatch and TCR_ematch: pdata_tcr = np.mean(TCR[TCR_smatch:TCR_ematch,:,:],0) else: pdata_tcr = np.zeros(shape=[nlat_TCR,nlon_TCR]) pdata_tcr.fill(np.nan) # regrid on LMR grid if np.isnan(pdata_tcr).all(): tcr_trunc = np.zeros(shape=[nlat_new,nlon_new]) tcr_trunc.fill(np.nan) else: tcr_trunc = regrid(specob_tcr, specob_new, pdata_tcr, ntrunc=nlat_new-1, smooth=None) # ERA if ERA_smatch and ERA_ematch: pdata_era = np.mean(ERA[ERA_smatch:ERA_ematch,:,:],0) else: pdata_era = np.zeros(shape=[nlat_ERA,nlon_ERA]) pdata_era.fill(np.nan) # regrid on LMR grid if np.isnan(pdata_era).all(): era_trunc = np.zeros(shape=[nlat_new,nlon_new]) era_trunc.fill(np.nan) else: era_trunc = regrid(specob_era, specob_new, pdata_era, ntrunc=nlat_new-1, smooth=None) if iplot_individual_years: # Precipitation products comparison figures (annually-averaged anomaly fields) fmin = verif_dict[var][3]; fmax = verif_dict[var][4]; nflevs=41 fig = plt.figure() ax = fig.add_subplot(5,1,1) LMR_plotter(lmr_trunc*verif_dict[var][6],lat2_new,lon2_new,'bwr',nflevs,vmin=fmin,vmax=fmax,extend='both') plt.title('LMR '+'T'+str(nlat_new-ifix)+' '+verif_dict[var][1]+' anom. '+verif_dict[var][5]+' '+str(yr), fontweight='bold') plt.clim(fmin,fmax) ax = fig.add_subplot(5,1,2) LMR_plotter(gpcp_trunc*verif_dict[var][6],lat2_new,lon2_new,'bwr',nflevs,vmin=fmin,vmax=fmax,extend='both') plt.title('GPCP '+'T'+str(nlat_new-ifix)+' '+verif_dict[var][1]+' anom. '+verif_dict[var][5]+' '+str(yr), fontweight='bold') #LMR_plotter(pdata_gpcp*verif_dict[var][6],lat_GPCP,lon_GPCP,'bwr',nflevs,vmin=fmin,vmax=fmax,extend='both') #plt.title( 'GPCP '+'orig. grid'+' '+verif_dict[var][1]+' anom. '+verif_dict[var][5]+' '+str(yr), fontweight='bold') plt.clim(fmin,fmax) ax = fig.add_subplot(5,1,3) LMR_plotter(cmap_trunc*verif_dict[var][6],lat2_new,lon2_new,'bwr',nflevs,vmin=fmin,vmax=fmax,extend='both') plt.title('CMAP '+'T'+str(nlat_new-ifix)+' '+verif_dict[var][1]+' anom. 
'+verif_dict[var][5]+' '+str(yr), fontweight='bold') #LMR_plotter(pdata_cmap*verif_dict[var][6],lat_GPCP,lon_GPCP,'bwr',nflevs,vmin=fmin,vmax=fmax,extend='both') #plt.title( 'CMAP '+'orig. grid'+' '+verif_dict[var][1]+' anom. '+verif_dict[var][5]+' '+str(yr), fontweight='bold') plt.clim(fmin,fmax) ax = fig.add_subplot(5,1,4) LMR_plotter(tcr_trunc*verif_dict[var][6],lat2_new,lon2_new,'bwr',nflevs,vmin=fmin,vmax=fmax,extend='both') plt.title('20CR-V2 '+'T'+str(nlat_new-ifix)+' '+verif_dict[var][1]+' anom. '+verif_dict[var][5]+' '+str(yr), fontweight='bold') #LMR_plotter(pdata_tcr*verif_dict[var][6],lat_TCR,lon_TCR,'bwr',nflevs,vmin=fmin,vmax=fmax,extend='both') #plt.title( '20CR-V2 '+'orig. grid'+' '+verif_dict[var][1]+' anom. '+verif_dict[var][5]+' '+str(yr), fontweight='bold') plt.clim(fmin,fmax) ax = fig.add_subplot(5,1,5) LMR_plotter(era_trunc*verif_dict[var][6],lat2_new,lon2_new,'bwr',nflevs,vmin=fmin,vmax=fmax,extend='both') plt.title('ERA20C '+'T'+str(nlat_new-ifix)+' '+verif_dict[var][1]+' anom. '+verif_dict[var][5]+' '+str(yr), fontweight='bold') #LMR_plotter(pdata_era*verif_dict[var][6],lat_ERA,lon_ERA,'bwr',nflevs,vmin=fmin,vmax=fmax,extend='both') #plt.title( 'ERA20C '+'orig. grid'+' '+verif_dict[var][1]+' anom. '+verif_dict[var][5]+' '+str(yr), fontweight='bold') plt.clim(fmin,fmax) fig.tight_layout() plt.savefig(nexp+'_LMR_GPCP_CMAP_TCR_ERA_'+verif_dict[var][1]+'anom_'+str(yr)+'.png') plt.close() # save the full grids lmr_allyears[k,:,:] = lmr_trunc gpcp_allyears[k,:,:] = gpcp_trunc cmap_allyears[k,:,:] = cmap_trunc tcr_allyears[k,:,:] = tcr_trunc era_allyears[k,:,:] = era_trunc # ----------------------- # zonal-mean verification # ----------------------- # LMR lmr_zm[k,:] = np.mean(lmr_trunc,1) # GPCP fracok = np.sum(np.isfinite(gpcp_trunc),axis=1,dtype=np.float16)/float(nlon_GPCP) boolok = np.where(fracok >= valid_frac) boolnotok = np.where(fracok < valid_frac) for i in boolok: gpcp_zm[k,i] = np.nanmean(gpcp_trunc[i,:],axis=1) gpcp_zm[k,boolnotok] = np.NAN # CMAP fracok = np.sum(np.isfinite(cmap_trunc),axis=1,dtype=np.float16)/float(nlon_CMAP) boolok = np.where(fracok >= valid_frac) boolnotok = np.where(fracok < valid_frac) for i in boolok: cmap_zm[k,i] = np.nanmean(cmap_trunc[i,:],axis=1) cmap_zm[k,boolnotok] = np.NAN # TCR tcr_zm[k,:] = np.mean(tcr_trunc,1) # ERA era_zm[k,:] = np.mean(era_trunc,1) if iplot_loc: ncints = 30 cmap = 'bwr' nticks = 6 # number of ticks on the colorbar # set contours based on GPCP maxabs = np.nanmax(np.abs(gpcp_trunc)) # round the contour interval, and then set limits to fit dc = np.round(maxabs*2/ncints,2) cl = dc*ncints/2. 
cints = np.linspace(-cl,cl,ncints,endpoint=True) # compare LMR with GPCP, CMAP, TCR and ERA fig = plt.figure() ax = fig.add_subplot(3,2,1) m1 = bm.Basemap(projection='robin',lon_0=0) # maxabs = np.nanmax(np.abs(lmr_trunc)) cs = m1.contourf(lon2_new,lat2_new,lmr_trunc,cints,cmap=plt.get_cmap(cmap),vmin=-maxabs,vmax=maxabs) m1.drawcoastlines() cb = m1.colorbar(cs) tick_locator = ticker.MaxNLocator(nbins=nticks) cb.locator = tick_locator cb.ax.yaxis.set_major_locator(matplotlib.ticker.AutoLocator()) cb.update_ticks() ax.set_title('LMR '+verif_dict[var][1]+' '+str(ntrunc_new) + ' ' + str(yr)) ax = fig.add_subplot(3,2,3) m2 = bm.Basemap(projection='robin',lon_0=0) # maxabs = np.nanmax(np.abs(gpcp_trunc)) cs = m2.contourf(lon2_new,lat2_new,gpcp_trunc,cints,cmap=plt.get_cmap(cmap),vmin=-maxabs,vmax=maxabs) m2.drawcoastlines() cb = m1.colorbar(cs) tick_locator = ticker.MaxNLocator(nbins=nticks) cb.locator = tick_locator cb.ax.yaxis.set_major_locator(matplotlib.ticker.AutoLocator()) cb.update_ticks() ax.set_title('GPCP '+verif_dict[var][1]+' '+str(ntrunc_new) + ' ' + str(yr)) ax = fig.add_subplot(3,2,4) m3 = bm.Basemap(projection='robin',lon_0=0) # maxabs = np.nanmax(np.abs(cmap_trunc)) cs = m3.contourf(lon2_new,lat2_new,cmap_trunc,cints,cmap=plt.get_cmap(cmap),vmin=-maxabs,vmax=maxabs) m3.drawcoastlines() cb = m1.colorbar(cs) tick_locator = ticker.MaxNLocator(nbins=nticks) cb.locator = tick_locator cb.ax.yaxis.set_major_locator(matplotlib.ticker.AutoLocator()) cb.update_ticks() ax.set_title('CMAP '+verif_dict[var][1]+' '+str(ntrunc_new) + ' ' + str(yr)) ax = fig.add_subplot(3,2,5) m3 = bm.Basemap(projection='robin',lon_0=0) # maxabs = np.nanmax(np.abs(tcr_trunc)) cs = m3.contourf(lon2_new,lat2_new,tcr_trunc,cints,cmap=plt.get_cmap(cmap),vmin=-maxabs,vmax=maxabs) m3.drawcoastlines() cb = m1.colorbar(cs) tick_locator = ticker.MaxNLocator(nbins=nticks) cb.locator = tick_locator cb.ax.yaxis.set_major_locator(matplotlib.ticker.AutoLocator()) cb.update_ticks() ax.set_title('20CR-V2 '+verif_dict[var][1]+' '+str(ntrunc_new) + ' ' + str(yr)) ax = fig.add_subplot(3,2,6) m3 = bm.Basemap(projection='robin',lon_0=0) # maxabs = np.nanmax(np.abs(era_trunc)) cs = m3.contourf(lon2_new,lat2_new,era_trunc,cints,cmap=plt.get_cmap(cmap),vmin=-maxabs,vmax=maxabs) m3.drawcoastlines() cb = m1.colorbar(cs) tick_locator = ticker.MaxNLocator(nbins=nticks) cb.locator = tick_locator cb.ax.yaxis.set_major_locator(matplotlib.ticker.AutoLocator()) cb.update_ticks() ax.set_title('ERA20C '+verif_dict[var][1]+' '+str(ntrunc_new) + ' ' + str(yr)) plt.clim(-maxabs,maxabs) # get these numbers by adjusting the figure interactively!!! 
plt.subplots_adjust(left=0.05, bottom=0.45, right=0.95, top=0.95, wspace=0.1, hspace=0.0) # plt.tight_layout(pad=0.3) fig.suptitle(verif_dict[var][1] + ' for ' +str(nya) +' year centered average') # anomaly correlation lmrvec = np.reshape(lmr_trunc,(1,nlat_new*nlon_new)) gpcpvec = np.reshape(gpcp_trunc,(1,nlat_new*nlon_new)) cmapvec = np.reshape(cmap_trunc,(1,nlat_new*nlon_new)) tcrvec = np.reshape(tcr_trunc,(1,nlat_new*nlon_new)) eravec = np.reshape(era_trunc,(1,nlat_new*nlon_new)) # lmr <-> gpcp indok = np.isfinite(gpcpvec); nbok = np.sum(indok); nball = gpcpvec.shape[1] ratio = float(nbok)/float(nball) if ratio > valid_frac: lg_csave[k] = np.corrcoef(lmrvec[indok],gpcpvec[indok])[0,1] else: lg_csave[k] = np.nan print(' lmr-gpcp correlation : %s' % str(lg_csave[k])) # lmr <-> cmap indok = np.isfinite(cmapvec); nbok = np.sum(indok); nball = cmapvec.shape[1] ratio = float(nbok)/float(nball) if ratio > valid_frac: lc_csave[k] = np.corrcoef(lmrvec[indok],cmapvec[indok])[0,1] else: lc_csave[k] = np.nan print(' lmr-cmap correlation : %s' % str(lc_csave[k])) # lmr <-> tcr indok = np.isfinite(tcrvec); nbok = np.sum(indok); nball = tcrvec.shape[1] ratio = float(nbok)/float(nball) if ratio > valid_frac: lt_csave[k] = np.corrcoef(lmrvec[indok],tcrvec[indok])[0,1] else: lt_csave[k] = np.nan print(' lmr-tcr correlation : %s' % str(lt_csave[k])) # lmr <-> era indok = np.isfinite(eravec); nbok = np.sum(indok); nball = eravec.shape[1] ratio = float(nbok)/float(nball) if ratio > valid_frac: le_csave[k] = np.corrcoef(lmrvec[indok],eravec[indok])[0,1] else: le_csave[k] = np.nan print(' lmr-era correlation : %s' % str(le_csave[k])) # gpcp <-> cmap indok = np.isfinite(cmapvec); nbok = np.sum(indok); nball = cmapvec.shape[1] ratio = float(nbok)/float(nball) if ratio > valid_frac: gc_csave[k] = np.corrcoef(gpcpvec[indok],cmapvec[indok])[0,1] else: gc_csave[k] = np.nan print(' gpcp-cmap correlation : %s' % str(gc_csave[k])) # gpcp <-> tcr indok = np.isfinite(gpcpvec); nbok = np.sum(indok); nball = gpcpvec.shape[1] ratio = float(nbok)/float(nball) if ratio > valid_frac: gt_csave[k] = np.corrcoef(gpcpvec[indok],tcrvec[indok])[0,1] else: gt_csave[k] = np.nan print(' gpcp-tcr correlation : %s' % str(gt_csave[k])) # gpcp <-> era indok = np.isfinite(gpcpvec); nbok = np.sum(indok); nball = gpcpvec.shape[1] ratio = float(nbok)/float(nball) if ratio > valid_frac: ge_csave[k] = np.corrcoef(gpcpvec[indok],eravec[indok])[0,1] else: ge_csave[k] = np.nan print(' gpcp-era correlation : %s' % str(ge_csave[k])) # tcr <-> era indok = np.isfinite(eravec); nbok = np.sum(indok); nball = eravec.shape[1] ratio = float(nbok)/float(nball) if ratio > valid_frac: te_csave[k] = np.corrcoef(tcrvec[indok],eravec[indok])[0,1] else: te_csave[k] = np.nan print(' tcr-era correlation : %s' % str(te_csave[k])) # -- plots for anomaly correlation statistics -- # number of bins in the histograms nbins = 15 corr_range = [-0.6,1.0] bins = np.linspace(corr_range[0],corr_range[1],nbins) # LMR compared to GPCP, CMAP, TCR and ERA fig = plt.figure() # GPCP ax = fig.add_subplot(4,2,1) ax.plot(cyears,lg_csave,lw=2) ax.plot([trange[0],trange[-1]],[0,0],'k:') ax.set_title('LMR - GPCP') ax.set_xlim(trange[0],trange[-1]) ax.set_ylim(corr_range[0],corr_range[-1]) ax.set_ylabel('Correlation',fontweight='bold') # ax = fig.add_subplot(4,2,2) ax.hist(lg_csave[~np.isnan(lg_csave)],bins=bins,histtype='stepfilled',alpha=0.25) ax.set_title('LMR - GPCP') ax.set_xlim(corr_range[0],corr_range[-1]) ax.set_ylabel('Counts',fontweight='bold') xmin,xmax = ax.get_xlim() ymin,ymax 
= ax.get_ylim() ypos = ymax-0.15*(ymax-ymin) xpos = xmin+0.025*(xmax-xmin) ax.text(xpos,ypos,'Mean = %s' %"{:.2f}".format(np.nanmean(lg_csave)),fontsize=11,fontweight='bold') # CMAP ax = fig.add_subplot(4,2,3) ax.plot(cyears,lc_csave,lw=2) ax.plot([trange[0],trange[-1]],[0,0],'k:') ax.set_title('LMR - CMAP') ax.set_xlim(trange[0],trange[-1]) ax.set_ylim(corr_range[0],corr_range[-1]) ax.set_ylabel('Correlation',fontweight='bold') # ax = fig.add_subplot(4,2,4) ax.hist(lc_csave[~np.isnan(lc_csave)],bins=bins,histtype='stepfilled',alpha=0.25) ax.set_title('LMR - CMAP') ax.set_xlim(corr_range[0],corr_range[-1]) ax.set_ylabel('Counts',fontweight='bold') xmin,xmax = ax.get_xlim() ymin,ymax = ax.get_ylim() ypos = ymax-0.15*(ymax-ymin) xpos = xmin+0.025*(xmax-xmin) ax.text(xpos,ypos,'Mean = %s' %"{:.2f}".format(np.nanmean(lc_csave)),fontsize=11,fontweight='bold') # TCR ax = fig.add_subplot(4,2,5) ax.plot(cyears,lt_csave,lw=2) ax.plot([trange[0],trange[-1]],[0,0],'k:') ax.set_title('LMR - 20CR-V2') ax.set_xlim(trange[0],trange[-1]) ax.set_ylim(corr_range[0],corr_range[-1]) ax.set_ylabel('Correlation',fontweight='bold') # ax = fig.add_subplot(4,2,6) ax.hist(lt_csave[~np.isnan(lt_csave)],bins=bins,histtype='stepfilled',alpha=0.25) ax.set_title('LMR - 20CR-V2') ax.set_xlim(corr_range[0],corr_range[-1]) ax.set_ylabel('Counts',fontweight='bold') xmin,xmax = ax.get_xlim() ymin,ymax = ax.get_ylim() ypos = ymax-0.15*(ymax-ymin) xpos = xmin+0.025*(xmax-xmin) ax.text(xpos,ypos,'Mean = %s' %"{:.2f}".format(np.nanmean(lt_csave)),fontsize=11,fontweight='bold') # ERA ax = fig.add_subplot(4,2,7) ax.plot(cyears,le_csave,lw=2) ax.plot([trange[0],trange[-1]],[0,0],'k:') ax.set_title('LMR - ERA20C') ax.set_xlim(trange[0],trange[-1]) ax.set_ylim(corr_range[0],corr_range[-1]) ax.set_ylabel('Correlation',fontweight='bold') # ax = fig.add_subplot(4,2,8) ax.hist(le_csave[~np.isnan(le_csave)],bins=bins,histtype='stepfilled',alpha=0.25) ax.set_title('LMR - ERA20C') ax.set_xlim(corr_range[0],corr_range[-1]) ax.set_ylabel('Counts',fontweight='bold') xmin,xmax = ax.get_xlim() ymin,ymax = ax.get_ylim() ypos = ymax-0.15*(ymax-ymin) xpos = xmin+0.025*(xmax-xmin) ax.text(xpos,ypos,'Mean = %s' %"{:.2f}".format(np.nanmean(le_csave)),fontsize=11,fontweight='bold') fig.tight_layout() plt.subplots_adjust(left=0.1, bottom=0.25, right=0.95, top=0.93, wspace=0.5, hspace=0.5) fig.suptitle(verif_dict[var][2]+' anomaly correlation',fontweight='bold') if fsave: print('saving to .png') plt.savefig(nexp+'_verify_grid_'+verif_dict[var][1]+'_anomaly_correlation_LMR_'+str(trange[0])+'-'+str(trange[1])+'.png') plt.savefig(nexp+'_verify_grid_'+verif_dict[var][1]+'_anomaly_correlation_LMR_'+str(trange[0])+'-'+str(trange[1])+'.pdf', bbox_inches='tight', dpi=300, format='pdf') plt.close() # Reference : TCR & ERA compared to GPCP + ERA compared to TCR fig = plt.figure() # TCR <-> GPCP ax = fig.add_subplot(3,2,1) ax.plot(cyears,gt_csave,lw=2) ax.plot([trange[0],trange[-1]],[0,0],'k:') ax.set_title('20CR-V2 - GPCP') ax.set_xlim(trange[0],trange[-1]) ax.set_ylim(corr_range[0],corr_range[-1]) ax.set_ylabel('Correlation',fontweight='bold') ax.set_xlabel('Year CE',fontweight='bold') # ax = fig.add_subplot(3,2,2) ax.hist(gt_csave[~np.isnan(gt_csave)],bins=bins,histtype='stepfilled',alpha=0.25) ax.set_title('20CR-V2 - GPCP') ax.set_xlim(corr_range[0],corr_range[-1]) ax.set_ylabel('Counts',fontweight='bold') ax.set_xlabel('Correlation',fontweight='bold') xmin,xmax = ax.get_xlim() ymin,ymax = ax.get_ylim() ypos = ymax-0.15*(ymax-ymin) xpos = xmin+0.025*(xmax-xmin) 
ax.text(xpos,ypos,'Mean = %s' %"{:.2f}".format(np.nanmean(gt_csave)),fontsize=11,fontweight='bold') # ERA <-> GPCP ax = fig.add_subplot(3,2,3) ax.plot(cyears,ge_csave,lw=2) ax.plot([trange[0],trange[-1]],[0,0],'k:') ax.set_title('ERA20C - GPCP') ax.set_xlim(trange[0],trange[-1]) ax.set_ylim(corr_range[0],corr_range[-1]) ax.set_ylabel('Correlation',fontweight='bold') ax.set_xlabel('Year CE',fontweight='bold') # ax = fig.add_subplot(3,2,4) ax.hist(ge_csave[~np.isnan(ge_csave)],bins=bins,histtype='stepfilled',alpha=0.25) ax.set_title('ERA20C - GPCP') ax.set_xlim(corr_range[0],corr_range[-1]) ax.set_ylabel('Counts',fontweight='bold') ax.set_xlabel('Correlation',fontweight='bold') xmin,xmax = ax.get_xlim() ymin,ymax = ax.get_ylim() ypos = ymax-0.15*(ymax-ymin) xpos = xmin+0.025*(xmax-xmin) ax.text(xpos,ypos,'Mean = %s' %"{:.2f}".format(np.nanmean(ge_csave)),fontsize=11,fontweight='bold') # ERA <-> TCR ax = fig.add_subplot(3,2,5) ax.plot(cyears,te_csave,lw=2) ax.plot([trange[0],trange[-1]],[0,0],'k:') ax.set_title('ERA20C - 20CR-V2') ax.set_xlim(trange[0],trange[-1]) ax.set_ylim(corr_range[0],corr_range[-1]) ax.set_ylabel('Correlation',fontweight='bold') ax.set_xlabel('Year CE',fontweight='bold') # ax = fig.add_subplot(3,2,6) ax.hist(te_csave[~np.isnan(te_csave)],bins=bins,histtype='stepfilled',alpha=0.25) ax.set_title('ERA20C - GPCP') ax.set_xlim(corr_range[0],corr_range[-1]) ax.set_ylabel('Counts',fontweight='bold') ax.set_xlabel('Correlation',fontweight='bold') xmin,xmax = ax.get_xlim() ymin,ymax = ax.get_ylim() ypos = ymax-0.15*(ymax-ymin) xpos = xmin+0.025*(xmax-xmin) ax.text(xpos,ypos,'Mean = %s' %"{:.2f}".format(np.nanmean(te_csave)),fontsize=11,fontweight='bold') fig.tight_layout() plt.subplots_adjust(left=0.1, bottom=0.35, right=0.95, top=0.93, wspace=0.5, hspace=0.5) fig.suptitle(verif_dict[var][2]+' anomaly correlation',fontweight='bold') if fsave: print('saving to .png') plt.savefig(nexp+'_verify_grid_'+verif_dict[var][1]+'_anomaly_correlation_'+str(trange[0])+'-'+str(trange[1])+'_reference.png') plt.savefig(nexp+'_verify_grid_'+verif_dict[var][1]+'_anomaly_correlation_'+str(trange[0])+'-'+str(trange[1])+'_reference.pdf', bbox_inches='tight', dpi=300, format='pdf') plt.close() # # BEGIN bias, r and CE calculations # # correlation and CE at each (lat,lon) point lg_err = lmr_allyears - gpcp_allyears lc_err = lmr_allyears - cmap_allyears lr_err = lmr_allyears - tcr_allyears le_err = lmr_allyears - era_allyears gc_err = gpcp_allyears - cmap_allyears tg_err = tcr_allyears - gpcp_allyears eg_err = era_allyears - gpcp_allyears te_err = tcr_allyears - era_allyears r_lg = np.zeros([nlat_new,nlon_new]) ce_lg = np.zeros([nlat_new,nlon_new]) r_lc = np.zeros([nlat_new,nlon_new]) ce_lc = np.zeros([nlat_new,nlon_new]) r_lr = np.zeros([nlat_new,nlon_new]) ce_lr = np.zeros([nlat_new,nlon_new]) r_le = np.zeros([nlat_new,nlon_new]) ce_le = np.zeros([nlat_new,nlon_new]) r_gc = np.zeros([nlat_new,nlon_new]) ce_gc = np.zeros([nlat_new,nlon_new]) r_tg = np.zeros([nlat_new,nlon_new]) ce_tg = np.zeros([nlat_new,nlon_new]) r_eg = np.zeros([nlat_new,nlon_new]) ce_eg = np.zeros([nlat_new,nlon_new]) r_te = np.zeros([nlat_new,nlon_new]) ce_te = np.zeros([nlat_new,nlon_new]) # bias # ... 
# CE ce_lg = coefficient_efficiency(gpcp_allyears,lmr_allyears) ce_lc = coefficient_efficiency(cmap_allyears,lmr_allyears) ce_lr = coefficient_efficiency(tcr_allyears,lmr_allyears) ce_le = coefficient_efficiency(era_allyears,lmr_allyears) ce_gc = coefficient_efficiency(cmap_allyears,gpcp_allyears) ce_tg = coefficient_efficiency(gpcp_allyears,tcr_allyears) ce_eg = coefficient_efficiency(gpcp_allyears,era_allyears) ce_te = coefficient_efficiency(era_allyears,tcr_allyears) # Correlation for la in range(nlat_new): for lo in range(nlon_new): # LMR-GPCP indok =
np.isfinite(gpcp_allyears[:,la,lo])
numpy.isfinite
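The LMR verification script above scores the reconstruction against GPCP, CMAP, 20CR and ERA20C with spatial anomaly correlations (np.corrcoef over the finite grid points) and a coefficient of efficiency from LMR_utils. A minimal sketch of both measures, assuming coefficient_efficiency follows the usual Nash–Sutcliffe-style definition (one minus squared error over reference variance along the time axis); coefficient_efficiency_sketch and grid_anomaly_correlation are illustrative names, not the LMR functions.

import numpy as np

def coefficient_efficiency_sketch(ref, test):
    # CE per grid point: 1 - sum over time of (test - ref)^2, divided by the
    # reference variance about its own time mean (Nash-Sutcliffe style)
    err = np.nansum((test - ref) ** 2, axis=0)
    ref_var = np.nansum((ref - np.nanmean(ref, axis=0)) ** 2, axis=0)
    return 1.0 - err / ref_var

def grid_anomaly_correlation(field_a, field_b):
    # pattern correlation between two 2D anomaly fields, ignoring NaNs,
    # mirroring the np.corrcoef(lmrvec[indok], gpcpvec[indok]) step above
    a, b = field_a.ravel(), field_b.ravel()
    ok = np.isfinite(a) & np.isfinite(b)
    return np.corrcoef(a[ok], b[ok])[0, 1]

# toy data: 20 "years" on a 4 x 6 grid
rng = np.random.default_rng(0)
ref = rng.standard_normal((20, 4, 6))
test = ref + 0.3 * rng.standard_normal((20, 4, 6))
print(coefficient_efficiency_sketch(ref, test).shape)  # (4, 6), CE at each grid point
print(grid_anomaly_correlation(ref[0], test[0]))       # single-year pattern correlation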
import numpy def interpolate2d(x, y, Z, points, mode='linear', bounds_error=False): """2D interpolation routine: it outputs an array with same length as points with interpolated values. Parameters ---------- x : array x-coordinates of the mesh on which to interpolate y : array y-coordinates of the mesh on which to interpolate Z : array 2D array of values for each x, y pair points : array Nx2 array of coordinates where interpolated values are sought mode : str, optional Determines the interpolation order, by default 'linear' bounds_error : bool, optional Boolean flag. If True an exception will be raised when interpolated values are requested outside the domain of the input data, by default False :Authors: Modified from the SAFE library: https://github.com/inasafe/python-safe/safe/engine/interpolation2d.py """ # Input checks x, y, Z, xi, eta = check_inputs(x, y, Z, points, mode, bounds_error) # Identify elements that are outside interpolation domain or NaN outside = (xi < x[0]) + (eta < y[0]) + (xi > x[-1]) + (eta > y[-1]) outside += numpy.isnan(xi) + numpy.isnan(eta) inside = ~outside xi = xi[inside] eta = eta[inside] # Find upper neighbours for each interpolation point idx = numpy.searchsorted(x, xi, side='left') idy = numpy.searchsorted(y, eta, side='left') # Internal check (index == 0 is OK) msg = ('Interpolation point outside domain. This should never happen. ' 'Please email <EMAIL>') if len(idx) > 0: if not max(idx) < len(x): raise RuntimeError(msg) if len(idy) > 0: if not max(idy) < len(y): raise RuntimeError(msg) # Get the four neighbours for each interpolation point x0 = x[idx - 1] x1 = x[idx] y0 = y[idy - 1] y1 = y[idy] z00 = Z[idx - 1, idy - 1] z01 = Z[idx - 1, idy] z10 = Z[idx, idy - 1] z11 = Z[idx, idy] # Coefficients for weighting between lower and upper bounds oldset = numpy.seterr(invalid='ignore') # Suppress warnings alpha = (xi - x0) / (x1 - x0) beta = (eta - y0) / (y1 - y0) numpy.seterr(**oldset) # Restore if mode == 'linear': # Bilinear interpolation formula dx = z10 - z00 dy = z01 - z00 z = z00 + alpha * dx + beta * dy + alpha * beta * (z11 - dx - dy - z00) else: # Piecewise constant (as verified in input_check) # Set up masks for the quadrants left = alpha < 0.5 right = -left lower = beta < 0.5 upper = -lower lower_left = lower * left lower_right = lower * right upper_left = upper * left # Initialise result array with all elements set to upper right z = z11 # Then set the other quadrants z[lower_left] = z00[lower_left] z[lower_right] = z10[lower_right] z[upper_left] = z01[upper_left] # Self test if len(z) > 0: mz = numpy.nanmax(z) mZ = numpy.nanmax(Z) msg = ('Internal check failed. Max interpolated value %.15f ' 'exceeds max grid value %.15f ' % (mz, mZ)) if not(numpy.isnan(mz) or numpy.isnan(mZ)): if not mz <= mZ: raise RuntimeError(msg) # Populate result with interpolated values for points inside domain # and NaN for values outside r = numpy.zeros(len(points)) r[inside] = z r[outside] = numpy.nan return r def check_inputs(x, y, Z, points, mode, bounds_error): """Check inputs for interpolate2d function """ msg = 'Only mode "linear" and "constant" are implemented. I got %s' % mode if mode not in ['linear', 'constant']: raise RuntimeError(msg) try: x =
numpy.array(x)
numpy.array
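The bilinear branch of interpolate2d combines the four cell corners with the weights alpha and beta. A self-contained check of that exact formula on a single cell, using z(x, y) = x * y, which bilinear interpolation reproduces exactly.

# corners of one grid cell, with Z indexed as Z[ix, iy] as in the function above
x0, x1, y0, y1 = 0.0, 1.0, 0.0, 1.0
z00, z01, z10, z11 = 0.0, 0.0, 0.0, 1.0   # samples of z(x, y) = x * y at the corners

xi, eta = 0.25, 0.75                       # interpolation point inside the cell
alpha = (xi - x0) / (x1 - x0)
beta = (eta - y0) / (y1 - y0)

dx = z10 - z00
dy = z01 - z00
z = z00 + alpha * dx + beta * dy + alpha * beta * (z11 - dx - dy - z00)

print(z)          # 0.1875
print(xi * eta)   # 0.1875 -- identical, because x * y is itself bilinear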
import numpy as np def yorke(x, r): return x * r * (1. - x) class CahosAgent: def __init__(self, ticket, invested=100000., r=None): self.score = 0 self.ror_history = [] self.dist = np.array([1.]) self.ticket = ticket self.tr_cost = 2. if r is None: self.r = np.random.uniform(2.9, 3.9, 3) else: self.r = r self.invested = invested self.cash = invested self.shares = np.zeros(1) self.history = [] self.actions = [] self.x0_history = [] self.x1_history = [] self.x2_history = [] self.state = [] def invest(self, data): if len(data.keys()) == 0: return x = np.array([0.01, 0.01, 0.01]) self.x0_history.append(x[0]) self.x1_history.append(x[1]) self.x2_history.append(x[2]) for _ in range(20): x = yorke(x, self.r) self.x0_history.append(x[0]) self.x1_history.append(x[1]) self.x2_history.append(x[2]) pct = data.pct_change().as_matrix() bench = data.pct_change().cumsum().as_matrix() for i in range(len(data)): prices = data.iloc[i].values portfolio = self.cash + np.dot(prices, self.shares) try: if np.isnan(portfolio): portfolio = 0. except: print('portfolio:', portfolio) self.history.append(portfolio) ror = (portfolio - self.invested) / self.invested self.score_based_on_ror(ror) # self.score_based_on_beating_benchmark(ror, bench[i]) self.ror_history.append(ror) x = yorke(x, self.r) self.x0_history.append(x[0]) self.x1_history.append(x[1]) self.x2_history.append(x[2]) self.state.append(np.array([pct[i], bench[i]])) if x[2] >= 0.9: self.actions.append('H') continue elif x[0] >= 0.9 and sum(self.shares > 0) > 0: self.actions.append('S') self.cash = np.dot(self.shares, prices) - sum(self.shares > 0) * self.tr_cost self.shares = np.zeros(1) elif x[1] >= 0.9 and self.cash > prices: self.actions.append('B') portfolio = self.cash +
np.dot(prices, self.shares)
numpy.dot
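CahosAgent drives its buy/sell/hold decisions by iterating the logistic map yorke(x, r) = x * r * (1 - x) on three channels and acting when a channel crosses 0.9. A minimal sketch of that loop in isolation; the channel-to-action mapping mirrors the snippet, while the seed and step count are arbitrary.

import numpy as np

def yorke(x, r):
    # logistic map step, as defined in the snippet
    return x * r * (1. - x)

rng = np.random.default_rng(42)
r = rng.uniform(2.9, 3.9, 3)            # one growth rate per channel, as in __init__
x = np.array([0.01, 0.01, 0.01])

for step in range(30):
    x = yorke(x, r)
    if x[2] >= 0.9:                     # hold channel checked first, as in invest()
        action = 'H'
    elif x[0] >= 0.9:
        action = 'S'                    # sell
    elif x[1] >= 0.9:
        action = 'B'                    # buy
    else:
        action = '-'
    print(step, np.round(x, 3), action)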
'''Example of VAE on MNIST dataset using MLP The VAE has a modular design. The encoder, decoder and VAE are 3 models that share weights. After training the VAE model, the encoder can be used to generate latent vectors. The decoder can be used to generate MNIST digits by sampling the latent vector from a Gaussian distribution with mean=0 and std=1. # Reference [1] Kingma, <NAME>., and <NAME>. "Auto-encoding variational bayes." https://arxiv.org/abs/1312.6114 ''' from __future__ import absolute_import from __future__ import division from __future__ import print_function from keras.layers import Lambda, Input, Dense from keras.models import Model from keras.datasets import mnist from keras.losses import mse, binary_crossentropy from keras.utils import plot_model from keras import backend as K import numpy as np import matplotlib.pyplot as plt import argparse import os if K.backend() == 'mxnet': raise NotImplementedError("MXNet Backend: Cannot auto infer input shapes.") # reparameterization trick # instead of sampling from Q(z|X), sample eps = N(0,I) # z = z_mean + sqrt(var)*eps def sampling(args): """Reparameterization trick by sampling fr an isotropic unit Gaussian. # Arguments: args (tensor): mean and log of variance of Q(z|X) # Returns: z (tensor): sampled latent vector """ z_mean, z_log_var = args batch = K.shape(z_mean)[0] dim = K.int_shape(z_mean)[1] # by default, random_normal has mean=0 and std=1.0 epsilon = K.random_normal(shape=(batch, dim)) return z_mean + K.exp(0.5 * z_log_var) * epsilon def plot_results(models, data, batch_size=128, model_name="vae_mnist"): """Plots labels and MNIST digits as function of 2-dim latent vector # Arguments: models (tuple): encoder and decoder models data (tuple): test data and label batch_size (int): prediction batch size model_name (string): which model is using this function """ encoder, decoder = models x_test, y_test = data os.makedirs(model_name, exist_ok=True) filename = os.path.join(model_name, "vae_mean.png") # display a 2D plot of the digit classes in the latent space z_mean, _, _ = encoder.predict(x_test, batch_size=batch_size) plt.figure(figsize=(12, 10)) plt.scatter(z_mean[:, 0], z_mean[:, 1], c=y_test) plt.colorbar() plt.xlabel("z[0]") plt.ylabel("z[1]") plt.savefig(filename) plt.show() filename = os.path.join(model_name, "digits_over_latent.png") # display a 30x30 2D manifold of digits n = 30 digit_size = 28 figure = np.zeros((digit_size * n, digit_size * n)) # linearly spaced coordinates corresponding to the 2D plot # of digit classes in the latent space grid_x = np.linspace(-4, 4, n) grid_y = np.linspace(-4, 4, n)[::-1] for i, yi in enumerate(grid_y): for j, xi in enumerate(grid_x): z_sample = np.array([[xi, yi]]) x_decoded = decoder.predict(z_sample) digit = x_decoded[0].reshape(digit_size, digit_size) figure[i * digit_size: (i + 1) * digit_size, j * digit_size: (j + 1) * digit_size] = digit plt.figure(figsize=(10, 10)) start_range = digit_size // 2 end_range = n * digit_size + start_range + 1 pixel_range = np.arange(start_range, end_range, digit_size) sample_range_x = np.round(grid_x, 1) sample_range_y = np.round(grid_y, 1) plt.xticks(pixel_range, sample_range_x) plt.yticks(pixel_range, sample_range_y) plt.xlabel("z[0]") plt.ylabel("z[1]") plt.imshow(figure, cmap='Greys_r') plt.savefig(filename) plt.show() # MNIST dataset (x_train, y_train), (x_test, y_test) = mnist.load_data() image_size = x_train.shape[1] original_dim = image_size * image_size x_train = np.reshape(x_train, [-1, original_dim]) x_test =
np.reshape(x_test, [-1, original_dim])
numpy.reshape
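The sampling layer above implements the reparameterization trick: z = z_mean + exp(0.5 * z_log_var) * eps with eps drawn from a standard normal, so the stochastic node sits outside the path that needs gradients. The same computation in plain NumPy, with arbitrary toy shapes.

import numpy as np

rng = np.random.default_rng(0)
batch, latent_dim = 4, 2

z_mean = rng.standard_normal((batch, latent_dim))
z_log_var = rng.standard_normal((batch, latent_dim))

# eps ~ N(0, I); shifting and scaling it keeps z differentiable with respect to
# the encoder outputs, which is the point of the trick
eps = rng.standard_normal((batch, latent_dim))
z = z_mean + np.exp(0.5 * z_log_var) * eps

print(z.shape)   # (4, 2), one latent sample per batch element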
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Jun 17 14:49:27 2021

@author: SilvinW

Test module for the lorenz attractor file.
Two iterations are performed and the output is compared to the expected
output using np.allclose, as we are working with floating-point numbers.
"""
import lorenz.solver as ls
import numpy as np


class TestCalulateLorenzAttractor:
    """
    Description of the test functions:

    test_zeros:           x = 1, y = 1, z = 1    -> x = 0, y = 0, z = 0   sigma = 0,    beta = 0,    rho = 0
    test_case_one_111:    x = 1, y = 1, z = 1    sigma = 10.0, beta = 8/3,  rho = 6.0
    test_case_one_54m3:   x = 5, y = 4, z = -3   sigma = 10.0, beta = 8/3,  rho = 6.0
    test_case_five_111:   x = 1, y = 1, z = 1    sigma = 14.0, beta = 13/3, rho = 28.0
    test_case_five_1m4m6: x = 1, y = -4, z = -6  sigma = 14.0, beta = 13/3, rho = 28.0
    """

    # All initial conditions and parameters set to 0 should return zeros
    def test_zeros(self):
        assert np.allclose(ls.solve(0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.002, 2), (
np.array([0., 0.])
numpy.array
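The tests call lorenz.solver.solve with eight positional arguments, presumably (x, y, z, sigma, beta, rho, dt, steps) given the docstring; the solver itself is not shown here. A minimal forward-Euler sketch of the classic Lorenz equations, included as an assumption about what the solver integrates and to show why the all-zero case stays at the origin.

import numpy as np

def lorenz_step(state, sigma, beta, rho, dt):
    # one forward-Euler step of the classic Lorenz system
    x, y, z = state
    dxdt = sigma * (y - x)
    dydt = x * (rho - z) - y
    dzdt = x * y - beta * z
    return state + dt * np.array([dxdt, dydt, dzdt])

state = np.array([1.0, 1.0, 1.0])
for _ in range(2):                        # two iterations, matching the tests
    state = lorenz_step(state, sigma=10.0, beta=8/3, rho=6.0, dt=0.002)
print(state)

# with all initial conditions and parameters zero the derivatives vanish,
# so the trajectory stays at the origin -- the behaviour test_zeros asserts
print(lorenz_step(np.zeros(3), 0.0, 0.0, 0.0, 0.002))   # [0. 0. 0.]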
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for the Bernoulli distribution.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import importlib import numpy as np from tensorflow.python.eager import backprop from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes from tensorflow.python.framework import test_util from tensorflow.python.ops import array_ops from tensorflow.python.ops.distributions import bernoulli from tensorflow.python.ops.distributions import kullback_leibler from tensorflow.python.platform import test from tensorflow.python.platform import tf_logging def try_import(name): # pylint: disable=invalid-name module = None try: module = importlib.import_module(name) except ImportError as e: tf_logging.warning("Could not import %s: %s" % (name, str(e))) return module special = try_import("scipy.special") def make_bernoulli(batch_shape, dtype=dtypes.int32): p = np.random.uniform(size=list(batch_shape)) p = constant_op.constant(p, dtype=dtypes.float32) return bernoulli.Bernoulli(probs=p, dtype=dtype) def entropy(p): q = 1. - p return -q * np.log(q) - p * np.log(p) class BernoulliTest(test.TestCase): @test_util.run_in_graph_and_eager_modes() def testP(self): p = [0.2, 0.4] dist = bernoulli.Bernoulli(probs=p) with self.test_session(): self.assertAllClose(p, self.evaluate(dist.probs)) @test_util.run_in_graph_and_eager_modes() def testLogits(self): logits = [-42., 42.] dist = bernoulli.Bernoulli(logits=logits) with self.test_session(): self.assertAllClose(logits, self.evaluate(dist.logits)) if not special: return with self.test_session(): self.assertAllClose(special.expit(logits), self.evaluate(dist.probs)) p = [0.01, 0.99, 0.42] dist = bernoulli.Bernoulli(probs=p) with self.test_session(): self.assertAllClose(special.logit(p), self.evaluate(dist.logits)) @test_util.run_in_graph_and_eager_modes() def testInvalidP(self): invalid_ps = [1.01, 2.] for p in invalid_ps: with self.test_session(): with self.assertRaisesOpError("probs has components greater than 1"): dist = bernoulli.Bernoulli(probs=p, validate_args=True) self.evaluate(dist.probs) invalid_ps = [-0.01, -3.] 
for p in invalid_ps: with self.test_session(): with self.assertRaisesOpError("Condition x >= 0"): dist = bernoulli.Bernoulli(probs=p, validate_args=True) self.evaluate(dist.probs) valid_ps = [0.0, 0.5, 1.0] for p in valid_ps: with self.test_session(): dist = bernoulli.Bernoulli(probs=p) self.assertEqual(p, self.evaluate(dist.probs)) # Should not fail @test_util.run_in_graph_and_eager_modes() def testShapes(self): with self.test_session(): for batch_shape in ([], [1], [2, 3, 4]): dist = make_bernoulli(batch_shape) self.assertAllEqual(batch_shape, dist.batch_shape.as_list()) self.assertAllEqual(batch_shape, self.evaluate(dist.batch_shape_tensor())) self.assertAllEqual([], dist.event_shape.as_list()) self.assertAllEqual([], self.evaluate(dist.event_shape_tensor())) @test_util.run_in_graph_and_eager_modes() def testDtype(self): dist = make_bernoulli([]) self.assertEqual(dist.dtype, dtypes.int32) self.assertEqual(dist.dtype, dist.sample(5).dtype) self.assertEqual(dist.dtype, dist.mode().dtype) self.assertEqual(dist.probs.dtype, dist.mean().dtype) self.assertEqual(dist.probs.dtype, dist.variance().dtype) self.assertEqual(dist.probs.dtype, dist.stddev().dtype) self.assertEqual(dist.probs.dtype, dist.entropy().dtype) self.assertEqual(dist.probs.dtype, dist.prob(0).dtype) self.assertEqual(dist.probs.dtype, dist.log_prob(0).dtype) dist64 = make_bernoulli([], dtypes.int64) self.assertEqual(dist64.dtype, dtypes.int64) self.assertEqual(dist64.dtype, dist64.sample(5).dtype) self.assertEqual(dist64.dtype, dist64.mode().dtype) @test_util.run_in_graph_and_eager_modes() def _testPmf(self, **kwargs): dist = bernoulli.Bernoulli(**kwargs) with self.test_session(): # pylint: disable=bad-continuation xs = [ 0, [1], [1, 0], [[1, 0]], [[1, 0], [1, 1]], ] expected_pmfs = [ [[0.8, 0.6], [0.7, 0.4]], [[0.2, 0.4], [0.3, 0.6]], [[0.2, 0.6], [0.3, 0.4]], [[0.2, 0.6], [0.3, 0.4]], [[0.2, 0.6], [0.3, 0.6]], ] # pylint: enable=bad-continuation for x, expected_pmf in zip(xs, expected_pmfs): self.assertAllClose(self.evaluate(dist.prob(x)), expected_pmf) self.assertAllClose( self.evaluate(dist.log_prob(x)), np.log(expected_pmf)) def testPmfCorrectBroadcastDynamicShape(self): with self.test_session(): p = array_ops.placeholder(dtype=dtypes.float32) dist = bernoulli.Bernoulli(probs=p) event1 = [1, 0, 1] event2 = [[1, 0, 1]] self.assertAllClose( dist.prob(event1).eval({ p: [0.2, 0.3, 0.4] }), [0.2, 0.7, 0.4]) self.assertAllClose( dist.prob(event2).eval({ p: [0.2, 0.3, 0.4] }), [[0.2, 0.7, 0.4]]) @test_util.run_in_graph_and_eager_modes() def testPmfInvalid(self): p = [0.1, 0.2, 0.7] with self.test_session(): dist = bernoulli.Bernoulli(probs=p, validate_args=True) with self.assertRaisesOpError("must be non-negative."): self.evaluate(dist.prob([1, 1, -1])) with self.assertRaisesOpError("Elements cannot exceed 1."): self.evaluate(dist.prob([2, 0, 1])) @test_util.run_in_graph_and_eager_modes() def testPmfWithP(self): p = [[0.2, 0.4], [0.3, 0.6]] self._testPmf(probs=p) if not special: return self._testPmf(logits=special.logit(p)) def testBroadcasting(self): with self.test_session(): p = array_ops.placeholder(dtypes.float32) dist = bernoulli.Bernoulli(probs=p) self.assertAllClose(np.log(0.5), dist.log_prob(1).eval({p: 0.5})) self.assertAllClose( np.log([0.5, 0.5, 0.5]), dist.log_prob([1, 1, 1]).eval({ p: 0.5 })) self.assertAllClose( np.log([0.5, 0.5, 0.5]), dist.log_prob(1).eval({ p: [0.5, 0.5, 0.5] })) def testPmfShapes(self): with self.test_session(): p = array_ops.placeholder(dtypes.float32, shape=[None, 1]) dist = 
bernoulli.Bernoulli(probs=p) self.assertEqual(2, len(dist.log_prob(1).eval({p: [[0.5], [0.5]]}).shape)) with self.test_session(): dist = bernoulli.Bernoulli(probs=0.5) self.assertEqual(2, len(self.evaluate(dist.log_prob([[1], [1]])).shape)) with self.test_session(): dist = bernoulli.Bernoulli(probs=0.5) self.assertEqual((), dist.log_prob(1).get_shape()) self.assertEqual((1), dist.log_prob([1]).get_shape()) self.assertEqual((2, 1), dist.log_prob([[1], [1]]).get_shape()) with self.test_session(): dist = bernoulli.Bernoulli(probs=[[0.5], [0.5]]) self.assertEqual((2, 1), dist.log_prob(1).get_shape()) @test_util.run_in_graph_and_eager_modes() def testBoundaryConditions(self): with self.test_session(): dist = bernoulli.Bernoulli(probs=1.0) self.assertAllClose(np.nan, self.evaluate(dist.log_prob(0))) self.assertAllClose([np.nan], [self.evaluate(dist.log_prob(1))]) @test_util.run_in_graph_and_eager_modes() def testEntropyNoBatch(self): p = 0.2 dist = bernoulli.Bernoulli(probs=p) with self.test_session(): self.assertAllClose(self.evaluate(dist.entropy()), entropy(p)) @test_util.run_in_graph_and_eager_modes() def testEntropyWithBatch(self): p = [[0.1, 0.7], [0.2, 0.6]] dist = bernoulli.Bernoulli(probs=p, validate_args=False) with self.test_session(): self.assertAllClose( self.evaluate(dist.entropy()), [[entropy(0.1), entropy(0.7)], [entropy(0.2), entropy(0.6)]]) @test_util.run_in_graph_and_eager_modes() def testSampleN(self): with self.test_session(): p = [0.2, 0.6] dist = bernoulli.Bernoulli(probs=p) n = 100000 samples = dist.sample(n) samples.set_shape([n, 2]) self.assertEqual(samples.dtype, dtypes.int32) sample_values = self.evaluate(samples) self.assertTrue(np.all(sample_values >= 0)) self.assertTrue(np.all(sample_values <= 1)) # Note that the standard error for the sample mean is ~ sqrt(p * (1 - p) / # n). This means that the tolerance is very sensitive to the value of p # as well as n. self.assertAllClose(p, np.mean(sample_values, axis=0), atol=1e-2) self.assertEqual(set([0, 1]), set(sample_values.flatten())) # In this test we're just interested in verifying there isn't a crash # owing to mismatched types. b/30940152 dist = bernoulli.Bernoulli(np.log([.2, .4])) self.assertAllEqual((1, 2), dist.sample(1, seed=42).get_shape().as_list()) @test_util.run_in_graph_and_eager_modes() def testNotReparameterized(self): p = constant_op.constant([0.2, 0.6]) with backprop.GradientTape() as tape: tape.watch(p) dist = bernoulli.Bernoulli(probs=p) samples = dist.sample(100) grad_p = tape.gradient(samples, p) self.assertIsNone(grad_p) def testSampleActsLikeSampleN(self): with self.test_session() as sess: p = [0.2, 0.6] dist = bernoulli.Bernoulli(probs=p) n = 1000 seed = 42 self.assertAllEqual( self.evaluate(dist.sample(n, seed)), self.evaluate(dist.sample(n, seed))) n = array_ops.placeholder(dtypes.int32) sample, sample = sess.run([dist.sample(n, seed), dist.sample(n, seed)], feed_dict={n: 1000}) self.assertAllEqual(sample, sample) @test_util.run_in_graph_and_eager_modes() def testMean(self): with self.test_session(): p = np.array([[0.2, 0.7], [0.5, 0.4]], dtype=np.float32) dist = bernoulli.Bernoulli(probs=p) self.assertAllEqual(self.evaluate(dist.mean()), p) @test_util.run_in_graph_and_eager_modes() def testVarianceAndStd(self): var = lambda p: p * (1. 
- p) with self.test_session(): p = [[0.2, 0.7], [0.5, 0.4]] dist = bernoulli.Bernoulli(probs=p) self.assertAllClose( self.evaluate(dist.variance()), np.array( [[var(0.2), var(0.7)], [var(0.5), var(0.4)]], dtype=np.float32)) self.assertAllClose( self.evaluate(dist.stddev()), np.array( [[np.sqrt(var(0.2)), np.sqrt(var(0.7))], [np.sqrt(var(0.5)), np.sqrt(var(0.4))]], dtype=np.float32)) @test_util.run_in_graph_and_eager_modes() def testBernoulliBernoulliKL(self): batch_size = 6 a_p = np.array([0.5] * batch_size, dtype=np.float32) b_p = np.array([0.4] * batch_size, dtype=np.float32) a = bernoulli.Bernoulli(probs=a_p) b = bernoulli.Bernoulli(probs=b_p) kl = kullback_leibler.kl_divergence(a, b) kl_val = self.evaluate(kl) kl_expected = (a_p *
np.log(a_p / b_p)
numpy.log
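testBernoulliBernoulliKL compares kullback_leibler.kl_divergence against the closed form for two Bernoulli distributions. That closed form, assuming the expression continues with the usual (1 - p) term, written out in NumPy.

import numpy as np

def bernoulli_kl(a_p, b_p):
    # KL(Bernoulli(a_p) || Bernoulli(b_p)), elementwise
    return a_p * np.log(a_p / b_p) + (1. - a_p) * np.log((1. - a_p) / (1. - b_p))

a_p = np.array([0.5] * 6, dtype=np.float32)
b_p = np.array([0.4] * 6, dtype=np.float32)
print(bernoulli_kl(a_p, b_p))   # the values kl_val is checked against in the test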
# -*- coding: utf-8 -*- """ Created on Mon Jan 28 08:28:09 2019 @author: Manu """ import numpy as np import scipy from util import tools from scipy import signal from scipy import linalg from numpy import matlib def clean_windows(Signal,srate,max_bad_channels,zthresholds,window_len): # Remove periods with abnormally high-power content from continuous data. # [Signal,Mask] = clean_windows(Signal,MaxBadChannels,PowerTolerances,WindowLength,WindowOverlap,MaxDropoutFraction,Min) # # This function cuts segments from the data which contain high-power artifacts. Specifically, # only windows are retained which have less than a certain fraction of "bad" channels, where a channel # is bad in a window if its power is above or below a given upper/lower threshold (in standard # deviations from a robust estimate of the EEG power distribution in the channel). # # In: # Signal : Continuous data set, assumed to be appropriately high-passed (e.g. >1Hz or # 0.5Hz - 2.0Hz transition band) # # MaxBadChannels : The maximum number or fraction of bad channels that a retained window may still # contain (more than this and it is removed). Reasonable range is 0.05 (very clean # output) to 0.3 (very lax cleaning of only coarse artifacts). Default: 0.2. # # PowerTolerances: The minimum and maximum standard deviations within which the power of a channel # must lie (relative to a robust estimate of the clean EEG power distribution in # the channel) for it to be considered "not bad". Default: [-3.5 5]. # # # The following are detail parameters that usually do not have to be tuned. If you can't get # the function to do what you want, you might consider adapting these to your data. # # WindowLength : Window length that is used to check the data for artifact content. This is # ideally as long as the expected time scale of the artifacts but not shorter # than half a cycle of the high-pass filter that was used. Default: 1. # # WindowOverlap : Window overlap fraction. The fraction of two successive windows that overlaps. # Higher overlap ensures that fewer artifact portions are going to be missed (but # is slower). (default: 0.66) # # MaxDropoutFraction : Maximum fraction that can have dropouts. This is the maximum fraction of # time windows that may have arbitrarily low amplitude (e.g., due to the # sensors being unplugged). (default: 0.1) # # MinCleanFraction : Minimum fraction that needs to be clean. This is the minimum fraction of time # windows that need to contain essentially uncontaminated EEG. (default: 0.25) # # # The following are expert-level parameters that you should not tune unless you fully understand # how the method works. # # TruncateQuantile : Truncated Gaussian quantile. Quantile range [upper,lower] of the truncated # Gaussian distribution that shall be fit to the EEG contents. (default: [0.022 0.6]) # # StepSizes : Grid search stepping. Step size of the grid search, in quantiles; separately for # [lower,upper] edge of the truncated Gaussian. The lower edge has finer stepping # because the clean data density is assumed to be lower there, so small changes in # quantile amount to large changes in data space. (default: [0.01 0.01]) # # ShapeRange : Shape parameter range. Search range for the shape parameter of the generalized # Gaussian distribution used to fit clean EEG. (default: 1.7:0.15:3.5) # # Out: # SignalClean : data set with bad time periods removed. 
# # Mask : mask of retained samples (logical array) window_overlap = 0.66 max_dropout_fraction = 0.1 min_clean_fraction = 0.25 truncate_quant = [0.0220,0.6000] step_sizes = [0.01,0.01] shape_range = np.linspace(1.7,3.5,13) max_bad_channels = np.round(Signal.shape[0]*max_bad_channels); # Signal = Signal *1e6 [C,S] = Signal.shape; N = int(window_len*srate); wnd = np.arange(0,N); offsets = np.int_(np.arange(0,S-N,np.round(N*(1-window_overlap)))) print('Determining time window rejection thresholds...') print('for each channel...') wz=np.array([]) for ichan in range(C): X = Signal[ichan,:]**2 Y=[] for joffset in offsets: Y.append(np.sqrt(np.sum(X[joffset:joffset+N])/N)) Y=np.transpose(Y) mu,sig,alpha,beta = tools.fit_eeg_distribution(Y, min_clean_fraction, max_dropout_fraction,truncate_quant, step_sizes,shape_range) if (ichan==0): wz = (Y-mu)/sig else: wz=np.vstack((wz,np.array((Y-mu)/sig))) # sort z scores into quantiles swz = np.sort(wz,axis=0) # determine which windows to remove if (np.max(zthresholds)>0): remove_mask1 = swz[-(np.int(max_bad_channels)+1),:] > np.max(zthresholds) if (np.min(zthresholds)<0): remove_mask2 = swz[1+np.int(max_bad_channels-1),:] < np.min(zthresholds) remove_mask=np.logical_or(remove_mask1, remove_mask2) removed_windows = np.where(remove_mask) sample_maskidx = [] for iremoved in range(len(removed_windows[0])): if (iremoved==0): sample_maskidx=np.arange(offsets[removed_windows[0][iremoved]],offsets[removed_windows[0][iremoved]]+N) else: sample_maskidx=np.vstack((sample_maskidx,(np.arange(offsets[removed_windows[0][iremoved]],offsets[removed_windows[0][iremoved]]+N)))) sample_mask2remove = np.unique(sample_maskidx) SignalClean = np.delete(Signal,sample_mask2remove,1) sample_mask = np.ones((1, S), dtype=bool) sample_mask[0,sample_mask2remove]=False return SignalClean,sample_mask def YW_filter(Data,srate,iirstate_in): # FilterB, FilterA : Coefficients of an IIR filter that is used to shape the spectrum of the signal # when calculating artifact statistics. The output signal does not go through # this filter. This is an optional way to tune the sensitivity of the algorithm # to each frequency component of the signal. The default filter is less # sensitive at alpha and beta frequencies and more sensitive at delta (blinks) # and gamma (muscle) frequencies. Default: # [b,a] = yulewalk(8,[[0 2 3 13 16 40 min(80,srate/2-1)]*2/srate 1],[3 0.75 0.33 0.33 1 1 3 3]); [C,S] = Data.shape F=np.array([0,2,3,13,16,40,np.minimum(80.0,(srate/2.0)-1.0),srate/2.0])*2.0/srate M = np.array([3,0.75,0.33,0.33,1,1,3,3]) B,A = tools.yulewalk(8,F,M) # apply the signal shaping filter and initialize the IIR filter state DataFilt = np.zeros((C,S)) iirstate = np.zeros((C,len(A)-1)) zi = signal.lfilter_zi(B, A) for ichan in range(C): if (iirstate_in is None): # DataFilt[ichan,:], iirstate[ichan,:] = signal.lfilter(B,A,Data[ichan,:],zi=zi*0)#zi*Data[ichan,0]) DataFilt[ichan,:], iirstate[ichan,:] = signal.lfilter(B,A,Data[ichan,:],zi=zi*Data[ichan,0]) else: DataFilt[ichan,:], iirstate[ichan,:] = signal.lfilter(B,A,Data[ichan,:],zi=iirstate_in[ichan,:]) return DataFilt, iirstate def asr_calibrate(Data,srate,cutoff): # Calibration function for the Artifact Subspace Reconstruction (ASR) method. # State = asr_calibrate(Data,SamplingRate,Cutoff,BlockSize,FilterB,FilterA,WindowLength,WindowOverlap,MaxDropoutFraction,MinCleanFraction) # # The input to this data is a multi-channel time series of calibration data. In typical uses the # calibration data is clean resting EEG data of ca. 
1 minute duration (can also be longer). One can # also use on-task data if the fraction of artifact content is below the breakdown point of the # robust statistics used for estimation (50# theoretical, ~30# practical). If the data has a # proportion of more than 30-50# artifacts then bad time windows should be removed beforehand. This # data is used to estimate the thresholds that are used by the ASR processing function to identify # and remove artifact components. # # The calibration data must have been recorded for the same cap design from which data for cleanup # will be recorded, and ideally should be from the same session and same subject, but it is possible # to reuse the calibration data from a previous session and montage to the extent that the cap is # placed in the same location (where loss in accuracy is more or less proportional to the mismatch # in cap placement). # # The calibration data should have been high-pass filtered (for example at 0.5Hz or 1Hz using a # Butterworth IIR filter). # # In: # Data : Calibration data [#channels x #samples]; *zero-mean* (e.g., high-pass filtered) and # reasonably clean EEG of not much less than 30 seconds length (this method is typically # used with 1 minute or more). # # SamplingRate : Sampling rate of the data, in Hz. # # # The following are optional parameters (the key parameter of the method is the RejectionCutoff): # # RejectionCutoff: Standard deviation cutoff for rejection. Data portions whose variance is larger # than this threshold relative to the calibration data are considered missing # data and will be removed. The most aggressive value that can be used without # losing too much EEG is 2.5. A quite conservative value would be 5. Default: 5. # # Blocksize : Block size for calculating the robust data covariance and thresholds, in samples; # allows to reduce the memory and time requirements of the robust estimators by this # factor (down to Channels x Channels x Samples x 16 / Blocksize bytes). Default: 10 # # FilterB, FilterA : Coefficients of an IIR filter that is used to shape the spectrum of the signal # when calculating artifact statistics. The output signal does not go through # this filter. This is an optional way to tune the sensitivity of the algorithm # to each frequency component of the signal. The default filter is less # sensitive at alpha and beta frequencies and more sensitive at delta (blinks) # and gamma (muscle) frequencies. Default: # [b,a] = yulewalk(8,[[0 2 3 13 16 40 min(80,srate/2-1)]*2/srate 1],[3 0.75 0.33 0.33 1 1 3 3]); # # WindowLength : Window length that is used to check the data for artifact content. This is # ideally as long as the expected time scale of the artifacts but short enough to # allow for several 1000 windows to compute statistics over. Default: 0.5. # # WindowOverlap : Window overlap fraction. The fraction of two successive windows that overlaps. # Higher overlap ensures that fewer artifact portions are going to be missed (but # is slower). Default: 0.66 # # MaxDropoutFraction : Maximum fraction of windows that can be subject to signal dropouts # (e.g., sensor unplugged), used for threshold estimation. Default: 0.1 # # MinCleanFraction : Minimum fraction of windows that need to be clean, used for threshold # estimation. 
Default: 0.25 # # # Out: # State : initial state struct for asr_process [C,S] = Data.shape blocksize = 10 window_len = 0.5 window_overlap = 0.66 max_dropout_fraction = 0.1 min_clean_fraction = 0.25 # F=np.array([0,2,3,13,16,40,np.minimum(80.0,(srate/2.0)-1.0),srate/2.0])*2.0/srate # M = np.array([3,0.75,0.33,0.33,1,1,3,3]) # B,A = tools.yulewalk(8,F,M) # # # apply the signal shaping filter and initialize the IIR filter state # SigFilt = np.zeros((C,S)) # iirstate = np.zeros((C,len(A)-1)) # zi = signal.lfilter_zi(B, A) # for ichan in range(C): # SigFilt[ichan,:], iirstate[ichan,:] = signal.lfilter(B,A,Data[ichan,:],zi=zi*0)#zi*Data[ichan,0]) Data = Data.T U = np.zeros((len(np.arange(0,S,blocksize)),C*C)) for k in range(blocksize): rangevect = np.minimum(S-1,np.arange(k,S+k,blocksize)) Xrange = Data[rangevect,:] for ic in range(C): islice = np.arange((ic*C),((ic+1)*C),1,dtype=int) U[:,islice] = U[:,islice] + (Xrange*np.transpose(np.matlib.repmat(Xrange[:,ic],C,1))) # get the mixing matrix M M = scipy.linalg.sqrtm(np.real(np.reshape(tools.block_geometric_median(U/blocksize,1),(C,C)))); # window length for calculating thresholds N = int(np.round(window_len*srate)) # get the threshold matrix T print('Determining per-component thresholds...'); D,Vtmp = scipy.linalg.eig(M) V=Vtmp[:,np.argsort(D)] X = np.abs(np.dot(Data,V)); offsets = np.int_(np.arange(0,S-N,np.round(N*(1-window_overlap)))) truncate_quant = [0.0220,0.6000] step_sizes = [0.01,0.01] shape_range = np.linspace(1.7,3.5,13) mu=np.zeros(C) sig=np.zeros(C) for ichan in range(C): rms = X[:,ichan]**2 Y=[] for joffset in offsets: Y.append(np.sqrt(np.sum(rms[joffset:joffset+N])/N)) Y=np.transpose(Y) mu[ichan],sig[ichan],alpha,beta = tools.fit_eeg_distribution(Y, min_clean_fraction, max_dropout_fraction,truncate_quant, step_sizes,shape_range) T = np.dot(np.diag(mu + cutoff*sig),V.T) # print('mu',mu) # print('sig',sig) # print('done.'); calibASRparam= {'M':M,'T':T} return calibASRparam #'cov',[],'carry',[],'iir',iirstate,'last_R',[],'last_trivial',true} # initialize the remaining filter state #state = struct('M',M,'T',T,'B',B,'A',A,'cov',[],'carry',[],'iir',iirstate,'last_R',[],'last_trivial',true); def asr_process_on_epoch(epoch2correct, epochYWfiltered,state): # Processing function for the Artifact Subspace Reconstruction (ASR) method. # EpochClean = asr_process_on_epoch(epoch2correct, epochYWfiltered,state) # # This function is used to clean multi-channel signal using the ASR method. The required inputs are # the data matrix, the sampling rate of the data, and the filter state (as initialized by # asr_calibrate). [C,S] = epochYWfiltered.shape epochYWfiltered = scipy.signal.detrend(epochYWfiltered, axis=1, type='constant') Xcov = np.cov(epochYWfiltered,bias=True) D,Vtmp = np.linalg.eig(Xcov) V=np.real(Vtmp[:,np.argsort(D)]) D=np.real(D[np.argsort(D)]) maxdims = int(np.fix(0.66*C)) #determine which components to keep (variance below directional threshold or not admissible for rejection) keep=(D<np.sum(np.dot(state['T'],V)**2,axis=0)) + ((np.arange(C))<(C-maxdims)) trivial = keep.all() # update the reconstruction matrix R (reconstruct artifact components using the mixing matrix) if trivial: R =
np.eye(C)
numpy.eye
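The clean_windows routine above scores each channel's per-window RMS against a robust estimate of its clean-EEG distribution and removes windows in which too many channels are outliers. Below is a minimal, self-contained sketch of that windowing logic; it substitutes a median/MAD scale estimate for the truncated-Gaussian fit performed by tools.fit_eeg_distribution, so it is an illustration of the idea rather than the actual implementation.

import numpy as np

def reject_windows(signal, srate, window_len=1.0, overlap=0.66,
                   zmax=5.0, max_bad_channels=0.2):
    # Flag windows in which too many channels have an abnormal RMS z-score.
    # The robust fit of the real code is replaced by median/MAD for brevity.
    n_chan, n_samp = signal.shape
    n = int(window_len * srate)
    step = int(round(n * (1 - overlap)))
    offsets = np.arange(0, n_samp - n, step, dtype=int)
    rms = np.sqrt(np.array([[np.mean(signal[c, o:o + n] ** 2) for o in offsets]
                            for c in range(n_chan)]))
    med = np.median(rms, axis=1, keepdims=True)
    mad = 1.4826 * np.median(np.abs(rms - med), axis=1, keepdims=True)
    z = (rms - med) / mad
    bad_per_window = (z > zmax).sum(axis=0)
    keep = bad_per_window <= round(max_bad_channels * n_chan)
    return offsets[keep]

rng = np.random.default_rng(0)
eeg = rng.standard_normal((4, 2000))
eeg[1:3, 500:700] *= 20                    # inject a high-power artifact on two channels
kept = reject_windows(eeg, srate=200)
print(f"{kept.size} window offsets kept")  # windows covering the artifact drop out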
from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np import os, sys, random, time, math import tensorflow as tf import cv2 sys.path.append(os.path.realpath('./datasets')) import mask_rcnn_coco as coco sys.path.append(os.path.realpath('./Mask_RCNN/mrcnn')) import utils import model as modellib sys.path.append(os.path.realpath('./utils')) from image_sequence import ImageSequence tf.app.flags.DEFINE_string('checkpoint_path', None, 'Path to checkpoint file.') tf.app.flags.DEFINE_string('image_in_path', None, 'Path to input image directory.') tf.app.flags.DEFINE_string('detections_out_path', None, 'Path to detections output file.') tf.app.flags.DEFINE_integer('max_frames', 100000, 'Maximum number of frames to log.') tf.app.flags.DEFINE_integer('stride', 5, 'Interval at which detections are computed.') FLAGS = tf.app.flags.FLAGS # COCO Class names # Index of the class in the list is its ID. For example, to get ID of # the teddy bear class, use: class_names.index('teddy bear') class_names = ['BG', 'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train', 'truck', 'boat', 'traffic light', 'fire hydrant', 'stop sign', 'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow', 'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag', 'tie', 'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball', 'kite', 'baseball bat', 'baseball glove', 'skateboard', 'surfboard', 'tennis racket', 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon', 'bowl', 'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot', 'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch', 'potted plant', 'bed', 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote', 'keyboard', 'cell phone', 'microwave', 'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock', 'vase', 'scissors', 'teddy bear', 'hair drier', 'toothbrush'] def draw_boxes(frame, boxes): for bidx, bbox in enumerate(boxes): iy = int(bbox[0] * frame.shape[0]) ix = int(bbox[1] * frame.shape[1]) h = int((bbox[2] - bbox[0]) * frame.shape[0]) w = int((bbox[3] - bbox[1]) * frame.shape[1]) cv2.rectangle(frame, (ix, iy), (ix + w, iy + h), (0, 255, 0), 8) def apply_mask(image, mask, color, alpha=0.5): """Apply the given mask to the image. """ for c in range(3): image[:, :, c] = np.where(mask == 1, image[:, :, c] * (1 - alpha) + alpha * color[c], image[:, :, c]) return image def process_detections(detections, mrcnn_mask, image_shape, window): # How many detections do we have? # Detections array is padded with zeros. Find the first class_id == 0. zero_ix = np.where(detections[:, 4] == 0)[0] N = zero_ix[0] if zero_ix.shape[0] > 0 else detections.shape[0] # Extract boxes, class_ids, scores, and class-specific masks boxes = detections[:N, :4] class_ids = detections[:N, 4].astype(np.int32) scores = detections[:N, 5] masks = mrcnn_mask[np.arange(N), :, :, class_ids] # Compute scale and shift to translate coordinates to image domain. h_scale = float(image_shape[0]) / (window[2] - window[0]) w_scale = float(image_shape[1]) / (window[3] - window[1]) scale = min(h_scale, w_scale) shift = window[:2] # y, x scales = np.array([scale, scale, scale, scale]) shifts = np.array([shift[0], shift[1], shift[0], shift[1]]) # Translate bounding boxes to image domain boxes = np.multiply(boxes - shifts, scales).astype(np.int32) # Filter out detections with zero area. Often only happens in early # stages of training when the network weights are still a bit random. 
exclude_ix = np.where( (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1]) <= 0)[0] if exclude_ix.shape[0] > 0: boxes = np.delete(boxes, exclude_ix, axis=0) class_ids = np.delete(class_ids, exclude_ix, axis=0) scores = np.delete(scores, exclude_ix, axis=0) masks = np.delete(masks, exclude_ix, axis=0) N = class_ids.shape[0] return boxes, class_ids, scores, masks def display_instances(image, boxes, masks, class_ids, class_names, scores=None): """ boxes: [num_instance, (y1, x1, y2, x2, class_id)] in image coordinates. masks: [num_instances, height, width] class_ids: [num_instances] class_names: list of class names of the dataset """ # Number of instances N = boxes.shape[0] if N == 0: return image.copy() assert boxes.shape[0] == masks.shape[-1] == class_ids.shape[0] # Show area outside image boundaries. height, width = image.shape[:2] masked_image = image.copy() for i in range(N): color = (0, 0, 255) # Bounding box if not np.any(boxes[i]): # Skip this instance. Has no bbox. Likely lost in image cropping. continue y1, x1, y2, x2 = boxes[i] cv2.rectangle(masked_image, (x1, y1), (x2, y2), color, 4) # Label class_id = class_ids[i] score = scores[i] if scores is not None else None label = class_names[class_id] # Mask mask = masks[:, :, i] masked_image = apply_mask(masked_image, mask, color) return masked_image class InferenceConfig(coco.CocoConfig): # Set batch size to 1 since we'll be running inference on # one image at a time. Batch size = GPU_COUNT * IMAGES_PER_GPU GPU_COUNT = 1 IMAGES_PER_GPU = 1 DETECTION_MIN_CONFIDENCE = 0.5 def run_model(checkpoint_path, image_in_path, detections_out_path): config = InferenceConfig() config.display() model = modellib.MaskRCNN(mode="inference", model_dir='./logs', config=config) model.load_weights(checkpoint_path, by_name=True) s = ImageSequence(image_in_path, '*.png') count = 0 frame_detections = {} for frame, name in s: start = time.time() molded_images, image_metas, windows = model.mold_inputs([frame]) # Run object detection detections, mrcnn_class, mrcnn_bbox, mrcnn_mask, \ rois, rpn_class, rpn_bbox = \ model.keras_model.predict([molded_images, image_metas], verbose=0) zero_ix = np.where(detections[0][:, 4] == 0)[0] N = zero_ix[0] if zero_ix.shape[0] > 0 else detections[0].shape[0] # Extract boxes, class_ids, scores, and class-specific masks boxes, class_ids, scores, masks = process_detections(detections[0], mrcnn_mask[0], frame.shape, windows[0]) print(boxes.shape, class_ids.shape, scores.shape, masks.shape) boxes = boxes.astype(np.float32) boxes[:, 0] = boxes[:, 0]/frame.shape[0] boxes[:, 2] = boxes[:, 2]/frame.shape[0] boxes[:, 1] = boxes[:, 1]/frame.shape[1] boxes[:, 3] = boxes[:, 3]/frame.shape[1] frame_detections[name] = [boxes, class_ids, scores, masks] print(class_ids) end = time.time() print('time', count, end - start) count = count + 1 if detections_out_path:
np.save(detections_out_path, frame_detections)
numpy.save
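The run loop above stores frame_detections, a Python dict keyed by frame name, with numpy.save. Saving a dict this way wraps it in a 0-d object array, so reading it back needs allow_pickle=True and .item(). A small illustration (the file name here is hypothetical):

import numpy as np

frame_detections = {"frame_000.png": [np.zeros((2, 4)), np.array([1, 3])]}
np.save("detections.npy", frame_detections)          # stored as a 0-d object array

loaded = np.load("detections.npy", allow_pickle=True).item()
boxes, class_ids = loaded["frame_000.png"]
print(boxes.shape, class_ids)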
"""Integration testing of pipelines.""" import subprocess import time import fsspec import numpy as np import pandas as pd import pytest import xarray as xr # classes tested here from pangeo_forge.pipelines.base import AbstractPipeline from pangeo_forge.pipelines.http_xarray_zarr import HttpXarrayZarrMixin # where to run the http server _PORT = "8080" _ADDRESS = "127.0.0.1" @pytest.fixture(scope="session") def daily_xarray_dataset(): """Return a synthetic random xarray dataset.""" np.random.seed(1) nt, ny, nx = 10, 18, 36 time = pd.date_range(start="2010-01-01", periods=nt, freq="D") lon = (np.arange(nx) + 0.5) * 360 / nx lon_attrs = {"units": "degrees_east", "long_name": "longitude"} lat = (np.arange(ny) + 0.5) * 180 / ny lat_attrs = {"units": "degrees_north", "long_name": "latitude"} foo =
np.random.rand(nt, ny, nx)
numpy.random.rand
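The daily_xarray_dataset fixture builds coordinate vectors and a random field; presumably these are then wrapped into an xarray.Dataset for the pipeline tests. A sketch of how such a synthetic dataset could be assembled follows. The variable names match the fixture, but the exact structure of the original return value is an assumption.

import numpy as np
import pandas as pd
import xarray as xr

np.random.seed(1)
nt, ny, nx = 10, 18, 36
time = pd.date_range(start="2010-01-01", periods=nt, freq="D")
lon = (np.arange(nx) + 0.5) * 360 / nx
lat = (np.arange(ny) + 0.5) * 180 / ny
foo = np.random.rand(nt, ny, nx)

ds = xr.Dataset(
    {"foo": (("time", "lat", "lon"), foo)},
    coords={
        "time": time,
        "lat": ("lat", lat, {"units": "degrees_north", "long_name": "latitude"}),
        "lon": ("lon", lon, {"units": "degrees_east", "long_name": "longitude"}),
    },
)
print(ds)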
import numpy as np import torch import logging import sys import os from datetime import datetime import copy import argparse import tensorboardX from tensorboardX import SummaryWriter import shutil import pickle import pandas as pd import joblib import yaml import random def seed_all(seed=None): """Set seed for numpy, random, torch.""" np.random.seed(seed) random.seed(seed) # This will seed both cpu and cuda. if seed is None: torch.seed() else: torch.manual_seed(seed) def timestamp(): """Return a string timestamp.""" return datetime.now().strftime('%Y-%m-%d_%H-%M-%S') def init_logging(to_file=False, filename=None): """ Initialize the logging module to print to stdout and (optionally) a file. Call this only once at the beginning of your script. To use the logging module, do `import logging` at the top of your file and use `logging.info` instead of `print. Note: Doesn't work properly inside parallel processes spawned via joblib. Args: to_file (bool, optional): Whether to write to a file in addition to the console (default: False). filename (str, optional): The filename to store the log to. If None, use the current timestamp.log (default: None). """ if to_file: if filename is None: filename = timestamp() + '.log' logging.basicConfig(level=logging.INFO, format='%(message)s', filename=filename) logging.getLogger().addHandler(logging.StreamHandler(sys.stdout)) # write to stdout + file print('Logging to:', filename) else: logging.basicConfig(level=logging.INFO, format='%(message)s') def requires_grad(x): """Recursively check whether an object, of any type, requires grad computation for its elements or not. Args: x: object of any type Returns: boolean inidicating whether the elements of x require grad""" if isinstance(x, torch.Tensor): return x.requires_grad elif isinstance(x, list) and not (x == []): return requires_grad(x[0]) else: return False def state_requires_grad(x): """Recursively turn requires_grad to True to the elements of an object x. Args: x: object of any type Returns: x: object where the elements require grad""" if isinstance(x, torch.Tensor): x.requires_grad = True elif isinstance(x, list): for i in range(len(x)): x[i] = state_requires_grad(x[i]) return x def detach(x): """Recursively detach the grads of elements in x. Args: x: object of any type Returns: x_detach: object where all elements have been detached""" if isinstance(x, torch.Tensor): return x.detach() elif isinstance(x, list): for i in range(len(x)): x[i] = detach(x[i]) return x def deepcopy(item): """Deepcopy function that can deal with classes with attributes that require_grad. It detaches those variables from its grad, and then sets again requires_grad to true Args: item: object with arbitrary attributes Returnd: item: equivalent object with all of its gradients detached""" try: return copy.deepcopy(item) except: # Detach grad when necessary key_requires_grad = [] for key, value in zip(item.__dict__.keys(), item.__dict__.values()): if requires_grad(value): value_detached = detach(value) setattr(item, key, value_detached) key_requires_grad.append(key) # Set requires_grad to True when necessary item_copy = copy.deepcopy(item) for key in key_requires_grad: value = getattr(item_copy, key) setattr(item_copy, key, state_requires_grad(value)) return item_copy def str2bool(v): """ Convert str to bool for use in boolean argparse options. 
From: https://stackoverflow.com/questions/15008758/parsing-boolean-values-with-argparse """ if isinstance(v, bool): return v if v.lower() in ('yes', 'true', 't', 'y', '1'): return True elif v.lower() in ('no', 'false', 'f', 'n', '0'): return False else: raise argparse.ArgumentTypeError('Boolean value expected.') def load_params_file(filename): """Load parameters from yaml file and return as dictionary.""" with open(filename, 'r') as f: params = yaml.safe_load(f) return params def load_params(mode): """Load parameters from params file and terminal input. Args: mode (str): The mode of the experiment; either 'wann' or 'wlnn'. """ # Set up argparse. parser = argparse.ArgumentParser() # Evolution options. egroup = parser.add_argument_group('evolution options') egroup.add_argument('--cull_ratio', type=float, help='Fraction of worst networks to leave out of the breeding pool') egroup.add_argument('--elite_ratio', type=float, help='Fraction of best networks to pass on to the new population directly') egroup.add_argument('--num_generations', type=int, help='Number of generations for evolution') egroup.add_argument('--population_size', type=int, help='Number of networks in the population') egroup.add_argument('--p_add_connection', type=float, help='Probability of adding a connection during mutation') egroup.add_argument('--p_add_node', type=float, help='Probability of adding a node during mutation') if mode == 'wann': egroup.add_argument('--p_change_activation', type=float, help='Probability of changing the activation of a node during mutation') egroup.add_argument('--tournament_size', type=int, help='Number of networks that compete during tournament selection') egroup.add_argument('--inherit_weights', type=str2bool, help='Keep weights after mutating a network') egroup.add_argument('--num_mutations_per_generation', type=int, help='The number of mutations to carry out at each generation') egroup.add_argument('--p_complexity_objective', type=float, help='The fraction of generations to rank according to complexity and ' 'reward (otherwise only reward)') # Evaluation options. vgroup = parser.add_argument_group('evaluation options') vgroup.add_argument('--batch_size', type=int, help='Batch size for evaluation (in run-wann-classification.py) or ' 'learning (in run-wlnn-mnist-no-evolution.py and run-wlnn-mnist.py)') vgroup.add_argument('--batch_size_eval', type=int, help='Batch size for test set evaluation of learned networks') if mode == 'wlnn': vgroup.add_argument('--evaluation_period', type=str, choices=['integral', 'last_batch', \ 'last_ten_batches', 'first_ten_batches', 'last_epoch'], default='last_epoch', help='Which training period should be used to evaluate networks. The ' + 'options are: "integral" the mean across the whole training ' + 'duration, "last_batch" the values in the final batch, ' + '"last_ten_batches" the values in the last ten batches, ' + '"first_ten_batches" the values in the first ten batches and ' + '"last_epoch" the values averaged in the last epoch only.') vgroup.add_argument('--num_trials', type=int, help='How often to run the gym environment during evaluation') # Training options. 
if mode == 'wlnn': tgroup = parser.add_argument_group('training options') tgroup.add_argument('--learning_rule', type=str, help='Learning rule to train network') tgroup.add_argument('--lr', type=float, help='Learning rate') tgroup.add_argument('--num_epochs', type=int, help='Number of epochs for learning') tgroup.add_argument('--optimizer', type=str, help='Optimizer to train network (sgd, adam or adadelta)') tgroup.add_argument('--train_only_outputs', action='store_true', help='If this option is selected, only the weights to the output ' + 'units will be learned. Else, all weights will be learned.') # Architecture options. agroup = parser.add_argument_group('architecture options') agroup.add_argument('--num_inputs', type=int, help='Number of input neurons') agroup.add_argument('--num_outputs', type=int, help='Number of output neurons') agroup.add_argument('--p_initial_connection_enabled', type=float, help='Probability of enabling a connection between input and output layer ' 'at the start of evolution') # Task and dataset options. dgroup = parser.add_argument_group('task and dataset options') dgroup.add_argument('params_file', type=str, help='A yaml file with parameters (see folder params for examples)') dgroup.add_argument('--dataset', type=str, help='Dataset for classification (digits or mnist)') dgroup.add_argument('--env_name', type=str, help='Name of the gym environment') # Computational options. cgroup = parser.add_argument_group('computational options') cgroup.add_argument('--num_workers', type=int, help='Number of workers to run on') cgroup.add_argument('--take_argmax_action', type=str2bool, help='Use argmax of the network output as the action in the environment') cgroup.add_argument('--use_cuda', type=str2bool, help='Use cuda devices if available') cgroup.add_argument('--use_torch', type=str2bool, help='Use torch instead of numpy') # Miscellaneous options. mgroup = parser.add_argument_group('miscellaneous options') mgroup.add_argument('--out_dir', type=str, default='', help='The path to the output directory') mgroup.add_argument('--overwrite_output', action='store_true', help='Overwrite data in the output folder if it already exists.') args = parser.parse_args() # Read params from yaml file. params = load_params_file(args.params_file) # Update with any direct input from the terminal. params_args = vars(args) del params_args['params_file'] for key, value in params_args.items(): if value is not None: params[key] = value # If no out_dir path is provided through the command line arguments, # generate a path based on the current time. if params['out_dir'] == '': params['out_dir'] = create_out_dir_name(params) return params def create_out_dir_name(params): """ Create output directory name for the experiment based on the current date and time. Args: params (dict): The parameters of the experiment. Returns: str: The path to the output directory. """ current_timestamp = timestamp() out_dir = os.path.join('out', current_timestamp) return out_dir def init_output(params, script_name=None, overwrite=False): """ Initialize all output stuff for an experiment, namely: - create output folder based on the current date and time - store the params in this output folder - set up a log in this output folder (can be used via logging.info from everywhere) - set up a tensorboard writer in this output folder Args: params (dict): The parameters of the experiment. script_name (str): The name of the script, will be written to the log. If None, this will be inferred from terminal input (default: None). 
overwrite (bool, optional): If True, overwrite the output dir. If False, raise an exception if the output dir exists (default: False). Returns: tensorboardX.SummaryWriter: The tensorboard writer. str: The path to the output directory. """ out_dir = params['out_dir'] # Make output folder. if os.path.exists(out_dir): if overwrite: shutil.rmtree(out_dir) print('Output dir exists, deleting it (overwrite is set to True):', out_dir) else: raise IOError('Output dir already exists, set overwrite=True to overwrite:', out_dir) os.makedirs(out_dir) print('Created output dir:', out_dir) # Save params to out dir as pickle and yaml. # TODO: Do we use the pickled params somewhere or should we only store yaml? with open(os.path.join(out_dir, 'params.pkl'), 'wb') as f: pickle.dump(params, f) with open(os.path.join(out_dir, 'params.yaml'), 'w') as f: yaml.dump(params, f, default_flow_style=False) # Initialize tensorboard summary writer. if not hasattr(tensorboardX, '__version__'): writer = SummaryWriter(log_dir=os.path.join(out_dir, 'tensorboard_summary')) else: writer = SummaryWriter(logdir=os.path.join(out_dir, 'tensorboard_summary')) # Initialize logging. init_logging(to_file=True, filename=os.path.join(out_dir, os.path.basename(out_dir) + '.log')) # Write header to log (including script name and parameters). if script_name is None: script_name = os.path.basename(sys.argv[0]) logging.info(f'Running {script_name}') logging.info(f'{joblib.cpu_count()} cpu core(s) and ' f'{torch.cuda.device_count()} cuda devices available') logging.info('-' * 80) logging.info('Parameters:') for key, value in params.items(): logging.info(f'{key}: {value}') logging.info('-' * 80) return writer, out_dir def store_performance(results, out_dir='', name='results_summary'): """Store a summary of the performance for the current run in a .csv file. Args: results: np.array of dimension (population_size, 4), where each column contains, for each network in the population, the final: * mean_rewards * max_rewards * mean_accuracies * max_accuracies across all different random seeds """ results_file = os.path.join(out_dir, name + '.csv') results_summary = { 'pop_mean_accuracies': ['%.2f' % (100 * np.mean(results[:, 1]))], 'pop_max_accuracies': ['%.2f' % (100 * np.max(results[:, 1]))], 'pop_mean_rewards': [np.mean(results[:, 0])], 'pop_max_rewards': [np.max(results[:, 0])], } df = pd.DataFrame.from_dict(results_summary) if os.path.isfile(results_file): old_df = pd.read_csv(results_file, sep=';') df = pd.concat([old_df, df], sort=True) df.to_csv(results_file, sep=';', index=False) def log_network_stats(population, writer=None, iteration=None): """ Write statistics about networks in the population (neurons, connections, layers) to logging.info and tensorboard. Args: population (iterable): The population of networks. writer (tensorboardX.SummaryWriter, optional): The tensorboard writer (default: None). iteration (int, optional): Store the values for this iteration in tensorboard (default: None). """ num_connections = [net.get_num_connections() for net in population] num_neurons = [net.num_neurons for net in population] num_layers = [len(net.neurons_in_layer) for net in population] logging.info( f'Connections: {np.mean(num_connections):.0f} +- {np.std(num_connections):.0f} ' f'({np.min(num_connections)}-{np.max(num_connections)}), ' f'Neurons: {
np.mean(num_neurons)
numpy.mean
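log_network_stats reports population statistics in a "mean +- std (min-max)" format. The same formatting can be exercised on plain lists, independent of the network classes (which are assumed to expose get_num_connections and num_neurons):

import numpy as np

num_connections = [12, 18, 25, 9, 14]
num_neurons = [30, 34, 41, 28, 33]

print(
    f"Connections: {np.mean(num_connections):.0f} +- {np.std(num_connections):.0f} "
    f"({np.min(num_connections)}-{np.max(num_connections)}), "
    f"Neurons: {np.mean(num_neurons):.0f} +- {np.std(num_neurons):.0f} "
    f"({np.min(num_neurons)}-{np.max(num_neurons)})"
)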
import pytest import numpy as np from scipy.special import erf from os.path import join, dirname from numpy.testing import assert_allclose # Import main modelling routines from empymod directly to ensure they are in # the __init__.py-file. from empymod import bipole, dipole, analytical, loop # Import rest from model from empymod.model import gpr, dipole_k, fem, tem from empymod.kernel import fullspace, halfspace # These are kind of macro-tests, as they check the final results. # I try to use different parameters for each test, to cover a wide range of # possibilities. It won't be possible to check all the possibilities though. # Add tests when issues arise! # Load required data # Data generated with create_self.py DATAEMPYMOD = np.load(join(dirname(__file__), 'data/empymod.npz'), allow_pickle=True) # Data generated with create_data/fem_tem.py DATAFEMTEM = np.load(join(dirname(__file__), 'data/fem_tem.npz'), allow_pickle=True) # Data generated with create_data/green3d.py GREEN3D = np.load(join(dirname(__file__), 'data/green3d.npz'), allow_pickle=True) # Data generated with create_data/dipole1d.py DIPOLE1D = np.load(join(dirname(__file__), 'data/dipole1d.npz'), allow_pickle=True) # Data generated with create_data/emmod.py EMMOD = np.load(join(dirname(__file__), 'data/emmod.npz'), allow_pickle=True) # Data generated with create_data/regression.py REGRES = np.load(join(dirname(__file__), 'data/regression.npz'), allow_pickle=True) class TestBipole: def test_fullspace(self): # Comparison to analytical fullspace solution fs = DATAEMPYMOD['fs'][()] fsbp = DATAEMPYMOD['fsbp'][()] for key in fs: # Get fullspace fs_res = fullspace(**fs[key]) # Get bipole bip_res = bipole(**fsbp[key]) # Check assert_allclose(fs_res, bip_res) def test_halfspace(self): # Comparison to analytical halfspace solution hs = DATAEMPYMOD['hs'][()] hsbp = DATAEMPYMOD['hsbp'][()] for key in hs: # Get halfspace hs_res = halfspace(**hs[key]) # Get bipole bip_res = bipole(**hsbp[key]) # Check if key in ['12', '13', '21', '22', '23', '31']: # t-domain ex. rtol = 1e-2 else: rtol = 1e-7 assert_allclose(hs_res, bip_res, rtol=rtol) def test_emmod(self): # Comparison to EMmod (Hunziker et al., 2015) # Comparison f = [0.013, 1.25, 130] Hz.; 11 models, 34 ab's, f altern. dat = EMMOD['res'][()] for _, val in dat.items(): res = bipole(**val[0]) assert_allclose(res, val[1], 3e-2, 1e-17, True) def test_dipole1d(self): # Comparison to DIPOLE1D (Key, Scripps) def crec(rec, azm, dip): return [rec[0], rec[1], rec[2], azm, dip] def get_xyz(src, rec, depth, res, freq, srcpts): ex = bipole(src, crec(rec, 0, 0), depth, res, freq, srcpts=srcpts, mrec=False, verb=0) ey = bipole(src, crec(rec, 90, 0), depth, res, freq, srcpts=srcpts, mrec=False, verb=0) ez = bipole(src, crec(rec, 0, 90), depth, res, freq, srcpts=srcpts, mrec=False, verb=0) mx = bipole(src, crec(rec, 0, 0), depth, res, freq, srcpts=srcpts, mrec=True, verb=0) my = bipole(src, crec(rec, 90, 0), depth, res, freq, srcpts=srcpts, mrec=True, verb=0) mz = bipole(src, crec(rec, 0, 90), depth, res, freq, srcpts=srcpts, mrec=True, verb=0) return ex, ey, ez, mx, my, mz def comp_all(data, rtol=1e-3, atol=1e-24): inp, res = data Ex, Ey, Ez, Hx, Hy, Hz = get_xyz(**inp) assert_allclose(Ex, res[0], rtol, atol, True) assert_allclose(Ey, res[1], rtol, atol, True) assert_allclose(Ez, res[2], rtol, atol, True) assert_allclose(Hx, res[3], rtol, atol, True) assert_allclose(Hy, res[4], rtol, atol, True) assert_allclose(Hz, res[5], rtol, atol, True) # DIPOLES # 1. 
x-directed dipole comp_all(DIPOLE1D['xdirdip'][()]) # 2. y-directed dipole comp_all(DIPOLE1D['ydirdip'][()]) # 3. z-directed dipole comp_all(DIPOLE1D['zdirdip'][()]) # 4. dipole in xy-plane comp_all(DIPOLE1D['xydirdip'][()]) # 5. dipole in xz-plane comp_all(DIPOLE1D['xzdirdip'][()]) # 6. dipole in yz-plane comp_all(DIPOLE1D['yzdirdip'][()]) # 7. arbitrary xyz-dipole comp_all(DIPOLE1D['xyzdirdip'][()]) # Bipoles # 8. x-directed bipole comp_all(DIPOLE1D['xdirbip'][()]) # 9. y-directed bipole comp_all(DIPOLE1D['ydirbip'][()]) # 10. z-directed bipole comp_all(DIPOLE1D['zdirbip'][()]) # 11. bipole in xy-plane comp_all(DIPOLE1D['xydirbip'][()]) # 12. bipole in xz-plane comp_all(DIPOLE1D['xzdirbip'][()]) # 13. bipole in yz-plane comp_all(DIPOLE1D['yzdirbip'][()]) # 14. arbitrary xyz-bipole comp_all(DIPOLE1D['xyzdirbip'][()]) # 14.b Check bipole reciprocity inp, res = DIPOLE1D['xyzdirbip'][()] ex = bipole(crec(inp['rec'], 0, 0), inp['src'], inp['depth'], inp['res'], inp['freq'], recpts=inp['srcpts'], verb=0) assert_allclose(ex, res[0], 2e-2, 1e-24, True) mx = bipole(crec(inp['rec'], 0, 0), inp['src'], inp['depth'], inp['res'], inp['freq'], msrc=True, recpts=inp['srcpts'], verb=0) assert_allclose(-mx, res[3], 2e-2, 1e-24, True) def test_green3d(self): # Comparison to green3d (CEMI Consortium) def crec(rec, azm, dip): return [rec[0], rec[1], rec[2], azm, dip] def get_xyz(src, rec, depth, res, freq, aniso, strength, srcpts, msrc): ex = bipole(src, crec(rec, 0, 0), depth, res, freq, aniso=aniso, msrc=msrc, mrec=False, strength=strength, srcpts=srcpts, verb=0) ey = bipole(src, crec(rec, 90, 0), depth, res, freq, aniso=aniso, msrc=msrc, mrec=False, strength=strength, srcpts=srcpts, verb=0) ez = bipole(src, crec(rec, 0, 90), depth, res, freq, aniso=aniso, msrc=msrc, mrec=False, strength=strength, srcpts=srcpts, verb=0) mx = bipole(src, crec(rec, 0, 0), depth, res, freq, aniso=aniso, msrc=msrc, mrec=True, strength=strength, srcpts=srcpts, verb=0) my = bipole(src, crec(rec, 90, 0), depth, res, freq, aniso=aniso, msrc=msrc, mrec=True, strength=strength, srcpts=srcpts, verb=0) mz = bipole(src, crec(rec, 0, 90), depth, res, freq, aniso=aniso, msrc=msrc, mrec=True, strength=strength, srcpts=srcpts, verb=0) return ex, ey, ez, mx, my, mz def comp_all(data, rtol=1e-3, atol=1e-24): inp, res = data Ex, Ey, Ez, Hx, Hy, Hz = get_xyz(**inp) assert_allclose(Ex, res[0], rtol, atol, True) assert_allclose(Ey, res[1], rtol, atol, True) assert_allclose(Ez, res[2], rtol, atol, True) assert_allclose(Hx, res[3], rtol, atol, True) assert_allclose(Hy, res[4], rtol, atol, True) assert_allclose(Hz, res[5], rtol, atol, True) # ELECTRIC AND MAGNETIC DIPOLES # 1. x-directed electric and magnetic dipole comp_all(GREEN3D['xdirdip'][()]) comp_all(GREEN3D['xdirdipm'][()]) # 2. y-directed electric and magnetic dipole comp_all(GREEN3D['ydirdip'][()]) comp_all(GREEN3D['ydirdipm'][()]) # 3. z-directed electric and magnetic dipole comp_all(GREEN3D['zdirdip'][()], 5e-3) comp_all(GREEN3D['zdirdipm'][()], 5e-3) # 4. xy-directed electric and magnetic dipole comp_all(GREEN3D['xydirdip'][()]) comp_all(GREEN3D['xydirdipm'][()]) # 5. xz-directed electric and magnetic dipole comp_all(GREEN3D['xzdirdip'][()], 5e-3) comp_all(GREEN3D['xzdirdipm'][()], 5e-3) # 6. yz-directed electric and magnetic dipole comp_all(GREEN3D['yzdirdip'][()], 5e-3) comp_all(GREEN3D['yzdirdipm'][()], 5e-3) # 7. 
xyz-directed electric and magnetic dipole comp_all(GREEN3D['xyzdirdip'][()], 2e-2) comp_all(GREEN3D['xyzdirdipm'][()], 2e-2) # 7.b Check magnetic dipole reciprocity inp, res = GREEN3D['xyzdirdipm'][()] ey = bipole(crec(inp['rec'], 90, 0), inp['src'], inp['depth'], inp['res'], inp['freq'], None, inp['aniso'], mrec=inp['msrc'], msrc=False, strength=inp['strength'], srcpts=1, recpts=inp['srcpts'], verb=0) assert_allclose(-ey, res[1], 2e-2, 1e-24, True) # ELECTRIC AND MAGNETIC BIPOLES # 8. x-directed electric and magnetic bipole comp_all(GREEN3D['xdirbip'][()], 5e-3) comp_all(GREEN3D['xdirbipm'][()], 5e-3) # 8.b Check electric bipole reciprocity inp, res = GREEN3D['xdirbip'][()] ex = bipole(crec(inp['rec'], 0, 0), inp['src'], inp['depth'], inp['res'], inp['freq'], None, inp['aniso'], mrec=inp['msrc'], msrc=False, strength=inp['strength'], srcpts=1, recpts=inp['srcpts'], verb=0) assert_allclose(ex, res[0], 5e-3, 1e-24, True) # 9. y-directed electric and magnetic bipole comp_all(GREEN3D['ydirbip'][()], 5e-3) comp_all(GREEN3D['ydirbipm'][()], 5e-3) # 10. z-directed electric and magnetic bipole comp_all(GREEN3D['zdirbip'][()], 5e-3) comp_all(GREEN3D['zdirbipm'][()], 5e-3) def test_status_quo(self): # Comparison to self, to ensure nothing changed. # 4 bipole-bipole cases in EE, ME, EM, MM, all different values for i in ['1', '2', '3', '4']: res = DATAEMPYMOD['out'+i][()] tEM = bipole(**res['inp']) assert_allclose(tEM, res['EM'], rtol=5e-5) # 5e-5 shouldn't be... def test_dipole_bipole(self): # Compare a dipole to a bipole # Checking intpts, strength, reciprocity inp = {'depth': [0, 250], 'res': [1e20, 0.3, 5], 'freqtime': 1} rec = [8000, 200, 300, 0, 0] bip1 = bipole([-25, 25, -25, 25, 100, 170.7107], rec, srcpts=1, strength=33, **inp) bip2 = bipole(rec, [-25, 25, -25, 25, 100, 170.7107], recpts=5, strength=33, **inp) dip = bipole([0, 0, 135.3553, 45, 45], [8000, 200, 300, 0, 0], **inp) # r = 100; sI = 33 => 3300 assert_allclose(bip1, dip*3300, 1e-5) # bipole as dipole assert_allclose(bip2, dip*3300, 1e-2) # bipole, src/rec switched. 
def test_loop(self, capsys): # Compare loop options: None, 'off', 'freq' inp = {'depth': [0, 500], 'res': [10, 3, 50], 'freqtime': [1, 2, 3], 'rec': [[6000, 7000, 8000], [200, 200, 200], 300, 0, 0], 'src': [0, 0, 0, 0, 0]} non = bipole(loop=None, verb=3, **inp) out, _ = capsys.readouterr() assert "Loop over : None (all vectorized)" in out lpo = bipole(loop='off', verb=3, **inp) out, _ = capsys.readouterr() assert "Loop over : Offsets" in out assert_allclose(non, lpo, equal_nan=True) lfr = bipole(loop='freq', verb=3, **inp) out, _ = capsys.readouterr() assert "Loop over : Frequencies" in out assert_allclose(non, lfr, equal_nan=True) def test_hankel(self, capsys): # Compare Hankel transforms inp = {'depth': [-20, 100], 'res': [1e20, 5, 100], 'freqtime': [1.34, 23, 31], 'src': [0, 0, 0, 0, 90], 'rec': [[200, 300, 400], [3000, 4000, 5000], 120, 90, 0]} dlf = bipole(ht='dlf', htarg={'pts_per_dec': 0}, verb=3, **inp) out, _ = capsys.readouterr() assert "Hankel : DLF (Fast Hankel Transform)" in out assert " > DLF type : Standard" in out assert "Loop over : None" in out dlf2 = bipole(ht='dlf', htarg={'pts_per_dec': -1}, verb=3, **inp) out, _ = capsys.readouterr() assert "Hankel : DLF (Fast Hankel Transform)" in out assert " > DLF type : Lagged Convolution" in out assert "Loop over : Frequencies" in out assert_allclose(dlf, dlf2, rtol=1e-4) dlf3 = bipole(ht='dlf', htarg={'pts_per_dec': 40}, verb=3, **inp) out, _ = capsys.readouterr() assert "Hankel : DLF (Fast Hankel Transform)" in out assert " > DLF type : Splined, 40.0 pts/dec" in out assert "Loop over : Frequencies" in out assert_allclose(dlf, dlf3, rtol=1e-3) qwe = bipole(ht='qwe', htarg={'pts_per_dec': 0}, verb=3, **inp) out, _ = capsys.readouterr() assert "Hankel : Quadrature-with-Extrapolation" in out assert_allclose(dlf, qwe, equal_nan=True) quad = bipole(ht='quad', htarg={'b': 1, 'pts_per_dec': 1000}, verb=3, **inp) out, _ = capsys.readouterr() assert "Hankel : Quadrature" in out assert_allclose(dlf, quad, equal_nan=True) def test_fourier(self, capsys): # Compare Fourier transforms inp = {'depth': [0, 300], 'res': [1e12, 1/3, 5], 'freqtime': np.logspace(-1.5, 1, 20), 'signal': 0, 'rec': [2000, 300, 280, 0, 0], 'src': [0, 0, 250, 0, 0]} ftl = bipole(ft='fftlog', verb=3, **inp) out, _ = capsys.readouterr() assert "Fourier : FFTLog" in out qwe = bipole(ft='qwe', ftarg={'pts_per_dec': 30}, verb=3, **inp) out, _ = capsys.readouterr() assert "Fourier : Quadrature-with-Extrapolation" in out assert_allclose(qwe, ftl, 1e-2, equal_nan=True) dlf = bipole(ft='dlf', verb=3, **inp) out, _ = capsys.readouterr() assert "Fourier : DLF (Sine-Filter)" in out assert_allclose(dlf, ftl, 1e-2, equal_nan=True) # FFT: We keep the error-check very low, otherwise we would have to # calculate too many frequencies. fft = bipole( ft='fft', ftarg={'dfreq': 0.002, 'nfreq': 2**13, 'ntot': 2**16}, verb=3, **inp) out, _ = capsys.readouterr() assert "Fourier : Fast Fourier Transform FFT" in out assert_allclose(fft, ftl, 1e-1, 1e-13, equal_nan=True) def test_example_wrong(self): # One example of wrong input. But inputs are checked in test_utils.py. with pytest.raises(ValueError, match="Parameter src has wrong length"): bipole([0, 0, 0], [0, 0, 0, 0, 0], [], 1, 1, verb=0) def test_combinations(self): # These are the 15 options that each bipole (src or rec) can take. # There are therefore 15x15 possibilities for src-rec combination # within bipole! # Here we are just checking a few possibilities... But these should # cover the principle and therefore hold for all cases. 
inp = {'depth': [-100, 300], 'res': [1e20, 1, 10], 'freqtime': [0.5, 0.9], 'src': [0, 0, 0, 0, 0]} # one_depth dipole asdipole one_bpdepth # ===================================================== # . . . TRUE TRUE TRUE TRUE # ----------------------------------------------------- # | | . TRUE TRUE TRUE TRUE # ----------------------------------------------------- # | | | false TRUE TRUE TRUE # ----------------------------------------------------- # . . . . . . TRUE false TRUE TRUE # TRUE false false TRUE # TRUE false TRUE false # TRUE false false false # ----------------------------------------------------- # | | | | . . TRUE false TRUE TRUE # TRUE false false TRUE # TRUE false TRUE false # TRUE false false false # ----------------------------------------------------- # | | | | | | false false TRUE TRUE # false false false TRUE # false false TRUE false # false false false false # ----------------------------------------------------- # 1.1 three different dipoles da = bipole(rec=[7000, 500, 100, 0, 0], **inp) db = bipole(rec=[8000, 500, 200, 0, 0], **inp) dc = bipole(rec=[9000, 500, 300, 0, 0], **inp) # 1.2 three dipoles at same depth at once => comp to 1.1 dd = bipole(rec=[[7000, 8000, 9000], [500, 500, 500], 100, 0, 0], **inp) de = bipole(rec=[[7000, 8000, 9000], [500, 500, 500], 200, 0, 0], **inp) df = bipole(rec=[[7000, 8000, 9000], [500, 500, 500], 300, 0, 0], **inp) assert_allclose(dd[:, 0], da) assert_allclose(de[:, 1], db) assert_allclose(df[:, 2], dc) # 1.3 three dipoles at different depths at once => comp to 1.1 dg = bipole(rec=[[7000, 8000, 9000], [500, 500, 500], [100, 200, 300], 0, 0], **inp) assert_allclose(dg[:, 0], da) assert_allclose(dg[:, 1], db) assert_allclose(dg[:, 2], dc) # 2.1 three different bipoles # => asdipole/!asdipole/one_bpdepth/!one_bpdepth ba = bipole(rec=[7000, 7050, 100, 100, 2.5, 2.5], **inp) bb = bipole(rec=[7000, 7050, 100, 100, 2.5, 2.5], recpts=10, **inp) bc = bipole(rec=[7000, 7050, 100, 100, 0, 5], **inp) bd = bipole(rec=[7000, 7050, 100, 100, 0, 5], recpts=10, **inp) assert_allclose(ba, bb, 1e-3) assert_allclose(bc, bd, 1e-3) assert_allclose(ba, bc, 1e-2) # As the dip is very small # 2.2 three bipoles at same depth at once # => asdipole/!asdipole/one_bpdepth/!one_bpdepth => comp to 2.1 be = bipole(rec=[[7000, 8000, 9000], [7050, 8050, 9050], [100, 100, 100], [100, 100, 100], 2.5, 2.5], **inp) bf = bipole(rec=[[7000, 8000, 9000], [7050, 8050, 9050], [100, 100, 100], [100, 100, 100], 2.5, 2.5], recpts=10, **inp) bg = bipole(rec=[[7000, 8000, 9000], [7050, 8050, 9050], [100, 100, 100], [100, 100, 100], 0, 5], **inp) bh = bipole(rec=[[7000, 8000, 9000], [7050, 8050, 9050], [100, 100, 100], [100, 100, 100], 0, 5], recpts=10, **inp)
assert_allclose(be[:, 0], ba)
numpy.testing.assert_allclose
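Several of the comparisons above pass tolerances to assert_allclose positionally, e.g. assert_allclose(res, val[1], 3e-2, 1e-17, True). For reference, those positions map to rtol, atol and equal_nan; the two calls below are equivalent:

import numpy as np
from numpy.testing import assert_allclose

a = np.array([1.0, 2.0, np.nan])
b = a * (1 + 1e-6)

assert_allclose(a, b, 1e-3, 1e-24, True)                      # positional form
assert_allclose(a, b, rtol=1e-3, atol=1e-24, equal_nan=True)  # keyword form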
#!/usr/bin/env python3 # -*- coding: utf-8 -*- """ Created on Thu Oct 22 17:10:20 2020 @author: andreas """ from Basefolder import basefolder from Geometry_Grid import Geometry_Grid from datetime import datetime import numpy as np from Finder_1d import Finder_1d import matplotlib.pyplot as plt import scipy.spatial.distance as dist import seaborn as sns plt.rcParams['axes.facecolor'] = 'w'; #**************************** # Parameters threshold = 10; points_per_dimension = 15; #vary for Fig. S19 #**************************** def PlotScatter(labels,XC,ax=[],showScaleBar=False,showBorder=False): # Get correctly detected: correct_detected = np.ones_like(labels); if(ax == []): fig,ax = plt.figure(); mark = (labels==-1); sns.scatterplot(x=XC[mark,0],y=XC[mark,1],color='grey',alpha=0.2,ax=ax); mark = (labels>=0); sns.scatterplot(x=XC[mark,0],y=XC[mark,1],hue=labels[mark],palette='Set1', size=0.2,style=-1*correct_detected[mark],legend=False,ax=ax); ax.set_aspect('equal'); x_0 = 0; y_0 = np.min(XC[:,1]) - 80; if(showScaleBar): ax.plot([x_0,x_0+100],[y_0,y_0],'k') ax.annotate('$100nm$',(x_0+50,y_0+10),fontsize='large',ha='center'); else: ax.plot([x_0,x_0+100],[y_0,y_0],'w') ax.set_aspect(1); ax.set_xticks([]); ax.set_yticks([]); ax.axis('off'); if(ax==[]): plt.show(); for dbscanType in ['dbscan','DbscanLoop']: for name_idx in ["FigS3","FigS4"]: name = 'Case'+str(name_idx)+'_'+dbscanType; if(name_idx == "FigS4"): params = {'n_side':5, 'seed':1, 'Delta_ratio':.8, 'noise_ratio':1., 'unit_type':'Clusters_DNA_1mers'};#"Clusters_DNA_1mers";#"Clusters_Neuron"; elif(name_idx == "FigS3"): params = {'n_side':5, 'seed':1, 'Delta_ratio':0.8, 'noise_ratio':1.5, 'unit_type':'Clusters_Neuron'};#"Clusters_DNA_1mers";#"Clusters_Neuron"; #**************************** now = datetime.now() date_time = now.strftime("%Y_%m_%d_%H_%M_%S"); filename_dataframe = "Results_"+date_time+".txt"; #basefolder = "Results/"; G = Geometry_Grid(basefolder,params['unit_type'], n_side=params['n_side'], Delta_ratio=params['Delta_ratio'], noise_ratio=params['noise_ratio']); G.GeneratePoints(params['seed']); #Test: What does testset look like? G.PlotScatter(basefolder+name+"_Groundtruth.pdf"); XC = G.XC; FD = Finder_1d(algo=dbscanType,threshold=threshold,points_per_dimension=points_per_dimension);#,points_per_dimension=20); labels = FD.fit(XC); #********************************************* threshold = FD.threshold; sigmas = np.asarray(FD.data['sigmas']); sigma_opt = FD.selected_parameters['sigma']; index_opt =
np.where(sigmas==sigma_opt)
numpy.where
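The Finder script recovers the index of the selected kernel width by comparing the stored sigma array against sigma_opt with numpy.where. Exact floating-point comparison only works here because sigma_opt was taken from the same array; a tolerance-based lookup is safer when the value comes from elsewhere. A small illustration with made-up values:

import numpy as np

sigmas = np.linspace(0.05, 0.5, 10)
sigma_opt = sigmas[3]                                     # value drawn from the same array

index_opt = np.where(sigmas == sigma_opt)[0][0]           # exact match -> 3
index_safe = int(np.argmin(np.abs(sigmas - sigma_opt)))   # tolerant alternative
print(index_opt, index_safe)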
""" This module to performs linear analysis of coupled lattices. Notation is the same as in reference [3] In case some strange results appear in phase advances or beta functions, the reading of [2] is encouraged, since the authors discuss subtleties not addressed here for strong coupled motion. References: [1] <NAME>., & <NAME>. (1973). Parametrization of Linear Coupled Motion in Periodic Systems. IEEE Transactions on Nuclear Science, 20(3), 885–888. https://doi.org/10.1109/TNS.1973.4327279 [2] <NAME>., & <NAME>. (1999). Linear analysis of coupled lattices. Physical Review Special Topics - Accelerators and Beams, 2(7), 22–26. https://doi.org/10.1103/physrevstab.2.074001 [3] <NAME>, Some Useful Linear Coupling Approximations. C-A/AP/#101 Brookhaven Nat. Lab. (July 2003) """ import numpy as _np from mathphys.functions import get_namedtuple as _get_namedtuple from .. import lattice as _lattice from .. import tracking as _tracking from ..utils import interactive as _interactive from .miscellaneous import OpticsException as _OpticsException class EdwardsTeng(_np.record): """Edwards and Teng decomposition of the transfer matrices. Notation is the same as in reference [3]. In case some strange results appear in phase advances or beta functions, the reading of [2] is encouraged, since the authors discuss subtleties not addressed here for strong coupled motion. References: [1] <NAME>., & <NAME>. (1973). Parametrization of Linear Coupled Motion in Periodic Systems. IEEE Transactions on Nuclear Science, 20(3), 885–888. https://doi.org/10.1109/TNS.1973.4327279 [2] <NAME>., & <NAME>. (1999). Linear analysis of coupled lattices. Physical Review Special Topics - Accelerators and Beams, 2(7), 22–26. https://doi.org/10.1103/physrevstab.2.074001 [3] <NAME>, Some Useful Linear Coupling Approximations. C-A/AP/#101 Brookhaven Nat. Lab. (July 2003) Contains the decomposed parameters: spos (array, len(indices)x2x2) : longitudinal position [m] beta1 (array, len(indices)) : beta of first eigen-mode beta2 (array, len(indices)) : beta of second eigen-mode alpha1 (array, len(indices)) : alpha of first eigen-mode alpha2 (array, len(indices)) : alpha of second eigen-mode gamma1 (array, len(indices)) : gamma of first eigen-mode gamma2 (array, len(indices)) : gamma of second eigen-mode mu1 (array, len(indices)): phase advance of the first eigen-mode mu2 (array, len(indices)): phase advance of the second eigen-mode W (array, len(indices)x2x2) : matrices W in [3] """ DTYPE = '<f8' ORDER = _get_namedtuple('Order', field_names=[ 'spos', 'beta1', 'alpha1', 'mu1', 'beta2', 'alpha2', 'mu2', 'W_11', 'W_12', 'W_21', 'W_22', 'eta1', 'etap1', 'eta2', 'etap2', 'rx', 'px', 'ry', 'py', 'de', 'dl']) def __setattr__(self, attr, val): """.""" if attr == 'co': self._set_co(val) else: super().__setattr__(attr, val) def __str__(self): """.""" rst = '' rst += 'spos : '+'{0:+10.3e}'.format(self.spos) fmt = '{0:+10.3e}, {1:+10.3e}' rst += '\nrx, ry : '+fmt.format(self.rx, self.ry) rst += '\npx, py : '+fmt.format(self.px, self.py) rst += '\nde, dl : '+fmt.format(self.de, self.dl) rst += '\nmu1, mu2 : '+fmt.format(self.mu1, self.mu2) rst += '\nbeta1, beta2 : '+fmt.format(self.beta1, self.beta2) rst += '\nalpha1, alpha2: '+fmt.format(self.alpha1, self.alpha2) rst += '\neta1, eta2 : '+fmt.format(self.eta1, self.eta2) rst += '\netap1, etap2 : '+fmt.format(self.etap1, self.etap2) return rst @property def co(self): """Closed-Orbit in XY plane coordinates. Returns: numpy.ndarray (6, ): 6D phase space point around matrices were calculated. 
""" return _np.array([ self.rx, self.px, self.ry, self.py, self.de, self.dl]) @property def W(self): """2D mixing matrix from ref [3]. Returns: numpy.ndarray (2x2): W matrix from ref [3]. """ return _np.array([[self.W_11, self.W_12], [self.W_21, self.W_22]]) @W.setter def W(self, val): self[EdwardsTeng.ORDER.W_11] = val[0, 0] self[EdwardsTeng.ORDER.W_12] = val[0, 1] self[EdwardsTeng.ORDER.W_21] = val[1, 0] self[EdwardsTeng.ORDER.W_22] = val[1, 1] @property def d(self): """Parameter d from ref [3], calculated via equation 81. Returns: float: d from ref [3]. """ return _np.sqrt(1 - _np.linalg.det(self.W)) @property def R(self): """4D matrix that transforms from normal modes to XY plane. Returns: numpy.ndarray (4x4): R matrix from ref [3]. """ deyes = self.d * _np.eye(2) return _np.block([ [deyes, _symplectic_transpose(self.W)], [-self.W, deyes]]) @property def Rinv(self): """4D matrix that transforms from XY plane to normal modes. Returns: numpy.ndarray (4x4): Rinv matrix from ref [3]. """ deyes = self.d * _np.eye(2) return _np.block([ [deyes, -_symplectic_transpose(self.W)], [self.W, deyes]]) def from_normal_modes(self, pos): """Transform from normal modes to XY plane. Args: pos (numpy.ndarray): (4, N) or (6, N) positions in phase space in normal modes coordinates. Returns: pos (numpy.ndarray): (4, N) or (6, N) positions in phase space in XY coordinates. """ pos = pos.copy() pos[:4] = self.R @ pos[:4] return pos def to_normal_modes(self, pos): """Transform from XY plane to normal modes. Args: pos (numpy.ndarray): (4, N) or (6, N) positions in phase space in XY coordinates. Returns: pos (numpy.ndarray): (4, N) or (6, N) positions in phase space in normal mode coordinates. """ pos = pos.copy() pos[:4] = self.Rinv @ pos[:4] return pos def make_dict(self): """.""" cod = self.co beta = [self.beta1, self.beta2] alpha = [self.alpha1, self.alpha2] eta = [self.eta1, self.eta2] etap = [self.etap1, self.etap2] mus = [self.mu1, self.mu2] return { 'co': cod, 'beta': beta, 'alpha': alpha, 'eta': eta, 'etap': etap, 'mu': mus} @staticmethod def make_new(*args, **kwrgs): """Build a Twiss object.""" if args: if isinstance(args[0], dict): kwrgs = args[0] twi = EdwardsTengArray(1) cod = kwrgs.get('co', (0.0,)*6) twi['rx'], twi['px'], twi['ry'], twi['py'], twi['de'], twi['dl'] = cod twi['mu1'], twi['mu2'] = kwrgs.get('mu', (0.0, 0.0)) twi['beta1'], twi['beta2'] = kwrgs.get('beta', (0.0, 0.0)) twi['alpha1'], twi['alpha2'] = kwrgs.get('alpha', (0.0, 0.0)) twi['eta1'], twi['eta2'] = kwrgs.get('eta', (0.0, 0.0)) twi['etap1'], twi['etap2'] = kwrgs.get('etap', (0.0, 0.0)) return twi[0] def _set_co(self, value): """.""" try: leng = len(value) except TypeError: leng = 6 value = [value, ]*leng if leng != 6: raise ValueError('closed orbit must have 6 elements.') self[EdwardsTeng.ORDER.rx] = value[0] self[EdwardsTeng.ORDER.px] = value[1] self[EdwardsTeng.ORDER.ry] = value[2] self[EdwardsTeng.ORDER.py] = value[3] self[EdwardsTeng.ORDER.de] = value[4] self[EdwardsTeng.ORDER.dl] = value[5] class EdwardsTengArray(_np.ndarray): """Array of Edwards and Teng objects. Notation is the same as in reference [3] In case some strange results appear in phase advances or beta functions, the reading of [2] is encouraged, since the authors discuss subtleties not addressed here for strong coupled motion. References: [1] <NAME>., & <NAME>. (1973). Parametrization of Linear Coupled Motion in Periodic Systems. IEEE Transactions on Nuclear Science, 20(3), 885–888. https://doi.org/10.1109/TNS.1973.4327279 [2] <NAME>., & <NAME>. (1999). 
Linear analysis of coupled lattices. Physical Review Special Topics - Accelerators and Beams, 2(7), 22–26. https://doi.org/10.1103/physrevstab.2.074001 [3] <NAME>, Some Useful Linear Coupling Approximations. C-A/AP/#101 Brookhaven Nat. Lab. (July 2003) Contains the decomposed parameters: spos (array, len(indices)x2x2) : longitudinal position [m] beta1 (array, len(indices)) : beta of first eigen-mode beta2 (array, len(indices)) : beta of second eigen-mode alpha1 (array, len(indices)) : alpha of first eigen-mode alpha2 (array, len(indices)) : alpha of second eigen-mode gamma1 (array, len(indices)) : gamma of first eigen-mode gamma2 (array, len(indices)) : gamma of second eigen-mode mu1 (array, len(indices)): phase advance of the first eigen-mode mu2 (array, len(indices)): phase advance of the second eigen-mode L1 (array, len(indices)x2x2) : matrices L1 in [3] L2 (array, len(indices)x2x2) : matrices L2 in [3] W (array, len(indices)x2x2) : matrices W in [3] d (array, len(indices)): d parameter in [3] """ def __eq__(self, other): """.""" return _np.all(super().__eq__(other)) def __new__(cls, edteng=None, copy=True): """.""" length = 1 if isinstance(edteng, (int, _np.int)): length = edteng edteng = None elif isinstance(edteng, EdwardsTengArray): return edteng.copy() if copy else edteng if edteng is None: arr = _np.zeros( (length, len(EdwardsTeng.ORDER)), dtype=EdwardsTeng.DTYPE) elif isinstance(edteng, _np.ndarray): arr = edteng.copy() if copy else edteng elif isinstance(edteng, _np.record): arr = _np.ndarray( (edteng.size, len(EdwardsTeng.ORDER)), buffer=edteng.data) arr = arr.copy() if copy else arr fmts = [(fmt, EdwardsTeng.DTYPE) for fmt in EdwardsTeng.ORDER._fields] return super().__new__( cls, shape=(arr.shape[0], ), dtype=(EdwardsTeng, fmts), buffer=arr) @property def spos(self): """.""" return self['spos'] @spos.setter def spos(self, value): self['spos'] = value @property def beta1(self): """.""" return self['beta1'] @beta1.setter def beta1(self, value): self['beta1'] = value @property def alpha1(self): """.""" return self['alpha1'] @alpha1.setter def alpha1(self, value): self['alpha1'] = value @property def gamma1(self): """.""" return (1 + self['alpha1']*self['alpha1'])/self['beta1'] @property def mu1(self): """.""" return self['mu1'] @mu1.setter def mu1(self, value): self['mu1'] = value @property def beta2(self): """.""" return self['beta2'] @beta2.setter def beta2(self, value): self['beta2'] = value @property def alpha2(self): """.""" return self['alpha2'] @alpha2.setter def alpha2(self, value): self['alpha2'] = value @property def gamma2(self): """.""" return (1 + self['alpha2']*self['alpha2'])/self['beta2'] @property def mu2(self): """.""" return self['mu2'] @mu2.setter def mu2(self, value): self['mu2'] = value @property def W_11(self): """.""" return self['W_11'] @W_11.setter def W_11(self, val): self['W_11'] = val @property def W_12(self): """.""" return self['W_12'] @W_12.setter def W_12(self, val): self['W_12'] = val @property def W_21(self): """.""" return self['W_21'] @W_21.setter def W_21(self, val): self['W_21'] = val @property def W_22(self): """.""" return self['W_22'] @W_22.setter def W_22(self, val): self['W_22'] = val @property def W(self): """2D mixing matrix from ref [3]. Returns: numpy.ndarray (Nx2x2): W matrix from ref [3]. """ mat =
_np.zeros((self.W_11.size, 2, 2))
numpy.zeros
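The array-level W property starts from a zero (N, 2, 2) buffer; presumably it is then filled from the four flat components, mirroring the scalar property shown earlier. A sketch of that assembly on plain arrays (the filling step is an assumption based on the scalar version):

import numpy as np

# Flat per-element components, as stored in the record array
W_11 = np.array([1.0, 0.5])
W_12 = np.array([0.0, 0.1])
W_21 = np.array([0.2, 0.0])
W_22 = np.array([1.0, 0.9])

W = np.zeros((W_11.size, 2, 2))   # one 2x2 mixing matrix per element
W[:, 0, 0] = W_11
W[:, 0, 1] = W_12
W[:, 1, 0] = W_21
W[:, 1, 1] = W_22
print(W[1])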
import collections.abc import numbers import warnings import numpy import scopyon._epifm import scopyon.config import scopyon.image from logging import getLogger _log = getLogger(__name__) __all__ = [ "EnvironSettings", "EPIFMSimulator", "form_image", "generate_images", "create_simulator" ] class EnvironSettings: def __init__(self, config): self.initialize(config) def initialize(self, config): self.processes = config.processes class EPIFMSimulator(object): def __init__(self, config=None, method=None, rng=None): """ Constructor Args: config (Configuration or str, optional): Configurations. The default is None. method (str, optional): A name of method used. The default is None (config.default). rng (numpy.RandomState, optional): A random number generator. The default is None. """ if config is None: config = scopyon.config.Configuration() elif isinstance(config, str): config = scopyon.config.Configuration(filename=config) elif not isinstance(config, scopyon.config.Configuration): raise TypeError("Configuration or str must be given [{}].".format(type(config))) if rng is None: warnings.warn('A random number generator is not given.') rng = numpy.random.RandomState() self.__config = config self.__method = method or config.default.lower() self.__rng = rng def base(self): return scopyon._epifm._EPIFMSimulator( configs=scopyon._epifm.EPIFMConfigs(self.__config[self.__method], rng=self.__rng), environ=EnvironSettings(self.__config.environ)) def __format_data(self, inputs): assert isinstance(inputs, numpy.ndarray) if inputs.ndim != 2: raise ValueError("The given 'inputs' has wrong dimension.") if inputs.shape[1] == 2: data = numpy.hstack(( numpy.zeros((inputs.shape[0], 1)), inputs * self.__config.preprocessing.scale)) data = numpy.hstack(( data, numpy.zeros((data.shape[0], 2)))) data[:, 3] = numpy.arange(inputs.shape[0]) #FIXME: Molecule ID data[:, 4] = 1.0 # Photon state elif inputs.shape[1] == 3: origin = numpy.array(self.__config.preprocessing.origin) data = inputs * self.__config.preprocessing.scale - origin unit_z = numpy.cross( self.__config.preprocessing.unit_x, self.__config.preprocessing.unit_y) data = numpy.hstack(( numpy.dot(data, unit_z).reshape((-1, 1)), numpy.dot(data, self.__config.preprocessing.unit_x).reshape((-1, 1)), numpy.dot(data, self.__config.preprocessing.unit_y).reshape((-1, 1)), )) data = numpy.hstack(( data, numpy.zeros((data.shape[0], 2)))) data[:, 3] = numpy.arange(inputs.shape[0]) #FIXME: Molecule ID data[:, 4] = 1.0 # Photon state elif inputs.shape[1] == 4: data = numpy.hstack(( numpy.zeros((inputs.shape[0], 1)), inputs[:, : 2] * self.__config.preprocessing.scale, inputs[:, 2: ])) elif inputs.shape[1] == 5: origin = numpy.array(self.__config.preprocessing.origin) data = inputs[:, : 3] * self.__config.preprocessing.scale - origin unit_z = numpy.cross( self.__config.preprocessing.unit_x, self.__config.preprocessing.unit_y) data = numpy.hstack((
numpy.dot(data, unit_z)
numpy.dot
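# The projection step from the row above, isolated as a hedged sketch: the third
# basis vector comes from numpy.cross, and each 3D point is projected onto
# (unit_z, unit_x, unit_y). Orthonormal 3-vectors are assumed; the helper name
# `project_points` is illustrative.
import numpy

def project_points(data, unit_x, unit_y):
    """Project (N, 3) points onto the frame implied by unit_x and unit_y."""
    unit_z = numpy.cross(unit_x, unit_y)
    return numpy.hstack((
        numpy.dot(data, unit_z).reshape((-1, 1)),
        numpy.dot(data, unit_x).reshape((-1, 1)),
        numpy.dot(data, unit_y).reshape((-1, 1)),
    ))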
import scipy.spatial as ss
from scipy.special import digamma
import numpy as np
import math

# CONTINUOUS ESTIMATORS

def entropy(x, k=3, base=math.e, noise_level=1e-10):
    """The classic K-L k-nearest neighbor continuous entropy estimator."""
    assert k < x.shape[0]
    d = x.shape[1]
    n = x.shape[0]
    x += np.random.rand(*x.shape) * noise_level
    tree = ss.cKDTree(x)
    knn_distance = tree.query(x, [k + 1], p=float('inf'))[0]
    const = digamma(n) - digamma(k) + d * math.log(2)
    return (const + d * np.mean(np.log(knn_distance))) / math.log(base)

def conditional_entropy(x, y, k=3, base=math.e):
    """The classic K-L k-nearest neighbor continuous entropy estimator for the
    entropy of X conditioned on Y."""
    assert x.shape[0] == y.shape[0]
    h_xy = entropy(np.concatenate((x, y), axis=-1), k, base)
    h_y = entropy(y, k, base)
    return h_xy - h_y

def mutual_information(x, y, k=3, base=math.e, noise_level=1e-10):
    """Mutual information of x and y."""
    assert x.shape[0] == y.shape[0]
    assert k < x.shape[0]
    x += np.random.rand(*x.shape) * noise_level
    y +=
np.random.rand(*y.shape)
numpy.random.rand
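# A hedged reading of the row above: the truncated statement presumably mirrors
# the x jitter, i.e.  y += np.random.rand(*y.shape) * noise_level.  The constant
# in `entropy` is the Kozachenko-Leonenko form for the max-norm,
#     H(X) ~= digamma(n) - digamma(k) + d*log(2) + (d/n) * sum_i log(eps_i),
# where eps_i is the distance from sample i to its k-th neighbour.
import numpy as np

x_toy = np.random.randn(1000, 1)
# For a standard normal the differential entropy is 0.5*log(2*pi*e) ~= 1.42 nats,
# so entropy(x_toy, k=3) from the module above should land near that value
# (stochastic, illustrative check only).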
#!/usr/bin/env python3 # -*- coding: utf-8 -*- from abc import ABCMeta, abstractmethod import numpy as np import unittest CARDINAL_DIRECTIONS = ("N", "E", "S", "W") FNAME = "input.txt" TEST_FNAME = "test_input.txt" def main(): """Main function.""" data = load_input(FNAME) part1(data) part2(data) print("\nUnittests") unittest.main() def part1(data): """Solution to day 12, part 1.""" ship = Ship() for instruction in data: ship.action(instruction) distance = ship.manhattan_distance() print(f"The Manhattan distance is {distance}.") return distance def part2(data): """Solution to day 12, part 2.""" ship = ShipWithWaypoint() for instruction in data: ship.action(instruction) distance = ship.manhattan_distance() print(f"The Manhattan distance is {distance}.") return distance def load_input(fname): """Read in the data, return as a list.""" with open(fname, "r") as f: data = f.readlines() return data class FloatingObject(metaclass=ABCMeta): """An object in a two-dimensional space. The following convention for position is used: north (N) and east (E) are assigned to positive values, south (S) and west (W) to negative values. Following typical conventions, we formulate the coordinates as a (latitude, longitude) pair. Example ------- Initial position: (0, 0) Moving N10 results in a new position (10, 0). Following this, moving S20 results in a new position (-10, 0). """ def __init__(self, initial_position): """Create an object with an initial position.""" # Call np.asarray twice to create two deepcopys. self.initial_position = np.asarray(initial_position) self.current_position = np.asarray(initial_position) def update_position(self, direction, value): """Update the object's position.""" if direction == "N": self.current_position[0] += value return if direction == "S": self.current_position[0] -= value return if direction == "E": self.current_position[1] += value return if direction == "W": self.current_position[1] -= value return msg = "No valid direction indicated!" raise ValueError(msg) class Ship(FloatingObject): """A ship moving in a two-dimensional space.""" def __init__(self, initial_direction="E", initial_position=[0,0]): super().__init__(initial_position) self.current_direction_idx = CARDINAL_DIRECTIONS.index(initial_direction) def action(self, instruction): """Perform an action, which can either be a movement or a rotation.""" direction = instruction[0] value = int(instruction[1:]) if direction in ("L", "R"): self.update_direction(direction, value) else: self.update_position(direction, value) def update_position(self, direction, value): """Update the position of the ship.""" if direction == "F": direction = CARDINAL_DIRECTIONS[self.current_direction_idx] super().update_position(direction, value) def update_direction(self, direction, degrees): """Update the direction by rotating X degrees to the left or right. Note that currently, 'degrees' must be a multiple of 90. """ if abs(degrees) % 90 != 0: msg = f"'degrees' is not a multiple of 90. degrees is: {degrees}" raise ValueError(msg) if direction not in ("L", "R"): msg = "'direction' must be 'L' or 'R'." 
raise ValueError(msg) degrees = -1 * degrees if direction == "L" else degrees self.current_direction_idx += degrees // 90 self.current_direction_idx %= len(CARDINAL_DIRECTIONS) def manhattan_distance(self): """Return the sum of the absolute values of E/W and N/S position.""" distance = np.abs(self.current_position - self.initial_position).sum() return distance class Waypoint(FloatingObject): def __init__(self, initial_position): super().__init__(initial_position) def rotate_waypoint(self, direction, value): """Rotate the waypoint around the ship.""" value = -1 * value if direction == "L" else value value = np.radians(value) rotation_matrix = np.array([[np.cos(value), -
np.sin(value)
numpy.sin
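# The truncated array in the row above is presumably the standard 2D rotation
# matrix; a self-contained sketch (the L/R sign handling shown earlier in the
# snippet is assumed to stay unchanged):
import numpy as np

def rotation_matrix_2d(degrees):
    """Rotation by `degrees`; the sign convention mirrors the matrix started above."""
    value = np.radians(degrees)
    return np.array([[np.cos(value), -np.sin(value)],
                     [np.sin(value),  np.cos(value)]])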
import unittest from numpy import hstack, max, abs, ones, zeros, sum, sqrt from cantera import Solution, one_atm, gas_constant import numpy as np from spitfire import ChemicalMechanismSpec from os.path import join, abspath from subprocess import getoutput test_mech_directory = abspath(join('tests', 'test_mechanisms', 'old_xmls')) mechs = [x.replace('.xml', '') for x in getoutput('ls ' + test_mech_directory + ' | grep .xml').split('\n')] def validate_on_mechanism(mech, temperature, pressure, test_rhs=True, test_jac=True): xml = join(test_mech_directory, mech + '.xml') T = temperature p = pressure r = ChemicalMechanismSpec(xml, 'gas').griffon gas = Solution(xml) ns = gas.n_species gas.TPX = T, p, ones(ns) y = gas.Y state = hstack((T, y[:-1])) rhsGR = np.empty(ns) r.reactor_rhs_isobaric(state, p, 0., np.ndarray(1), 0, 0, 0, 0, 0, 0, 0, False, rhsGR) if test_jac: Tin, yin, tau = 0, np.ndarray(1), 0 rhsTmp = np.empty(ns) jacGR = np.empty(ns * ns) r.reactor_jac_isobaric(state, p, Tin, yin, tau, 0, 0, 0, 0, 0, 0, False, 0, 0, rhsTmp, jacGR) jacGR = jacGR.reshape((ns, ns), order='F') dT = 1.e-6 dY = 1.e-6 jacFD = np.empty((ns, ns)) rhsGR1, rhsGR2 = np.empty(ns), np.empty(ns) state_m = hstack((T - dT, y[:-1])) state_p = hstack((T + dT, y[:-1])) r.reactor_rhs_isobaric(state_m, p, Tin, yin, tau, 0, 0, 0, 0, 0, 0, False, rhsGR1) r.reactor_rhs_isobaric(state_p, p, Tin, yin, tau, 0, 0, 0, 0, 0, 0, False, rhsGR2) jacFD[:, 0] = (- rhsGR1 + rhsGR2) / (2. * dT) for i in range(ns - 1): y_m1, y_p1 = np.copy(y),
np.copy(y)
numpy.copy
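# The two copies above feed a central finite-difference check of the analytic
# Jacobian. A stripped-down sketch of that pattern (generic `rhs` callable,
# not Spitfire's actual API):
import numpy as np

def fd_jacobian(rhs, state, eps=1.e-6):
    """Central-difference Jacobian of rhs(state), one column per state component."""
    n = state.size
    jac = np.empty((n, n))
    for i in range(n):
        s_m, s_p = np.copy(state), np.copy(state)
        s_m[i] -= eps
        s_p[i] += eps
        jac[:, i] = (rhs(s_p) - rhs(s_m)) / (2.0 * eps)
    return jac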
import numpy as np def iterative_cumulative_shape_measure( pos, weights, rmin=0.0, rmax=1.0, ITER_MAX=10, TOL=0.01 ): """ Measure the shape of a halo within a given aperture Arguments: -pos : ARRAY containing 3D particle positions -weights : ARRAY containing particle weight (typically mass) -rmin : Minimum extent of aperture [FLOAT] -rmax : Maximum extent of aperture [FLOAT] -ITER_MAX : INTEGER specifying the maximum number of iterations -TOL : Tolerence that defines convergence [FLOAT] Returns: -Q : Ratio of semi-major to major axes [FLOAT] -S : Ratio of minor to major axes [FLOAT] -Iv : Eigenvectors of final shape [2D ARRAY] """ # Perform intial inertial tensor and shape calculation r = np.sqrt((pos ** 2.0).sum(axis=-1)) rdx = np.where((r > rmin) & (r <= rmax))[0] p = np.copy(pos) w = np.copy(weights) Iten = compute_inertial_tensor(p[rdx], w[rdx]) Ivalues, Ivectors = compute_eigenvalues_and_vectors(Iten) q = Ivalues[1] / Ivalues[0] s = Ivalues[2] / Ivalues[0] # Now iterate for j in range(0, ITER_MAX, 1): # Rotate into frame RM = Ivectors.T p_rot = rotate_vectors_cm(RM, p) p = np.copy(p_rot) # Reselect those still within aperture r = np.sqrt( p_rot[:, 0] ** 2.0 + (p_rot[:, 1] / q) ** 2.0 + (p_rot[:, 2] / s) ** 2.0 ) rdx = np.where((r > rmin) & (r <= rmax))[0] # New inertial tensor, shape calc. Iten = compute_inertial_tensor(p_rot[rdx], w[rdx]) Ivalues, Ivectors = compute_eigenvalues_and_vectors(Iten) # Compare updated shape values, break if converged q_new = Ivalues[1] / Ivalues[0] s_new = Ivalues[2] / Ivalues[0] if abs((q_new - q) / q) < TOL and abs((s_new - s) / s) < TOL: q = q_new s = s_new break else: q = q_new s = s_new return q, s, Ivectors def iterative_radial_shape_profile( pos, weights, R200, rmin=0.05, rmax=5.0, Nb=25, ITER_MAX=10, TOL=0.01, IBzero=True ): """ Measure halo shape in radial annuli Arguments: -pos : ARRAY containing 3D particle positions -weights : ARRAY containing particle weight (typically mass) -R200 : Halo aperture to normalize radial bins [FLOAT] -rmin : Minimum extent of radial profile [FLOAT] -rmax : Maximum extent of radial profile [FLOAT] -Nb : Number of radial bins [INTEGER] -ITER_MAX : INTEGER specifying the maximum number of iterations -TOL : Tolerence that defines convergence [FLOAT] -IBzero : BOOLEAN, if TRUE reset inner most bin edge to zero Returns: -Q : Ratio of semi-major to major axes [ARRAY] -S : Ratio of minor to major axes [ARRAY] -Iv : Eigenvectors of final shapes [3D ARRAY] """ # Set up radial bins -- zero inner most edge bins = np.logspace(
np.log10(rmin)
numpy.log10
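# The bin setup the truncated call presumably completes: logarithmic edges
# between rmin and rmax, with the innermost edge reset to zero when IBzero is
# True. Whether Nb + 1 edges are used and whether the edges are later scaled by
# R200 (or the radii divided by it) are assumptions; the docstring only says
# R200 normalises the bins.
import numpy as np

rmin, rmax, Nb = 0.05, 5.0, 25
bins = np.logspace(np.log10(rmin), np.log10(rmax), Nb + 1)
bins[0] = 0.0  # IBzero=True: zero the innermost edge, as the comment above states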
from torchvision.models.resnet import resnet18 from cifar10_models import resnet import torchvision.transforms as T import torch from torchvision.datasets import CIFAR10 from torch.utils.data import DataLoader import numpy as np from time import time from math import ceil import os import argparse from common_keys import * from deployment_utils import save_onnx_model, onnx_to_tensorrt, \ initialize_tensorrt_model, tensorrt_inference, to_numpy,\ prepare_quantization_model, implement_calibration, print_size_of_model from pytorch_quantization import nn as quant_nn from pytorch_quantization import calib from pytorch_quantization.tensor_quant import QuantDescriptor from pytorch_quantization import quant_modules import torch.nn.utils.prune as prune def load_dataloader(batch_size, is_train=False): mean = [0.4914, 0.4822, 0.4465] std = [0.2471, 0.2435, 0.2616] transform = T.Compose( [ T.ToTensor(), T.Normalize(mean, std), ] ) dataset = CIFAR10("/home/ek21/remote-pycharm/PyTorch_CIFAR10/data", train=is_train, transform=transform, download=True) dataloader = DataLoader( dataset, batch_size=batch_size, num_workers=0, drop_last=True, pin_memory=True, ) return dataloader def main_func(opset_version, precision, batch_size, inference_type, model, quantization_type=None, calibration_type=None, number_of_calibration_samples=None): resnet_models = resnet.__dict__ MODEL = model weight_file = "weights/" device = "cuda" batch_size = batch_size number_of_calibration_batch = 0 if precision == INT8 and quantization_type is not None and calibration_type is not None: number_of_calibration_batch = ceil(number_of_calibration_samples / batch_size) number_of_calibration_samples = batch_size * number_of_calibration_batch quantization_mode = True naming = f"{MODEL}_{quantization_type}_{calibration_type}_ncs_{number_of_calibration_samples}_op{opset_version}_batchsize{batch_size}" else: quantization_mode = False naming = f"{MODEL}_{precision}_op{opset_version}_batchsize{batch_size}" tensorrt_file = f"{weight_file}{naming}.{TENSORRT_EXTENSION}" onnx_file = f"{weight_file}{naming}.{ONNX_EXTENSION}" val_loader = load_dataloader(batch_size, is_train=False) train_loader = load_dataloader(batch_size, is_train=True) dummy_image = torch.zeros([batch_size, 3, 32, 32]) dummy_image = dummy_image.to(device) prediction_prob = torch.zeros(batch_size, 10) dummy_outputs = {"out": prediction_prob} if inference_type == TENSORRT_INFERENCE: if not os.path.isfile(onnx_file): if quantization_mode: prepare_quantization_model(calibration_type) model = resnet_models[MODEL](pretrained=False, num_classes=10) model.load_state_dict(torch.load(f"./cifar10_models/state_dicts/{MODEL}.pt")) model.to(device) model.eval() if quantization_mode: implement_calibration(model=model, dataloader=train_loader, device=device, calibration_type=calibration_type, calibration_batch_count=number_of_calibration_batch) if quantization_mode: quant_nn.TensorQuantizer.use_fb_fake_quant = True save_onnx_model(inputs=dummy_image, outputs=dummy_outputs, model=model, onnx_file=onnx_file, opset_version=opset_version) if not os.path.isfile(tensorrt_file): onnx_to_tensorrt(onnx_file=onnx_file, tensorrt_file=tensorrt_file, precision=precision) print_size_of_model(tensorrt_file) context, bindings, device_input, device_tensorrt_outs, stream, host_tensorrt_outs = \ initialize_tensorrt_model(tensorrt_file=tensorrt_file, image=to_numpy(dummy_image), output_names=["out"], outputs=dummy_outputs) elif inference_type == TORCH_INFERENCE: model = resnet_models[MODEL](pretrained=False, 
num_classes=10) model.load_state_dict(torch.load("./cifar10_models/state_dicts/resnet18.pt")) model.to(device) model.eval() accuracy_list = [] time_list = [] with torch.no_grad(): for batch_index, (image, label) in enumerate(val_loader): start_time = time() if inference_type == TENSORRT_INFERENCE: tensorrt_inference(device_input=device_input, context=context, bindings=bindings, device_tensorrt_outs=device_tensorrt_outs, stream=stream, image=image, host_tensorrt_outs=host_tensorrt_outs) prediction_prob = host_tensorrt_outs["out"] elif inference_type == TORCH_INFERENCE: image = image.to(device) # label = label.to(device) prediction_prob = model(image) prediction_prob = to_numpy(prediction_prob) predictions = prediction_prob.argmax(1) label = to_numpy(label) end_time = time() inference_time = end_time - start_time if batch_index != 0: time_list.append(inference_time) number_of_corrects = np.sum(predictions == label) accuracy_list.append(number_of_corrects / batch_size) accuracy = np.mean(accuracy_list) print(f"average accuracy is {accuracy}") mean_inference_time =
np.mean(time_list)
numpy.mean
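# The tail of the evaluation loop above simply averages the per-batch
# statistics; in isolation, with illustrative values:
import numpy as np

accuracy_list = [0.91, 0.93, 0.92]   # fraction of correct predictions per batch
time_list = [0.012, 0.011, 0.013]    # seconds per batch (first warm-up batch skipped)
accuracy = np.mean(accuracy_list)
mean_inference_time = np.mean(time_list)
print(f"average accuracy is {accuracy}")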
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt

data = pd.read_csv("data.csv")
data.info()
"""
Data columns (total 33 columns):
 #   Column       Non-Null Count  Dtype
---  ------       --------------  -----
 0   id           569 non-null    int64
 .
 .
 .
 32  Unnamed: 32  0 non-null      float64
"""

data.drop(["Unnamed: 32", "id"], axis=1, inplace=True)
# data.head(10)
data.diagnosis = [1 if each == "M" else 0 for each in data.diagnosis]
y = data.diagnosis.values
x_data = data.drop(["diagnosis"], axis=1)

# %% Normalization
x_normalized = (x_data - np.min(x_data)) / (
np.max(x_data)
numpy.max
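# The expression being completed is ordinary min-max scaling to [0, 1]; the
# denominator presumably continues as max minus min. A tiny self-contained
# version on a toy frame (not the actual Wisconsin data):
import numpy as np
import pandas as pd

x_toy = pd.DataFrame({"radius_mean": [10.0, 14.0, 20.0]})
x_toy_normalized = (x_toy - np.min(x_toy)) / (np.max(x_toy) - np.min(x_toy))
# radius_mean becomes [0.0, 0.4, 1.0]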
"""InVEST Nutrient Delivery Ratio (NDR) module.""" import itertools import logging import os import pickle import numpy import pygeoprocessing import pygeoprocessing.routing from osgeo import gdal, ogr import taskgraph from .. import utils, validation, MODEL_METADATA from . import ndr_core LOGGER = logging.getLogger(__name__) ARGS_SPEC = { "model_name": MODEL_METADATA["ndr"].model_title, "pyname": MODEL_METADATA["ndr"].pyname, "userguide_html": MODEL_METADATA["ndr"].userguide, "args_with_spatial_overlap": { "spatial_keys": ["dem_path", "lulc_path", "runoff_proxy_path", "watersheds_path"], "different_projections_ok": True, }, "args": { "workspace_dir": validation.WORKSPACE_SPEC, "results_suffix": validation.SUFFIX_SPEC, "n_workers": validation.N_WORKERS_SPEC, "dem_path": { "type": "raster", "required": True, "validation_options": { "projected": True, }, "about": ( "A GDAL-supported raster file containing elevation values for " "each cell. Make sure the DEM is corrected by filling in " "sinks, and if necessary burning hydrographic features into " "the elevation model (recommended when unusual streams are " "observed.) See the Working with the DEM section of the " "InVEST User's Guide for more information."), "name": "DEM" }, "lulc_path": { "type": "raster", "required": True, "validation_options": { "projected": True, }, "about": ( "A GDAL-supported raster file containing integer values " "representing the LULC code for each cell. The LULC code " "should be an integer."), "name": "Land Use" }, "runoff_proxy_path": { "type": "raster", "required": True, "about": ( "Weighting factor to nutrient loads. Internally this value " "is normalized by its average values so a variety of data " "can be used including precipitation or quickflow."), "name": "Nutrient Runoff Proxy" }, "watersheds_path": { "type": "vector", "required": True, "validation_options": { "required_fields": ['ws_id'], "projected": True, }, "about": ( "A GDAL-supported vector file containing watersheds such " "that each watershed contributes to a point of interest " "where water quality will be analyzed. It must have the " "integer field 'ws_id' where the values uniquely identify " "each watershed."), "name": "Watersheds" }, "biophysical_table_path": { "validation_options": { "required_fields": ["lucode"], }, "type": "csv", "required": True, "about": ( "A CSV table containing model information corresponding to " "each of the land use classes in the LULC raster input. It " "must contain the fields 'lucode', 'load_n' (or p), 'eff_n' " "(or p), and 'crit_len_n' (or p) depending on which " "nutrients are selected."), "name": "Biophysical Table" }, "calc_p": { "type": "boolean", "required": True, "about": "Select to calculate phosphorus export.", "name": "Calculate phosphorus retention" }, "calc_n": { "type": "boolean", "required": True, "about": "Select to calculate nitrogen export.", "name": "Calculate Nitrogen Retention" }, "threshold_flow_accumulation": { "validation_options": { "expression": "value > 0", }, "type": "number", "required": True, "about": ( "The number of upstream cells that must flow into a cell " "before it's considered part of a stream such that " "retention stops and the remaining export is exported to the " "stream. 
Used to define streams from the DEM."), "name": "Threshold Flow Accumulation" }, "k_param": { "type": "number", "required": True, "about": ( "Calibration parameter that determines the shape of the " "relationship between hydrologic connectivity (the degree of " "connection from patches of land to the stream) and the " "nutrient delivery ratio (percentage of nutrient that " "actually reaches the stream)"), "name": "Borselli k parameter", }, "subsurface_critical_length_n": { "type": "number", "required": "calc_n", "name": "Subsurface Critical Length (Nitrogen)", "about": ( "The distance (traveled subsurface and downslope) after " "which it is assumed that soil retains nutrient at its " "maximum capacity, given in meters. If dissolved nutrients " "travel a distance smaller than Subsurface Critical Length, " "the retention efficiency will be lower than the Subsurface " "Maximum Retention Efficiency value defined. Setting this " "value to a distance smaller than the pixel size will result " "in the maximum retention efficiency being reached within " "one pixel only."), }, "subsurface_critical_length_p": { "type": "number", "required": "calc_p", "name": "Subsurface Critical Length (Phosphorus)", "about": ( "The distance (traveled subsurface and downslope) after " "which it is assumed that soil retains nutrient at its " "maximum capacity, given in meters. If dissolved nutrients " "travel a distance smaller than Subsurface Critical Length, " "the retention efficiency will be lower than the Subsurface " "Maximum Retention Efficiency value defined. Setting this " "value to a distance smaller than the pixel size will result " "in the maximum retention efficiency being reached within " "one pixel only."), }, "subsurface_eff_n": { "type": "number", "required": "calc_n", "name": "Subsurface Maximum Retention Efficiency (Nitrogen)", "about": ( "The maximum nutrient retention efficiency that can be " "reached through subsurface flow, a floating point value " "between 0 and 1. This field characterizes the retention due " "to biochemical degradation in soils."), }, "subsurface_eff_p": { "type": "number", "required": "calc_p", "name": "Subsurface Maximum Retention Efficiency (Phosphorus)", "about": ( "The maximum nutrient retention efficiency that can be " "reached through subsurface flow, a floating point value " "between 0 and 1. 
This field characterizes the retention due " "to biochemical degradation in soils."), } } } _OUTPUT_BASE_FILES = { 'n_export_path': 'n_export.tif', 'p_export_path': 'p_export.tif', 'watershed_results_ndr_path': 'watershed_results_ndr.shp', } INTERMEDIATE_DIR_NAME = 'intermediate_outputs' _INTERMEDIATE_BASE_FILES = { 'ic_factor_path': 'ic_factor.tif', 'load_n_path': 'load_n.tif', 'load_p_path': 'load_p.tif', 'modified_load_n_path': 'modified_load_n.tif', 'modified_load_p_path': 'modified_load_p.tif', 'ndr_n_path': 'ndr_n.tif', 'ndr_p_path': 'ndr_p.tif', 'runoff_proxy_index_path': 'runoff_proxy_index.tif', 's_accumulation_path': 's_accumulation.tif', 's_bar_path': 's_bar.tif', 's_factor_inverse_path': 's_factor_inverse.tif', 'stream_path': 'stream.tif', 'sub_crit_len_n_path': 'sub_crit_len_n.tif', 'sub_crit_len_p_path': 'sub_crit_len_p.tif', 'sub_eff_n_path': 'sub_eff_n.tif', 'sub_eff_p_path': 'sub_eff_p.tif', 'sub_effective_retention_n_path': 'sub_effective_retention_n.tif', 'sub_effective_retention_p_path': 'sub_effective_retention_p.tif', 'sub_load_n_path': 'sub_load_n.tif', 'sub_load_p_path': 'sub_load_p.tif', 'surface_load_n_path': 'surface_load_n.tif', 'surface_load_p_path': 'surface_load_p.tif', 'sub_ndr_n_path': 'sub_ndr_n.tif', 'sub_ndr_p_path': 'sub_ndr_p.tif', 'crit_len_n_path': 'crit_len_n.tif', 'crit_len_p_path': 'crit_len_p.tif', 'd_dn_path': 'd_dn.tif', 'd_up_path': 'd_up.tif', 'eff_n_path': 'eff_n.tif', 'eff_p_path': 'eff_p.tif', 'effective_retention_n_path': 'effective_retention_n.tif', 'effective_retention_p_path': 'effective_retention_p.tif', 'flow_accumulation_path': 'flow_accumulation.tif', 'flow_direction_path': 'flow_direction.tif', 'thresholded_slope_path': 'thresholded_slope.tif', 'dist_to_channel_path': 'dist_to_channel.tif', } _CACHE_BASE_FILES = { 'filled_dem_path': 'filled_dem.tif', 'aligned_dem_path': 'aligned_dem.tif', 'slope_path': 'slope.tif', 'aligned_lulc_path': 'aligned_lulc.tif', 'aligned_runoff_proxy_path': 'aligned_runoff_proxy.tif', 'runoff_mean_pickle_path': 'runoff_mean.pickle', 'surface_load_n_pickle_path': 'surface_load_n.pickle', 'surface_load_p_pickle_path': 'surface_load_p.pickle', 'subsurface_load_n_pickle_path': 'subsurface_load_n.pickle', 'subsurface_load_p_pickle_path': 'subsurface_load_p.pickle', 'export_n_pickle_path': 'export_n.pickle', 'export_p_pickle_path': 'export_p.pickle', } _TARGET_NODATA = -1 def execute(args): """Nutrient Delivery Ratio. Args: args['workspace_dir'] (string): path to current workspace args['dem_path'] (string): path to digital elevation map raster args['lulc_path'] (string): a path to landcover map raster args['runoff_proxy_path'] (string): a path to a runoff proxy raster args['watersheds_path'] (string): path to the watershed shapefile args['biophysical_table_path'] (string): path to csv table on disk containing nutrient retention values. For each nutrient type [t] in args['calc_[t]'] that is true, must contain the following headers: 'load_[t]', 'eff_[t]', 'crit_len_[t]' If args['calc_n'] is True, must also contain the header 'proportion_subsurface_n' field. args['calc_p'] (boolean): if True, phosphorus is modeled, additionally if True then biophysical table must have p fields in them args['calc_n'] (boolean): if True nitrogen will be modeled, additionally biophysical table must have n fields in them. args['results_suffix'] (string): (optional) a text field to append to all output files args['threshold_flow_accumulation']: a number representing the flow accumulation in terms of upstream pixels. 
args['k_param'] (number): The Borselli k parameter. This is a calibration parameter that determines the shape of the relationship between hydrologic connectivity. args['subsurface_critical_length_n'] (number): The distance (traveled subsurface and downslope) after which it is assumed that soil retains nutrient at its maximum capacity, given in meters. If dissolved nutrients travel a distance smaller than Subsurface Critical Length, the retention efficiency will be lower than the Subsurface Maximum Retention Efficiency value defined. Setting this value to a distance smaller than the pixel size will result in the maximum retention efficiency being reached within one pixel only. Required if ``calc_n``. args['subsurface_critical_length_p'] (number): The distance (traveled subsurface and downslope) after which it is assumed that soil retains nutrient at its maximum capacity, given in meters. If dissolved nutrients travel a distance smaller than Subsurface Critical Length, the retention efficiency will be lower than the Subsurface Maximum Retention Efficiency value defined. Setting this value to a distance smaller than the pixel size will result in the maximum retention efficiency being reached within one pixel only. Required if ``calc_p``. args['subsurface_eff_n'] (number): The maximum nutrient retention efficiency that can be reached through subsurface flow, a floating point value between 0 and 1. This field characterizes the retention due to biochemical degradation in soils. Required if ``calc_n``. args['subsurface_eff_p'] (number): The maximum nutrient retention efficiency that can be reached through subsurface flow, a floating point value between 0 and 1. This field characterizes the retention due to biochemical degradation in soils. Required if ``calc_p``. args['n_workers'] (int): if present, indicates how many worker processes should be used in parallel processing. -1 indicates single process mode, 0 is single process but non-blocking mode, and >= 1 is number of processes. Returns: None """ def _validate_inputs(nutrients_to_process, lucode_to_parameters): """Validate common errors in inputs. Args: nutrients_to_process (list): list of 'n' and/or 'p' lucode_to_parameters (dictionary): biophysical input table mapping lucode to dictionary of table parameters. Used to validate the correct columns are input Returns: None Raises: ValueError whenever a missing field in the parameter table is detected along with a message describing every missing field. """ # Make sure all the nutrient inputs are good if len(nutrients_to_process) == 0: raise ValueError("Neither phosphorus nor nitrogen was selected" " to be processed. Choose at least one.") # Build up a list that'll let us iterate through all the input tables # and check for the required rows, and report errors if something # is missing. row_header_table_list = [] lu_parameter_row = list(lucode_to_parameters.values())[0] row_header_table_list.append( (lu_parameter_row, ['load_', 'eff_', 'crit_len_'], args['biophysical_table_path'])) missing_headers = [] for row, header_prefixes, table_type in row_header_table_list: for nutrient_id in nutrients_to_process: for header_prefix in header_prefixes: header = header_prefix + nutrient_id if header not in row: missing_headers.append( "Missing header %s from %s" % ( header, table_type)) # proportion_subsurface_n is a special case in which phosphorus does # not have an equivalent. 
if ('n' in nutrients_to_process and 'proportion_subsurface_n' not in lu_parameter_row): missing_headers.append( "Missing header proportion_subsurface_n from " + args['biophysical_table_path']) if len(missing_headers) > 0: raise ValueError('\n'.join(missing_headers)) # Load all the tables for preprocessing output_dir = os.path.join(args['workspace_dir']) intermediate_output_dir = os.path.join( args['workspace_dir'], INTERMEDIATE_DIR_NAME) cache_dir = os.path.join(intermediate_output_dir, 'cache_dir') utils.make_directories([output_dir, intermediate_output_dir, cache_dir]) try: n_workers = int(args['n_workers']) except (KeyError, ValueError, TypeError): # KeyError when n_workers is not present in args # ValueError when n_workers is an empty string. # TypeError when n_workers is None. n_workers = -1 # Synchronous mode. task_graph = taskgraph.TaskGraph( cache_dir, n_workers, reporting_interval=5.0) file_suffix = utils.make_suffix_string(args, 'results_suffix') f_reg = utils.build_file_registry( [(_OUTPUT_BASE_FILES, output_dir), (_INTERMEDIATE_BASE_FILES, intermediate_output_dir), (_CACHE_BASE_FILES, cache_dir)], file_suffix) # Build up a list of nutrients to process based on what's checked on nutrients_to_process = [] for nutrient_id in ['n', 'p']: if args['calc_' + nutrient_id]: nutrients_to_process.append(nutrient_id) lucode_to_parameters = utils.build_lookup_from_csv( args['biophysical_table_path'], 'lucode') _validate_inputs(nutrients_to_process, lucode_to_parameters) # these are used for aggregation in the last step field_pickle_map = {} field_header_order_list = [] create_vector_task = task_graph.add_task( func=create_vector_copy, args=(args['watersheds_path'], f_reg['watershed_results_ndr_path']), target_path_list=[f_reg['watershed_results_ndr_path']], task_name='create target vector') dem_info = pygeoprocessing.get_raster_info(args['dem_path']) base_raster_list = [ args['dem_path'], args['lulc_path'], args['runoff_proxy_path']] aligned_raster_list = [ f_reg['aligned_dem_path'], f_reg['aligned_lulc_path'], f_reg['aligned_runoff_proxy_path']] align_raster_task = task_graph.add_task( func=pygeoprocessing.align_and_resize_raster_stack, args=( base_raster_list, aligned_raster_list, ['near']*len(base_raster_list), dem_info['pixel_size'], 'intersection'), kwargs={ 'base_vector_path_list': [args['watersheds_path']], 'vector_mask_options': { 'mask_vector_path': args['watersheds_path']}}, target_path_list=aligned_raster_list, task_name='align rasters') fill_pits_task = task_graph.add_task( func=pygeoprocessing.routing.fill_pits, args=( (f_reg['aligned_dem_path'], 1), f_reg['filled_dem_path']), kwargs={'working_dir': cache_dir}, dependent_task_list=[align_raster_task], target_path_list=[f_reg['filled_dem_path']], task_name='fill pits') flow_dir_task = task_graph.add_task( func=pygeoprocessing.routing.flow_dir_mfd, args=( (f_reg['filled_dem_path'], 1), f_reg['flow_direction_path']), kwargs={'working_dir': cache_dir}, dependent_task_list=[fill_pits_task], target_path_list=[f_reg['flow_direction_path']], task_name='flow dir') flow_accum_task = task_graph.add_task( func=pygeoprocessing.routing.flow_accumulation_mfd, args=( (f_reg['flow_direction_path'], 1), f_reg['flow_accumulation_path']), target_path_list=[f_reg['flow_accumulation_path']], dependent_task_list=[flow_dir_task], task_name='flow accum') stream_extraction_task = task_graph.add_task( func=pygeoprocessing.routing.extract_streams_mfd, args=( (f_reg['flow_accumulation_path'], 1), (f_reg['flow_direction_path'], 1), 
float(args['threshold_flow_accumulation']), f_reg['stream_path']), target_path_list=[f_reg['stream_path']], dependent_task_list=[flow_accum_task], task_name='stream extraction') calculate_slope_task = task_graph.add_task( func=pygeoprocessing.calculate_slope, args=((f_reg['filled_dem_path'], 1), f_reg['slope_path']), target_path_list=[f_reg['slope_path']], dependent_task_list=[fill_pits_task], task_name='calculate slope') threshold_slope_task = task_graph.add_task( func=_slope_proportion_and_threshold, args=(f_reg['slope_path'], f_reg['thresholded_slope_path']), target_path_list=[f_reg['thresholded_slope_path']], dependent_task_list=[calculate_slope_task], task_name='threshold slope') runoff_proxy_index_task = task_graph.add_task( func=_normalize_raster, args=((f_reg['aligned_runoff_proxy_path'], 1), f_reg['runoff_proxy_index_path']), target_path_list=[f_reg['runoff_proxy_index_path']], dependent_task_list=[align_raster_task], task_name='runoff proxy mean') s_task = task_graph.add_task( func=pygeoprocessing.routing.flow_accumulation_mfd, args=((f_reg['flow_direction_path'], 1), f_reg['s_accumulation_path']), kwargs={ 'weight_raster_path_band': (f_reg['thresholded_slope_path'], 1)}, target_path_list=[f_reg['s_accumulation_path']], dependent_task_list=[flow_dir_task, threshold_slope_task], task_name='route s') s_bar_task = task_graph.add_task( func=s_bar_calculate, args=(f_reg['s_accumulation_path'], f_reg['flow_accumulation_path'], f_reg['s_bar_path']), target_path_list=[f_reg['s_bar_path']], dependent_task_list=[s_task, flow_accum_task], task_name='calculate s bar') d_up_task = task_graph.add_task( func=d_up_calculation, args=(f_reg['s_bar_path'], f_reg['flow_accumulation_path'], f_reg['d_up_path']), target_path_list=[f_reg['d_up_path']], dependent_task_list=[s_bar_task, flow_accum_task], task_name='d up') s_inv_task = task_graph.add_task( func=invert_raster_values, args=(f_reg['thresholded_slope_path'], f_reg['s_factor_inverse_path']), target_path_list=[f_reg['s_factor_inverse_path']], dependent_task_list=[threshold_slope_task], task_name='s inv') d_dn_task = task_graph.add_task( func=pygeoprocessing.routing.distance_to_channel_mfd, args=( (f_reg['flow_direction_path'], 1), (f_reg['stream_path'], 1), f_reg['d_dn_path']), kwargs={'weight_raster_path_band': ( f_reg['s_factor_inverse_path'], 1)}, dependent_task_list=[stream_extraction_task, s_inv_task], target_path_list=[f_reg['d_dn_path']], task_name='d dn') dist_to_channel_task = task_graph.add_task( func=pygeoprocessing.routing.distance_to_channel_mfd, args=( (f_reg['flow_direction_path'], 1), (f_reg['stream_path'], 1), f_reg['dist_to_channel_path']), dependent_task_list=[stream_extraction_task], target_path_list=[f_reg['dist_to_channel_path']], task_name='dist to channel') ic_task = task_graph.add_task( func=calculate_ic, args=( f_reg['d_up_path'], f_reg['d_dn_path'], f_reg['ic_factor_path']), target_path_list=[f_reg['ic_factor_path']], dependent_task_list=[d_dn_task, d_up_task], task_name='calc ic') for nutrient in nutrients_to_process: load_path = f_reg['load_%s_path' % nutrient] modified_load_path = f_reg['modified_load_%s_path' % nutrient] # Perrine says that 'n' is the only case where we could consider a # prop subsurface component. So there's a special case for that. 
if nutrient == 'n': subsurface_proportion_type = 'proportion_subsurface_n' else: subsurface_proportion_type = None load_task = task_graph.add_task( func=_calculate_load, args=( f_reg['aligned_lulc_path'], lucode_to_parameters, 'load_%s' % nutrient, load_path), dependent_task_list=[align_raster_task], target_path_list=[load_path], task_name='%s load' % nutrient) modified_load_task = task_graph.add_task( func=_multiply_rasters, args=([load_path, f_reg['runoff_proxy_index_path']], _TARGET_NODATA, modified_load_path), target_path_list=[modified_load_path], dependent_task_list=[load_task, runoff_proxy_index_task], task_name='modified load %s' % nutrient) surface_load_path = f_reg['surface_load_%s_path' % nutrient] surface_load_task = task_graph.add_task( func=_map_surface_load, args=(modified_load_path, f_reg['aligned_lulc_path'], lucode_to_parameters, subsurface_proportion_type, surface_load_path), target_path_list=[surface_load_path], dependent_task_list=[modified_load_task, align_raster_task], task_name='map surface load %s' % nutrient) subsurface_load_path = f_reg['sub_load_%s_path' % nutrient] subsurface_load_task = task_graph.add_task( func=_map_subsurface_load, args=(modified_load_path, f_reg['aligned_lulc_path'], lucode_to_parameters, subsurface_proportion_type, subsurface_load_path), target_path_list=[subsurface_load_path], dependent_task_list=[modified_load_task, align_raster_task], task_name='map subsurface load %s' % nutrient) eff_path = f_reg['eff_%s_path' % nutrient] eff_task = task_graph.add_task( func=_map_lulc_to_val_mask_stream, args=( f_reg['aligned_lulc_path'], f_reg['stream_path'], lucode_to_parameters, 'eff_%s' % nutrient, eff_path), target_path_list=[eff_path], dependent_task_list=[align_raster_task, stream_extraction_task], task_name='ret eff %s' % nutrient) crit_len_path = f_reg['crit_len_%s_path' % nutrient] crit_len_task = task_graph.add_task( func=_map_lulc_to_val_mask_stream, args=( f_reg['aligned_lulc_path'], f_reg['stream_path'], lucode_to_parameters, 'crit_len_%s' % nutrient, crit_len_path), target_path_list=[crit_len_path], dependent_task_list=[align_raster_task, stream_extraction_task], task_name='ret eff %s' % nutrient) effective_retention_path = ( f_reg['effective_retention_%s_path' % nutrient]) ndr_eff_task = task_graph.add_task( func=ndr_core.ndr_eff_calculation, args=( f_reg['flow_direction_path'], f_reg['stream_path'], eff_path, crit_len_path, effective_retention_path), target_path_list=[effective_retention_path], dependent_task_list=[ stream_extraction_task, eff_task, crit_len_task], task_name='eff ret %s' % nutrient) ndr_path = f_reg['ndr_%s_path' % nutrient] ndr_task = task_graph.add_task( func=_calculate_ndr, args=( effective_retention_path, f_reg['ic_factor_path'], float(args['k_param']), ndr_path), target_path_list=[ndr_path], dependent_task_list=[ndr_eff_task, ic_task], task_name='calc ndr %s' % nutrient) sub_ndr_path = f_reg['sub_ndr_%s_path' % nutrient] sub_ndr_task = task_graph.add_task( func=_calculate_sub_ndr, args=( float(args['subsurface_eff_%s' % nutrient]), float(args['subsurface_critical_length_%s' % nutrient]), f_reg['dist_to_channel_path'], sub_ndr_path), target_path_list=[sub_ndr_path], dependent_task_list=[dist_to_channel_task], task_name='sub ndr %s' % nutrient) export_path = f_reg['%s_export_path' % nutrient] calculate_export_task = task_graph.add_task( func=_calculate_export, args=( surface_load_path, ndr_path, subsurface_load_path, sub_ndr_path, export_path), target_path_list=[export_path], dependent_task_list=[ load_task, 
ndr_task, surface_load_task, subsurface_load_task, sub_ndr_task], task_name='export %s' % nutrient) aggregate_export_task = task_graph.add_task( func=_aggregate_and_pickle_total, args=( (export_path, 1), f_reg['watershed_results_ndr_path'], f_reg['export_%s_pickle_path' % nutrient]), target_path_list=[f_reg['export_%s_pickle_path' % nutrient]], dependent_task_list=[calculate_export_task], task_name='aggregate %s export' % nutrient) aggregate_surface_load_task = task_graph.add_task( func=_aggregate_and_pickle_total, args=( (surface_load_path, 1), f_reg['watershed_results_ndr_path'], f_reg['surface_load_%s_pickle_path' % nutrient]), target_path_list=[f_reg['surface_load_%s_pickle_path' % nutrient]], dependent_task_list=[surface_load_task, create_vector_task], task_name='aggregate %s surface load' % nutrient) aggregate_subsurface_load_task = task_graph.add_task( func=_aggregate_and_pickle_total, args=( (subsurface_load_path, 1), f_reg['watershed_results_ndr_path'], f_reg['subsurface_load_%s_pickle_path' % nutrient]), target_path_list=[ f_reg['subsurface_load_%s_pickle_path' % nutrient]], dependent_task_list=[subsurface_load_task, create_vector_task], task_name='aggregate %s subsurface load' % nutrient) field_pickle_map['surf_%s_ld' % nutrient] = ( f_reg['surface_load_%s_pickle_path' % nutrient]) field_pickle_map['sub_%s_ld' % nutrient] = ( f_reg['subsurface_load_%s_pickle_path' % nutrient]) field_pickle_map['%s_exp_tot' % nutrient] = ( f_reg['export_%s_pickle_path' % nutrient]) field_header_order_list = ( [x % nutrient for x in [ 'surf_%s_ld', 'sub_%s_ld', '%s_exp_tot']] + field_header_order_list) task_graph.close() task_graph.join() LOGGER.info('Writing summaries to output shapefile') _add_fields_to_shapefile( field_pickle_map, field_header_order_list, f_reg['watershed_results_ndr_path']) LOGGER.info(r'NDR complete!') LOGGER.info(r' _ _ ____ ____ ') LOGGER.info(r' | \ |"| | _"\U | _"\ u ') LOGGER.info(r'<| \| |>/| | | |\| |_) |/ ') LOGGER.info(r'U| |\ |uU| |_| |\| _ < ') LOGGER.info(r' |_| \_| |____/ u|_| \_\ ') LOGGER.info(r' || \\,-.|||_ // \\_ ') LOGGER.info(r' (_") (_/(__)_) (__) (__) ') def _slope_proportion_and_threshold(slope_path, target_threshold_slope_path): """Rescale slope to proportion and threshold to between 0.005 and 1.0. Args: slope_path (string): a raster with slope values in percent. target_threshold_slope_path (string): generated raster with slope values as a proportion (100% is 1.0) and thresholded to values between 0.005 and 1.0. Returns: None. """ slope_nodata = pygeoprocessing.get_raster_info(slope_path)['nodata'][0] def _slope_proportion_and_threshold_op(slope): """Rescale and threshold slope between 0.005 and 1.0.""" valid_mask = slope != slope_nodata result = numpy.empty(valid_mask.shape, dtype=numpy.float32) result[:] = slope_nodata slope_fraction = slope[valid_mask] / 100 slope_fraction[slope_fraction < 0.005] = 0.005 slope_fraction[slope_fraction > 1.0] = 1.0 result[valid_mask] = slope_fraction return result pygeoprocessing.raster_calculator( [(slope_path, 1)], _slope_proportion_and_threshold_op, target_threshold_slope_path, gdal.GDT_Float32, slope_nodata) def _add_fields_to_shapefile( field_pickle_map, field_header_order, target_vector_path): """Add fields and values to an OGR layer open for writing. Args: field_pickle_map (dict): maps field name to a pickle file that is a result of pygeoprocessing.zonal_stats with FIDs that match `target_vector_path`. field_header_order (list of string): a list of field headers in the order to appear in the output table. 
target_vector_path (string): path to target vector file. Returns: None. """ target_vector = gdal.OpenEx( target_vector_path, gdal.OF_VECTOR | gdal.GA_Update) target_layer = target_vector.GetLayer() field_summaries = {} for field_name in field_header_order: field_def = ogr.FieldDefn(field_name, ogr.OFTReal) field_def.SetWidth(24) field_def.SetPrecision(11) target_layer.CreateField(field_def) with open(field_pickle_map[field_name], 'rb') as pickle_file: field_summaries[field_name] = pickle.load(pickle_file) for feature in target_layer: fid = feature.GetFID() for field_name in field_header_order: feature.SetField( field_name, float(field_summaries[field_name][fid]['sum'])) # Save back to datasource target_layer.SetFeature(feature) target_layer = None target_vector = None @validation.invest_validator def validate(args, limit_to=None): """Validate args to ensure they conform to `execute`'s contract. Args: args (dict): dictionary of key(str)/value pairs where keys and values are specified in `execute` docstring. limit_to (str): (optional) if not None indicates that validation should only occur on the args[limit_to] value. The intent that individual key validation could be significantly less expensive than validating the entire `args` dictionary. Returns: list of ([invalid key_a, invalid_keyb, ...], 'warning/error message') tuples. Where an entry indicates that the invalid keys caused the error message in the second part of the tuple. This should be an empty list if validation succeeds. """ validation_warnings = validation.validate( args, ARGS_SPEC['args'], ARGS_SPEC['args_with_spatial_overlap']) invalid_keys = validation.get_invalid_keys(validation_warnings) LOGGER.debug('Starting logging for biophysical table') if 'biophysical_table_path' not in invalid_keys: # Check required fields given the state of ``calc_n`` and ``calc_p`` required_fields = ARGS_SPEC['args'][ 'biophysical_table_path']['validation_options'][ 'required_fields'][:] nutrients_selected = set() for nutrient_letter in ('n', 'p'): do_nutrient_key = 'calc_%s' % nutrient_letter if do_nutrient_key in args and args[do_nutrient_key]: nutrients_selected.add(do_nutrient_key) required_fields += [ 'load_%s' % nutrient_letter, 'eff_%s' % nutrient_letter, 'crit_len_%s' % nutrient_letter, ] if not nutrients_selected: validation_warnings.append( (['calc_n', 'calc_p'], 'Either calc_n or calc_p must be True')) LOGGER.debug('Required keys in CSV: %s', required_fields) error_msg = validation.check_csv( args['biophysical_table_path'], required_fields=required_fields) LOGGER.debug('Error: %s', error_msg) if error_msg: validation_warnings.append( (['biophysical_table_path'], error_msg)) return validation_warnings def _normalize_raster(base_raster_path_band, target_normalized_raster_path): """Calculate normalize raster by dividing by the mean value. Args: base_raster_path_band (tuple): raster path/band tuple to calculate mean. target_normalized_raster_path (string): path to target normalized raster from base_raster_path_band. Returns: None. 
""" value_sum = 0.0 value_count = 0.0 base_nodata = pygeoprocessing.get_raster_info( base_raster_path_band[0])['nodata'][base_raster_path_band[1]-1] for _, raster_block in pygeoprocessing.iterblocks( base_raster_path_band): valid_mask = slice(None) if base_nodata is not None: valid_mask = ~numpy.isclose(raster_block, base_nodata) valid_block = raster_block[valid_mask] value_sum += numpy.sum(valid_block) value_count += valid_block.size value_mean = value_sum if value_count > 0.0: value_mean /= value_count def _normalize_raster_op(array): """Divide values by mean.""" result = numpy.empty(array.shape, dtype=numpy.float32) result[:] = numpy.float32(base_nodata) valid_mask = slice(None) if base_nodata is not None: valid_mask = ~numpy.isclose(array, base_nodata) result[valid_mask] = array[valid_mask] if value_mean != 0: result[valid_mask] /= value_mean return result # It's possible for base_nodata to extend outside what can be represented # in a float32, yet GDAL expects a python float. Casting to numpy.float32 # and back to a python float allows for the nodata value to reflect the # actual nodata pixel values. target_nodata = float(numpy.float32(base_nodata)) pygeoprocessing.raster_calculator( [base_raster_path_band], _normalize_raster_op, target_normalized_raster_path, gdal.GDT_Float32, target_nodata) def _calculate_load( lulc_raster_path, lucode_to_parameters, load_type, target_load_raster): """Calculate load raster by mapping landcover and multiplying by area. Args: lulc_raster_path (string): path to integer landcover raster. lucode_to_parameters (dict): a mapping of landcover IDs to a dictionary indexed by the value of `load_{load_type}` that represents a per-area nutrient load. load_type (string): represent nutrient to map, either 'load_n' or 'load_p'. target_load_raster (string): path to target raster that will have total load per pixel. Returns: None. """ lulc_raster_info = pygeoprocessing.get_raster_info(lulc_raster_path) nodata_landuse = lulc_raster_info['nodata'][0] cell_area_ha = abs(numpy.prod(lulc_raster_info['pixel_size'])) * 0.0001 def _map_load_op(lucode_array): """Convert unit load to total load & handle nodata.""" result = numpy.empty(lucode_array.shape) result[:] = _TARGET_NODATA for lucode in numpy.unique(lucode_array): if lucode != nodata_landuse: try: result[lucode_array == lucode] = ( lucode_to_parameters[lucode][load_type] * cell_area_ha) except KeyError: raise KeyError( 'lucode: %d is present in the landuse raster but ' 'missing from the biophysical table' % lucode) return result pygeoprocessing.raster_calculator( [(lulc_raster_path, 1)], _map_load_op, target_load_raster, gdal.GDT_Float32, _TARGET_NODATA) def _multiply_rasters(raster_path_list, target_nodata, target_result_path): """Multiply the rasters in `raster_path_list`. Args: raster_path_list (list): list of single band raster paths. target_nodata (float): desired target nodata value. target_result_path (string): path to float 32 target raster multiplied where all rasters are not nodata. Returns: None. 
""" def _mult_op(*array_nodata_list): """Multiply non-nodata stacks.""" result = numpy.empty(array_nodata_list[0].shape) result[:] = target_nodata valid_mask = numpy.full(result.shape, True) for array, nodata in zip(*[iter(array_nodata_list)]*2): if nodata is not None: valid_mask &= ~numpy.isclose(array, nodata) result[valid_mask] = array_nodata_list[0][valid_mask] for array in array_nodata_list[2::2]: result[valid_mask] *= array[valid_mask] return result # make a list of (raster_path_band, nodata) tuples, then flatten it path_nodata_list = list(itertools.chain(*[ ((path, 1), (pygeoprocessing.get_raster_info(path)['nodata'][0], 'raw')) for path in raster_path_list])) pygeoprocessing.raster_calculator( path_nodata_list, _mult_op, target_result_path, gdal.GDT_Float32, target_nodata) def _map_surface_load( modified_load_path, lulc_raster_path, lucode_to_parameters, subsurface_proportion_type, target_surface_load_path): """Calculate surface load from landcover raster. Args: modified_load_path (string): path to modified load raster with units of kg/pixel. lulc_raster_path (string): path to landcover raster. lucode_to_parameters (dict): maps landcover codes to a dictionary that can be indexed by `subsurface_proportion_type`. subsurface_proportion_type (string): if None no subsurface transfer is mapped. Otherwise indexed from lucode_to_parameters. target_surface_load_path (string): path to target raster. Returns: None. """ lulc_raster_info = pygeoprocessing.get_raster_info(lulc_raster_path) nodata_landuse = lulc_raster_info['nodata'][0] keys = sorted(numpy.array(list(lucode_to_parameters))) if subsurface_proportion_type is not None: subsurface_values = numpy.array( [lucode_to_parameters[x][subsurface_proportion_type] for x in keys]) def _map_surface_load_op(lucode_array, modified_load_array): """Convert unit load to total load & handle nodata.""" # If we don't have subsurface, just return 0.0. if subsurface_proportion_type is None: return numpy.where( lucode_array != nodata_landuse, modified_load_array, _TARGET_NODATA) valid_mask = lucode_array != nodata_landuse result = numpy.empty(valid_mask.shape, dtype=numpy.float32) result[:] = _TARGET_NODATA index = numpy.digitize( lucode_array[valid_mask].ravel(), keys, right=True) result[valid_mask] = ( modified_load_array[valid_mask] * (1 - subsurface_values[index])) return result pygeoprocessing.raster_calculator( [(lulc_raster_path, 1), (modified_load_path, 1)], _map_surface_load_op, target_surface_load_path, gdal.GDT_Float32, _TARGET_NODATA) def _map_subsurface_load( modified_load_path, lulc_raster_path, lucode_to_parameters, subsurface_proportion_type, target_sub_load_path): """Calculate subsurface load from landcover raster. Args: modified_load_path (string): path to modified load raster. lulc_raster_path (string): path to landcover raster. lucode_to_parameters (dict): maps landcover codes to a dictionary that can be indexed by by `subsurface_proportion_type`. subsurface_proportion_type (string): if None no subsurface transfer is mapped. Otherwise indexed from lucode_to_parameters. target_sub_load_path (string): path to target raster. Returns: None. 
""" lulc_raster_info = pygeoprocessing.get_raster_info(lulc_raster_path) nodata_landuse = lulc_raster_info['nodata'][0] keys = sorted(numpy.array(list(lucode_to_parameters))) if subsurface_proportion_type is not None: subsurface_permeance_values = numpy.array( [lucode_to_parameters[x][subsurface_proportion_type] for x in keys]) def _map_subsurface_load_op(lucode_array, modified_load_array): """Convert unit load to total load & handle nodata.""" # If we don't have subsurface, just return 0.0. if subsurface_proportion_type is None: return numpy.where( lucode_array != nodata_landuse, 0, _TARGET_NODATA) valid_mask = lucode_array != nodata_landuse result = numpy.empty(valid_mask.shape, dtype=numpy.float32) result[:] = _TARGET_NODATA index = numpy.digitize( lucode_array[valid_mask].ravel(), keys, right=True) result[valid_mask] = ( modified_load_array[valid_mask] * subsurface_permeance_values[index]) return result pygeoprocessing.raster_calculator( [(lulc_raster_path, 1), (modified_load_path, 1)], _map_subsurface_load_op, target_sub_load_path, gdal.GDT_Float32, _TARGET_NODATA) def _map_lulc_to_val_mask_stream( lulc_raster_path, stream_path, lucode_to_parameters, map_id, target_eff_path): """Make retention efficiency raster from landcover. Args: lulc_raster_path (string): path to landcover raster. stream_path (string) path to stream layer 0, no stream 1 stream. lucode_to_parameters (dict) mapping of landcover code to a dictionary that contains the key in `map_id` map_id (string): the id in the lookup table with values to map landcover to efficiency. target_eff_path (string): target raster that contains the mapping of landcover codes to retention efficiency values except where there is a stream in which case the retention efficiency is 0. Returns: None. """ keys = sorted(numpy.array(list(lucode_to_parameters))) values = numpy.array( [lucode_to_parameters[x][map_id] for x in keys]) nodata_landuse = pygeoprocessing.get_raster_info( lulc_raster_path)['nodata'][0] nodata_stream = pygeoprocessing.get_raster_info(stream_path)['nodata'][0] def _map_eff_op(lucode_array, stream_array): """Map efficiency from LULC and handle nodata/streams.""" valid_mask = ( (lucode_array != nodata_landuse) & (stream_array != nodata_stream)) result = numpy.empty(valid_mask.shape, dtype=numpy.float32) result[:] = _TARGET_NODATA index = numpy.digitize( lucode_array[valid_mask].ravel(), keys, right=True) result[valid_mask] = ( values[index] * (1 - stream_array[valid_mask])) return result pygeoprocessing.raster_calculator( ((lulc_raster_path, 1), (stream_path, 1)), _map_eff_op, target_eff_path, gdal.GDT_Float32, _TARGET_NODATA) def s_bar_calculate( s_accumulation_path, flow_accumulation_path, target_s_bar_path): """Calculate bar op which is s/flow.""" s_nodata = pygeoprocessing.get_raster_info( s_accumulation_path)['nodata'][0] flow_nodata = pygeoprocessing.get_raster_info( flow_accumulation_path)['nodata'][0] def _bar_op(s_accumulation, flow_accumulation): """Calculate bar operation of s_accum / flow_accum.""" valid_mask = ( (s_accumulation != s_nodata) & (flow_accumulation != flow_nodata)) result = numpy.empty(valid_mask.shape, dtype=numpy.float32) result[:] = _TARGET_NODATA result[valid_mask] = ( s_accumulation[valid_mask] / flow_accumulation[valid_mask]) return result pygeoprocessing.raster_calculator( ((s_accumulation_path, 1), (flow_accumulation_path, 1)), _bar_op, target_s_bar_path, gdal.GDT_Float32, _TARGET_NODATA) def d_up_calculation(s_bar_path, flow_accum_path, target_d_up_path): """Calculate d_up = s_bar * 
sqrt(upstream area).""" s_bar_info = pygeoprocessing.get_raster_info(s_bar_path) s_bar_nodata = s_bar_info['nodata'][0] flow_accum_nodata = pygeoprocessing.get_raster_info( flow_accum_path)['nodata'][0] cell_area_m2 = abs(numpy.prod(s_bar_info['pixel_size'])) def _d_up_op(s_bar, flow_accumulation): """Calculate d_up index.""" valid_mask = ( (s_bar != s_bar_nodata) & (flow_accumulation != flow_accum_nodata)) result = numpy.empty(valid_mask.shape, dtype=numpy.float32) result[:] = _TARGET_NODATA result[valid_mask] = ( s_bar[valid_mask] * numpy.sqrt( flow_accumulation[valid_mask] * cell_area_m2)) return result pygeoprocessing.raster_calculator( [(s_bar_path, 1), (flow_accum_path, 1)], _d_up_op, target_d_up_path, gdal.GDT_Float32, _TARGET_NODATA) def invert_raster_values(base_raster_path, target_raster_path): """Invert (1/x) the values in `base`. Args: base_raster_path (string): path to floating point raster. target_raster_path (string): path to created output raster whose values are 1/x of base. Returns: None. """ base_nodata = pygeoprocessing.get_raster_info( base_raster_path)['nodata'][0] def _inverse_op(base_val): """Calculate inverse of S factor.""" result = numpy.empty(base_val.shape, dtype=numpy.float32) result[:] = _TARGET_NODATA valid_mask = slice(None) if base_nodata is not None: valid_mask = ~numpy.isclose(base_val, base_nodata) zero_mask = base_val == 0.0 result[valid_mask & ~zero_mask] = ( 1.0 / base_val[valid_mask & ~zero_mask]) result[zero_mask] = 0.0 return result pygeoprocessing.raster_calculator( ((base_raster_path, 1),), _inverse_op, target_raster_path, gdal.GDT_Float32, _TARGET_NODATA) def calculate_ic(d_up_path, d_dn_path, target_ic_path): """Calculate IC as log_10(d_up/d_dn).""" ic_nodata = float(numpy.finfo(numpy.float32).min) d_up_nodata = pygeoprocessing.get_raster_info(d_up_path)['nodata'][0] d_dn_nodata = pygeoprocessing.get_raster_info(d_dn_path)['nodata'][0] def _ic_op(d_up, d_dn): """Calculate IC0.""" valid_mask = ( (d_up != d_up_nodata) & (d_dn != d_dn_nodata) & (d_up != 0) & (d_dn != 0)) result = numpy.empty(valid_mask.shape, dtype=numpy.float32) result[:] = ic_nodata result[valid_mask] = numpy.log10(d_up[valid_mask] / d_dn[valid_mask]) return result pygeoprocessing.raster_calculator( [(d_up_path, 1), (d_dn_path, 1)], _ic_op, target_ic_path, gdal.GDT_Float32, ic_nodata) def _calculate_ndr( effective_retention_path, ic_factor_path, k_param, target_ndr_path): """Calculate NDR as a function of Equation 4 in the user's guide.""" ic_factor_raster = gdal.OpenEx(ic_factor_path, gdal.OF_RASTER) ic_factor_band = ic_factor_raster.GetRasterBand(1) ic_min, ic_max, _, _ = ic_factor_band.GetStatistics(0, 1) ic_factor_band = None ic_factor_raster = None ic_0_param = (ic_min + ic_max) / 2.0 effective_retention_nodata = pygeoprocessing.get_raster_info( effective_retention_path)['nodata'][0] ic_nodata = pygeoprocessing.get_raster_info(ic_factor_path)['nodata'][0] def _calculate_ndr_op(effective_retention_array, ic_array): """Calculate NDR.""" valid_mask = ( (effective_retention_array != effective_retention_nodata) & (ic_array != ic_nodata)) result = numpy.empty(valid_mask.shape, dtype=numpy.float32) result[:] = _TARGET_NODATA result[valid_mask] = ( (1.0 - effective_retention_array[valid_mask]) / (1.0 + numpy.exp( (ic_0_param - ic_array[valid_mask]) / k_param))) return result pygeoprocessing.raster_calculator( [(effective_retention_path, 1), (ic_factor_path, 1)], _calculate_ndr_op, target_ndr_path, gdal.GDT_Float32, _TARGET_NODATA) def _calculate_sub_ndr( eff_sub, crit_len_sub, 
dist_to_channel_path, target_sub_ndr_path): """Calculate subsurface: subndr = eff_sub(1-e^(-5*l/crit_len).""" dist_to_channel_nodata = pygeoprocessing.get_raster_info( dist_to_channel_path)['nodata'][0] def _sub_ndr_op(dist_to_channel_array): """Calculate subsurface NDR.""" # nodata value from this intermediate output should always be # defined by pygeoprocessing, not None valid_mask = ~numpy.isclose( dist_to_channel_array, dist_to_channel_nodata) result = numpy.empty(valid_mask.shape, dtype=numpy.float32) result[:] = _TARGET_NODATA result[valid_mask] = 1.0 - eff_sub * ( 1-numpy.exp(-5*dist_to_channel_array[valid_mask]/crit_len_sub)) return result pygeoprocessing.raster_calculator( [(dist_to_channel_path, 1)], _sub_ndr_op, target_sub_ndr_path, gdal.GDT_Float32, _TARGET_NODATA) def _calculate_export( surface_load_path, ndr_path, subsurface_load_path, subsurface_ndr_path, target_export_path): """Calculate export.""" load_nodata = pygeoprocessing.get_raster_info( surface_load_path)['nodata'][0] subsurface_load_nodata = pygeoprocessing.get_raster_info( subsurface_load_path)['nodata'][0] ndr_nodata = pygeoprocessing.get_raster_info( ndr_path)['nodata'][0] sub_ndr_nodata = pygeoprocessing.get_raster_info( subsurface_ndr_path)['nodata'][0] def _calculate_export_op( modified_load_array, ndr_array, modified_sub_load_array, sub_ndr_array): """Combine NDR and subsurface NDR.""" # these intermediate outputs should always have defined nodata # values assigned by pygeoprocessing valid_mask = ~( numpy.isclose(modified_load_array, load_nodata) | numpy.isclose(ndr_array, ndr_nodata) | numpy.isclose(modified_sub_load_array, subsurface_load_nodata) |
numpy.isclose(sub_ndr_array, sub_ndr_nodata)
numpy.isclose
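The api column above names numpy.isclose; a minimal standalone sketch of how the prompt uses it for nodata masking (array values and the nodata constant here are made up, not taken from the InVEST source) is:

import numpy

_TARGET_NODATA = -1.0
nodata = -9999.0
arr = numpy.array([1.0, -9999.0, 2.5], dtype=numpy.float32)

# Build a validity mask: True wherever the pixel is not the nodata value.
valid_mask = ~numpy.isclose(arr, nodata)

# Initialize output to the target nodata and compute only on valid pixels.
result = numpy.full(arr.shape, _TARGET_NODATA, dtype=numpy.float32)
result[valid_mask] = arr[valid_mask] * 2.0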
import warnings from inspect import isclass import numpy as np from UQpy.RunModel import RunModel from UQpy.SampleMethods import * ######################################################################################################################## ######################################################################################################################## # Subset Simulation ######################################################################################################################## class SubsetSimulation: """ Perform Subset Simulation to estimate probability of failure. This class estimates probability of failure for a user-defined model using Subset Simulation. The class can use one of several MCMC algorithms to draw conditional samples. **Input:** * **runmodel_object** (``RunModel`` object): The computational model. It should be of type `RunModel` (see ``RunModel`` class). * **mcmc_class** (Class of type ``SampleMethods.MCMC``) Specifies the MCMC algorithm. Must be a child class of the ``SampleMethods.MCMC`` parent class. Note: This is `not` and object of the class. This input specifies the class itself. * **samples_init** (`ndarray`) A set of samples from the specified probability distribution. These are the samples from the original distribution. They are not conditional samples. The samples must be an array of size `nsamples_per_ss x dimension`. If `samples_init` is not specified, the Subset_Simulation class will use the `mcmc_class` to draw the initial samples. * **p_cond** (`float`): Conditional probability for each conditional level. * **nsamples_per_ss** (`int`) Number of samples to draw in each conditional level. * **max_level** (`int`) Maximum number of allowable conditional levels. * **verbose** (Boolean): A boolean declaring whether to write text to the terminal. * **mcmc_kwargs** (`dict`) Any additional keyword arguments needed for the specific ``MCMC`` class. **Attributes:** * **samples** (`list` of `ndarrays`) A list of arrays containing the samples in each conditional level. * **g** (`list` of `ndarrays`) A list of arrays containing the evaluation of the performance function at each sample in each conditional level. * **g_level** (`list`) Threshold value of the performance function for each conditional level * **pf** (`float`) Probability of failure estimate * **cov1** (`float`) Coefficient of variation of the probability of failure estimate assuming independent chains * **cov2** (`float`) Coefficient of variation of the probability of failure estimate with dependent chains. From [4]_ **Methods:** """ def __init__(self, runmodel_object, mcmc_class=MMH, samples_init=None, p_cond=0.1, nsamples_per_ss=1000, max_level=10, verbose=False, **mcmc_kwargs): # Store the MCMC object to create a new object of this type for each subset self.mcmc_kwargs = mcmc_kwargs self.mcmc_class = mcmc_class # Initialize other attributes self.runmodel_object = runmodel_object self.samples_init = samples_init self.p_cond = p_cond self.nsamples_per_ss = nsamples_per_ss self.max_level = max_level self.verbose = verbose # Check that a RunModel object is being passed in. 
if not isinstance(self.runmodel_object, RunModel): raise AttributeError( 'UQpy: Subset simulation requires the user to pass a RunModel object') if 'random_state' in self.mcmc_kwargs: self.random_state = self.mcmc_kwargs['random_state'] if isinstance(self.random_state, int): self.random_state = np.random.RandomState(self.random_state) elif not isinstance(self.random_state, (type(None), np.random.RandomState)): raise TypeError('UQpy: random_state must be None, an int or an np.random.RandomState object.') else: self.random_state = None # Perform initial error checks self._init_sus() # Initialize the mcmc_object from the specified class. mcmc_object = self.mcmc_class(**self.mcmc_kwargs) self.mcmc_objects = [mcmc_object] # Initialize new attributes/variables self.samples = list() self.g = list() self.g_level = list() if self.verbose: print('UQpy: Running Subset Simulation with MCMC of type: ' + str(type(mcmc_object))) [self.pf, self.cov1, self.cov2] = self.run() if self.verbose: print('UQpy: Subset Simulation Complete!') # ----------------------------------------------------------------------------------------------------------------------- # The run function executes the chosen subset simulation algorithm def run(self): """ Execute subset simulation This is an instance method that runs subset simulation. It is automatically called when the SubsetSimulation class is instantiated. **Output/Returns:** * **pf** (`float`) Probability of failure estimate * **cov1** (`float`) Coefficient of variation of the probability of failure estimate assuming independent chains * **cov2** (`float`) Coefficient of variation of the probability of failure estimate with dependent chains. From [4]_ """ step = 0 n_keep = int(self.p_cond * self.nsamples_per_ss) d12 = list() d22 = list() # Generate the initial samples - Level 0 # Here we need to make sure that we have good initial samples from the target joint density. if self.samples_init is None: warnings.warn('UQpy: You have not provided initial samples.\n Subset simulation is highly sensitive to the ' 'initial sample set. 
It is recommended that the user either:\n' '- Provide an initial set of samples (samples_init) known to follow the distribution; or\n' '- Provide a robust MCMC object that will draw independent initial samples from the ' 'distribution.') self.mcmc_objects[0].run(nsamples=self.nsamples_per_ss) self.samples.append(self.mcmc_objects[0].samples) else: self.samples.append(self.samples_init) # Run the model for the initial samples, sort them by their performance function, and identify the # conditional level self.runmodel_object.run(samples=np.atleast_2d(self.samples[step])) self.g.append(np.squeeze(self.runmodel_object.qoi_list)) g_ind = np.argsort(self.g[step]) self.g_level.append(self.g[step][g_ind[n_keep - 1]]) # Estimate coefficient of variation of conditional probability of first level d1, d2 = self._cov_sus(step) d12.append(d1 ** 2) d22.append(d2 ** 2) if self.verbose: print('UQpy: Subset Simulation, conditional level 0 complete.') while self.g_level[step] > 0 and step < self.max_level: # Increment the conditional level step = step + 1 # Initialize the samples and the performance function at the next conditional level self.samples.append(np.zeros_like(self.samples[step - 1])) self.samples[step][:n_keep] = self.samples[step - 1][g_ind[0:n_keep], :] self.g.append(np.zeros_like(self.g[step - 1])) self.g[step][:n_keep] = self.g[step - 1][g_ind[:n_keep]] # Unpack the attributes # Initialize a new MCMC object for each conditional level self.mcmc_kwargs['seed'] = np.atleast_2d(self.samples[step][:n_keep, :]) self.mcmc_kwargs['random_state'] = self.random_state new_mcmc_object = self.mcmc_class(**self.mcmc_kwargs) self.mcmc_objects.append(new_mcmc_object) # Set the number of samples to propagate each chain (n_prop) in the conditional level n_prop_test = self.nsamples_per_ss / self.mcmc_objects[step].nchains if n_prop_test.is_integer(): n_prop = self.nsamples_per_ss // self.mcmc_objects[step].nchains else: raise AttributeError( 'UQpy: The number of samples per subset (nsamples_per_ss) must be an integer multiple of ' 'the number of MCMC chains.') # Propagate each chain n_prop times and evaluate the model to accept or reject. for i in range(n_prop - 1): # Propagate each chain if i == 0: self.mcmc_objects[step].run(nsamples=2 * self.mcmc_objects[step].nchains) else: self.mcmc_objects[step].run(nsamples=self.mcmc_objects[step].nchains) # Decide whether a new simulation is needed for each proposed state a = self.mcmc_objects[step].samples[i * n_keep:(i + 1) * n_keep, :] b = self.mcmc_objects[step].samples[(i + 1) * n_keep:(i + 2) * n_keep, :] test1 = np.equal(a, b) test =
np.logical_and(test1[:, 0], test1[:, 1])
numpy.logical_and
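The api column above names numpy.logical_and; a minimal standalone sketch of the row's completion, combining per-dimension equality tests between proposed and previous MCMC states (sample values are made up), is:

import numpy as np

a = np.array([[0.1, 0.2], [0.3, 0.4]])
b = np.array([[0.1, 0.9], [0.3, 0.4]])

# Element-wise equality per dimension, then require both dimensions to match.
test1 = np.equal(a, b)
test = np.logical_and(test1[:, 0], test1[:, 1])  # array([False, True])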
#Program: A self implemented basic kd_tree algorithm import numpy as np import pandas as pd import math import time #class for internal nodes of the kdTree #holds, cut_dim value at each node, along with cut_point itself for reference. #left_child points to the left node of the tree, similarly right_child points to the right node of the tree. class Inode: cut_dim = 0 cut_point = 0 left_child = None right_child = None #init method for Inode class #initializes each node with cut_point, cut_dim, left_child and right_child of the node. def __init__(self,cut_point, cut_dim, left_child, right_child): self.cut_point = cut_point self.cut_dim = cut_dim self.left_child = left_child self.right_child = right_child #__repr__ dundder method for fallback of __str__ dundder method def __repr__(self): return "{},{},{},{}".format(self.cut_point,self.cut_dim,self.left_child,self.right_child) #__str__ dundder method for printing the class instance in string format def __str__(self): return "node:{}, dim:{},<------{},------>{}".format(self.cut_point,self.cut_dim,self.left_child,self.right_child) #class for nodes holding leaf points of the kdTree #point_list, which is all the points in that region(node) of the tree. tree division stops here. class Lnode: point_list = None #init method for Lnode class #initializes each leaf node with points in that node. def __init__(self,point_list): self.point_list = point_list #__repr__ dundder method for fallback of __str__ dundder method def __repr__(self): return "{}".format(self.point_list) #__str__ dundder method for printing the class instance in string format def __str__(self): return "leaf_Points:{}".format(self.point_list) #KDTree function to build a Kd tree of a given point_list, cut_dim for dimension to split on, and max number of points a leaf can hold is given by leaf_size. #splitting stops if a node has points less than the leaf_size def KDTree(point_list, leaf_size, cut_dim=0): #root = None cut_val = 0 left_child = None right_child = None stop = False #If not enough points in the point_list - if(len(point_list) <= leaf_size): stop = True #this tree_node is a leaf node if stop == True: #leaf nodes added root = Lnode(point_list) #otherwise build kdTree recursively else: #checking number of dimensions in data num_dimensions = len(point_list[0]) mid = 0 #sorting method is used for splitting here. view_str = 'f8' for _ in range(1,num_dimensions): view_str += ',f8' #point_list.view(view_str).sort(order=['f'+str(cut_dim)], axis=0) #change sorting here to suit the input data point_list[point_list[:,cut_dim].argsort()] #middle element from sorted list is picked as root to split upn mid = int(len(point_list) / 2) cut_point = point_list[mid].copy() print("cut point ",end=" ") print(cut_point) #next_dimension to split upon next_dim = (cut_dim + 1) % num_dimensions #recursive calls for the nodes(building kdTree) if not a leaf node root = Inode(cut_point, cut_dim, KDTree(point_list[:mid], leaf_size, next_dim), KDTree(point_list[mid:], leaf_size, next_dim)) return root #function to search for k nearest neighbors of a given query_point. 
#accepts, root node, quer_point, and number of neighbors to search 'k' as parameters def KNN(root, query_point, k): #checking if the node is an internal node, we only compare values according to cut_dim #if root.cut_point[axis] > query_point[axis] we go to the left_child of the tree #otherwise to the right_child and resume search if isinstance(root, Inode): axis = root.cut_dim print(root.cut_point[axis]," query ", query_point) if root.cut_point[axis] > query_point[axis]: KNN(root.left_child, query_point, k) else: KNN(root.right_child, query_point, k) #otherwise if node is a leaf node, we search nearest node among the point_list of that leaf node else: # Creating a distance list from the leaf nodes closest_points = root.point_list #futurework: add root node of leaf nodes also in here for optimal results distance = [] #calculating distance of all the points in leaf node from the query_point and adding into distance list for i in range(len(closest_points)): #Calculating distance (2-d) and adding to list distance, along with the index of the point, i. distance.append([math.sqrt(pow(closest_points[i][0]-query_point[0],2)+pow(closest_points[i][1]-query_point[1],2)),i]) distance = np.array(distance) #sorting the distance array based on the distances in the ascending order, while preserving their index(position) in point_list distance.view('f8,i8').sort(order=['f0'], axis=0) #print(distance) print(str(k)+" Nearest points to the query point"+str(query_point)+"according to the KD_Tree built are:") nn = [] #taking only closest k points and adding into nearest_neighbor list for x in distance[:k]: #use print(x) to print distances of the closest k points #print(x) nn.append(closest_points[int(x[1])]) #printing the knn #print(nn,end="\n\n") print(distance) #function to generate 'num_points' random points of 'dim' dimensions. def generate_data(data_type): if data_type == 1: df = pd.read_csv('DataSets/Lung.txt',sep="\s+",header=None) data_size = df.shape[0] print("Taking Lung(181x12533) as input data") data = df.iloc[:data_size-1, :12533] #converting to numpy array data = np.array(data) elif data_type == 2: df = pd.read_csv('DataSets/Leukimia.txt',sep="\s+",header=None) print("Taking Leukimia(72x7129) as input data") data = df.iloc[:, :7129] #converting to numpy array data = np.array(data) elif data_type == 3: df = pd.read_csv('DataSets/GCM.txt',sep="\s+",header=None) print("Taking GCM(16064x280) as input data") data = df.iloc[:, :280] #converting to numpy array data = np.array(data).transpose() elif data_type == 4: df = pd.read_csv('DataSets/Prostate.txt',sep="\s+",header=None) print("Taking Prostate(181x12600) as input data") data = df.iloc[:, :12600] #converting to numpy array data = np.array(data) else: print("Generating normalized random data(1000x10000) as input data") data = np.random.rand(10000,1000) return data #where execution starts if __name__ == "__main__": print("Building the kdTree using Magnitude of pointVectors with given set of Points:") data_type = int(input("Choose appropriate input\n 1. Lung data set \n 2. Leukimia\n 3. GCM\n 4. Prostate \n 0. randomly generated data:\n")) #calling generate_data() for data to be generated/read. data = generate_data(data_type) n,d,k = 10000,1000,5 if data_type == 1: df = pd.read_csv('DataSets/Lung.txt',sep="\s+",header=None) data_size = df.shape[0] print("Taking Lung(181x12533) as input data") query_point = df.iloc[data_size-1:data_size, :12533] query_point =
np.array(query_point)
numpy.array
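The api column above names numpy.array; a minimal standalone sketch of the row's completion, converting a pandas DataFrame slice (the query point) to a numpy array, using synthetic data rather than the Lung dataset, is:

import numpy as np
import pandas as pd

# Synthetic stand-in for the dataset: 5 samples, 4 features.
df = pd.DataFrame(np.random.rand(5, 4))

# Take the last row as the query point and convert it to a numpy array.
query_point = df.iloc[4:5, :4]
query_point = np.array(query_point)  # shape (1, 4)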
import numpy as np from scipy import stats """ Created on Tues Jan 28 11:59 2020 @author: <NAME> ========================================================================= Purpose: Outputs FaIR trend + IV data required to plot Supp Fig 8 ========================================================================= """ # Required functions exec(open('Priestley-Centre/Near_term_warming/analysis_figure_code/'+\ 'my_boxplot.py').read()) # Required directories loaddir_IV_CMIP = 'Priestley-Centre/Near_term_warming/analysis_figure_'+\ 'code/SuppFig8/saved_data' loaddir_IV_obs = 'Priestley-Centre/Near_term_warming/IV_data' loaddir_FAIR = 'Priestley-Centre/Near_term_warming/FaIR_data/temps' savedir = 'Priestley-Centre/Near_term_warming/analysis_figure_code/'+\ 'SuppFig8/saved_data' # Choose output IV = 'obs' # 'obs' or 'model' obs = 'HadOST' # 'HadOST', 'Be' or 'CW' model = 'BCC-CSM2-MR' # 'BCC-CSM2-MR' or 'MIROC-ES2L' ### ------ Load in FaIR data ------ gsat_NDC_f = np.loadtxt(loaddir_FAIR+'/NDC_temps.csv',delimiter=',',\ dtype='str')[1:,1:].astype('float') gsat_19_f = np.loadtxt(loaddir_FAIR+'/ssp119_temps.csv',delimiter=',',\ dtype='str')[1:,1:].astype('float') gsat_26_f = np.loadtxt(loaddir_FAIR+'/ssp126_temps.csv',delimiter=',',\ dtype='str')[1:,1:].astype('float') gsat_70_f = np.loadtxt(loaddir_FAIR+'/ssp370_temps.csv',delimiter=',',\ dtype='str')[1:,1:].astype('float') gsat_85_f = np.loadtxt(loaddir_FAIR+'/ssp585_temps.csv',delimiter=',',\ dtype='str')[1:,1:].astype('float') years_f = np.loadtxt(loaddir_FAIR+'/NDC_temps.csv',delimiter=',',\ dtype='str')[1:,0].astype('float') ### ------ Calculate FaIR trends ------ # Find years 2021-2040 ind1 = np.where(years_f == 2021.)[0][0] ind2 = np.where(years_f == 2040.)[0][0] # Calculate /year trends gsat_trend_f_NDC = np.zeros(500) gsat_trend_f_19 = np.zeros(500) gsat_trend_f_26 = np.zeros(500) gsat_trend_f_70 = np.zeros(500) gsat_trend_f_85 = np.zeros(500) for mem in xrange(0,500): [m,c,r,p,SE] = stats.linregress(years_f[ind1:ind2+1],\ gsat_NDC_f[ind1:ind2+1,mem]) gsat_trend_f_NDC[mem] = m [m,c,r,p,SE] = stats.linregress(years_f[ind1:ind2+1],\ gsat_19_f[ind1:ind2+1,mem]) gsat_trend_f_19[mem] = m [m,c,r,p,SE] = stats.linregress(years_f[ind1:ind2+1],\ gsat_26_f[ind1:ind2+1,mem]) gsat_trend_f_26[mem] = m [m,c,r,p,SE] = stats.linregress(years_f[ind1:ind2+1],\ gsat_70_f[ind1:ind2+1,mem]) gsat_trend_f_70[mem] = m [m,c,r,p,SE] = stats.linregress(years_f[ind1:ind2+1],\ gsat_85_f[ind1:ind2+1,mem]) gsat_trend_f_85[mem] = m # Calculate decadal trend gsat_trend_f_NDC = gsat_trend_f_NDC*10 gsat_trend_f_19 = gsat_trend_f_19*10 gsat_trend_f_26 = gsat_trend_f_26*10 gsat_trend_f_70 = gsat_trend_f_70*10 gsat_trend_f_85 = gsat_trend_f_85*10 ### ------ Load in estimate of internal variability --------- if IV == 'obs': int_var = np.load(loaddir_IV_obs+'/gsat_20ytrends_Haus_res_'+obs+'.npy') elif IV == 'model': int_var = np.load(loaddir_IV_CMIP+'/gsat_20ytrends_CMIP6_piControl'+\ '_'+model+'.npy') nt_var = len(int_var) nt_f = len(gsat_trend_f_NDC) # Add internal variability to FaIR trends gsat_trend_f_var_NDC = np.expand_dims(gsat_trend_f_NDC,1) + \ np.expand_dims(int_var,0) gsat_trend_f_var_19 = np.expand_dims(gsat_trend_f_19,1) + \ np.expand_dims(int_var,0) gsat_trend_f_var_26 = np.expand_dims(gsat_trend_f_26,1) + \ np.expand_dims(int_var,0) gsat_trend_f_var_70 = np.expand_dims(gsat_trend_f_70,1) + \ np.expand_dims(int_var,0) gsat_trend_f_var_85 = np.expand_dims(gsat_trend_f_85,1) + \ np.expand_dims(int_var,0) # Collapse into 1d gsat_trend_f_var_NDC =
np.reshape(gsat_trend_f_var_NDC,nt_f*nt_var)
numpy.reshape
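The api column above names numpy.reshape; a minimal standalone sketch of the row's completion, collapsing the (FaIR members x internal-variability samples) trend array to 1-D (the sizes below are made up), is:

import numpy as np

nt_f, nt_var = 500, 150               # assumed sizes for illustration
trends = np.zeros((nt_f, nt_var))      # FaIR trend + IV combinations
flat = np.reshape(trends, nt_f * nt_var)  # 1-D array of length 75000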
#!/usr/bin/env python
import argparse
import numpy as np
import sys
from converter_base import BeamConverter
import glob
from scipy.interpolate import interp1d

try:
    import lusee
    have_lusee = True
except:
    have_lusee = False


class CST2LBeam(BeamConverter):

    def __init__ (self, root, thetamax = 90, maxfreq = None):
        BeamConverter.__init__(self, root, thetamax)
        self.maxfreq = 1e30 if maxfreq is None else maxfreq

    def load(self):
        glob_pattern = self.root + '/ffs/*.ffs'
        flist = glob.glob(glob_pattern)
        if len(flist) == 0:
            print(f"Cannot find files in {glob_pattern}")
            assert(False)
        beam_data = []
        print("Loading frequencies: ", end="")
        for fname in flist:
            freq = fname.split("_")[-2]
            assert("khz" in freq)
            freq = float(freq[:-3]) / 1e3
            if (freq > self.maxfreq):
                continue
            print(f"{freq} ... ", end="")
            sys.stdout.flush()
            lines = open(fname).readlines()
            skip = True
            for line in lines:
                if skip:
                    if ("// >> Phi, Theta, Re(E_Theta), Im(E_Theta), Re(E_Phi), Im(E_Phi):" in line):
                        skip = False
                else:
                    line = line.split()
                    if len(line) == 6:
                        line = [float(x) for x in line[:6]]
                        beam_data.append([freq] + line)
        print('done.')
        beam =
np.array(beam_data)
numpy.array
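The api column above names numpy.array; a minimal standalone sketch of the row's completion, stacking the accumulated per-line beam rows (frequency plus the six far-field columns) into a 2-D ndarray, with made-up values, is:

import numpy as np

# Each entry: [freq, phi, theta, Re(E_theta), Im(E_theta), Re(E_phi), Im(E_phi)]
beam_data = [[1.0, 0.0, 0.0, 0.1, 0.2, 0.3, 0.4],
             [1.0, 1.0, 0.0, 0.5, 0.6, 0.7, 0.8]]
beam = np.array(beam_data)  # shape (n_rows, 7)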
"""Tests cac.data.transforms.DataProcessor""" import unittest import math import numpy as np import torch import random from torchaudio.transforms import TimeStretch import librosa from numpy.testing import assert_array_equal, assert_raises, \ assert_array_almost_equal from cac.config import Config from cac.data.utils import read_dataset_from_config from cac.data.transforms import DataProcessor, STFT, TimeMasking,\ FrequencyMasking, BackgroundNoise, RandomCrop, RandomPad, Volume,\ Flatten, Squeeze, Unsqueeze, Ensemble, Reshape, ISTFT, Standardize, \ Identity, Flip, Sometimes, TimeStretch, AddValue, Transpose, Log, \ FixedPad, Duration, Tempo, Onsets, \ RMSEnergy, SpectralRolloff, SpectralCentroid, ZeroCrossingRate, \ DeltaMFCC, AxisStats, ToNumpy, ToTensor, BackgroundNoiseOnImage class DataProcessorTestCase(unittest.TestCase): """Class to run tests on DataProcessor""" @classmethod def setUpClass(cls): dataset_config = { 'name': 'flusense', 'version': 'default', 'mode': 'val' } data_info = read_dataset_from_config(dataset_config) cls.signal, cls.rate = librosa.load(data_info['file'][0]) cls.numpy_signal = cls.signal.copy() cls.signal = torch.from_numpy(cls.signal) cls.default_stats = ['Mean', 'Median', 'Min', 'Max', 'RMS', 'FirstQuartile',\ 'ThirdQuartile', 'IQR', 'StandardDeviation', 'Skewness', 'Kurtosis'] def test_time_stretch(self): """Checks TimeStretch""" dummy = torch.rand((2, 201, 100)) processor = TimeStretch(max_rate=1.3, hop_length=160, n_freq=201) t_signal, rate = processor(dummy, return_rate=True) self.assertTrue(rate >= 1 and rate <= 1.3) self.assertEqual(t_signal.shape, (2, 201, math.ceil(100 / rate))) def test_log_2(self): """Checks Log transform with base 2""" dummy = torch.rand((2, 201, 100)) processor = Log(base=2) t_signal = processor(dummy) target = np.log2(dummy) assert_array_almost_equal(target, t_signal, decimal=5) def test_log_natural(self): """Checks Log transform with base e""" dummy = torch.rand((2, 201, 100)) processor = Log(base='natural') t_signal = processor(dummy) target = np.log(dummy) assert_array_almost_equal(target, t_signal, decimal=5) def test_log_10(self): """Checks Log transform with base 10""" dummy = torch.rand((2, 201, 100)) processor = Log(base=10) t_signal = processor(dummy) target = np.log10(dummy) assert_array_almost_equal(target, t_signal, decimal=5) def test_identity(self): """Checks Identity""" dummy = torch.ones(100) processor = Identity() t_signal = processor(dummy) assert_array_equal(t_signal, dummy) def test_add_value(self): """Checks AddValue""" dummy = torch.ones(100) processor = AddValue(val=0.1) t_signal = processor(dummy) assert_array_equal(t_signal, 1.1) def test_transpose(self): """Checks AddValue""" dummy = torch.ones((10, 20)) processor = Transpose(0, 1) t_signal = processor(dummy) self.assertEqual(t_signal.shape, (20, 10)) def test_flip_1d(self): """Checks Flip with 1D input""" dummy = torch.tensor([0, 1, 2]) processor = Flip() t_signal = processor(dummy) assert_array_equal(t_signal, [2, 1, 0]) def test_flip_2d(self): """Checks Flip with 2D input""" dummy = torch.tensor([[0, 1, 2], [3, 4, 5]]) processor = Flip(dim=1) t_signal = processor(dummy) assert_array_equal(t_signal, [[2, 1, 0], [5, 4, 3]]) def test_sometimes(self): """Checks Sometimes with Flip as transform""" dummy = torch.tensor([0, 1, 2]) transform_cfg = {'name': 'Flip', 'params': {}} processor = Sometimes(transform_cfg, prob=0.5) transformed = 0 not_transformed = 0 random.seed(10) for _ in range(10): t_signal = processor(dummy) try: assert_array_equal(t_signal, [2, 
1, 0]) transformed += 1 except AssertionError: not_transformed += 1 self.assertTrue(not_transformed > 0) self.assertTrue(transformed > 0) def test_no_transform(self): """Checks the case with no signal transform applied""" config = [] processor = DataProcessor(config) transformed_signal = processor(self.signal) assert_array_equal(self.signal, transformed_signal) def test_standardize_with_mean_false(self): """Tests Standardize with use_mean=False""" dummy = torch.tensor([[0.1, 0.5, 0.6], [0.2, 0.4, 0.3]]) std = dummy.std() t_dummy = Standardize('mean-std', use_mean=False)(dummy) target = dummy / std assert_array_equal(target, t_dummy) def test_standardize_with_std_false(self): """Tests Standardize with use_std=False""" dummy = torch.tensor([[0.1, 0.5, 0.6], [0.2, 0.4, 0.3]]) mean = dummy.mean() t_dummy = Standardize('mean-std', use_std=False)(dummy) target = dummy - mean assert_array_equal(target, t_dummy) def test_standardize_mean_std(self): """Tests Standardize with mean and std specified""" dummy = torch.tensor([[0.1, 0.5, 0.6], [0.2, 0.4, 0.3]]) mean = 0.2 std = 0.1 t_dummy = Standardize('mean-std', mean=mean, std=std)(dummy) target = (dummy - mean) / std assert_array_almost_equal(target, t_dummy, decimal=4) def test_standardize_mean_no_std(self): """Tests Standardize with only mean specified""" dummy = torch.tensor([[0.1, 0.5, 0.6], [0.2, 0.4, 0.3]]) mean = 0.2 std = dummy.std() t_dummy = Standardize('mean-std', mean=mean)(dummy) target = (dummy - mean) / std assert_array_almost_equal(target, t_dummy, decimal=4) def test_standardize_no_mean_no_std(self): """Tests Standardize with neither mean nor std specified""" dummy = torch.tensor([[0.1, 0.5, 0.6], [0.2, 0.4, 0.3]]) mean = dummy.mean() std = dummy.std() t_dummy = Standardize('mean-std')(dummy) target = (dummy - mean) / std assert_array_equal(target, t_dummy) def test_standardize_mean_std_axis_non_negative(self): """Tests Standardize with mean & std specified along axis (axis >= 0)""" dummy = torch.tensor([[0.1, 0.5, 0.6], [0.2, 0.4, 0.3]]) mean = dummy.mean(dim=1) std = dummy.std(dim=1) t_dummy = Standardize('mean-std', mean_axis=1, std_axis=1)(dummy) target = (dummy - mean.unsqueeze(-1)) / std.unsqueeze(-1) assert_array_almost_equal(target, t_dummy, decimal=5) def test_standardize_mean_std_axis_negative(self): """Tests Standardize with mean & std specified along axis (axis < 0)""" dummy = torch.tensor([[0.1, 0.5, 0.6], [0.2, 0.4, 0.3]]) mean = dummy.mean(dim=-1) std = dummy.std(dim=-1) t_dummy = Standardize('mean-std', mean_axis=-1, std_axis=-1)(dummy) target = (dummy - mean.unsqueeze(-1)) / std.unsqueeze(-1) assert_array_almost_equal(target, t_dummy, decimal=4) def test_standardize_min_max(self): """Tests Standardize with mode=min-max""" dummy = torch.tensor([[0.1, 0.5, 0.6], [0.2, 0.4, 0.3]]) _min = dummy.min() _max = dummy.max() t_dummy = Standardize(mode='min-max')(dummy) target = (dummy - _min) / (_max - _min) assert_array_almost_equal(target, t_dummy, decimal=4) def test_reshape(self): """Tests Reshape""" dummy = torch.ones(100) t_dummy = Reshape(shape=(-1, 10))(dummy) self.assertEqual(t_dummy.shape, (10, 10)) def test_flatten_1d(self): """Tests Flatten on 1D input""" dummy = torch.ones(10) t_dummy = Flatten()(dummy) assert_array_equal(dummy, t_dummy) def test_flatten_2d(self): """Tests Flatten on 2D input""" dummy = torch.ones((2, 10)) t_dummy = Flatten()(dummy) self.assertTrue(t_dummy.shape[0], 20) self.assertTrue(len(t_dummy.shape), 1) def test_squeeze(self): """Tests Squeeze""" dummy = torch.ones((10, 1)) t_dummy = 
Squeeze(dim=-1)(dummy) self.assertTrue(t_dummy.shape, (10,)) def test_unsqueeze(self): """Tests Unsqueeze""" dummy = torch.ones(10) t_dummy = Unsqueeze(dim=-1)(dummy) self.assertTrue(t_dummy.shape, (10, 1)) def test_pca_transform(self): """Checks the case with PCA transform applied""" n_components = 10 config = [ { 'name': 'PCA', 'params': {'n_components': n_components, 'norm': True} } ] processor = DataProcessor(config) signal = torch.randn((32, 100)) pca_signal = processor(signal) self.assertEqual(pca_signal.shape, (32,)) def test_axis_norm_1d(self): """Checks the case with axis-norm transform applied""" config = [ { 'name': 'AxisNorm', 'params': {'order': 2} } ] processor = DataProcessor(config) signal = torch.randn((100)) signal_norm = processor(signal) self.assertEqual(signal_norm, torch.norm(signal, p=2, dim=-1)) def test_axis_norm_2d(self): """Checks the case with axis-norm transform applied""" config = [ { 'name': 'AxisNorm', 'params': {'order': 2} } ] processor = DataProcessor(config) signal = torch.randn((10, 100)) signal_norm = processor(signal) computed_signal_norm = torch.norm(signal, p=2, dim=-1) assert_array_equal(signal_norm, computed_signal_norm) def test_mean_norm_1d(self): """Checks the case with axis-mean transform applied""" config = [ { 'name': 'AxisMean', 'params': {} } ] processor = DataProcessor(config) signal = torch.randn((100)) signal_mean = processor(signal) self.assertEqual(signal_mean, torch.mean(signal, dim=-1)) def test_mean_norm_2d(self): """Checks the case with axis-mean transform applied""" config = [ { 'name': 'AxisMean', 'params': {} } ] processor = DataProcessor(config) signal = torch.randn((10, 100)) signal_mean = processor(signal) computed_signal_mean = torch.mean(signal, dim=-1) assert_array_equal(signal_mean, computed_signal_mean) def test_ensemble_empty(self): config = [] processor = DataProcessor(config) signal = torch.randn((10, 100)) transformed_signal = processor(signal) assert_array_equal(transformed_signal, signal) def test_ensemble_concat(self): transforms_cfg = [ [ { 'name': 'AxisNorm', 'params': {'order': 1} } ], [ { 'name': 'AxisMean', 'params': {} } ] ] processor = Ensemble(transforms_cfg=transforms_cfg, combine='concat') dummy = torch.ones((100, 10)) t_dummy = processor(dummy) self.assertTrue(t_dummy.shape, (200,)) def test_ensemble_stack(self): transforms_cfg = [ [ { 'name': 'AxisNorm', 'params': {'order': 1} } ], [ { 'name': 'AxisMean', 'params': {} } ] ] processor = Ensemble(transforms_cfg=transforms_cfg, combine='stack') dummy = torch.ones((100, 10)) t_dummy = processor(dummy) self.assertTrue(t_dummy.shape, (2, 100)) def test_ensemble_mean_and_norm(self): config = [ { 'name': 'Ensemble', 'params': { 'transforms_cfg': [ [ { 'name': 'AxisNorm', 'params': {'order': 1} } ], [ { 'name': 'AxisMean', 'params': {} } ] ], 'combine': 'concat' } } ] processor = DataProcessor(config) signal = torch.randn((10, 100)) transformed_signal = processor(signal) subsignals = [torch.norm(signal, p=1, dim=-1), torch.mean(signal, dim=-1)] computed_signal = torch.cat(subsignals) assert_array_equal(transformed_signal, computed_signal) def test_noise_addition_transform(self): """Checks the case with noise addition signal transform applied""" seed = 0 noise_scale = 0.005 config = [ { 'name': 'WhiteNoise', 'params': {'noise_scale': noise_scale} } ] torch.manual_seed(seed) processor = DataProcessor(config) pred_transformed_signal = processor(self.signal) torch.manual_seed(seed) noise = torch.randn_like(self.signal) * noise_scale gt_transformed_signal = 
self.signal + noise self.assertEqual(self.signal.shape, pred_transformed_signal.shape) assert_array_equal(gt_transformed_signal, pred_transformed_signal) def test_resize_1d(self): """Checks Resize transform with 1D data""" size = (1, 1000) config = [ { 'name': 'Resize', 'params': {'size': size} } ] processor = DataProcessor(config) dummy_signal = torch.zeros(8000) transformed_signal = processor(dummy_signal) self.assertEqual(transformed_signal.shape, (*size[1:],)) def test_resize_2d(self): """Checks Resize transform with 2D data""" size = (128, 20) config = [ { 'name': 'Resize', 'params': {'size': size} } ] processor = DataProcessor(config) dummy_signal = torch.zeros((128, 50)) transformed_signal = processor(dummy_signal) self.assertEqual(transformed_signal.shape, size) def test_resize_3d(self): """Checks Resize transform with 3D data""" size = (128, 20) config = [ { 'name': 'Resize', 'params': {'size': size} } ] processor = DataProcessor(config) dummy_signal = torch.zeros((2, 128, 50)) transformed_signal = processor(dummy_signal) self.assertEqual(transformed_signal.shape, (2, *size)) def test_rescale_transform(self): """Checks Resize transform""" config = [ { 'name': 'Rescale', 'params': {'value': 255} } ] processor = DataProcessor(config) dummy_signal = torch.ones(100) * 255. transformed_signal = processor(dummy_signal) self.assertTrue(transformed_signal.max(), 1.0) def test_spectrogram_transform(self): """Tests Spectrogram with no window specified""" n_fft = 440 config = [ { 'name': 'Spectrogram', 'params': {'n_fft': n_fft} } ] # hard coded for this particular file num_frames = 21304 processor = DataProcessor(config) signal = processor(self.signal) self.assertEqual(signal.shape, (n_fft // 2 + 1, num_frames)) def test_spectrogram_transform_complex(self): """Checks the case with spectrogram transform applied""" n_fft = 440 config = [ { 'name': 'Spectrogram', 'params': {'n_fft': n_fft, 'power': None} } ] # hard coded for this particular file num_frames = 21304 processor = DataProcessor(config) signal = processor(self.signal) self.assertEqual(signal.shape, (2, n_fft // 2 + 1, num_frames)) def test_spectrogram_transform_window(self): """Tests Spectrogram with 'hann' window specified""" n_fft = 440 config = [ { 'name': 'Spectrogram', 'params': {'n_fft': n_fft, 'window': 'hann'} } ] # hard coded for this particular file num_frames = 21304 processor = DataProcessor(config) signal = processor(self.signal) self.assertEqual(signal.shape, (n_fft // 2 + 1, num_frames)) def test_gtfb(self): """Tests GTFB""" num_filters = 20 config = [ { 'name': 'GTFB', 'params': { 'num_filters': num_filters, 'low_freq': 100 } } ] processor = DataProcessor(config) signal = processor(self.signal) self.assertEqual(len(signal.shape), 2) self.assertEqual(signal.shape[0], num_filters) self.assertIsInstance(signal, torch.Tensor) def test_melspectrogram_transform(self): """Checks the case with mel-spectrogram transform applied""" n_mels = 128 config = [ { 'name': 'MelSpectrogram', 'params': {'n_mels': n_mels, 'win_length': None, 'hop_length': None} } ] # hard coded for this particular file num_frames = 23435 processor = DataProcessor(config) signal = processor(self.signal) self.assertEqual(signal.shape, (n_mels, num_frames)) def test_amplitude_to_db_transform(self): """Checks the case with amplitude-to-DB (log) transform applied""" config = [ { 'name': 'AmplitudeToDB', 'params': {} } ] processor = DataProcessor(config) signal = processor(self.signal) self.assertEqual(signal.shape, self.signal.shape) def 
test_resample_transform(self): """Checks the case with resample transform applied""" config = [ { 'name': 'Resample', 'params': { 'orig_freq': 32000, 'new_freq': 16000 } } ] processor = DataProcessor(config) output = processor(self.signal) # number of output samples should be half the number of input # samples self.assertEqual(output.shape[0], self.signal.shape[0] // 2) def test_mfcc_transform(self): """Checks the case with MFCC transform applied""" config = [ { 'name': 'MFCC', 'params': {'sample_rate': self.rate, 'n_mfcc': 40} } ] # hard coded for this particular file n_mfcc = 40 num_frames = 23435 processor = DataProcessor(config) signal = processor(self.signal) self.assertEqual(signal.shape, (n_mfcc, num_frames)) def test_stft_no_window(self): """Tests STFT transform without window specified""" processor = STFT(n_fft=400) output = processor(self.signal) # size should be num_frames x (n_fft // 2 + 1) x 2 self.assertNotIn('window', processor.kwargs) self.assertEqual(output.shape[0], 2) self.assertEqual(output.shape[1], 201) def test_stft_hann_window(self): """Tests STFT transform with `hann` window specified""" processor = STFT(n_fft=400, window='hann') output = processor(self.signal) # size should be num_frames x (n_fft // 2 + 1) x 2 self.assertIn('window', processor.kwargs) self.assertEqual(len(processor.kwargs['window']), 400) self.assertEqual(output.shape[0], 2) self.assertEqual(output.shape[1], 201) def test_istft_no_window(self): """Tests ISTFT transform without window specified""" dummy = torch.ones((2, 257, 200)) processor = ISTFT(n_fft=512) output = processor(dummy) self.assertNotIn('window', processor.kwargs) self.assertEqual(len(output.shape), 1) def test_istft_hann_window(self): """Tests ISTFT transform with `hann` window specified""" dummy = torch.ones((2, 257, 200)) processor = ISTFT(n_fft=512, window='hann') output = processor(dummy) self.assertIn('window', processor.kwargs) self.assertEqual(len(processor.kwargs['window']), 512) self.assertEqual(len(output.shape), 1) def test_istft_channels_last(self): """Tests ISTFT transform with input in channels_last format""" dummy = torch.ones((257, 200, 2)) processor = ISTFT(n_fft=512, channels_first=False) output = processor(dummy) self.assertEqual(len(output.shape), 1) def test_time_masking_small_size(self): """Tests TimeMasking with mask size < len(input)""" dummy_input = torch.ones((128, 20)) processor = TimeMasking(max_len=50, min_num=1, max_num=5) signal = processor(dummy_input, return_mask_params=True) assert_array_equal(signal, dummy_input) def test_time_masking_2d(self): """Tests TimeMasking with 2D input""" dummy_input = torch.ones((128, 20)) processor = TimeMasking(max_len=5, min_num=1, max_num=5) signal, mask_params = processor(dummy_input, return_mask_params=True) signal = signal.numpy() for mask_param in mask_params: start_index, length = mask_param assert_array_equal(signal[:, start_index: start_index + length], 0) def test_time_masking_3d(self): """Tests TimeMasking with 3D input""" dummy_input = torch.ones((2, 128, 20)) processor = TimeMasking(max_len=5, min_num=1, max_num=5) signal, mask_params = processor(dummy_input, return_mask_params=True) signal = signal.numpy() for mask_param in mask_params: start_index, length = mask_param assert_array_equal(signal[:, :, start_index: start_index + length], 0) def test_frequency_masking_2d(self): """Tests FrequencyMasking with 2D input""" dummy_input = torch.ones((128, 20)) processor = FrequencyMasking(max_len=5, min_num=1, max_num=5) signal, mask_params = 
processor(dummy_input, return_mask_params=True) signal = signal.numpy() for mask_param in mask_params: start_index, length = mask_param assert_array_equal(signal[start_index: start_index + length], 0) def test_frequency_masking_3d(self): """Tests FrequencyMasking with 3D input""" dummy_input = torch.ones((2, 128, 20)) processor = FrequencyMasking(max_len=5, min_num=1, max_num=5) signal, mask_params = processor(dummy_input, return_mask_params=True) signal = signal.numpy() for mask_param in mask_params: start_index, length = mask_param assert_array_equal(signal[:, start_index: start_index + length], 0) def test_frequency_masking_3d_deterministic(self): """Tests FrequencyMasking with 3D input with determistic=True""" dummy_input = torch.ones((2, 128, 20)) processor = FrequencyMasking(max_len=64, start_index=0, deterministic=True) signal = processor(dummy_input) assert_array_equal(signal[:, :64], 0) def test_random_crop_valid_longer(self): """Tests RandomCrop with valid input shape and length > crop_size""" dummy_input = torch.ones(441000) processor = RandomCrop(crop_size=44100) signal = processor(dummy_input) self.assertEqual(signal.shape[0], 44100) def test_random_crop_valid_equal(self): """Tests RandomCrop with valid input shape and length = crop_size""" dummy_input = torch.ones(44100) processor = RandomCrop(crop_size=44100) signal = processor(dummy_input) self.assertEqual(signal.shape[0], 44100) def test_random_crop_valid_shorter(self): """Tests RandomCrop with valid input shape and length < crop_size""" dummy_input = torch.ones(22050) processor = RandomCrop(crop_size=44100) signal = processor(dummy_input) self.assertEqual(signal.shape[0], 44100) def test_random_crop_2d_valid_longer(self): """Tests RandomCrop with 2d input and length > crop_size""" dummy_input = torch.ones((257, 200)) processor = RandomCrop(crop_size=100, dim=-1) signal = processor(dummy_input) self.assertEqual(signal.shape, (257, 100)) def test_random_crop_2d_valid_equal(self): """Tests RandomCrop with 2d input and length = crop_size""" dummy_input = torch.ones((257, 100)) processor = RandomCrop(crop_size=100, dim=-1) signal = processor(dummy_input) self.assertEqual(signal.shape, (257, 100)) def test_random_crop_2d_valid_shorter(self): """Tests RandomCrop with 2d input and length < crop_size""" dummy_input = torch.ones((257, 50)) processor = RandomCrop(crop_size=100, dim=-1) signal = processor(dummy_input) self.assertEqual(signal.shape, (257, 100)) def test_random_pad_valid_1d(self): """Tests RandomPad with valid 1D input""" dummy_input = torch.ones(44000) processor = RandomPad(target_size=44100) signal = processor(dummy_input) self.assertEqual(signal.shape[0], 44100) def test_random_pad_valid_2d(self): """Tests RandomPad with valid 2D input""" dummy_input = torch.ones((128, 23)) processor = RandomPad(target_size=30) signal = processor(dummy_input) self.assertEqual(signal.shape[-1], 30) def test_random_pad_valid_3d(self): """Tests RandomPad with valid 3D input""" dummy_input = torch.ones((2, 128, 23)) processor = RandomPad(target_size=30) signal = processor(dummy_input) self.assertEqual(signal.shape[-1], 30) def test_random_pad_valid_equal(self): """Tests RandomPad with input length = target_size""" dummy_input = torch.ones(44100) processor = RandomPad(target_size=44100) signal = processor(dummy_input) assert_array_equal(signal.numpy(), dummy_input.numpy()) def test_random_pad_invalid_shape(self): """Tests RandomPad with invalid input shape""" dummy_input = torch.ones((2, 2, 2, 2)) processor = RandomPad(target_size=44100) 
with self.assertRaises(ValueError): signal = processor(dummy_input) def test_random_pad_invalid_axis(self): """Tests RandomPad with invalid axis""" dummy_input = torch.ones(44000) processor = RandomPad(target_size=44100, axis=1) with self.assertRaises(ValueError): signal = processor(dummy_input) def test_random_pad_invalid_longer(self): """Tests RandomPad with invalid input length""" dummy_input = torch.ones((44101)) processor = RandomPad(target_size=44100) with self.assertRaises(ValueError): signal = processor(dummy_input) def test_fixed_pad_valid_1d_repeat(self): """Tests FixedPad with valid 1D input with repeat""" dummy_input = torch.rand(44000) processor = FixedPad(target_size=44100, pad_mode='repeat') signal = processor(dummy_input) self.assertEqual(signal.shape[0], 44100) assert_array_equal(signal[44000:], signal[:100]) def test_fixed_pad_valid_1d_repeat_multiple_full(self): """Tests FixedPad with valid 1D input with repeat multiple""" dummy_input = torch.rand(20000) processor = FixedPad(target_size=44100, pad_mode='repeat') signal = processor(dummy_input) self.assertEqual(signal.shape[0], 44100) assert_array_equal(signal[20000: 40000], dummy_input) assert_array_equal(signal[40000:], dummy_input[:4100]) def test_fixed_pad_valid_2d_repeat(self): """Tests FixedPad with valid 2D input with repeat""" dummy_input = torch.rand((128, 23)) processor = FixedPad(target_size=30, pad_mode='repeat') signal = processor(dummy_input) self.assertEqual(signal.shape[-1], 30) assert_array_equal(signal[:, 23:], signal[:, :7]) def test_fixed_pad_valid_3d_repeat(self): """Tests FixedPad with valid 3D input with repeat""" dummy_input = torch.rand((2, 128, 23)) processor = FixedPad(target_size=30, pad_mode='repeat') signal = processor(dummy_input) self.assertEqual(signal.shape[-1], 30) assert_array_equal(signal[:, :, 23:], signal[:, :, :7]) def test_fixed_pad_valid_1d_reflect(self): """Tests FixedPad with valid 1D input with reflect""" dummy_input = torch.rand(44000) processor = FixedPad(target_size=44100, pad_mode='reflect') signal = processor(dummy_input) self.assertEqual(signal.shape[0], 44100) assert_array_equal( signal[44000:], torch.flip(dummy_input[-101:-1], (0,))) def test_fixed_pad_valid_1d_reflect_multiple_full(self): """Tests FixedPad with valid 1D input with reflect multiple""" dummy_input = torch.rand(20000) processor = FixedPad(target_size=44100, pad_mode='reflect') signal = processor(dummy_input) self.assertEqual(signal.shape[0], 44100) assert_array_equal(signal[20000: 40000], torch.flip(dummy_input, (0,))) assert_array_equal(signal[40000:], dummy_input[1:4101]) def test_fixed_pad_valid_2d_reflect(self): """Tests FixedPad with valid 2D input with reflect""" dummy_input = torch.rand((128, 23)) processor = FixedPad(target_size=30, pad_mode='reflect') signal = processor(dummy_input) self.assertEqual(signal.shape[-1], 30) assert_array_equal( signal[:, 23:], torch.flip(dummy_input[:, -8:-1], (1,))) def test_fixed_pad_valid_3d_reflect(self): """Tests FixedPad with valid 3D input with reflect""" dummy_input = torch.rand((2, 128, 23)) processor = FixedPad(target_size=30, pad_mode='reflect') signal = processor(dummy_input) self.assertEqual(signal.shape[-1], 30) assert_array_equal( signal[:, :, 23:], torch.flip(dummy_input[:, :, -8:-1], (2,))) def test_fixed_pad_valid_equal(self): """Tests FixedPad with input length = target_size for reflect""" dummy_input = torch.rand(44100) processor = FixedPad(target_size=44100, pad_mode='reflect') signal = processor(dummy_input) assert_array_equal(signal.numpy(), 
dummy_input.numpy()) def test_fixed_pad_valid_1d(self): """Tests FixedPad with valid 1D input""" dummy_input = torch.ones(44000) processor = FixedPad(target_size=44100) signal = processor(dummy_input) self.assertEqual(signal.shape[0], 44100) assert_array_equal(signal[44000:], 0) def test_fixed_pad_valid_2d(self): """Tests FixedPad with valid 2D input""" dummy_input = torch.ones((128, 23)) processor = FixedPad(target_size=30) signal = processor(dummy_input) self.assertEqual(signal.shape[-1], 30)
assert_array_equal(signal[:, 23:], 0)
numpy.testing.assert_array_equal
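The api column above names numpy.testing.assert_array_equal; a minimal standalone sketch of the padding check in the prompt's last test, using a made-up zero tensor rather than the FixedPad output, is:

import torch
from numpy.testing import assert_array_equal

# Stand-in for a signal padded from width 23 to 30; the padded tail must be 0.
signal = torch.zeros((128, 30))
assert_array_equal(signal[:, 23:], 0)  # passes silently; raises AssertionError otherwise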
#!/usr/bin/env python3 """Example 6.2, page 125""" import copy import multiprocessing as mp import numpy as np import matplotlib.pyplot as plt # Create graph: vertices are states, edges are actions (transitions) STATE_ACTIONS = {'left': ('left', 'left'), 'a': ('left', 'b'), 'b': ('a', 'c'), 'c': ('b', 'd'), 'd': ('c', 'e'), 'e': ('d', 'right'), 'right': ('right', 'right')} # List of states STATES = list(STATE_ACTIONS.keys()) TERMINALS = 'left', 'right' # Transition probabilities PROBABILITIES = np.full((len(STATES), 2), [0.5, 0.5]) # State values (probability to reach 'Right' state) INIT_VALUES = np.full(len(STATES), 0.5)
np.put(INIT_VALUES, [0, -1], 0)
numpy.put
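The api column above names numpy.put; a minimal standalone sketch of the row's completion, zeroing the two terminal states in place (indices 0 and -1 are 'left' and 'right'), is:

import numpy as np

INIT_VALUES = np.full(7, 0.5)          # 7 states: left, a, b, c, d, e, right
np.put(INIT_VALUES, [0, -1], 0)        # terminal states get value 0 in place
# INIT_VALUES is now [0, 0.5, 0.5, 0.5, 0.5, 0.5, 0]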
# -*- coding: utf-8 -*- """ This is a small module for conveniently reading .RiDat binary files in Python. It only requires numpy to work. The main function is "read_ridat_file", which receives the file's path as input, and returns 4 things: ....the time data of the acquisition ....the real component of the signal ....the imaginary component of the signal ....the acquisition parameters (a class with "System", "Application" and "Processing" parameters, and a title string) Another function is "export_ridat_data_to_text_file", which opens a .RiDat file and export the time, real and imaginary arrays to a text file, with values separated by '\t' (default) or another delimiter defined by the yser. The remaining functions are for reading the file's bytes into data types, while the classes only serve as a way to wrap all of the 70343196 zillion parameters inside these .RiDat files. These are divided in "System", "Application" and "Processing" parameters. There are also "RfChannel" parameters. I didn't kept track of what all these parameters means, but it appears that most of them have the same name inside RINMR. This is a free software with a MIT License. Created by <NAME> -> <EMAIL> """ __author__ = '<NAME>' __copyright__ = '2019' __credits__ = ["UFFLAR", 'Schlumberger'] __license__ = 'MIT License' __version__ = '1.0' __maintainer__ = '<NAME>' __email__ = '<EMAIL>' __status__ = 'active' import struct import numpy as np # Class for storing Rf Channel parameters class RfChannelsParameters(object): def __init__(self): self.SF = 0.0 self.Offset = 0.0 self.MultReg = 0 self.PhaseTwiddle = 0 self.ChanAOffset = 0 self.ChanBOffset = 0 self.ExtAPhaseTrim = 0 self.ExtAAmpTrim = 0 self.ExtBPhaseTrim = 0 self.ExtBAmpTrim = 0 self.IntAAmpTrim = 0 self.IntBAmpTrim = 0 self.PhaseTrim0 = 0 self.AmpTrim0 = 0 self.PhaseTrim90 = 0 self.AmpTrim90 = 0 self.PhaseTrim180 = 0 self.AmpTrim180 = 0 self.PhaseTrim270 = 0 self.AmpTrim270 = 0 self.quadtrim = 0 # Class for storing System parameters class SysParameters(object): def __init__(self): self.Dead1 = 0.0 self.Dead2 = 0.0 self.P90 = 0.0 self.P180 = 0.0 self.rf_channels = [RfChannelsParameters(), RfChannelsParameters(), RfChannelsParameters()] self.GSH1, self.GSH2, self.GSH3, self.GSH4, self.GSH5 = "", "", "", "", "" self.EndTime = 0.0 self.PreXK, self.PreXA = np.zeros(4, dtype=np.float64), np.zeros(4, dtype=np.float64) self.PreYK, self.PreYA = np.zeros(4, dtype=np.float64), np.zeros(4, dtype=np.float64) self.PreZK, self.PreZA = np.zeros(4, dtype=np.float64), np.zeros(4, dtype=np.float64) self.XB0K, self.XB0A = 0.0, 0.0 self.YB0K, self.YB0A = 0.0, 0.0 self.ZB0K, self.ZB0A = 0.0, 0.0 self.DummyParam1, self.DummyParam2 = 0.0, 0.0 self.Dec90 = 0.0 self.CPD = "" self.Trigger = 0 self.XB0, self.YB0, self.ZB0 = 0.0, 0.0, 0.0 self.XOffset, self.YOffset, self.ZOffset = 0.0, 0.0, 0.0 self.Acquisition = 0 # Class for storing Application parameters class AppParameters(object): def __init__(self): self.SI = 0 self.DW = 0.0 self.Pulses =
np.zeros(5, dtype=np.float64)
numpy.zeros
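The api column above names numpy.zeros; a minimal standalone sketch of the row's completion, pre-allocating the float64 Pulses array the way the .RiDat parameter classes pre-allocate their other arrays, is:

import numpy as np

# Five pulse-length slots, initialized to 0.0 before the file is parsed.
Pulses = np.zeros(5, dtype=np.float64)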
# Copyright 2019 Huawei Technologies Co., Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================ """test cases for normal distribution""" import numpy as np from scipy import stats import mindspore.context as context import mindspore.nn as nn from mindspore import Tensor from mindspore.common.api import ms_function from mindspore import dtype context.set_context(mode=context.GRAPH_MODE, device_target="Ascend") class Net(nn.Cell): """ Test class: probability of normal distribution. """ def __init__(self): super(Net, self).__init__() self.n = nn.Normal(np.array([3.0]), np.array([[2.0], [4.0]]), dtype=dtype.float32) @ms_function def construct(self, x_): return self.n('prob', x_) class Net1(nn.Cell): """ Test class: log probability of normal distribution. """ def __init__(self): super(Net1, self).__init__() self.n = nn.Normal(np.array([3.0]), np.array([[2.0], [4.0]]), dtype=dtype.float32) @ms_function def construct(self, x_): return self.n('log_prob', x_) class Net2(nn.Cell): """ Test class: kl_loss of normal distribution. """ def __init__(self): super(Net2, self).__init__() self.n = nn.Normal(np.array([3.0]), np.array([4.0]), dtype=dtype.float32) @ms_function def construct(self, x_, y_): return self.n('kl_loss', 'Normal', x_, y_) class Net3(nn.Cell): """ Test class: mean/sd of normal distribution. """ def __init__(self): super(Net3, self).__init__() self.n = nn.Normal(np.array([3.0]), np.array([2.0, 4.0]), dtype=dtype.float32) @ms_function def construct(self): return self.n('mean'), self.n('sd') class Net4(nn.Cell): """ Test class: mean/sd of normal distribution. """ def __init__(self, shape, seed=0): super(Net4, self).__init__() self.n = nn.Normal(np.array([3.0]), np.array([[2.0], [4.0]]), seed=seed, dtype=dtype.float32) self.shape = shape @ms_function def construct(self, mean=None, sd=None): return self.n('sample', self.shape, mean, sd) def test_pdf(): """ Test pdf. """ norm_benchmark = stats.norm(np.array([3.0]), np.array([[2.0], [4.0]])) expect_pdf = norm_benchmark.pdf([1.0, 2.0]).astype(np.float32) pdf = Net() output = pdf(Tensor([1.0, 2.0], dtype=dtype.float32)) tol = 1e-6 assert (np.abs(output.asnumpy() - expect_pdf) < tol).all() def test_log_likelihood(): """ Test log_pdf. """ norm_benchmark = stats.norm(np.array([3.0]), np.array([[2.0], [4.0]])) expect_logpdf = norm_benchmark.logpdf([1.0, 2.0]).astype(np.float32) logprob = Net1() output = logprob(Tensor([1.0, 2.0], dtype=dtype.float32)) tol = 1e-6 assert (np.abs(output.asnumpy() - expect_logpdf) < tol).all() def test_kl_loss(): """ Test kl_loss. """ mean_a = np.array([3.0]).astype(np.float32) sd_a =
np.array([4.0])
numpy.array
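The api column above names numpy.array; a minimal standalone sketch of the row's completion, building the float32 mean/sd inputs for the kl_loss test (the .astype on sd_a is assumed to mirror mean_a; it is not shown in the prompt), is:

import numpy as np

mean_a = np.array([3.0]).astype(np.float32)
sd_a = np.array([4.0]).astype(np.float32)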
import numpy as np import pyDOE2 import sample_generator as sg from copy import deepcopy import os import glob import pickle import sys import emcee from linna.nn import * from scipy.special import erf from scipy.stats import chi2 import io import gc import torch from torch.utils.data import Dataset, DataLoader from torch.utils import mkldnn as mkldnn_utils from linna.util import * import tempfile def ml_sampler(outdir, theory, priors, data, cov, init, pool, nwalkers, gpunode, omegab2cut=None, nepoch=4500, method="zeus", nbest=None, chisqcut=None, loglikelihoodfunc=None): """ LINNA main function with hyperparameters set to values described in To et al. 2022 Args: outdir (string): output directory theory (function): theory model priors (dict of str: [float, float]): string can be either flat or gauss. If the string is 'flat', [a,b] indicates the lower and upper limits of the prior. If the string is 'gauss', [a,b] indicates the mean and sigma. data (1d array): float array, data vector cov (2d array): float array, covariance matrix init (ndarray): initial guess of mcmc, pool (mpi pool, optional): a mpi pool instance that can do pool.map(function, iterables). nwalkers (int) number of mcmc walkers gpunode (string): name of gpu node omegab2cut (list of int): 2 elements containing the lower and upper limits of omegab*h^2 nepoch (int, optional): maximum number of epoch for the neural network training method (string, optional): Samplers. LINNA supports `emcee` and `zeus`(default) nbest (int or list of int): number of points to include in the training set per iteration according to the optimizer chisqcut (float, optional): cut the training data if there chisq is greater than this value loglikelihoodfunc (callable, optional): function of model, data , inverse of covariance matrix and return the log liklihood value. 
If None, then use gaussian likelihood Returns: nd array: MCMC chain 1d array: log probability of MCMC chain """ ntrainArr = [10000, 10000, 10000, 10000] nvalArr = [500, 500, 500, 500] if method=="emcee": nkeepArr = [2, 2, 5, 4] ntimesArr = [5, 5, 10, 15] ntautolArr = [0.03, 0.03, 0.02, 0.01] temperatureArr = [4.0, 2.0, 1.0, 1.0] meanshiftArr = [0.2, 0.2, 0.2, 0.2] stdshiftArr = [0.15,0.15,0.15,0.15] elif method=="zeus": nkeepArr = [2, 2, 5, 5] ntimesArr = [5, 5, 10, 50] ntautolArr = [0.03, 0.03, 0.02, 0.01] temperatureArr = [4.0, 2.0, 1.0, 1.0] meanshiftArr = [0.2, 0.2, 0.2, 0.2] stdshiftArr = [0.15,0.15,0.15,0.15] else: raise NotImplementedError(method) dolog10index = None ypositive = False device = "cuda" docuda=False tsize=1 nnmodel_in = ChtoModelv2 params = {} params["trainingoption"] = 1 params["num_epochs"] = nepoch params["batch_size"] = 500 return ml_sampler_core(ntrainArr, nvalArr, nkeepArr, ntimesArr, ntautolArr, meanshiftArr, stdshiftArr, outdir, theory, priors, data, cov, init, pool, nwalkers, device, dolog10index, ypositive, temperatureArr, omegab2cut, docuda, tsize, gpunode, nnmodel_in, params, method, nbest=nbest, chisqcut=chisqcut, loglikelihoodfunc=loglikelihoodfunc) def ml_sampler_core(ntrainArr, nvalArr, nkeepArr, ntimesArr, ntautolArr, meanshiftArr, stdshiftArr, outdir, theory, priors, data, cov, init, pool, nwalkers, device, dolog10index, ypositive, temperatureArr, omegab2cut=None, docuda=False, tsize=1, gpunode=None, nnmodel_in=None, params=None, method="emcee", nbest=None, chisqcut=None, loglikelihoodfunc=None, nsigma=3): """ LINNA main function Args: ntrainArr (int array): number of training data per iteration nvalArr (int array): number of validation data per iteration nkeepArr (int array): number of autocorrelation time to be kept ntimesArr (int array): number of autocorrelation time to stop mcmc ntautolArr (float array): error limit of autocorrelation time meanshiftArr (float array): limit on mean shift of parameter estimation from the first and second half of the chain stdshiftArr (float array): limit on std shift of parameter estimation from the first and second half of the chain outdir (string): output directory theory (function): theory model priors (dict of str: [float, float]): string can be either flat or gauss. If the string is 'flat', [a,b] indicates the lower and upper limits of the prior. If the string is 'gauss', [a,b] indicates the mean and sigma. 
data (1d array): float array, data vector cov (2d array): float array, covariance matrix init (ndarray): initial guess of mcmc, pool (object): mpi4py pool instance nwalkers (int) number of mcmc walkers device (string): cpu or gpu dolog10index (int array): index of parameters to do log10 ypositive (bool): whether the data vector is expected to be all positive temperatureArr (float array): temperature parameters for each iteration omegab2cut (list of int): 2 elements containing the lower and upper limits of omegab*h^2 docuda (bool): whether do gpu for evaluation tsize (int, optional): number of cores for training gpunode (string): name of gpu node nnmodel_in (string): instance of neural network model params (dictionary): dictionary of parameters method (string): sampling method nbest (int or list of int): number of points to include in the training set per iteration according to the optimizer chisqcut (float, optional): cut the training data if there chisq is greater than this value loglikelihoodfunc (callable): function of model, data , inverse of covariance matrix and return the log liklihood value nsigma (float): the training point in the first iteration will be generated within nsigma of the gaussian prior Returns: nd array: MCMC chain 1d array: log probability of MCMC chain """ ndim = len(init) sigma = np.sqrt(np.diag(cov)) inv_cov = np.linalg.inv(cov) prior_range = [] for item in priors: if item['dist'] == 'flat': prior_range.append([item['arg1'], item['arg2']]) elif item['dist'] == 'gauss': prior_range.append([item['arg1']-5*item['arg2'], item['arg1']+5*item['arg2']]) else: print("not implement dist : {0}".format(item['dist']), flush=True) assert(0) transform = Transform(priors) invtransform = invTransform(priors) init = invtransform(init) if method=="emcee": filename = "chemcee_256.h5" elif method == "zeus": filename = "zeus_256.h5" else: raise NotImplementedError(method) for i, (nt, nv, nk, ntimes, tautol, temperature, meanshift, stdshift) in enumerate(zip(ntrainArr, nvalArr, nkeepArr, ntimesArr, ntautolArr, temperatureArr, meanshiftArr, stdshiftArr)): if isinstance(nbest, list): nbest_in = nbest[i] if nbest_in <=0: nbest_in = None else: nbest_in = nbest if nbest_in is not None: tempdir = tempfile.TemporaryDirectory() def negloglike(x): d = data-theory([-1,x], tempdir) return d.dot(inv_cov.dot(d)) else: negloglike=None temperature = temperature**2 print("#"*100) print("iteration: {0}".format(i), flush=True) print("#"*100) outdir_in = os.path.join(outdir, "iter_{0}/".format(i)) if i==0: chain=None else: chain_name = os.path.join(os.path.join(outdir, "iter_{0}/".format(i-1)), filename[:-3]) if os.path.isfile(chain_name+".h5"): chain_name = chain_name+".h5" chain, _temp, _temp2= read_chain_and_cut(chain_name.format(i-1), nk, ntimes, method=method) else: chain_name = chain_name+".txt" chain = np.loadtxt(chain_name)[-100000:,:-1] #Generate training ntrain = nt nval = nv nnsampler = NN_samplerv1(outdir_in, prior_range) if "trainingoption" in params: options = params['trainingoption'] else: options = 0 generate_training_point(theory, nnsampler, pool, outdir_in, ntrain, nval, data, inv_cov, chain, nsigma=nsigma, omegab2cut=omegab2cut, options=options, negloglike= negloglike, nbest_in=nbest_in, chisqcut=chisqcut) chain = None del chain if i!=0: try: del _temp del _temp2 except: pass gc.collect() if (pool is None) or pool.is_master(): outdir_list = [os.path.join(outdir, "iter_{0}/".format(m)) for m in range(int(i+1))] f = open(outdir_list[-1]+"/model_pickle.pkl", 'wb') pickle.dump(train_NN, 
f) f.close() f = open(outdir_list[-1]+"/model_args.pkl", 'wb') if gpunode is not None: docuda=True else: docuda=torch.cuda.is_available() pickle.dump([nnsampler, cov, inv_cov, sigma, outdir_in, outdir_list, data, dolog10index, ypositive, False, 2, temperature, docuda, None, 1, nnmodel_in, params, nbest_in is not None], f) f.close() if not os.path.isfile(outdir_list[-1] + "/finish.pkl"): if gpunode == 'automaticgpu': while(True): gpufile = os.path.join(outdir, "gpunodeinfo.pkl") try: if os.path.isfile(gpufile): with open(gpufile, 'rb') as f: gpuinfo = pickle.load(f) gpunode = gpuinfo["nodename"] break except: pass if gpunode is not None: print("running gpu on {0}".format(gpunode), flush=True) os.system("cat {2}/train_gpu.py | ssh {0} python - {1} {3}".format(gpunode, outdir_list[-1], os.path.dirname(os.path.abspath(__file__)), "cuda")) while(1): if os.path.isfile(outdir_list[-1] + "/finish.pkl"): break else: os.system("python {1}/train_gpu.py {0} {2}".format(outdir_list[-1], os.path.dirname(os.path.abspath(__file__)), "nocuda")) while(1): if os.path.isfile(outdir_list[-1] + "/finish.pkl"): break #Retrieve model model, y_invtransform_data = retrieve_model(outdir_in, len(init), len(data), nnmodel_in) if not docuda: model.model = model.model.to(memory_format=torch.channels_last) model.MKLDNN=True #Do MCMC if os.path.isfile(os.path.join(outdir_in, filename)): continue invcov_new = torch.from_numpy(inv_cov.astype(np.float32)).to('cpu').detach().clone().requires_grad_() data_new = torch.from_numpy(data.astype(np.float32)).to('cpu').detach().clone().requires_grad_() if loglikelihoodfunc is None: loglikelihoodfunc = gaussianlogliklihood log_prob = Log_prob(data_new, invcov_new, model, y_invtransform_data, transform, temperature, nograd=True, loglikelihoodfunc=loglikelihoodfunc) dlnp = None ddlnp = None if pool is not None: pool.noduplicate=True run_mcmc(nnsampler, outdir_in, method, ndim, nwalkers, init, log_prob, dlnp=dlnp, ddlnp=ddlnp, pool=pool, transform=transform, ntimes=ntimes, tautol=tautol, meanshift=meanshift, stdshift=stdshift, nk=nk) if pool is not None: pool.noduplicate_close() chain_name = os.path.join(os.path.join(outdir, "iter_{0}/".format(len(ntrainArr)-1)), filename[:-3]) if os.path.isfile(chain_name+".h5"): chain_name = chain_name+".h5" chain, log_prob_samples_x, reader = read_chain_and_cut(chain_name.format(len(ntrainArr)-1), nk, ntimes, method=method) log_prob_samples_x = reader.get_log_prob(discard=0, flat=True, thin=1) else: chain_name = chain_name+".txt" chain = np.loadtxt(chain_name)[-100000:,:-1] log_prob_samples_x = np.loadtxt(chain_name)[-100000:,-1] #Optional importance sampling if 'nimp' in params.keys(): if not os.path.isfile(outdir+"/samples_im.npy"): chain_name = os.path.join(os.path.join(outdir, "iter_{0}/".format(len(ntrainArr)-1)), filename[:-3]) if os.path.isfile(chain_name+".h5"): chain_name = chain_name+".h5" chain, log_prob_samples_x, reader = read_chain_and_cut(chain_name.format(len(ntrainArr)-1), nk, ntimes, method=method, flat=True) else: chain_name = chain_name+".txt" chain = np.loadtxt(chain_name)[-100000:,:-1] log_prob_samples_x = np.loadtxt(chain_name)[-100000:,-1] select = np.random.randint(0, len(chain), params['nimp']) chain = chain[select] log_prob_samples_x = log_prob_samples_x[select] np.save(outdir+"/samples_im.npy", chain) np.save(outdir+"/log_prob_samples_x.npy", log_prob_samples_x) else: chain = np.load(outdir+"/samples_im.npy") log_prob_samples_x =
np.load(outdir+"/log_prob_samples_x.npy")
numpy.load
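For reference, a minimal self-contained sketch of the numpy.load round-trip behind the completion above; the file name and array contents are illustrative stand-ins, not part of the dataset row.

import numpy as np

# Save an array to .npy and read it back; np.load returns the stored ndarray.
chain = np.random.rand(1000, 5)          # stand-in for an MCMC chain
np.save("samples_im.npy", chain)
restored = np.load("samples_im.npy")
assert np.allclose(chain, restored)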
import json import numpy as np import matplotlib as mpl import matplotlib.pyplot as plt import csv import time import copy import os from datetime import datetime import error_metrics global gld_num gld_num = '1' os.chdir('/home/ankit/PFO-ADC-DER-Testbed/ADC-DER-Testbed/testbed/post_process') # discard_time = 3600*4 ## loading cosim_manager data lp = open('./cosim_data.json').read() cosim_data = json.loads(lp) ## Appending all cosim data with one more entry for key, value in cosim_data.items(): for k, v in value.items(): if k == 'Timestamp': # v.append(v[-1]+v[-1]-v[-2]) # adding one more timestamp v.append(v[-1] + v[0]) else: v.append(v[-1]) # repeating the last value again cosim_data[key][k] = v cosim_time = cosim_data[list(cosim_data)[0]]['Timestamp'] cosim_data['time'] = np.array([int(i) for i in cosim_time]) # create mapping of each node to its ADC adc_nodes_map=[] adc_file = "./../../../GLD/initial_scenario/ADC_Location/ADC_Placement_by_Voltage_Drop.csv" with open(adc_file, mode='r') as csv_file: for i in range(1): next(csv_file) csv_reader = csv.reader(csv_file) for row in csv_reader: adc_nodes_map.append([row[0], row[-1]]) adc_nodes_map = np.array(adc_nodes_map) #function to return adc name of the input node def find_adc(node, adc_nodes_map=adc_nodes_map): ind = np.where(adc_nodes_map[:,0]==node)[0][0] adc_name = 'M' + gld_num + '_ADC' + adc_nodes_map[ind,1] return adc_name # Loading gld_data.json lp = open('GLD_' + gld_num + '_data.json').read() gld_data = json.loads(lp) # creating a dict to map each adc to the indexes of devices in gld_data for each der type # adc['der']['adc name']=[indexes in the gld data] # t=time.time() # adc_ind = {} # der_type=[['battInv', 'power'], ['solarInv','power'], ['hvac','power'], ['wh','power']] # for der in der_type: # adc_ind[der[0]] = {} # obj = gld_data[der[0]][der[1]]['object_name'] # for a in obj: # b = a.split('_')[-2][1:] # # if 'l102_tm' in a: # if find_adc(b) not in adc_ind[der[0]]: # adc_ind[der[0]][find_adc(b)] = [] # adc_ind[der[0]][find_adc(b)].append(obj.index(a)) # print('elapsed time is ',time.time()-t) # creating a dict to map each adc to the indexes of devices in gld_data for each der type # adc_ind['adc name']['der']=[indexes in the gld data] t=time.time() adc_ind = {} der_type=[['battInv', 'power'], ['solarInv','power'], ['hvac','power'], ['wh','power']] for der in der_type: obj = gld_data[der[0]][der[1]]['object_name'] for a in obj: b = a.split('_')[-2][1:] # if 'l102_tm' in a: if find_adc(b) == 'M1_ADCNONE': continue if find_adc(b) not in adc_ind: adc_ind[find_adc(b)] = {} if der[0] not in adc_ind[find_adc(b)]: adc_ind[find_adc(b)][der[0]]=[] adc_ind[find_adc(b)][der[0]].append(obj.index(a)) # print('elapsed time is ',time.time()-t) #Voltages voltages = np.array(gld_data['hvac']['voltages']['values']).astype(np.cfloat) # Actuation Signals #hrs = gld_data['battInv']['P_Out']['time'] battInv_Pout = np.array(gld_data['battInv']['P_Out']['values']).astype(np.float) battInv_Qout = np.array(gld_data['battInv']['Q_Out']['values']).astype(np.float) solarInv_Pout = np.array(gld_data['solarInv']['P_Out']['values']).astype(np.float) solarInv_Qout = np.array(gld_data['solarInv']['Q_Out']['values']).astype(np.float) hvac_seth = np.array(gld_data['hvac']['heating_setpoint']['values']).astype(np.float) hvac_setc = np.array(gld_data['hvac']['cooling_setpoint']['values']).astype(np.float) hvac_cooling_demand = (np.array(gld_data['hvac']['cooling_demand']['values'])).astype(np.float) hvac_fan_power = 
(np.array(gld_data['hvac']['fan_design_power']['values'])).astype(np.float)/1000 hvac_rating = hvac_cooling_demand+hvac_fan_power hvac_c_thermal_capacity = (np.array(gld_data['hvac']['design_cooling_capacity']['values'])).astype(np.float) hvac_c_cop = (np.array(gld_data['hvac']['cooling_COP']['values'])).astype(np.float) hvac_rating1 = hvac_c_thermal_capacity/12000/hvac_c_cop*3.5168 wh_tanks = np.array(gld_data['wh']['tank_setpoint']['values']).astype(np.float) hvac_c_status = np.array(gld_data['hvac']['cooling_status']['values']).astype(np.float) wh_rating = np.array(gld_data['wh']['heating_element_capacity']['values']).astype(np.float) battInv_rated = (np.array(gld_data['battInv']['rated_power']['values'])).astype(np.float) batt_rated = (np.array(gld_data['batt']['rated_power']['values'])).astype(np.float) solar_rated = (np.array(gld_data['solar']['rated_power']['values'])).astype(np.float) # Device Power Outputs battInv_power = (np.array(gld_data['battInv']['power']['values'])).astype(np.cfloat) solarInv_power = (np.array(gld_data['solarInv']['power']['values'])).astype(np.cfloat) hvac_power = (np.array(gld_data['hvac']['power']['values'])).astype(np.cfloat) wh_power = (np.array(gld_data['wh']['power']['values'])).astype(np.cfloat) solar_VA = (np.array(gld_data['solar']['VA']['values'])).astype(np.cfloat) #aggregating device outputs per adc in adc_agg dict # adc_agg['adc name']['der type']=sum of all devices of der type t=time.time() adc_agg = copy.deepcopy(adc_ind) adc_Prating = {} num_der = {} total_num_der = 0 for adc_num in adc_ind: adc_Prating[adc_num] = {} if "battInv" in adc_agg[adc_num]: adc_agg[adc_num]["battInv"] = np.sum(battInv_power[:, adc_ind[adc_num]['battInv']], 1)/1000 adc_agg[adc_num]["batt_Pout"] = np.sum(battInv_Pout[:, adc_ind[adc_num]['battInv']], 1) / 1000 adc_agg[adc_num]["batt_Qout"] = np.sum(battInv_Qout[:, adc_ind[adc_num]['battInv']], 1) / 1000 adc_agg[adc_num]["total"] = adc_agg[adc_num]["battInv"] adc_Prating[adc_num]["battInv"] = np.sum(battInv_rated[0, adc_ind[adc_num]['battInv']])/1000 adc_Prating[adc_num]["total"] = adc_Prating[adc_num]["battInv"] if "solarInv" in adc_agg[adc_num]: adc_agg[adc_num]["solarInv"] = np.sum(solarInv_power[:, adc_ind[adc_num]['solarInv']], 1) / 1000 adc_agg[adc_num]["solar_Pout"] = np.sum(solarInv_Pout[:, adc_ind[adc_num]['solarInv']], 1) / 1000 adc_agg[adc_num]["solar_Qout"] = np.sum(solarInv_Qout[:, adc_ind[adc_num]['solarInv']], 1) / 1000 adc_agg[adc_num]["total"] = adc_agg[adc_num]["total"] + adc_agg[adc_num]["solarInv"] adc_Prating[adc_num]["solarInv"] = np.sum(solar_rated[0, adc_ind[adc_num]['solarInv']]) / 1000 adc_Prating[adc_num]["solarVA"] = np.sum(solar_VA[:, adc_ind[adc_num]['solarInv']], 1) / 1000 adc_Prating[adc_num]["total"] = adc_Prating[adc_num]["total"] + adc_Prating[adc_num]["solarInv"] if "hvac" in adc_agg[adc_num]: adc_agg[adc_num]["hvac"] = np.sum(hvac_power[:, adc_ind[adc_num]['hvac']], 1) adc_agg[adc_num]["total"] = adc_agg[adc_num]["total"] + adc_agg[adc_num]["hvac"] adc_Prating[adc_num]["hvac"] =
np.sum(hvac_rating[0, adc_ind[adc_num]['hvac']])
numpy.sum
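For reference, a minimal sketch of the numpy.sum aggregation pattern used above (summing selected device columns per timestep); the array values and column indices are illustrative.

import numpy as np

# Rows are timesteps, columns are devices; sum a subset of columns.
power = np.array([[1.0, 2.0, 3.0],
                  [4.0, 5.0, 6.0]])
device_idx = [0, 2]                                # devices mapped to one ADC
per_step_total = np.sum(power[:, device_idx], 1)   # -> array([ 4., 10.])
rating_total = np.sum(power[0, device_idx])        # scalar, as in the completion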
#!/usr/bin/env python # Copyright 2014-2018 The PySCF Developers. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest import copy import numpy from functools import reduce from pyscf import gto, lib from pyscf import scf, dft from pyscf import mp from pyscf import cc from pyscf import ao2mo from pyscf.cc import uccsd from pyscf.cc import gccsd from pyscf.cc import addons from pyscf.cc import uccsd_rdm from pyscf.fci import direct_uhf mol = gto.Mole() mol.verbose = 7 mol.output = '/dev/null' mol.atom = [ [8 , (0. , 0. , 0.)], [1 , (0. , -0.757 , 0.587)], [1 , (0. , 0.757 , 0.587)]] mol.basis = '631g' mol.build() rhf = scf.RHF(mol) rhf.conv_tol_grad = 1e-8 rhf.kernel() mf = scf.addons.convert_to_uhf(rhf) myucc = cc.UCCSD(mf).run(conv_tol=1e-10) mol_s2 = gto.Mole() mol_s2.atom = [ [8 , (0. , 0. , 0.)], [1 , (0. , -0.757 , 0.587)], [1 , (0. , 0.757 , 0.587)]] mol_s2.basis = '631g' mol_s2.spin = 2 mol_s2.verbose = 5 mol_s2.output = '/dev/null' mol_s2.build() mf_s2 = scf.UHF(mol_s2).run() eris = uccsd.UCCSD(mf_s2).ao2mo() def tearDownModule(): global mol, rhf, mf, myucc, mol_s2, mf_s2, eris mol.stdout.close() mol_s2.stdout.close() del mol, rhf, mf, myucc, mol_s2, mf_s2, eris class KnownValues(unittest.TestCase): # def test_with_df(self): # mf = scf.UHF(mol).density_fit(auxbasis='weigend').run() # mycc = cc.UCCSD(mf).run() # self.assertAlmostEqual(mycc.e_tot, -76.118403942938741, 7) def test_ERIS(self): ucc1 = cc.UCCSD(mf) nao,nmo = mf.mo_coeff[0].shape numpy.random.seed(1) mo_coeff = numpy.random.random((2,nao,nmo)) eris = cc.uccsd._make_eris_incore(ucc1, mo_coeff) self.assertAlmostEqual(lib.finger(eris.oooo), 4.9638849382825754, 11) self.assertAlmostEqual(lib.finger(eris.ovoo),-1.3623681896983584, 11) self.assertAlmostEqual(lib.finger(eris.ovov), 125.81550684442163, 11) self.assertAlmostEqual(lib.finger(eris.oovv), 55.123681017639598, 11) self.assertAlmostEqual(lib.finger(eris.ovvo), 133.48083527898248, 11) self.assertAlmostEqual(lib.finger(eris.ovvv), 59.421927525288183, 11) self.assertAlmostEqual(lib.finger(eris.vvvv), 43.556602622204778, 11) self.assertAlmostEqual(lib.finger(eris.OOOO),-407.05319440524585, 11) self.assertAlmostEqual(lib.finger(eris.OVOO), 56.284299937160796, 11) self.assertAlmostEqual(lib.finger(eris.OVOV),-287.72899895597448, 11) self.assertAlmostEqual(lib.finger(eris.OOVV),-85.484299959144522, 11) self.assertAlmostEqual(lib.finger(eris.OVVO),-228.18996145476956, 11) self.assertAlmostEqual(lib.finger(eris.OVVV),-10.715902258877399, 11) self.assertAlmostEqual(lib.finger(eris.VVVV),-89.908425473958303, 11) self.assertAlmostEqual(lib.finger(eris.ooOO),-336.65979260175226, 11) self.assertAlmostEqual(lib.finger(eris.ovOO),-16.405125847288176, 11) self.assertAlmostEqual(lib.finger(eris.ovOV), 231.59042209500075, 11) self.assertAlmostEqual(lib.finger(eris.ooVV), 20.338077193028354, 11) self.assertAlmostEqual(lib.finger(eris.ovVO), 206.48662856981386, 11) self.assertAlmostEqual(lib.finger(eris.ovVV),-71.273249852220516, 11) 
self.assertAlmostEqual(lib.finger(eris.vvVV), 172.47130671068496, 11) self.assertAlmostEqual(lib.finger(eris.OVoo),-19.927660309103977, 11) self.assertAlmostEqual(lib.finger(eris.OOvv),-27.761433381797019, 11) self.assertAlmostEqual(lib.finger(eris.OVvo),-140.09648311337384, 11) self.assertAlmostEqual(lib.finger(eris.OVvv), 40.700983950220547, 11) uccsd.MEMORYMIN, bak = 0, uccsd.MEMORYMIN ucc1.max_memory = 0 eris1 = ucc1.ao2mo(mo_coeff) uccsd.MEMORYMIN = bak self.assertAlmostEqual(abs(numpy.array(eris1.oooo)-eris.oooo).max(), 0, 11) self.assertAlmostEqual(abs(numpy.array(eris1.ovoo)-eris.ovoo).max(), 0, 11) self.assertAlmostEqual(abs(numpy.array(eris1.ovov)-eris.ovov).max(), 0, 11) self.assertAlmostEqual(abs(numpy.array(eris1.oovv)-eris.oovv).max(), 0, 11) self.assertAlmostEqual(abs(numpy.array(eris1.ovvo)-eris.ovvo).max(), 0, 11) self.assertAlmostEqual(abs(numpy.array(eris1.ovvv)-eris.ovvv).max(), 0, 11) self.assertAlmostEqual(abs(numpy.array(eris1.vvvv)-eris.vvvv).max(), 0, 11) self.assertAlmostEqual(abs(
numpy.array(eris1.OOOO)
numpy.array
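For reference, a minimal sketch of the numpy.array conversion and max-absolute-difference comparison pattern used in the assertions above; the values are illustrative.

import numpy as np

a = np.array([[1.0, 2.0], [3.0, 4.0]])
b = np.array([[1.0, 2.0], [3.0, 4.0 + 1e-12]])
# Pattern behind assertAlmostEqual(abs(np.array(x) - y).max(), 0, 11)
assert abs(a - b).max() < 1e-11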
import numpy as np from mpi4py import MPI import openmdao.api as om from rlt import SimpleLDTransfer from mphys import Builder transfer_dtype = "d" # hard-coded ndof for aerodynamic solver ndof_a = 3 class RltDispXfer(om.ExplicitComponent): """ Component to perform displacement transfer using RLT """ def initialize(self): # Set options self.options.declare("xfer_object") self.options.declare("ndof_s") self.options.declare("nn_s") self.options.declare("nn_a") # Flag used to prevent warning for fwd derivative d(u_a)/d(x_a0) self.options.declare("check_partials", default=False) def setup(self): # get the inputs RLT = self.options["xfer_object"] ndof_s = self.options["ndof_s"] nn_s = self.options["nn_s"] nn_a = self.options["nn_a"] self.check_partials = self.options["check_partials"] # get the isAero and isStruct flags from RLT python object # this is done to pseudo parallelize the modal solver, where # only the root proc does the computations. self.isAero = RLT.isAero self.isStruct = RLT.isStruct # set attributes self.transfer = RLT.transfer self.ndof_s = ndof_s self.nn_s = nn_s self.ndof_a = ndof_a self.nn_a = nn_a total_dof_aero = self.nn_a * self.ndof_a if self.isStruct: # RLT depends on TACS vector types. # ustruct : holds the structural states # struct_seed : used as a seed for structural displacements and forces self.tacs = RLT.structSolver.structure self.ustruct = self.tacs.createVec() self.struct_seed = self.tacs.createVec() else: self.ustruct = None # Inputs self.add_input( "x_struct0", distributed=True, shape_by_conn=True, desc="initial structural node coordinates", tags=["mphys_coordinates"], ) self.add_input( "x_aero0", distributed=True, shape_by_conn=True, desc="Initial aerodynamic surface node coordinates", tags=["mphys_coordinates"], ) self.add_input( "u_struct", distributed=True, shape_by_conn=True, desc="Structural node displacements", tags=["mphys_coupling"], ) # Outputs self.add_output( "u_aero", distributed=True, shape=total_dof_aero, val=np.zeros(total_dof_aero), desc="Aerodynamic surface displacements", tags=["mphys_coupling"], ) # TODO disable for now for the modal solver stuff. 
# Partials # self.declare_partials('u_aero', ['x_aero0','u_struct']) def compute(self, inputs, outputs): # Update transfer object with the current set of CFD points self.transfer.setAeroSurfaceNodes(inputs["x_aero0"]) if self.isStruct: # Set the structural displacements ustruct_array = self.ustruct.getArray() ustruct_array[:] = inputs["u_struct"] self.transfer.setDisplacements(self.ustruct) # Get out the aerodynamic displacements self.transfer.getDisplacements(outputs["u_aero"]) def compute_jacvec_product(self, inputs, d_inputs, d_outputs, mode): # TODO check if the partial computations are okay when isStruct is not True on all procs if mode == "fwd": if "u_aero" in d_outputs: if "u_struct" in d_inputs: if self.isStruct: # Set the forward seed on the structural displacements self.struct_seed.zeroEntries() seed_array = self.struct_seed.getArray() seed_array[:] = d_inputs["u_struct"] self.transfer.setDisplacementPerturbation(self.struct_seed) # Retrieve the seed from the aerodynamic displacements u_ad = np.zeros(self.nn_a * self.ndof_a, dtype=transfer_dtype) self.transfer.getAeroSurfacePerturbation(u_ad) d_outputs["u_aero"] += u_ad if "x_aero0" in d_inputs: if self.check_partials: pass else: raise ValueError("Forward mode requested but not implemented") if mode == "rev": if "u_aero" in d_outputs: if "u_struct" in d_inputs: if self.isStruct: # Set the reverse seed from the aero displacements and # retrieve the seed on the structural displacements. # Note: Could also use setDisplacementsSens. self.transfer.zeroReverseSeeds() self.struct_seed.zeroEntries() self.transfer.addAdjointDisplacements(d_outputs["u_aero"], self.struct_seed) # Pull the seed out of the TACS vector and accumulate seed_array = self.struct_seed.getArray() d_inputs["u_struct"] += seed_array[:] if "x_aero0" in d_inputs: # Set the reverse seed from the aero displacements self.transfer.zeroReverseSeeds() if self.isStruct: self.transfer.setDisplacementsSens(self.ustruct, self.struct_seed, d_outputs["u_aero"]) # Retrieve the seed on the aerodynamic surface nodes. x_a0d = np.zeros(self.nn_a * self.ndof_a, dtype=transfer_dtype) self.transfer.setAeroSurfaceNodesSens(x_a0d) d_inputs["x_aero0"] += x_a0d class RltLoadXfer(om.ExplicitComponent): """ Component to perform load transfers using MELD """ def initialize(self): # Set options self.options.declare("xfer_object") self.options.declare("ndof_s") self.options.declare("nn_s") self.options.declare("nn_a") # Flag used to prevent warning for fwd derivative d(u_a)/d(x_a0) self.options.declare("check_partials", default=True) # Set everything we need to None before setup self.transfer = None self.tacs = None self.ndof_s = None self.ndof_a = None self.nn_s = None self.nn_a = None def setup(self): # get the inputs RLT = self.options["xfer_object"] ndof_s = self.options["ndof_s"] nn_s = self.options["nn_s"] nn_a = self.options["nn_a"] self.check_partials = self.options["check_partials"] # get the isAero and isStruct flags from RLT python object # this is done to pseudo parallelize the modal solver, where # only the root proc does the computations. self.isAero = RLT.isAero self.isStruct = RLT.isStruct # set attributes self.transfer = RLT.transfer self.ndof_s = ndof_s self.ndof_a = ndof_a self.nn_s = nn_s self.nn_a = nn_a total_dof_struct = self.nn_s * self.ndof_s if self.isStruct: # RLT depends on TACS vector types. 
# fstruct : holds the forces on the structural nodes # struct_seed : used as a seed for structural displacements and forces self.tacs = RLT.structSolver.structure self.fstruct = self.tacs.createVec() self.struct_seed = self.tacs.createVec() else: self.fstruct = None # Inputs self.add_input( "x_struct0", distributed=True, shape_by_conn=True, desc="initial structural node coordinates", tags=["mphys_coordinates"], ) self.add_input( "x_aero0", distributed=True, shape_by_conn=True, desc="Initial aerodynamic surface node coordinates", tags=["mphys_coordinates"], ) self.add_input( "u_struct", distributed=True, shape_by_conn=True, desc="Structural node displacements", tags=["mphys_coupling"], ) self.add_input( "f_aero", distributed=True, shape_by_conn=True, desc="Aerodynamic force vector", tags=["mphys_coupling"] ) # Outputs self.add_output( "f_struct", distributed=True, shape=total_dof_struct, desc="structural force vector", tags=["mphys_coupling"], ) # TODO disable for now for the modal solver stuff. # Partials # self.declare_partials('f_struct', ['x_aero0','f_aero']) def compute(self, inputs, outputs): # Update transfer object with the current set of CFD points self.transfer.setAeroSurfaceNodes(inputs["x_aero0"]) if self.isStruct: # Set the aerodynamic forces and extract structural forces self.fstruct.zeroEntries() self.transfer.addAeroForces(inputs["f_aero"], self.fstruct) if self.isStruct: # Get numpy array version of structural forces f_s = self.fstruct.getArray() outputs["f_struct"] = -f_s[:] # This negative sign was necessary, not exactly sure why def compute_jacvec_product(self, inputs, d_inputs, d_outputs, mode): if mode == "fwd": if "f_struct" in d_outputs: if "f_aero" in d_inputs: # Set the forward seed on the aerodynamic forces and pull it # out on struct_seed self.struct_seed.zeroEntries() self.transfer.addAeroForces(d_inputs["f_aero"], self.struct_seed) f_sd = self.struct_seed.getArray() d_outputs["f_struct"] -= f_sd[:] if "x_aero0" in d_inputs: if self.check_partials: pass else: raise ValueError("Forward mode requested but not implemented") if mode == "rev": if "f_struct" in d_outputs: # Set the reverse seed on the structural forces into the # struct_seed vector self.transfer.zeroReverseSeeds() self.struct_seed.zeroEntries() seed_array = self.struct_seed.getArray() seed_array[:] = d_outputs["f_struct"] if "f_aero" in d_inputs: # Extract the reverse seed on the aerodynamic forces f_ab = np.zeros(self.nn_a * self.ndof_a, dtype=transfer_dtype) self.transfer.addAeroForcesSens(np.ravel(inputs["f_aero"]), np.ravel(f_ab), self.struct_seed) d_inputs["f_aero"] = -f_ab if "x_aero0" in d_inputs: # Set up numpy arrays. We need the tmp array as a # placeholder for unneeded data from addAeroForcesSens x_a0d =
np.zeros(self.nn_a * self.ndof_a, dtype=transfer_dtype)
numpy.zeros
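For reference, a minimal sketch of allocating a flat seed/output buffer with numpy.zeros, as done in the derivative routines above; the node and DOF counts are illustrative.

import numpy as np

transfer_dtype = "d"              # double precision, matching the module constant
nn_a, ndof_a = 4, 3               # illustrative aerodynamic node / DOF counts
x_a0d = np.zeros(nn_a * ndof_a, dtype=transfer_dtype)
x_a0d[:] = 1.0                    # buffers like this are filled and then accumulated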
import numpy as np import os import csv import physics as phys import matplotlib matplotlib.use('TkAgg') import matplotlib.pyplot as plt from matplotlib.pyplot import figure import matplotlib.pylab as pylab import DataAnalysis as Data import utils import GenerationRate.BandToBandTunneling as BTB from scipy.optimize import curve_fit params = {'legend.fontsize': 'x-large', 'figure.figsize': (20, 9.3), 'axes.labelsize': 'x-large', 'axes.titlesize':'x-large', 'xtick.labelsize':'x-large', 'ytick.labelsize':'x-large'} pylab.rcParams.update(params) plt.rcParams.update({'font.size': 9}) # 物理常數 kB = 1.38e-23 # [J/k] me = 9.11e-31 # [kg] e = 1.6e-19 # [C] eps_InP = 12.5 * 8.85e-14 # [F/cm] eps_InGaAs = 13.9 * 8.85e-14 # [F/cm] In 0.53 Ga 0.47 As eps_InGaAsP = 13.436 * 8.85e-14 # [F/cm] Approximated by In 0.53 Ga 0.47 As 0.65 P 0.35 h_bar = 1.054e-34 # [J-s] Eti = {'InP': -0.025, 'InGaAs': 0.16} # 繪圖參數 count = 6 ColorSet10 = ['orangered', 'yellowgreen', 'goldenrod', 'darkviolet', 'darkorange', 'brown', 'b', 'r', 'fuchsia', 'g'] LineSet2 = ['-', '-.'] ColorModel = {'SRH': 'r', 'TAT': 'b'} class CurrentFitting(object): def __init__(self, RawIV, voltage_settings, temperature, mode, electric_field, doping, Lifetime, effective_mass, structure, others, trap_finding): # 讀取IV,這裡必須給出 RawIV,不論TCAD還是實驗。 self.RawIV = RawIV # 溫度設定 self.T_analysis, self.T_analysis_IT, self.T_min, self.T_max, self.T_analysis_v_max = temperature self.v_min, self.v_max, v_max_range, self.Vpt, self.V1, self.V2 = voltage_settings self.method, self.mechanism, self.material = mode location_electric_field, label_electric_field = electric_field self.Lifetime_p, self.Lifetime_n, self.Lifetime_ref = Lifetime location_doping, label_doping = doping self.epitaxy, self.interface_um, self.A = structure # interface_um = [-3.62, -3.5, -0.5] self.ND, self.Ncharge, self.d_mul, self.d_ch, self.ND_abs, self.d_InGaAs = self.epitaxy self.effective_mass_InP = effective_mass['InP'] self.effective_mass_InGaAs = effective_mass['InGaAs'] self.RawLocation, self.I_InP_max, self.TCAD_IV, self.TCAD_lifetime, self.TCAD_check = others self.Eti, self.Eti_error = trap_finding # 設定電壓範圍 v_step = 0.1 iterations = (self.v_max['InGaAs'] - self.v_min['InP']) / v_step self.voltage = np.asarray([round(-self.v_min['InP'] - v_step * i, 1) for i in range(int(iterations))]) self.V_InP = np.asarray([element for element in self.voltage if abs(self.v_min['InP']) <= abs(element) <= self.v_max['InP']]) self.V_InGaAs = np.asarray([element for element in self.voltage if abs(self.v_min['InGaAs']) <= abs(element) <= self.v_max['InGaAs']]) if v_max_range == 'All': for T in self.T_analysis: self.T_analysis_v_max[T] = self.T_analysis_v_max[T] - 0.3 elif v_max_range == 'Partial': self.T_analysis_v_max = {T: self.v_max['InGaAs'] for T in self.T_analysis} # else: raise BaseException("Wrong InGaAs analysis range: %s" % v_max_range) # 製作 guess & bound def tolerance(material, trap_level, error): if material == 'InP': lower_bound = max(trap_level - 0.5 * error * phys.Eg_InP(300), - 0.5 * error * phys.Eg_InP(300)) upper_bound = min(trap_level + 0.5 * error * phys.Eg_InP(300), 0.5 * error * phys.Eg_InP(300)) return lower_bound, upper_bound elif material == 'InGaAs': lower_bound = max(trap_level - 0.5 * error * phys.Eg_InGaAs(300), - 0.5 * phys.Eg_InGaAs(300)) upper_bound = min(trap_level + 0.5 * error * phys.Eg_InGaAs(300), 0.5 * phys.Eg_InGaAs(300)) return lower_bound, upper_bound else: raise BaseException("Wrong material (InP/InGaAs): %s" % material) Bounds = {'InP': tolerance('InP', 
self.Eti['InP'], self.Eti_error['InP']), 'InGaAs': tolerance('InGaAs', self.Eti['InGaAs'], self.Eti_error['InGaAs'])} SRH_InP_guess_IV = {T: [self.Eti['InP'], 1, 1] for T in self.T_analysis} SRH_InP_bound_IV = {T: ([Bounds['InP'][0], 1, 1], [Bounds['InP'][1], 10, 10]) for T in self.T_analysis} SRH_InGaAs_guess_IV = {T: [self.Eti['InGaAs'], 1, 1] for T in self.T_analysis} SRH_InGaAs_bound_IV = {T: ([Bounds['InGaAs'][0], 0.1, 0.1], [Bounds['InGaAs'][1], 10, 10]) for T in self.T_analysis} TAT_InP_guess_IV = {T: [self.Eti['InP'], 1, 1] for T in self.T_analysis} TAT_InP_bound_IV = {T: ([Bounds['InP'][0], 1, 1], [Bounds['InP'][1], 1.5, 1.5]) for T in self.T_analysis} TAT_InGaAs_guess_IV = {T: [self.Eti['InGaAs'], 1, 1] for T in self.T_analysis} TAT_InGaAs_bound_IV = {T: ([Bounds['InGaAs'][0], 0.5, 0.85], [Bounds['InGaAs'][1], 1.5, 1.5]) for T in self.T_analysis} # 製作 guess & bounds for IT fitting (Eti, tp, tn, alpha_p, alpha_n) SRH_InP_guess_IT = {V: [self.Eti['InP'], 1, 1, 10, 1] for V in self.V_InP} SRH_InP_bound_IT = {V: ([Bounds['InP'][0], 1, 1, 0.1, 0.1], [Bounds['InP'][1], 3, 3, 10, 10]) for V in self.V_InP} SRH_InGaAs_guess_IT = {V: [self.Eti['InGaAs'], 1, 1, 5, 5] for V in self.V_InGaAs} SRH_InGaAs_bound_IT = {V: ([Bounds['InGaAs'][0], 1e-1, 1, 0, 0], [Bounds['InGaAs'][1], 1, 10, 8, 8]) for V in self.V_InGaAs} TAT_InP_guess_IT = {V: [Eti['InP'], 1, 1, 4, 4] for V in self.V_InP} TAT_InP_bound_IT = {V: ([- phys.Eg_InP(300) / 2, 0.8, 0.8, 1, 1], [phys.Eg_InP(300) / 2, 1.5, 1.5, 8, 8]) for V in self.V_InP} TAT_InGaAs_guess_IT = {V: [Eti['InGaAs'], 1, 1, 5, 5] for V in self.V_InGaAs} TAT_InGaAs_bound_IT = {V: ([-phys.Eg_InGaAs(300) / 2, 1e-1, 1, 0, 0], [phys.Eg_InGaAs(300) / 2, 1, 10, 8, 8]) for V in self.V_InGaAs} self.guess = {'InP': {'SRH': {'IV': SRH_InP_guess_IV, 'IT': SRH_InP_guess_IT}, 'TAT': {'IV': TAT_InP_guess_IV, 'IT': TAT_InP_guess_IT}}, 'InGaAs': {'SRH': {'IV': SRH_InGaAs_guess_IV, 'IT': SRH_InGaAs_guess_IT}, 'TAT': {'IV': TAT_InGaAs_guess_IV, 'IT': TAT_InGaAs_guess_IT}}} self.bound = {'InP': {'SRH': {'IV': SRH_InP_bound_IV, 'IT': SRH_InP_bound_IT}, 'TAT': {'IV': TAT_InP_bound_IV, 'IT': TAT_InP_bound_IT}}, 'InGaAs': {'SRH': {'IV': SRH_InGaAs_bound_IV, 'IT': SRH_InGaAs_bound_IT}, 'TAT': {'IV': TAT_InGaAs_bound_IV, 'IT': TAT_InGaAs_bound_IT}}} # 讀取 InP & InGaAs 最大電場與偏壓的分佈 self.Ef_InP = Data.CSV(location_electric_field['InP'], label_electric_field['InP'], label_electric_field['InP']) self.Ef_InGaAs = Data.CSV(location_electric_field['InGaAs'], label_electric_field['InGaAs'], label_electric_field['InGaAs']) self.DopingProfile = Data.DopingProfile(location_doping, label_doping, label_doping) # self.material_voltage = {'InP': self.V_InP, 'InGaAs': self.V_InGaAs} self.weight = {'InP': 1 / abs(self.V_InP), 'InGaAs': 1 / abs(self.V_InGaAs)} self.result = dict() for item in self.method: if item == 'IV': self.result['IV'] = {item: {model: {T: self.FitIV(T, item, model, self.guess[item][model]['IV'][T], self.bound[item][model]['IV'][T], fitsigma=1.5) for T in self.T_analysis} for model in self.mechanism} for item in self.material} self.Lifetime = {item: {model: {T: self.result['IV'][item][model][T][2] for T in self.T_analysis} for model in self.mechanism} for item in self.material} self.Lifetime['InGaAsP'] = {model: {T: [self.Lifetime_p['InGaAsP'], self.Lifetime_n['InGaAsP']] for T in self.T_analysis} for model in self.mechanism} if item == 'IT': self.result['IT'] = {item: {model: {V: self.FitIT(V, item, model, self.guess[item][model]['IT'][V], self.bound[item][model]['IT'][V], fitsigma=1) 
for V in self.material_voltage[item]} for model in self.mechanism} for item in self.material} ''' self.BTB = {item: {T: self.PlotIV(T, item, 'BTB', ['All', self.effective_mass_InP]) for T in self.T_analysis} for item in self.material} ''' def read_data(self, temperature): return self.RawIV[temperature] def read_result(self): return self.result def room_temperature(self): min = 1e4 RT = None for T in self.T_analysis: if abs(300 - T) < min: min = abs(300 - T) RT = T return RT def dm_InP(self, E_Vcm, ND, ND_c, d_mul, d_charge): d = E_Vcm * eps_InP / (e * ND) # [cm] if type(d) is np.ndarray: dm_list = [] for i, x in enumerate(d): if x <= d_mul: dm_list.append(x) else: E2 = E_Vcm[i] - (e * ND * d_mul) / eps_InP d2 = E2 * eps_InP / (e * ND_c) if d2 <= d_charge: dm_list.append(d_mul + d2) else: dm_list.append(d_mul + d_charge) return np.asarray(dm_list) # [cm] else: if d <= d_mul: return d # [cm] else: E2 = E_Vcm - (e * ND * d_mul) / eps_InP d2 = E2 * eps_InP / (e * ND_c) if d2 <= d_charge: return d_mul + d2 # [cm] else: return d_mul + d_charge # [cm] def dm_InGaAs(self, E, ND_abs, d_abs): d = E * eps_InGaAs / (e * ND_abs) if type(d) is np.ndarray: dm_list = [] for x in d: if x <= d_abs: dm_list.append(x) else: dm_list.append(d_abs) return np.asarray(dm_list) else: if d <= d_abs: return d else: return d_abs def Em_InP(self, V): return utils.find(self.Ef_InP.X, self.Ef_InP.Y, -abs(V), 'linear') def Em_InGaAs(self, V): return utils.find(self.Ef_InGaAs.X, self.Ef_InGaAs.Y, -abs(V), 'linear') def FitIV(self, T, material, type, guess, bound, fitsigma): """ :param T: :param material: :return: V, I, popt """ if material == 'InP': V_InP = np.asarray([V for V in self.RawIV[T].X if -self.v_min['InP'] >= V > -self.v_max['InP']]) F_InP = np.asarray([self.Em_InP(V) for V in V_InP]) I_InP = np.asarray([abs(I) for i, I in enumerate(self.RawIV[T].Y) if self.RawIV[T].X[i] in V_InP]) def lifetime(tp, tn): alpha = 1.5 tau_p0 = self.Lifetime_p['InP'] * 1e-9 # [s] tau_n0 = self.Lifetime_n['InP'] * 1e-9 # [s] tau_p = tp * tau_p0 * (T / self.room_temperature()) ** alpha tau_n = tn * tau_n0 * (T / self.room_temperature()) ** alpha return tau_p, tau_n if type == 'TAT': def TAT_InP_IV(X, Eti, tp, tn): Emax_Vcm, T = X alpha = 1.5 # tp = 1 # tn = 0.1 mt = self.effective_mass_InP prefactor = 1 me = 9.11e-31 Nc300 = 5.716e17 # [cm-3] Nv300 = 1.143e19 # [cm-3] tau_p0 = self.Lifetime_p['InP'] * 1e-9 # [s] tau_n0 = self.Lifetime_n['InP'] * 1e-9 # [s] tau_p = tp * tau_p0 * (T / self.room_temperature()) ** alpha tau_n = tn * tau_n0 * (T / self.room_temperature()) ** alpha ni = np.sqrt(Nc300 * Nv300) * (T / self.room_temperature()) ** 1.5 * np.exp(-e * phys.Eg_InP(T) / (2 * kB * T)) G_SRH = ni / (2 * np.sqrt(tau_p * tau_n) * np.cosh(e * Eti / (kB * T) + 0.5 * np.log(tau_p / tau_n))) dM = self.dm_InP(Emax_Vcm, self.ND, self.Ncharge, self.d_mul, self.d_ch) # 0.42e-4 # [cm] F_Gamma = np.sqrt(24 * (mt * me) * (kB * T) ** 3) / (e * h_bar) / 100 # [V/cm] E1 = Emax_Vcm log10_Current = [] for i, x in enumerate(dM): if x <= self.d_mul: E2 = E1[i] - (e * self.ND * x) / eps_InP d_Gamma_1 = (np.sqrt(3 * np.pi) * eps_InP * F_Gamma) / (e * self.ND) * \ (np.exp((E1[i] / F_Gamma) ** 2) - np.exp(E2 / F_Gamma ** 2)) # [cm] log10_Current.append( np.log10(self.A * e) + np.log10(prefactor * G_SRH) + np.log10(x + d_Gamma_1)) else: E2 = E1[i] - (e * self.ND * self.d_mul) / eps_InP E3 = E2 - (e * self.Ncharge * (x - self.d_mul)) / eps_InP d_Gamma_1 = (np.sqrt(3 * np.pi) * eps_InP * F_Gamma) / (e * self.ND) * \ (np.exp((E1[i] / F_Gamma) ** 2) - np.exp(E2 / 
F_Gamma ** 2)) # [cm] d_Gamma_2 = (np.sqrt(3 * np.pi) * eps_InP * F_Gamma) / (e * self.Ncharge) * \ (np.exp((E2 / F_Gamma) ** 2) - np.exp(E3 / F_Gamma ** 2)) # [cm] log10_Current.append( np.log10(self.A * e) + np.log10(prefactor * G_SRH) + np.log10( x + d_Gamma_1 + d_Gamma_2)) return np.asarray(log10_Current) TAT_InP_popt, TAT_InP_pcov = curve_fit(TAT_InP_IV, (F_InP, T), np.log10(I_InP), p0=guess, bounds=bound, sigma=abs(np.log10(I_InP)) ** fitsigma) print('[TAT] InP (%.0fK) Eti: %.3f, tp: %.3e, tn: %.3e' % (T, TAT_InP_popt[0], TAT_InP_popt[1], TAT_InP_popt[2])) Eti = TAT_InP_popt[0] mt = self.effective_mass_InP tau_p, tau_n = lifetime(TAT_InP_popt[1], TAT_InP_popt[2]) return V_InP, 10 ** TAT_InP_IV((F_InP, T), *TAT_InP_popt), [tau_p, tau_n], Eti, mt elif type == 'SRH': def SRH_InP(X, Eti, tp, tn): """ 使用 -U ~ ni * cosh(-(Eti+ln(tp/tn))/kT) 之近似公式,而不需要使用 |Eti| >> kT 之公式。 內建正確的 lifetime。 :param X: (T, Emax_Vcm) :param Eti: eV :return: np.log10(I) """ Emax_Vcm, T = X alpha = 1.5 # 1 # tp = 1 # 0.1 # tn = 1 # 0.226 prefactor = 1 me = 9.11e-31 Nc300 = 5.716e17 # [cm-3] Nv300 = 1.143e19 # [cm-3] tau_p0 = self.Lifetime_p['InP'] * 1e-9 # [s] tau_n0 = self.Lifetime_n['InP'] * 1e-9 # [s] tau_p = tp * tau_p0 * (T / self.room_temperature()) ** alpha tau_n = tn * tau_n0 * (T / self.room_temperature()) ** alpha ni = np.sqrt(Nc300 * Nv300) * (T / self.room_temperature()) ** 1.5 * np.exp(- e * phys.Eg_InP(T) / (2 * kB * T)) G_SRH = ni / ( 2 * np.sqrt(tau_p * tau_n) * np.cosh(e * Eti / (kB * T) + 0.5 * np.log(tau_p / tau_n))) dM = self.dm_InP(Emax_Vcm, self.ND, self.Ncharge, self.d_mul, self.d_ch) # [cm] return np.log10(self.A * e) + np.log10(prefactor * G_SRH) + np.log10(dM) popt_SRH_InP, pcov_SRH_InP = curve_fit(SRH_InP, (F_InP, T), np.log10(I_InP), p0=guess, bounds=bound, sigma=abs(np.log10(I_InP)) ** fitsigma) print('[SRH] InP (%.0fK) Eti: %.3f, tp: %.3e, tn: %.3e' % (T, popt_SRH_InP[0], popt_SRH_InP[1], popt_SRH_InP[2])) Eti = popt_SRH_InP[0] mt = self.effective_mass_InP tau_p, tau_n = lifetime(popt_SRH_InP[1], popt_SRH_InP[2]) return V_InP, 10 ** SRH_InP((F_InP, T), *popt_SRH_InP), [tau_p, tau_n], Eti, mt else: raise BaseException("Wrong type: %s" % type) elif material == 'InGaAs': V_InGaAs = np.asarray([V for V in self.RawIV[T].X if -self.T_analysis_v_max[T] <= V <= -self.v_min['InGaAs']]) F_InGaAs = np.asarray([self.Em_InGaAs(V) for V in V_InGaAs]) I_InGaAs = np.asarray([abs(I) - self.I_InP_max for i, I in enumerate(self.RawIV[T].Y) if self.RawIV[T].X[i] in V_InGaAs]) # check negative current for current in I_InGaAs: if current < 0: raise BaseException("please decrease the I(InP) maximum: %s" % self.I_InP_max) def lifetime(tp, tn): alpha = 1.5 tau_p0 = self.Lifetime_p['InGaAs'] * 1e-9 # [s] tau_n0 = self.Lifetime_n['InGaAs'] * 1e-9 # [s] tau_p = tp * tau_p0 * (T / self.room_temperature()) ** alpha tau_n = tn * tau_n0 * (T / self.room_temperature()) ** alpha return tau_p, tau_n if type == 'TAT': def TAT_InGaAs_IV(X, Eti, tp, tn): Emax_Vcm, T = X prefactor = 1 # tp = 1 # tn = 1 mt = self.effective_mass_InGaAs alpha = 1.5 me = 9.11e-31 Nc300 = 2.53956e17 # [cm-3] Nv300 = 7.51e18 # [cm-3] tau_p0 = self.Lifetime_p['InGaAs'] * 1e-9 # [s] tau_n0 = self.Lifetime_n['InGaAs'] * 1e-9 # [s] tau_p = tp * tau_p0 * (T / self.room_temperature()) ** alpha tau_n = tn * tau_n0 * (T / self.room_temperature()) ** alpha ni = np.sqrt(Nc300 * Nv300) * (T / self.room_temperature()) ** 1.5 * np.exp(-e * phys.Eg_InGaAs(T) / (2 * kB * T)) G_SRH = ni / (2 * np.sqrt(tau_p * tau_n) * np.cosh(e * Eti / (kB * T) + 0.5 * 
np.log(tau_p / tau_n))) dM = self.dm_InGaAs(Emax_Vcm, self.ND_abs, self.d_InGaAs) # [cm] F_Gamma = np.sqrt(24 * (mt * me) * (kB * T) ** 3) / (e * h_bar) / 100 # [V/cm] E1 = Emax_Vcm E2 = 0 d_Gamma = (np.sqrt(3 * np.pi) * eps_InGaAs * F_Gamma) / (e * self.ND_abs) * \ (np.exp((E1 / F_Gamma) ** 2) - np.exp((E2 / F_Gamma) ** 2)) # [cm] return np.log10(self.A * e) + np.log10(prefactor * G_SRH) + np.log10(dM + d_Gamma) if len(V_InGaAs) == 0: return V_InGaAs, [], [0, 0], None, None else: TAT_InGaAs_popt, TAT_InGaAs_pcov = curve_fit(TAT_InGaAs_IV, (F_InGaAs, T), np.log10(I_InGaAs), p0=guess, bounds=bound, sigma=abs(np.log10(I_InGaAs)) ** fitsigma) print('[TAT] InGaAs (%.0fK) Eti: %.3f, tp: %.3e, tn: %.3e' % (T, TAT_InGaAs_popt[0], TAT_InGaAs_popt[1], TAT_InGaAs_popt[2])) Eti = TAT_InGaAs_popt[0] mt = self.effective_mass_InGaAs tau_p, tau_n = lifetime(TAT_InGaAs_popt[1], TAT_InGaAs_popt[2]) return V_InGaAs, 10 ** TAT_InGaAs_IV((F_InGaAs, T), *TAT_InGaAs_popt) + \ np.ones(len(V_InGaAs)) * self.I_InP_max, [tau_p, tau_n], Eti, mt elif type == 'SRH': def SRH_InGaAs_IV(X, Eti, tp, tn): Emax_Vcm, T = X prefactor = 1 # tp = 1 # tn = 1 alpha = 1.5 me = 9.11e-31 Nc300 = 2.53956e17 # [cm-3] Nv300 = 7.51e18 # [cm-3] tau_p0 = self.Lifetime_p['InGaAs'] * 1e-9 # [s] tau_n0 = self.Lifetime_n['InGaAs'] * 1e-9 # [s] tau_p = tp * tau_p0 * (T / self.room_temperature()) ** alpha tau_n = tn * tau_n0 * (T / self.room_temperature()) ** alpha ND_abs = 7.5e14 # [cm-3] d_InGaAs = 3e-4 # [cm] ni = np.sqrt(Nc300 * Nv300) * (T / self.room_temperature()) ** 1.5 * np.exp(-e * phys.Eg_InGaAs(T) / (2 * kB * T)) G_SRH = ni / (2 * np.sqrt(tau_p * tau_n) * np.cosh(e * Eti / (kB * T) + 0.5 * np.log(tau_p / tau_n))) dM = self.dm_InGaAs(Emax_Vcm, ND_abs, d_InGaAs) # [cm] return np.log10(self.A * e) + np.log10(prefactor * G_SRH) + np.log10(dM) if len(V_InGaAs) == 0: return V_InGaAs, [], [0, 0], None else: SRH_InGaAs_popt, SRH_InGaAs_pcov = curve_fit(SRH_InGaAs_IV, (F_InGaAs, T), np.log10(I_InGaAs), p0=guess, bounds=bound, sigma=abs(np.log10(I_InGaAs)) ** fitsigma) print('[SRH] InGaAs (%.0fK) Eti: %.3f, tp: %.3e, tn: %.3e' % (T, SRH_InGaAs_popt[0], SRH_InGaAs_popt[1], SRH_InGaAs_popt[2])) Eti = SRH_InGaAs_popt[0] tau_p, tau_n = lifetime(SRH_InGaAs_popt[1], SRH_InGaAs_popt[2]) return V_InGaAs, 10 ** SRH_InGaAs_IV((F_InGaAs, T), *SRH_InGaAs_popt) + \ np.ones(len(V_InGaAs)) * self.I_InP_max, [tau_p, tau_n], Eti else: raise BaseException("Wrong type: %s" % type) else: raise BaseException("Wrong material: %s" % material) def FitIT(self, V, material, type, guess, bound, fitsigma): if material == 'InP': I_InP = np.asarray([utils.find(self.RawIV[T].X, abs(self.RawIV[T].Y), V, 'log') for T in self.T_analysis_IT]) if type == 'TAT': def TAT_InP_IT(X, Eti, tp, tn, alpha_p, alpha_n): T, Emax_Vcm = X mt = self.effective_mass_InP prefactor = 1 me = 9.11e-31 Nc300 = 5.716e17 # [cm-3] Nv300 = 1.143e19 # [cm-3] tau_p0 = self.Lifetime_p['InP'] * 1e-9 # [s] tau_n0 = self.Lifetime_n['InP'] * 1e-9 # [s] tau_p = tp * tau_p0 * (T / self.room_temperature()) ** alpha_p tau_n = tn * tau_n0 * (T / self.room_temperature()) ** alpha_n ni = np.sqrt(Nc300 * Nv300) * (T / self.room_temperature()) ** 1.5 * np.exp(-e * phys.Eg_InP(T) / (2 * kB * T)) G_SRH = ni / (2 * np.sqrt(tau_p * tau_n) * np.cosh(e * Eti / (kB * T) + 0.5 * np.log(tau_p / tau_n))) dM = self.dm_InP(Emax_Vcm, self.ND, self.Ncharge, self.d_mul, self.d_ch) # 0.42e-4 # [cm] F_Gamma = np.sqrt(24 * (mt * me) * (kB * T) ** 3) / (e * h_bar) / 100 # [V/cm] E1 = Emax_Vcm if dM <= self.d_mul: E2 = E1 - (e * self.ND 
* dM) / eps_InP d_Gamma_1 = (np.sqrt(3 * np.pi) * eps_InP * F_Gamma) / (e * self.ND) * \ (np.exp((E1 / F_Gamma) ** 2) - np.exp(E2 / F_Gamma ** 2)) # [cm] return np.log10(self.A * e) + np.log10(prefactor * G_SRH) + np.log10(dM + d_Gamma_1) else: E2 = E1 - (e * self.ND * self.d_mul) / eps_InP E3 = E2 - (e * self.Ncharge * (dM - self.d_mul)) / eps_InP d_Gamma_1 = (np.sqrt(3 * np.pi) * eps_InP * F_Gamma) / (e * self.ND) * \ (np.exp((E1 / F_Gamma) ** 2) - np.exp(E2 / F_Gamma ** 2)) # [cm] d_Gamma_2 = (np.sqrt(3 * np.pi) * eps_InP * F_Gamma) / (e * self.Ncharge) * \ (np.exp((E2 / F_Gamma) ** 2) - np.exp(E3 / F_Gamma ** 2)) # [cm] return np.log10(self.A * e) + np.log10(prefactor * G_SRH) + np.log10(dM + d_Gamma_1 + d_Gamma_2) popt, pcov = curve_fit(TAT_InP_IT, (self.T_analysis_IT, self.Em_InP(V)), np.log10(I_InP), p0=guess, bounds=bound, sigma=abs(np.log10(I_InP)) ** fitsigma) Eti, tp, tn, alpha_p, alpha_n = popt print('[TAT] InP (%.1f) Eti: %.3f, tp: %.3e, tn: %.3e, alpha(p): %.3e, alpha(n): %.3e' % (V, Eti, tp, tn, alpha_p, alpha_n)) return self.T_analysis_IT, 10 ** TAT_InP_IT((self.T_analysis_IT, self.Em_InP(V)), *popt), \ Eti, [tp, tn, alpha_p, alpha_n] elif type == 'SRH': def SRH_InP_IT(X, Eti, tp, tn, alpha_n, alpha_p): T, Emax_Vcm = X # tp = 1 # tn = 1 prefactor = 1 Nc300 = 5.716e17 # [cm-3] Nv300 = 1.143e19 # [cm-3] tau_p0 = self.Lifetime_p['InP'] * 1e-9 # [s] tau_n0 = self.Lifetime_n['InP'] * 1e-9 # [s] tau_p = tp * tau_p0 * (T / self.room_temperature()) ** alpha_p tau_n = tn * tau_n0 * (T / self.room_temperature()) ** alpha_n ni = np.sqrt(Nc300 * Nv300) * (T / self.room_temperature()) ** 1.5 * np.exp(-e * phys.Eg_InP(T) / (2 * kB * T)) G_SRH = ni / (2 * np.sqrt(tau_p * tau_n) * np.cosh(e * Eti / (kB * T) + 0.5 * np.log(tau_p / tau_n))) dM = self.dm_InP(Emax_Vcm, self.ND, self.Ncharge, self.d_mul, self.d_ch) # 0.42e-4 # [cm] return np.log10(self.A * e) + np.log10(prefactor * G_SRH) + np.log10(dM) popt, pcov = curve_fit(SRH_InP_IT, (self.T_analysis_IT, self.Em_InP(V)), np.log10(I_InP), p0=guess, bounds=bound, sigma=abs(np.log10(I_InP)) ** fitsigma) Eti, tp, tn, alpha_p, alpha_n = popt print('[SRH] InP (%.1f) Eti: %.3f, tp: %.3e, tn: %.3e, alpha(p): %.3e, alpha(n): %.3e' % (V, Eti, tp, tn, alpha_p, alpha_n)) return self.T_analysis_IT, 10 ** SRH_InP_IT((self.T_analysis_IT, self.Em_InP(V)), *popt), \ Eti, [tp, tn, alpha_p, alpha_n] else: raise BaseException("Wrong type: %s" % type) elif material == 'InGaAs': I_InGaAs = np.asarray([utils.find(self.RawIV[T].X, abs(self.RawIV[T].Y) - self.I_InP_max, V, 'log') for T in self.T_analysis_IT]) # check I(InGaAs) for current in I_InGaAs: if current < 0: raise BaseException("please decrease the I(InP) maximum: %s" % self.I_InP_max) # 檢查電流是否隨著溫度遞增 if abs(V) > abs(self.T_analysis_v_max[self.T_analysis_IT[0]]): raise BaseException("Voltage is too large: %s > Vmax(InGaAs,240K) = %s" % (abs(V), abs(self.T_analysis_v_max[self.T_analysis_IT[0]]))) if type == 'TAT': def TAT_InGaAs_IT(X, Eti, tp, tn, alpha_p, alpha_n): T, Emax_Vcm = X prefactor = 1 mt = self.effective_mass_InGaAs me = 9.11e-31 Nc300 = 2.53956e17 # [cm-3] Nv300 = 7.51e18 # [cm-3] tau_p0 = self.Lifetime_p['InGaAs'] * 1e-9 # [s] tau_n0 = self.Lifetime_n['InGaAs'] * 1e-9 # [s] tau_p = tp * tau_p0 * (T / self.room_temperature()) ** alpha_p tau_n = tn * tau_n0 * (T / self.room_temperature()) ** alpha_n ND_abs = 3.53e14 # [cm-3] d_InGaAs = 3e-4 # [cm] ni = np.sqrt(Nc300 * Nv300) * (T / self.room_temperature()) ** 1.5 * np.exp(- e * phys.Eg_InGaAs(T) / (2 * kB * T)) G_SRH = ni / (2 * np.sqrt(tau_p * 
tau_n) * np.cosh(e * Eti / (kB * T) + 0.5 * np.log(tau_p / tau_n))) dM = self.dm_InGaAs(Emax_Vcm, ND_abs, d_InGaAs) # [cm] F_Gamma = np.sqrt(24 * (mt * me) * (kB * T) ** 3) / (e * h_bar) / 100 # [V/cm] E1 = Emax_Vcm E2 = 0 d_Gamma = (np.sqrt(3 * np.pi) * eps_InGaAs * F_Gamma) / (e * ND_abs) * \ (np.exp((E1 / F_Gamma) ** 2) - np.exp((E2 / F_Gamma) ** 2)) # [cm] return np.log10(self.A * e) + np.log10(prefactor * G_SRH) + np.log10(dM + d_Gamma) popt, pcov = curve_fit(TAT_InGaAs_IT, (self.T_analysis_IT, self.Em_InGaAs(V)), np.log10(I_InGaAs), p0=guess, bounds=bound, sigma=abs(np.log10(I_InGaAs)) ** fitsigma) Eti, tp, tn, alpha_p, alpha_n = popt print('[TAT] InGaAs (%.1f) Eti: %.3f, tp: %.3e, tn: %.3e, alpha(p): %.3e, alpha(n): %.3e' % (V, Eti, tp, tn, alpha_p, alpha_n)) return self.T_analysis_IT, 10 ** TAT_InGaAs_IT((self.T_analysis_IT, self.Em_InGaAs(V)), *popt) + \ np.ones(len(self.T_analysis_IT)) * self.I_InP_max, Eti, [tp, tn, alpha_p, alpha_n] elif type == 'SRH': def SRH_InGaAs_IT(X, Eti, tp, tn, alpha_p, alpha_n): T, Emax_Vcm = X prefactor = 1 Nc300 = 2.53956e17 # [cm-3] Nv300 = 7.51e18 # [cm-3] tau_p0 = self.Lifetime_p['InGaAs'] * 1e-9 # [s] tau_n0 = self.Lifetime_n['InGaAs'] * 1e-9 # [s] tau_p = tp * tau_p0 * (T / self.room_temperature()) ** alpha_p tau_n = tn * tau_n0 * (T / self.room_temperature()) ** alpha_n ND_abs = 7.5e14 # [cm-3] d_InGaAs = 3e-4 # [cm] ni = np.sqrt(Nc300 * Nv300) * (T / self.room_temperature()) ** 1.5 * np.exp(- e * phys.Eg_InGaAs(T) / (2 * kB * T)) G_SRH = ni / (2 * np.sqrt(tau_p * tau_n) * np.cosh(e * Eti / (kB * T) + 0.5 * np.log(tau_p / tau_n))) dM = self.dm_InGaAs(Emax_Vcm, ND_abs, d_InGaAs) # [cm] return np.log10(self.A * e) + np.log10(prefactor * G_SRH) + np.log10(dM) popt, pcov = curve_fit(SRH_InGaAs_IT, (self.T_analysis_IT, self.Em_InGaAs(V)), np.log10(I_InGaAs), p0=guess, bounds=bound, sigma=abs(np.log10(I_InGaAs)) ** fitsigma) Eti, tp, tn, alpha_p, alpha_n = popt print('[SRH] InGaAs (%.1f) Eti: %.3f, tp: %.3e, tn: %.3e, alpha(p): %.3e, alpha(n): %.3e' % (V, Eti, tp, tn, alpha_p, alpha_n)) return self.T_analysis_IT, 10 ** SRH_InGaAs_IT((self.T_analysis_IT, self.Em_InGaAs(V)), *popt) + \ np.ones(len(self.T_analysis_IT)) * self.I_InP_max, Eti, [tp, tn, alpha_p, alpha_n] else: raise BaseException("Wrong type: %s" % type) else: raise BaseException("Wrong material: %s" % material) def d_Gamma(self, T, material): if material == 'InP': V_InP = np.asarray([V for V in self.RawIV[T].X if -self.v_min['InP'] >= V > -self.v_max['InP']]) F_InP = np.asarray([self.Em_InP(V) for V in V_InP]) def Gamma_InP(X): Emax_Vcm, T = X mt = self.effective_mass_InP me = 9.11e-31 dM = self.dm_InP(Emax_Vcm, self.ND, self.Ncharge, self.d_mul, self.d_ch) # 0.42e-4 # [cm] F_Gamma = np.sqrt(24 * (mt * me) * (kB * T) ** 3) / (e * h_bar) / 100 # [V/cm] E1 = Emax_Vcm d_Gamma = [] for i, x in enumerate(dM): if x <= self.d_mul: E2 = E1[i] - (e * self.ND * x) / eps_InP d_Gamma_1 = (np.sqrt(3 * np.pi) * eps_InP * F_Gamma) / (e * self.ND) * \ (np.exp((E1[i] / F_Gamma) ** 2) - np.exp(E2 / F_Gamma ** 2)) # [cm] d_Gamma.append(d_Gamma_1) else: E2 = E1[i] - (e * self.ND * self.d_mul) / eps_InP E3 = E2 - (e * self.Ncharge * (x - self.d_mul)) / eps_InP d_Gamma_1 = (np.sqrt(3 * np.pi) * eps_InP * F_Gamma) / (e * self.ND) * \ (np.exp((E1[i] / F_Gamma) ** 2) - np.exp(E2 / F_Gamma ** 2)) # [cm] d_Gamma_2 = (np.sqrt(3 * np.pi) * eps_InP * F_Gamma) / (e * self.Ncharge) * \ (np.exp((E2 / F_Gamma) ** 2) - np.exp(E3 / F_Gamma ** 2)) # [cm] d_Gamma.append(d_Gamma_1 + d_Gamma_2) return np.asarray(d_Gamma) return 
V_InP, Gamma_InP((F_InP, T)) elif material == 'InGaAs': V_InGaAs = np.asarray([V for V in self.RawIV[T].X if -self.T_analysis_v_max[T] <= V <= -self.v_min['InGaAs']]) F_InGaAs = np.asarray([self.Em_InGaAs(V) for V in V_InGaAs]) def Gamma_InGaAs(X): Emax_Vcm, T = X mt = self.effective_mass_InGaAs me = 9.11e-31 ND_abs = 7.5e14 # [cm-3] F_Gamma = np.sqrt(24 * (mt * me) * (kB * T) ** 3) / (e * h_bar) / 100 # [V/cm] E1 = Emax_Vcm E2 = 0 d_Gamma = (np.sqrt(3 * np.pi) * eps_InGaAs * F_Gamma) / (e * ND_abs) * \ (np.exp((E1 / F_Gamma) ** 2) - np.exp((E2 / F_Gamma) ** 2)) # [cm] return d_Gamma return V_InGaAs, Gamma_InGaAs((F_InGaAs, T)) def PlotIV(self, T, material, type, parameter): if material == 'InP': if type == 'BTB': V_range, effective_mass = parameter if V_range == 'All': V_BTB = self.RawIV[T].X elif V_range == 'InP': V_BTB = np.asarray([V for V in self.RawIV[T].X if -self.v_min['InP'] >= V > -self.v_max['InP']]) elif V_range == 'InGaAs': V_BTB = np.asarray([V for V in self.RawIV[T].X if -self.T_analysis_v_max[T] <= V <= -self.v_min['InGaAs']]) else: raise BaseException("Wrong parameter: %s" % parameter) I_BTB = [] for V in V_BTB: Ej = self.Em_InP(V) E_Vcm_array = self.DopingProfile.FieldTransform(Ej, self.interface_um) I_BTB.append(BTB.J_BTB_InP(self.DopingProfile.X * 1e-4, E_Vcm_array, T, effective_mass) * self.A) return V_BTB, I_BTB elif material == 'InGaAs': V_InGaAs =
np.asarray([V for V in self.RawIV[T].X if -self.T_analysis_v_max[T] <= V <= -self.v_min['InGaAs']])
numpy.asarray
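For reference, a minimal sketch of the numpy.asarray filtering pattern used to select the voltage analysis window above; the sweep values and limits are illustrative.

import numpy as np

raw_v = [-0.5, -1.0, -2.0, -5.0, -10.0]   # bias sweep (V)
v_min, v_max = 1.0, 6.0                   # analysis window (absolute values)
v_sel = np.asarray([v for v in raw_v if -v_max <= v <= -v_min])
# v_sel -> array([-1., -2., -5.])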
#!/usr/bin/env python3 import os import re import sys import freud import matplotlib.pyplot as plt import numpy as np import pandas as pd # Magic to get other definitions in place sys.path.append(os.path.join(os.path.dirname(__file__), '..', '..', 'src', 'lib')) from common import radial_average, ragged_mean from stylelib.common_styles import septin_runs_stl def Compute_U_FFT(Lx, Nx, r, z): xpixel = Lx/Nx Nxgrid = Nx * 1j grid_x, grid_y = np.mgrid[-Lx/2:Lx/2:Nxgrid, -Lx/2:Lx/2:Nxgrid] from scipy.interpolate import griddata grid_z = griddata(r, z, (grid_x, grid_y), method = 'cubic') grid_z_nearest = griddata(r, z, (grid_x, grid_y), method = 'nearest') grid_z[np.isnan(grid_z)] = grid_z_nearest[np.isnan(grid_z)] u = np.fft.fft2(grid_z) / (Nx * Nx) ushift = np.fft.fftshift(u) freqx = np.fft.fftshift(np.fft.fftfreq(ushift.shape[1], xpixel)) uq_2d_fft_qcutoff = (freqx[1] - freqx[0]) return [ushift, uq_2d_fft_qcutoff] def Compute_U_DirectFast(Lx, ndirect, r, z): xvec = 2.0*np.pi/Lx*
np.linspace(-ndirect, ndirect, 2*ndirect+1)
numpy.linspace
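For reference, a minimal sketch of the symmetric wavevector grid built with numpy.linspace in the completion above; the box length Lx and ndirect are illustrative.

import numpy as np

Lx, ndirect = 10.0, 3
# 2*ndirect+1 evenly spaced wavevectors from -2*pi*ndirect/Lx to +2*pi*ndirect/Lx
xvec = 2.0 * np.pi / Lx * np.linspace(-ndirect, ndirect, 2 * ndirect + 1)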
""" Unit Tests for Py-ART's core/radar.py module. """ import sys # we need a class which excepts str for writing in Python 2 and 3 try: from StringIO import StringIO except ImportError: from io import StringIO import inspect import numpy as np from numpy.testing import assert_raises import pyart def test_rays_per_sweep_attribute(): radar = pyart.testing.make_target_radar() rays_per_sweep = radar.rays_per_sweep assert isinstance(rays_per_sweep, dict) assert rays_per_sweep['data'].shape == (1, ) assert rays_per_sweep['data'][0] == 360 def test_iterators(): radar = pyart.testing.make_empty_ppi_radar(30, 20, 5) radar.fields['reflectivity'] = { 'data': np.zeros((100, 30), dtype=np.float32)} starts = [0, 20, 40, 60, 80] ends = [19, 39, 59, 79, 99] starts_ends = [(s, e) for s, e in zip(starts, ends)] assert inspect.isgenerator(radar.iter_start()) assert [s for s in radar.iter_start()] == starts assert inspect.isgenerator(radar.iter_end()) assert [s for s in radar.iter_end()] == ends assert inspect.isgenerator(radar.iter_start_end()) assert [s for s in radar.iter_start_end()] == starts_ends assert inspect.isgenerator(radar.iter_slice()) for s, start, end in zip(radar.iter_slice(), starts, ends): assert s.start == start assert s.stop == end + 1 assert s.step is None assert inspect.isgenerator(radar.iter_field('reflectivity')) for d in radar.iter_field('reflectivity'): assert d.shape == (20, 30) assert d.dtype == np.float32 assert_raises(KeyError, radar.iter_field, 'foobar') assert inspect.isgenerator(radar.iter_azimuth()) for d in radar.iter_azimuth(): assert d.shape == (20, ) assert inspect.isgenerator(radar.iter_elevation()) for d in radar.iter_elevation(): assert d.shape == (20, ) def test_get_methods(): radar = pyart.testing.make_empty_ppi_radar(30, 20, 5) radar.fields['reflectivity'] = { 'data': np.zeros((100, 30), dtype=np.float32)} assert radar.get_start(0) == 0 assert radar.get_start(1) == 20 assert_raises(IndexError, radar.get_start, -1) assert_raises(IndexError, radar.get_start, 20) assert radar.get_end(0) == 19 assert radar.get_end(1) == 39 assert_raises(IndexError, radar.get_end, -1) assert_raises(IndexError, radar.get_end, 20) assert radar.get_start_end(0) == (0, 19) assert radar.get_start_end(1) == (20, 39) assert_raises(IndexError, radar.get_start_end, -1) assert_raises(IndexError, radar.get_start_end, 20) assert radar.get_slice(0) == slice(0, 20) assert radar.get_slice(1) == slice(20, 40) assert_raises(IndexError, radar.get_slice, -1) assert_raises(IndexError, radar.get_slice, 20) data = radar.get_field(0, 'reflectivity') assert data.shape == (20, 30) assert data.dtype == np.float32 data = radar.get_field(1, 'reflectivity') assert data.shape == (20, 30) assert data.dtype == np.float32 assert_raises(KeyError, radar.get_field, 0, 'foobar') assert_raises(IndexError, radar.get_field, -1, 'reflectivity') assert_raises(IndexError, radar.get_field, 20, 'reflectivity') assert radar.get_azimuth(0).shape == (20, ) assert_raises(IndexError, radar.get_azimuth, -1) assert_raises(IndexError, radar.get_azimuth, 20) assert radar.get_elevation(0).shape == (20, ) assert_raises(IndexError, radar.get_elevation, -1) assert_raises(IndexError, radar.get_elevation, 20) assert_raises(LookupError, radar.get_nyquist_vel, 0) radar.instrument_parameters = { 'nyquist_velocity': {'data': np.ones((100,))} } assert round(radar.get_nyquist_vel(0)) == 1 assert_raises(IndexError, radar.get_nyquist_vel, -1) radar.instrument_parameters['nyquist_velocity']['data'][0] = 2 assert_raises(Exception, radar.get_nyquist_vel, 0) 
def test_extract_sweeps(): radar = pyart.testing.make_empty_ppi_radar(100, 360, 3) radar.fields['reflectivity'] = {'data': np.zeros((1080, 100))} radar.fields['velocity'] = {'data': np.zeros((1080, 100))} eradar = radar.extract_sweeps([0, 2]) # extracted radar should have 720 rays, 2 sweeps, 100 gates assert eradar.time['data'].shape == (720, ) assert eradar.range['data'].shape == (100, ) assert eradar.metadata['instrument_name'] == 'fake_radar' assert eradar.scan_type == 'ppi' assert eradar.latitude['data'].shape == (1, ) assert eradar.longitude['data'].shape == (1, ) assert eradar.altitude['data'].shape == (1, ) assert eradar.altitude_agl is None assert eradar.sweep_number['data'].shape == (2, ) assert eradar.sweep_mode['data'].shape == (2, ) assert eradar.fixed_angle['data'].shape == (2, ) assert eradar.sweep_start_ray_index['data'].shape == (2, ) assert eradar.sweep_end_ray_index['data'].shape == (2, ) assert eradar.target_scan_rate is None assert eradar.azimuth['data'].shape == (720, ) assert eradar.elevation['data'].shape == (720, ) assert eradar.scan_rate is None assert eradar.antenna_transition is None assert eradar.instrument_parameters is None assert eradar.radar_calibration is None assert eradar.ngates == 100 assert eradar.nrays == 720 assert eradar.nsweeps == 2 assert eradar.fields['reflectivity']['data'].shape == (720, 100) assert eradar.fields['velocity']['data'].shape == (720, 100) def test_extract_sweeps_extra(): radar = pyart.testing.make_empty_ppi_radar(10, 36, 3) radar.instrument_parameters = { 'prt': {'data': np.zeros((108, ))}, 'prt_mode': {'data': np.array(['fixed'] * 3)}, 'radar_antenna_gain_h': {'data': np.array(0)}, } radar.radar_calibration = { 'r_calib_index': {'data': np.zeros((108, ))}, 'r_calib_time': {'data': np.zeros((8, ))} } eradar = radar.extract_sweeps([0, 2]) instr = eradar.instrument_parameters assert instr['prt']['data'].shape == (72, ) assert instr['prt_mode']['data'].shape == (2, ) assert instr['radar_antenna_gain_h']['data'].shape == () calib = eradar.radar_calibration assert calib['r_calib_index']['data'].shape == (72, ) assert calib['r_calib_time']['data'].shape == (8, ) def test_extract_sweeps_errors(): radar = pyart.testing.make_empty_ppi_radar(10, 36, 2) assert_raises(ValueError, radar.extract_sweeps, [0, 2]) assert_raises(ValueError, radar.extract_sweeps, [-1, 1]) def test_radar_creation(): radar = pyart.testing.make_target_radar() assert isinstance(radar, pyart.core.Radar) def test_add_field(): radar = pyart.testing.make_target_radar() dic = {'data': np.zeros((360, 50)), 'standard_name': 'test'} radar.add_field('test', dic) assert 'test' in radar.fields assert 'data' in radar.fields['test'] assert radar.fields['test']['standard_name'] == 'test' def test_add_field_errors(): radar = pyart.testing.make_target_radar() assert_raises(ValueError, radar.add_field, 'reflectivity', {}) dic = {'dat': np.zeros((360, 50)), 'standard_name': 'test'} assert_raises(KeyError, radar.add_field, 'test', dic) dic = {'data': np.zeros((360, 49)), 'standard_name': 'test'}
assert_raises(ValueError, radar.add_field, 'test', dic)
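# Illustrative sketch (assumed usage, not an additional test): extract_sweeps and
# get_field, both exercised above, can be chained to pull one sweep out of a
# multi-sweep volume.
def _example_extract_single_sweep():
    radar = pyart.testing.make_empty_ppi_radar(30, 20, 5)
    radar.fields['reflectivity'] = {
        'data': np.zeros((100, 30), dtype=np.float32)}
    single = radar.extract_sweeps([2])                  # new Radar holding only sweep 2
    return single.get_field(0, 'reflectivity').shape    # (20, 30)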
import numpy as np from .sfo import SFO from time import time from scipy.optimize import minimize # NB: binary classification is done with +/- labels def tm_preprocess(X, colnorms=None): """ Preprocessing that seems to make TM more accurate: normalize each column so training data has length 1 (use same normalization constants for training and test) normalize each row to have length 1 (so normalization constant differs at test time) Inputs: X - feature matrix, rows are instances colnorms - vector containing the norm of each colum of the training matrix Outputs: if colnorms is None (training): Xnormalized - the normalized training data colnorms - the vector containing the norm of each column of the training matrix if colnorms is set (testing): Xnormalized - the normalized test data """ returnargs = 1 if colnorms is None: # Train colnorms = np.sqrt(np.sum(X*X, axis=0)) returnargs = 2 Xnormalized = np.copy(X) Xnormalized[:, colnorms > 0] = Xnormalized[:, colnorms > 0] / colnorms[colnorms > 0] rownorms = np.sqrt(np.sum(Xnormalized*Xnormalized, axis=1)) Xnormalized = Xnormalized / rownorms[:, np.newaxis] if returnargs == 1: return Xnormalized elif returnargs == 2: return (Xnormalized, colnorms) def tm_predict(w0, X, q, r, type): """ Returns predicted values based on a learned tensor machine Inputs: w0 - TM factors X,q,r,type - see the description of tm_fit Outputs: z - predictions for each row in X """ (n,d) = X.shape r_vec = np.concatenate(([1], (q-1)*[r])) b = w0[0] w = w0[1:] w = np.reshape(w, (d, len(w)//d)) acc_sum = 0 w_offset = 0 Z = b*np.ones((n,1)) for i in range(q): for j in range(r_vec[i]): # the vectors whose outer product form the jth rank-one term in the # outer product of the coefficients for the degree i+1 term # d-by-i matrix W = w[:, w_offset:(w_offset + i + 1)] XW = X.dot(W) # n-by-(i+1) prodXW = np.prod(XW, axis=1) # n-by-1 prodXW = prodXW[:, np.newaxis] Z = Z + prodXW # n-by-1 w_offset = w_offset + i + 1 if type.upper() == 'REGRESSION': return Z elif type.upper() == 'BC': return np.sign(Z) def tm_f_df(w0, X, y, q, r, type, gamma): """ Computes the TM objective value and gradient for scipy's optimization functions Inputs: w0 - TM factors X,y,q,r,type,gamma - see the description of tm_fit Outputs: f - function value df - gradient of TM factors """ (n,d) = X.shape r_vec = np.concatenate(([1], (q-1)*[r])) b = w0[0] w = w0[1:] w = np.reshape(w, (d, len(w)//d)) nw = w.shape[1] acc_sum = 0 w_offset = 0 Z = b*np.ones((n,1)) bl = np.zeros((n, nw)) for i in range(q): for j in range(r_vec[i]): # the vectors whose outer product form the jth rank-one term in the # outer product of the coefficients for the degree i+1 term # d-by-i matrix W = w[:, w_offset:(w_offset + i + 1)] XW = X.dot(W) # n-by-(i+1) prodXW = np.prod(XW, axis=1) # n-by-1 prodXW = prodXW[:, np.newaxis] # make it a column vector bl[:, w_offset:(w_offset+i+1)] = prodXW / XW Wsquared = W*W norm_squares = np.sum(Wsquared, axis=0) # 1-by-(i+1) acc_sum = acc_sum + np.sum(norm_squares) Z = Z + prodXW # n-by-1 w_offset = w_offset + i + 1 f = 0 diff = np.empty_like(Z) if type.upper() == 'REGRESSION': diff = Z - y; f = np.sum(diff*diff)/n/2 elif type.upper() == 'BC': eyz = np.exp(-y*Z); diff = -y*eyz/(1+eyz) f = np.mean(np.log(1 + eyz)) f = f + gamma*acc_sum/2; df = np.empty_like(w0) df[0] = np.mean(diff) df_w = X.transpose().dot(diff*bl) df_w = df_w + gamma*w; df[1:] = np.reshape(df_w, (len(w0)-1,)) return (f, df) def tm_f_df_sub(w0, indices, X, y, q, r, type, gamma): """ Computes the TM objective value and gradient for SFO solver 
Inputs: w0 - TM factors indices - list of indexes into the training data defining this minibatch X,y,q,r,type,gamma - see the description of tm_fit Outputs: f - function value df - gradient of TM factors """ minibatchX = X[indices, :] minibatchy = y[indices, :] return tm_f_df0(w0, X, y, q, r, type, gamma) def tm_f_df0(w0, X, y, q, r, type, gamma): """ Computes the TM objective value and gradient for SFO Inputs: w0 - TM factors X,y,q,r,type,gamma - see the description of tm_fit Outputs: f - function value df - gradient of TM factors """ (n,d) = X.shape gamma = n*gamma r_vec = np.concatenate(([1], (q-1)*[r])) b = w0[0] w = w0[1:] w = np.reshape(w, (d, len(w)//d)) nw = w.shape[1] acc_sum = 0 w_offset = 0 Z = b*np.ones((n,1)) bl = np.empty((n, nw)) for i in range(q): for j in range(r_vec[i]): # the vectors whose outer product form the jth rank-one term in the # outer product of the coefficients for the degree i+1 term # d-by-i matrix W = w[:, w_offset:(w_offset + i + 1)] XW = X.dot(W) # n-by-(i+1) prodXW = np.prod(XW, axis=1) # n-by-1 prodXW = prodXW[:, np.newaxis] if i == 0: # dealing with the linear term bl[:, w_offset:(w_offset + i + 1)] = 1 else: for l in range(i+1): idx = np.setdiff1d([j for j in range(i+1)], l) bl[:, w_offset+l] = np.prod(XW[:, idx]*XW[:, idx]) Wsquared = W*W norm_squares = np.sum(Wsquared, axis=0) # 1-by-(i+1) acc_sum = acc_sum + np.sum(norm_squares) Z = Z + prodXW # n-by-1 w_offset = w_offset + i + 1 f = 0 diff = np.empty_like(Z) if type.upper() == 'REGRESSION': diff = Z - y; f = np.sum(diff*diff)/2 elif type.upper() == 'BC': eyz = np.exp(-y*Z); diff = -y*eyz/(1+eyz) f = np.sum(np.log(1 + eyz)) f = f + gamma*acc_sum/2; df = np.empty_like(w0) df[0] = np.sum(diff) df_w = X.transpose().dot(diff*bl) df_w = df_w + gamma*w; df[1:,0] = np.reshape(df_w, (len(w0)-1,)) return (f, df) def tm_fit(X, y, type, r, q, gamma, solver, epochs, alpha, verbosity='minimal', seed=0): """ Inputs: X, y: feature matrix and target vector (numpy arrays) type: 'regression' or 'bc' for binary classification r: rank parameter q: degree of polynomial used gamma: regularization parameter solver: 'LBFGS' or 'SFO' epochs: maxiterations for L-BFGS or number of SFO epochs alpha: scaling factor of the initial weights verbosity: 'off', 'minimal', 'all' seed: seed for random number generation Outputs: w - factors used in the TM model z - predictions of X based on w """ (n,d) = X.shape np.random.seed(seed) nv = 1 + d + ((q-1)*(q+2)*r*d)//2; # how many variables in total are in the factorization w0 = alpha*np.random.randn(nv,1) # set initial weights w = np.empty_like(w0) if solver.upper() == "LBFGS": options = {'maxiter' : epochs } res = minimize(tm_f_df, w0, args=(X,y,q,r,type,gamma), method="L-BFGS-B", jac=True, tol=1e-8, options=options) w = res.x elif solver.upper() == "SFO": N = max(30, int(np.floor(np.sqrt(n)/10))) # number of minibatches minibatch_indices = list() randp = np.array(np.random.permutation(n)) for i in range(N): minibatch_indices.append(randp[i:n:N]) optimizer = SFO(tm_f_df_sub, w0, minibatch_indices, args=(X,y,q,r,type,gamma)) if verbosity.upper() == "OFF": optimizer.display = 0 elif verbosity.upper() == "MINIMAL": optimizer.display = 1 elif verbosity.upper() == "ALL": optimizer.display = 2 w = optimizer.optimize(epochs) opt_outputs = optimizer; #else: # print("Enter a valid solver! 
scipy's LBFGS and SFO are supported so far") z = tm_predict(w, X, q, r, type) return (w, z) def tm_solver(Xtrain, ytrain, Xtest, ytest, type, options): """ Takes as input a training and a test set, trains a tensor machine, then evaluates the test error Inputs: Xtrain, ytrain - training features and targets Xtest, ytest - test features and targets type - 'regression' or 'bc' (binary classification) options - dictionary containing options for tensor machines (see tm_fit description for more information) Outputs: error_test, error_train: test and training errors (misclassification rate for bc, relative norm for regression) """ (n,d) = Xtrain.shape ntest = Xtest.shape[0] #print("running tensor machine training") #print("data size: %d by %d" % (n,d)) #print("parameters: degree(%d) rank(%d) solver(%s) gamma(%e) maxIter(%d) alpha(%f)" % # (options['q'], options['r'], options['solver'], options['gamma'], # options['maxIter'], options['alpha'])) timeStart = time() (w, predtrain) = tm_fit(Xtrain, ytrain, type, options['r'], options['q'], options['gamma'], options['solver'], options['maxIter'], options['alpha'], options['verbosity']) timeEnd = time() #print("Finished training in %d seconds" % (timeEnd - timeStart)) predtest = tm_predict(w, Xtest, options['q'], options['r'], type) error_train = 1 error_test = 1 if type.upper() == 'BC': predtrain = np.sign(predtrain) predtest =
np.sign(predtest)
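# Illustrative sketch (assumed usage): fitting a small tensor machine on synthetic
# regression data with the routines defined above.  The hyperparameter values are
# placeholders, not recommendations.
def _example_tm_regression():
    rng = np.random.RandomState(0)
    Xtrain = rng.randn(200, 5)
    ytrain = Xtrain[:, :1] * Xtrain[:, 1:2] + 0.01 * rng.randn(200, 1)  # simple quadratic target
    Xtrain, colnorms = tm_preprocess(Xtrain)
    w, ztrain = tm_fit(Xtrain, ytrain, 'regression', r=2, q=2, gamma=1e-4,
                       solver='LBFGS', epochs=100, alpha=0.1)
    Xtest = tm_preprocess(rng.randn(50, 5), colnorms=colnorms)
    return tm_predict(w, Xtest, q=2, r=2, type='regression')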
#!/usr/bin/python3.7 """ This module has two classes: DataExtraction and ActivationEnergy. DataExtraction reads csv files and creates pandas.DataFrames according to the isoconversional principle. ActivationEnergy computes the activation energy with five implemented isoconversional methods: Friedman (Fr), Ozawa-Flynn-Wall(OFW), Kissinger-Akahira-Sunos (KAS) and the method developed by Vyazovkin (Vy, aVy). """ #Dependencies import numpy as np import pandas as pd from scipy.interpolate import interp1d from scipy.optimize import minimize_scalar import scipy.special as sp import matplotlib.pyplot as plt from scipy.stats import linregress from scipy import integrate import derivative from scipy.optimize import fsolve #----------------------------------------------------------------------------------------------------------- class DataExtraction(object): """ Extractor to manipulate raw data to create lists and Data Frames that will be used to compute the Activation Energy. """ def __init__(self): """ Constructor. Parameters: None Notes: It only defines variables. """ self.DFlis = [] #list of DataFrames containing data self.seg_DFlis = [] #list of DataFrames segmented by temperature self.Beta = [] #list of heating rates self.BetaCC = [] #list of correlation coefficient for T vs t self.files = [] #list of files containing raw data self.da_dt = [] #list of experimental conversion rates self.T = [] #list of experimental temperature in Kelvin self.T0 = [] #list of experimental inicial temperature in Kelvin self.t = [] #list off experimental time self.alpha = [] #list of experimental conversion self.TempIsoDF = pd.DataFrame() #Isoconversional temperature DataFrame self.timeIsoDF = pd.DataFrame() #Isoconversional time DataFrame self.diffIsoDF = pd.DataFrame() #Isoconversional conversion rate DataFrame self.TempAdvIsoDF = pd.DataFrame() #Advanced isoconversional temperature DataFrame self.timeAdvIsoDF = pd.DataFrame() #Advanced isoconversional time DataFrame #----------------------------------------------------------------------------------------------------------- def read_files(self, flist, encoding='utf8'): """ Reads each TGA file as a pandas DataFrame and calculates de heating rate Parameters: flist : list object containing the paths of the files to be used. encoding : The available encodings for pandas.read_csv() method. Includes but not limited to 'utf8', 'utf16','latin1'. For more information on the python standar encoding: (https://docs.python.org/3/library/codecs.html#standard-encodings) """ print("Files to be used: \n{}\n ".format(flist)) DFlis = self.DFlis Beta = self.Beta BetaCorrCoeff = self.BetaCC T0 = self.T0 print(f'Reading files and creating DataFrames...\n') for item in flist: #csv files can use a tab or a coma as separator. 
try: DF = pd.read_csv(item, sep = '\t', encoding = encoding) #stores the initial temperature of the ith experiment T0.append(DF[DF.columns[1]][0]+273.15) #computes the mass loss percentage DF['%m'] = 100*(DF[DF.columns[2]]/DF[DF.columns[2]][0]) #creates a column for the temperature in Kelvin DF['Temperature [K]'] = DF[DF.columns[1]] + 273.15 #computes the heating rate with a Savitzki-Golay filter dTdt = derivative.dxdt(DF['Temperature [K]'].values, DF[DF.columns[0]].values, kind="savitzky_golay", order=3, left=0.5, right=0.5) DF['dT/dt$'] = DF[DF.columns[0]] DF['dT/dt$'] = dTdt #computes the differential thermogram with a Savitzki-Golay filter dwdt = derivative.dxdt(DF[DF.columns[2]].values, DF[DF.columns[0]].values, kind="savitzky_golay", order=3, left=0.5, right=0.5) DF['dw/dt'] = DF[DF.columns[0]] DF['dw/dt'] = dwdt #computes the heating rate LR = linregress(DF[DF.columns[0]], DF[DF.columns[1]]) BetaCorrCoeff.append(LR.rvalue) Beta.append(LR.slope) DFlis.append(DF) except IndexError: DF = pd.read_csv(item, sep = ',', encoding = encoding) T0.append(DF[DF.columns[1]][0]+273.15) DF['%m'] = 100*(DF[DF.columns[2]]/DF[DF.columns[2]][0]) #creates a column for the temperature in Kelvin DF['Temperature [K]'] = DF[DF.columns[1]] + 273.15 #computes the differential thermogram with a Savitzki-Golay filter dTdt = derivative.dxdt(DF[DF.columns[1]].values, DF[DF.columns[0]].values, kind="savitzky_golay", order=3, left=0.5, right=0.5) DF['dT/dt$'] = DF[DF.columns[0]] DF['dT/dt$'] = dTdt dwdt = derivative.dxdt(DF[DF.columns[2]].values, DF[DF.columns[0]].values, kind="savitzky_golay", order=3, left=0.5, right=0.5) DF['dw/dt'] = DF[DF.columns[0]] DF['dw/dt'] = dwdt #computes the heating rate LR = linregress(DF[DF.columns[0]], DF[DF.columns[1]]) BetaCorrCoeff.append(LR.rvalue) Beta.append(LR.slope) DFlis.append(DF) self.DFlis = DFlis #List of the DataFrames constructed self.Beta = np.array(Beta) #Array of heating rates in ascendent order self.BetaCC = np.array(BetaCorrCoeff) #Array of correlation coefficients for the heating rates self.T0 = np.array(T0) #Array of experimental initial temperatures print(f'The computed heating rates are:\n') for i in range(len(Beta)): print(f'{Beta[i]:.2f} K/min') return self.Beta, self.T0 #----------------------------------------------------------------------------------------------------------- def Conversion(self,T0,Tf): """ Calculates the conversion values for a given temperature range. Not all experimental points are suitable for the isoconversional analysis, so a temperature analysis range must be selected based on the thermal profile of the sample. Parameters: T0: Initial temperature in Kelvin of the interval where the process to study is. Tf: Final temperature in Kelvin of the interval where the process to study is. Returns: A plot of the temperature range to be used in the analysis. 
""" DFlist = self.DFlis NDFl = self.seg_DFlis print('The temperature range was set to ({0:0.1f},{1:0.1f}) K'.format((T0),(Tf))) print(f'Computing conversion values...') for item in DFlist: #filters the DataFrames based on the temperature limits item = item.loc[(item['Temperature [K]'] > T0) & (item['Temperature [K]'] < Tf)] item = item.reset_index(drop=True) #calculates the conversion item['alpha'] = (item[item.columns[2]][0]-item[item.columns[2]])/(item[item.columns[2]][0]-item[item.columns[2]][item.shape[0]-1]) #computes the cnversion rate with a Savitzki-Golay filter dadt = derivative.dxdt(item['alpha'].values, item[item.columns[0]].values, kind="savitzky_golay", order=3, left=0.5, right=0.5) item['da/dt'] = item[item.columns[0]] item['da/dt'] = dadt NDFl.append(item) alpha = self.alpha T = self.T t = self.t da_dt = self.da_dt #To create the Isoconversional DataFrames interpolation is needed. #In order to make the interpolation the x values must be strictly in ascending order. #The next block of code evaluates if the i-th value is bigger than the i-1-th, if so, #the value is appended to the corresponding list. for i in range(len(NDFl)): #The initial values are those of the lower limit of the temperature range. a = [NDFl[i]['alpha'].values[0]] Temp = [NDFl[i]['Temperature [K]'].values[0]] time = [NDFl[i][DFlist[i].columns[0]].values[0]] diff = [NDFl[i]['da/dt'].values[1]] for j in range(len(NDFl[i]['alpha'].values)): if NDFl[i]['alpha'].values[j] == a[-1]: pass #If the i-th value is bigger than the i-1-th #its corresponding values of time, temperature #and conversion rate and itself are stored #in a corresponding list. elif NDFl[i]['alpha'].values[j] > a[-1]: a.append(NDFl[i]['alpha'].values[j]) Temp.append(NDFl[i]['Temperature [K]'].values[j]) time.append(NDFl[i][NDFl[i].columns[0]].values[j]) diff.append(NDFl[i]['da/dt'].values[j]) else: pass alpha.append(np.array(a)) T.append(np.array(Temp)) t.append(np.array(time)) da_dt.append(np.array(diff)) print(f'Done') self.seg_DFlis = NDFl #list of segmented DataFrames self.alpha = alpha #list of arrays of conversion values for each heating rate self.T = T #list of arrays of temperatures corresponding to a conversion value self.t = t #list of arrays of temperatures corresponding to a conversion value self.da_dt = da_dt plt.style.use('tableau-colorblind10') markers = ["o","v","x","1","s","^","p","<","2",">"] #Plot of the thermograms showing the anaysis range. for i in range(len(DFlist)): plt.plot(DFlist[i]['Temperature [K]'].values[::40], #Temperature in Kelvin DFlist[i]['%m'].values[::40], #mass loss percentage marker = markers[i], linestyle = '--', label=r'$\beta=$'+str(np.round(self.Beta[i],decimals=2))+' K/min', alpha=0.75) plt.axvline(x=(T0),alpha=0.8,color='red',ls='--',lw=1.2) #temperature lower limit plt.axvline(x=(Tf),alpha=0.8,color='red',ls='--',lw=1.2) #temperature upper limit plt.ylabel('mass [%]') plt.xlabel('Temperature [K]') plt.xlim((T0-20),(Tf+20)) plt.legend(frameon=True) plt.grid(True) plt.show() #----------------------------------------------------------------------------------------------------------- def Isoconversion(self, advanced = False, method='points', N = 1000, d_a = 0.001): """ Constructs the isoconversional DataFrames. Parameters: advanced: Boolean value. If set to True the advanced isoconverional DataFrames will be constructed. method: String. 'points' or 'step'. In case of setting advanced to True the conversion array can be constructed con the linspace or arange functions of numpy. 
'points' will call for linspace while 'step' will call for arange. N: The number of points in the conversion array If method is set to 'points'. d_a: The size of the step from the i-th to the i+1-th value in the conversion array If method is set to 'step'. Returns: pandas.DataFrame objects: Temperatures Dataframe, times DataFrame, conversion rates DataFrame. If advanced is to True it also returns a Temperatures and times DataFrames for the aadvanced method of Vyazovkin (aVy method in ActivationEnergy). """ alpha = self.alpha T = self.T t = self.t da_dt = self.da_dt Beta = self.Beta TempIsoDF = self.TempIsoDF timeIsoDF = self.timeIsoDF diffIsoDF = self.diffIsoDF TempAdvIsoDF = self.TempAdvIsoDF timeAdvIsoDF = self.timeAdvIsoDF #The experimental set with the least points is selected as conversion #array for the isoconversional coomputations because all the other data sets #have more points to interpolate a reliable function for the conversion array alps = np.array(alpha[-1]) print(f'Creating Isoconversion DataFrames...') #The time, temperature and conversion rate values corresponding to conversion array #selected are pass atrightforward to the corresponding isoconversional DataFrame TempIsoDF['HR '+str(np.round(Beta[-1], decimals = 1)) + ' K/min'] = np.round(T[-1], decimals = 4) timeIsoDF['HR '+str(np.round(Beta[-1], decimals = 1)) + ' K/min'] = np.round(t[-1], decimals = 4) diffIsoDF['HR '+str(np.round(Beta[-1], decimals = 1)) + ' K/min'] = np.round(da_dt[-1], decimals = 4) for i in range(len(Beta)-1): #The interpolation functions to compute isoconversional values are constructed #as cubic splines with the scipy.interpolate.interp1d function inter_func = interp1d(alpha[i], t[i], kind='cubic', bounds_error=False, fill_value="extrapolate") #A column is added to the isoconversional DataFrames for each heating rate timeIsoDF['HR '+str(np.round(Beta[i], decimals = 1)) + ' K/min'] = np.round(inter_func(alps), decimals = 4) inter_func2 = interp1d(alpha[i], T[i], kind='cubic', bounds_error=False, fill_value="extrapolate") TempIsoDF['HR '+str(np.round(Beta[i], decimals = 1)) + ' K/min'] = np.round(inter_func2(alps), decimals = 4) inter_func3 = interp1d(alpha[i], da_dt[i], kind='cubic', bounds_error=False, fill_value="extrapolate") diffIsoDF['HR '+str(np.round(Beta[i], decimals = 1)) + ' K/min'] = np.round(inter_func3(alps), decimals = 4) #Sorting the columns in ascending order colnames = TempIsoDF.columns.tolist() colnames = colnames[1:] + colnames[:1] #Asigning the values of the conversion array as index for the DataFrames TempIsoDF.index = alpha[-1] TempIsoDF = TempIsoDF[colnames] #Isoconversional DataFrame of temperature timeIsoDF.index = alpha[-1] timeIsoDF = timeIsoDF[colnames] #Isoconversional DataFrame of time diffIsoDF.index = alpha[-1] diffIsoDF = diffIsoDF[colnames] #Isoconversional DataFrame of conversion rate self.TempIsoDF = TempIsoDF self.timeIsoDF = timeIsoDF self.diffIsoDF = diffIsoDF if advanced == True: #Conversion array based on the number of points. 
if method == 'points': adv_alps, d_a = np.linspace(alpha[-1][0],alpha[-1][-1],N,retstep=True) #Conversion array based on the \Delta\alpha value elif method == 'step': adv_alps = np.arange(alpha[-1][0],alpha[-1][-1],d_a) else: raise ValueError('Method not recognized') for i in range(0,len(Beta)): #New interpolation functions with the advanced conversion array inter_func = interp1d(alpha[i], T[i], kind='cubic', bounds_error=False, fill_value="extrapolate") TempAdvIsoDF['HR '+str(np.round(Beta[i], decimals = 1)) + ' K/min'] = np.round(inter_func(adv_alps), decimals = 4) inter_func2 = interp1d(alpha[i], t[i], kind='cubic', bounds_error=False, fill_value="extrapolate") timeAdvIsoDF['HR '+str(np.round(Beta[i], decimals = 1)) + ' K/min'] = np.round(inter_func2(adv_alps), decimals = 4) timeAdvIsoDF.index = adv_alps TempAdvIsoDF.index = adv_alps self.TempAdvIsoDF = TempAdvIsoDF #Isoconversional DataFrame of temperature for the advanced Vyazovkin method (aVy) self.timeAdvIsoDF = timeAdvIsoDF #Isoconversional DataFrame of time for the advanced Vyazovkin method (aVy) self.d_a = d_a #Size of the \Delta\alpha step else: pass print(f'Done') return self.TempIsoDF, self.timeIsoDF, self.diffIsoDF, self.TempAdvIsoDF, self.timeAdvIsoDF #----------------------------------------------------------------------------------------------------------- def get_beta(self): """ Getter for the heating rates. Parameters: None Returns: array object containing the experimental heating rate sorted in ascendent order obtained from a linear regression of T vs t. """ return self.Beta #----------------------------------------------------------------------------------------------------------- def get_betaCC(self): """ Getter for the correlation coefficient of the heating rates. Parameters: None Returns: list object containing the experimental T vs t correlation coefficient obtained from a linear regression, sorted in correspondance with the heating rate list (attribute Beta). """ return self.BetaCC #----------------------------------------------------------------------------------------------------------- def get_DFlis(self): """ Getter of the list containing the DataFrames of the experimental runs. Parameters: None Returns: list object containing the DataFrames with the experimental data, sorted in correspondance with the heating rate list (attribute Beta). """ return self.DFlis #----------------------------------------------------------------------------------------------------------- def get_TempIsoDF(self): """ Getter for the Temperatures DataFrame. Parameters: None Returns: DataFrame of isoconversional temperatures. The index is the set of conversion values from the experiment with the less data points (which correspond to the smallest heating rate). The columns are isoconversional temperatures, sorted in heating rate ascendent order from left to right. """ return self.TempIsoDF #----------------------------------------------------------------------------------------------------------- def get_timeIsoDF(self): """ Getter for the times DataFrame. Parameters: None Returns: DataFrame of isoconversional times. The index is the set of conversion values from the experiment with the less data points (which correspond to the smallest heating rate). The columns are isoconversional times, sorted in heating rate ascendent order from left to right. 
""" return self.timeIsoDF #----------------------------------------------------------------------------------------------------------- def get_diffIsoDF(self): """ Getter for the conversion rates DataFrame. Parameters: None Returns: DataFrame of isoconversional conversion rates. The index is the set of conversion values from the experiment with the less data points (which correspond to the smallest heating rate). The columns are isoconversional conversion rates, sorted in heating rate ascendent order from left to right. """ return self.timeIsoDF #----------------------------------------------------------------------------------------------------------- def get_TempAdvIsoDF(self): """ Getter for the Temperatures DataFrame for the advenced method of Vyazovkin (aVy). Parameters: None Returns: DataFrame of isoconversional temperatures for the advanced Vyazovkin method. The index is a set of equidistant (attribute d_a) conversion values, with initial and final points taken from the experiment with the less data points (which correspond to the smallest heating rate). The columns are isoconversional temperatures, sorted in heating rate ascendent order from left to right. """ return self.TempAdvIsoDF #----------------------------------------------------------------------------------------------------------- def get_timeAdvIsoDF(self): """ Getter for the times DataFrame for the advenced method of Vyazovkin (aVy). Parameters: None Returns: DataFrame of isoconversional times for the advanced Vyazovkin method. The index is a set of equidistant (attribute d_a) conversion values, with initial and final points taken from the experiment with the less data points (which correspond to the smallest heating rate). The columns are isoconversional times, sorted in heating rate ascendent order from left to right. """ return self.timeAdvIsoDF #----------------------------------------------------------------------------------------------------------- def get_alpha(self): """ Getter for the list of arrays containig conversion values. Parameters: None Returns: list object containing arrays of the conversion values in ascendent order. The elements are sorted in correspondance with the heating rate list (attribute Beta). """ return self.alpha #----------------------------------------------------------------------------------------------------------- def get_dadt(self): """ Getter for the list of arrays containig conversion rate values corresponding to the alpha arrays. Parameters: None Returns: list object containing arrays of the conversion rates data corresponding to the conversion values of each element in the attribute alpha. The elements are sorted in correspondance with the heating rate list (attribute Beta). """ return self.da_dt #----------------------------------------------------------------------------------------------------------- def get_t(self): """ Getter for the list of arrays containig time values corresponding to the alpha arrays. Parameters: None Returns: list object containing arrays of the time data corresponding to the conversion values of each element in the attribute alpha. The elements are sorted in correspondance with the heating rate list (attribute Beta). """ return self.t #----------------------------------------------------------------------------------------------------------- def get_T(self): """ Getter for the list of arrays containig temperature values corresponding to the alpha arrays. 
Parameters: None Returns: list object containing arrays of the temperature data corresponding to the conversion values of each element in the attribute alpha. The elements are sorted in correspondance with the heating rate list (attribute Beta). """ return self.T #----------------------------------------------------------------------------------------------------------- def get_avsT_plot(self): """ Visualization method for alpha vs T Parameters: None Returns: A matplotlib figure plotting conversion vs temperature for each heating rate in attribute Beta. """ for i in range(len(self.DFlis)): plt.plot(self.T[i], self.alpha[i], label=str(np.round(self.Beta[i],decimals=1))+' K/min') plt.xlabel('T [K]') plt.ylabel(r'$\alpha$') plt.legend() return plt.show() #----------------------------------------------------------------------------------------------------------- def get_dadtvsT_plot(self): """ Visualization method for da_dt vs T Parameters: None Returns: A matplotlib figure plotting conversion rate vs temperature for each heating rate in attribute Beta. """ for i in range(len(self.DFlis)): plt.plot(self.T[i], self.da_dt[i], label=str(np.round(self.Beta[i],decimals=1))+' K/min') plt.xlabel('T [K]') plt.ylabel(r'$\text{d}\alpha/\text{d}t [min$^{-1}$]') plt.legend() return plt.show() #----------------------------------------------------------------------------------------------------------- def get_avst_plot(self): """ Visualization method for alpha vs t Parameters: None Returns: A matplotlib figure plotting conversion vs time for each heating rate in attribute Beta. """ for i in range(len(self.DFlis)): plt.plot(self.t[i], self.alpha[i], label=str(np.round(self.Beta[i],decimals=1))+' K/min') plt.xlabel(self.DFlis[i].columns[0]) plt.ylabel(self.DFlis[i].columns[4]) plt.legend() return plt.show() #----------------------------------------------------------------------------------------------------------- def get_dadtvst_plot(self): """ Visualization method for da_dt vs t Parameters: None Returns: A matplotlib figure plotting conversion rate vs time for each heating rate in attribute Beta. """ for i in range(len(self.DFlis)): plt.plot(self.t[i], self.da_dt[i], label=str(np.round(self.Beta[i],decimals=1))+' K/min') plt.xlabel(self.DFlis[i].columns[0]) plt.ylabel('$\alpha$') plt.legend() return plt.show() #----------------------------------------------------------------------------------------------------------- #----------------------------------------------------------------------------------------------------------- class ActivationEnergy(object): """ Uses the attributes of Dataextraction to compute activation energy values based on five methods: Friedman, FOW, KAS, Vyazovkin and Advanced Vyazovkin. """ def __init__(self, Beta, T0, TempIsoDF=None, diffIsoDF=None, TempAdvIsoDF=None, timeAdvIsoDF=None): """ Constructor. Defines variables and the constant R=8.314 J/(mol K) Parameters: Beta : array object containing the values of heating rate for each experiment. T0 : array of initial experimental temperatures. TempIsoDF : pandas DataFrame containing the isoconversional temperatures. diffIsoDF : pandas DataFrame containing the isoconversional conversion rate (da_dt). TempAdvIsoDF : pandas DataFrame containing the isoconversional temperatures, corresponding to evenly spaced values of conversion. timeAdvIsoDF : pandas DataFrame containing the isoconversional times, corresponding to evenly spaced values of conversion. 
""" self.Beta = Beta #Array of heating rates self.logB = np.log(Beta) #Array of log10(heating rate) self.TempIsoDF = TempIsoDF #Isoconversional DataFrame of temperatures self.diffIsoDF = diffIsoDF #Isoconversional DataFrames of conversion rates self.TempAdvIsoDF = TempAdvIsoDF #Isoconversional DataFrame of temperatures for the advanced Vyazovkin method (aVy) self.timeAdvIsoDF = timeAdvIsoDF #Isoconversional DataFrame of times for the advanced Vyazovkin method (aVy) self.T0 = T0 #Array of initial experimental temperatures self.E_Fr = [] #Container for the Friedmann (Fr) method results self.E_OFW = [] #Container for the OFW method (OFW) results self.E_KAS = [] #Container for the KAS method (KAS) results self.E_Vy = [] #Container for the Vyazovkin method (Vy) results self.E_aVy = [] #Container for the advanced Vyazovkin method (aVy)results self.R = 0.0083144626 #Universal gas constant 0.0083144626 kJ/(mol*K) #----------------------------------------------------------------------------------------------------------- def Fr(self): """ Computes the Activation Energy based on the Friedman treatment. \ln{(d\alpha/dt)}_{\alpha ,i} = \ln{[A_{\alpha}f(\alpha)]}-\frac{E_{\alpha}}{RT_{\alpha ,i}} Parameters: None Returns: Tuple of arrays: E_Fr : numpy array containing the activation energy values obtained by the Friedman method. Fr_95e : numpy array containing the standard deviation of the. activation energies obtained by the Friedman method. Fr_b : numpy array containing the intersection values obtained by the linear regression in the Friedman method. ---------------------------------------------------------------------------------- Reference: <NAME>, Kinetics of thermal degradation of char-forming plastics from thermogravimetry. application to a phenolic plastic, in: Journal of polymer science part C: polymer symposia, Vol. 6, Wiley Online Library, 1964, pp. 183–195. """ E_Fr = [] E_Fr_err = [] Fr_b = [] diffIsoDF = self.diffIsoDF TempIsoDF = self.TempIsoDF print(f'Friedman method: Computing activation energies...') for i in range(0,diffIsoDF.shape[0]): #Linear regression over all the conversion values in the isoconversional Dataframes y = np.log(diffIsoDF.iloc[i].values) #log(da_dt) x = 1/(TempIsoDF.iloc[i].values) #1/T LR = linregress(x,y) E_a_i = -(self.R)*(LR.slope) #Activation Energy E_Fr.append(E_a_i) Fr_b.append(LR.intercept) #ln[Af(a)] error = -(self.R)*(LR.stderr) #Standard deviation of the activation energy E_Fr_err.append(error) E_Fr = np.array(E_Fr) Fr_e = np.array(E_Fr_err) Fr_b = np.array(Fr_b) #Tuple with the results: Activation energy, Standard deviation and ln[Af(a)] self.E_Fr = (E_Fr, Fr_e, Fr_b) print(f'Done.') return self.E_Fr #----------------------------------------------------------------------------------------------------------- def OFW(self): """ Computes the Activation Energy based on the Osawa-Flynn-Wall (OFW) treatment. \ln{\beta_{i}} = cnt - 1.052\frac{E_{\alpha}}{RT_{\alpha ,i}} Parameters: None Returns : Tuple of arrays: E_OFW : numpy array containing the activation energy values obtained by the Ozawa_Flynn-Wall method OFW_s : numpy array containing the standard deviation of the activation energy values obtained by the linear regression in the Ozawa-Flynn-Wall method ----------------------------------------------------------------------------------------------- References: <NAME>, A new method of analyzing thermogravimetric data, Bulletin of the chemical society of Japan 38 (11) (1965) 1881–1886. 
<NAME>, <NAME>, A quick, direct method for the determination of activation energy from thermogravimetric data, Journal of Polymer Science Part B: Polymer Letters 4 (5) (1966) 323–328. """ logB = self.logB E_OFW = [] E_OFW_err = [] TempIsoDF = self.TempIsoDF print(f'Ozawa-Flynn-Wall method: Computing activation energies...') for i in range(TempIsoDF.shape[0]): #Linear regression over all the conversion values in the isoconversional Dataframes y = (logB) #log(\beta) x = 1/(TempIsoDF.iloc[i].values) #1/T LR = linregress(x,y) E_a_i = -(self.R/1.052)*(LR.slope) #Activation energy error = -(self.R/1.052)*(LR.stderr) #Standard deviation of the activation energy E_OFW_err.append(error) E_OFW.append(E_a_i) E_OFW = np.array(E_OFW) OFW_s = np.array(E_OFW_err) #Tuple with the results: Activation energy, Standard deviation self.E_OFW = (E_OFW, OFW_s) print(f'Done.') return self.E_OFW #----------------------------------------------------------------------------------------------------------- def KAS(self): """ Computes the Activation Energy based on the Kissinger-Akahira-Sunose (KAS) treatment. \ln{\frac{\beta_{i}}{T^{2}_{\alpha ,i}} = cnt - \frac{E_{\alpha}}{RT_{\alpha ,i}} Parameters: None Returns : Tuple of arrays: E_KAS : numpy array containing the activation energy values obtained by the Kissinger-Akahra-Sunose method. KAS_s : numpy array containing the standard deviation of the activation energy values obtained by the linear regression in the Kissinger-Akahra-Sunose method. --------------------------------------------------------------------------------------- Reference: <NAME>, Reaction kinetics in differential thermal analysis, Analytical chemistry 29 (11) (1957) 1702–1706. """ logB = self.logB E_KAS = [] E_KAS_err = [] TempIsoDF = self.TempIsoDF print(f'Kissinger-Akahira-Sunose method: Computing activation energies...') for i in range(TempIsoDF.shape[0]): #Linear regression over all the conversion values in the isoconversional Dataframes y = (logB)- np.log((TempIsoDF.iloc[i].values)**1.92) #log[1/(T**1.92)] x = 1/(TempIsoDF.iloc[i].values) #1/T LR = linregress(x,y) E_a_i = -(self.R)*(LR.slope) #Activation energy error = -(self.R)*(LR.stderr) #Standard deviation of the activation energy E_KAS_err.append(error) E_KAS.append(E_a_i) E_KAS = np.array(E_KAS) KAS_s = np.array(E_KAS_err) #Tuple with the results: Activation energy, Standard deviation self.E_KAS = (E_KAS, KAS_s) print(f'Done.') return self.E_KAS #----------------------------------------------------------------------------------------------------------- def I_Temp(self, E, row_i, col_i, method): """ Temperature integral for the Vyazovkin method: \int_{T0}^{T} exp[E_{alpha}/RT]dT Parameters: E : Activation energy value in kJ/mol to compute the integral row_i : DataFrame index value associated to the conversion value of the computation. col_i : DataFrame column associated to the heating rate of the computation method : Method to compute the integral temperature. The available methods are: 'senum-yang' for the Senum-Yang approximation, 'trapezoid' for the the trapezoid rule of quadrature, 'simpson' for the simpson rule and 'quad' for using a technique from the Fortran library QUADPACK implemented in the scipy.integrate subpackage. Returns: Float. Result of the division of the integral value by the heating rate. """ TempIsoDF = self.TempIsoDF Beta = self.Beta #Heating rate for thee computation B = Beta[col_i] #Initial experimental temperature. 
Lower limit in the temperature integral T0 = self.T0[col_i] #Upper limit in the temperature integral T = TempIsoDF[TempIsoDF.columns[col_i]][TempIsoDF.index.values[row_i]] #Value of the Arrhenius exponential for the temperature T0 and the energy E y0 = np.exp(-E/(self.R*(T0))) #Value of the Arrhenius exponential for the temperature T and the energy E y = np.exp(-E/(self.R*(T))) #Senum-Yang approximation def senum_yang(E): x = E/(self.R*T) num = (x**3) + (18*(x**2)) + (88*x) + (96) den = (x**4) + (20*(x**3)) + (120*(x**2)) +(240*x) +(120) s_y = ((np.exp(-x))/x)*(num/den) return (E/self.R)*s_y if method == 'trapezoid': I = integrate.trapezoid(y=[y0,y],x=[T0,T]) #Division of the integral by the heating rate to get the factor $I(E,T)/B$ I_B = I/B return I_B elif method == 'senum-yang': I = senum_yang(E) #Division of the integral by the heating rate to get the factor $I(E,T)/B$ I_B = I/B return I_B elif method == 'simpson': I = integrate.simpson(y=[y0,y],x=[T0,T]) #Division of the integral by the heating rate to get the factor $I(E,T)/B$ I_B = I/B return I_B elif method == 'quad': def Temp_int(T,E): return np.exp(-E/(self. R*(T))) I = integrate.quad(Temp_int,T0,T,args=(E))[0] #Division of the integral by the heating rate to get the factor $I(E,T)/B$ I_B = I/B return I_B else: raise ValueError('method not recognized') #----------------------------------------------------------------------------------------------------------- def omega(self,E,row,method): """ Calculates the function to minimize for the Vyazovkin method: \Omega(Ea) = \sum_{i}^{n}\sum_{j}^{n-1}{[B_{j}{I(E,T_{i})]}/[B_{i}{I(E,T_{j})}]} Parameters: E : The activation energy value used to calculate the value of omega. row : index value for the row of conversion in the pandas DataFrame containing the isoconversional temperatures. method : Method to compute the integral temperature. The available methods are: 'senum-yang' for the Senum-Yang approximation, 'trapezoid' for the the trapezoid rule of numerical integration, 'simpson' for the simpson ruleand 'quad' for using a technique from the Fortran library QUADPACK implemented in the scipy.integrate subpackage. Returns: O : Float. Value of the omega function for the given E. """ Beta = self.Beta omega_i = [] method = method #Array from a comprehension list of factors of \Omega(Ea) p = np.array([self.I_Temp(E,row,i, method=method) for i in range(len(Beta))]) #Double sum for j in range(len(Beta)): y = p[j]*((np.sum(1/(p)))-(1/p[j])) omega_i.append(y) return np.sum((omega_i)) #----------------------------------------------------------------------------------------------------------- def visualize_omega(self,row,bounds=(1,300),N=1000,method = 'senum-yang'): """ Method to visualize omega function: Parameters: row : Int object. Implicit index for the row of conversion in the pandas DataFrame containing the isoconversional temperatures. bounds : Tuple object containing the lower and upper limit values for E, to evaluate omega. N : Int. Number of points in the E array for the plot. method : Method to evaluate the temperature integral. The available methods are: 'senum-yang' for the Senum-Yang approximation, 'trapezoid' for the the trapezoid rule of numerical integration, 'simpson' for the simpson ruleand 'quad' for using a technique from the Fortran library QUADPACK implemented in the scipy.integrate subpackage. Returns: A matplotlib figure plotting omega vs E. 
""" #Temperature DataFrame IsoDF = self.TempIsoDF #Quadrature method method = method #Activation energy (independent variable) array E = np.linspace(bounds[0], bounds[1], N) #Evaluation of \Omega(E) O = np.array([float(self.omega(E[i],row,method)) for i in range(len(E))]) #Plot settings plt.style.use('seaborn-whitegrid') plt.plot(E,O,color='teal',label=r'$\alpha$ = '+str(np.round(IsoDF.index[row],decimals=3))) plt.ylabel(r'$\Omega\left(E_{\alpha}\right)$') plt.xlabel(r'$E_{\alpha}$') plt.legend() plt.grid(True) return plt.show() #----------------------------------------------------------------------------------------------------------- def variance_Vy(self, E,row_i, method): """ Calculates the variance of the activation energy E obtained with the Vyazovkin treatment. The variance is computed as: S^{2}(E) = {1}/{n(n-1)}\sum_{i}^{n}\sum_{j}^{n-1}{[{J(E,T_{i})]}/[{J(E,T_{j})}]-1}^{2} Parameters: E : The activation energy value used to calculate the value of omega. row_i : index value for the row of conversion in the pandas DataFrame containing the isoconversional temperatures. method : Method to compute the integral temperature. The available methods are: 'senum-yang' for the Senum-Yang approximation, 'trapezoid' for the the trapezoid rule of numerical integration, 'simpson' for the simpson rule and 'quad' for using a technique from the Fortran library QUADPACK implemented in the scipy.integrate subpackage. Returns: Float object. Value of the variance associated to a given E. -------------------------------------------------------------------------------------------- Reference: <NAME>., & <NAME>. (2000). Estimating realistic confidence intervals for the activation energy determined from thermoanalytical measurements. Analytical chemistry, 72(14), 3171-3175. """ #Heating rates array Beta = self.Beta #Temperature Dataframes TempIsoDF = self.TempIsoDF #Total number of addends N = len(Beta)*(len(Beta)-1) #Temperature integrals into a list comprehrension I = np.array([self.I_Temp(E, row_i, i, method) for i in range(len(Beta))]) #Each value to be compared with one (s-1) to compute the variance s = np.array([I[i]/I for i in range(len(I))]) return np.sum((s-1)**2)/N #----------------------------------------------------------------------------------------------------------- def psi_Vy(self, E, row_i, method): """ Calculates the F distribution to minimize for the Vyazovkin method. The distribution is computed as: \Psi(E) = S^{2}(E)/S^{2}_{min} Parameters: E : The activation energy value used to calculate the value of omega. row_i : index value for the row of conversion in the pandas DataFrame containing the isoconversional temperatures. bounds : Tuple object containing the lower and upper limit values for E, to evaluate the variance. method : Method to compute the integral temperature. The available methods are: 'senum-yang' for the Senum-Yang approximation, 'trapezoid' for the the trapezoid rule of numerical integration, 'simpson' for the simpson rule and 'quad' for using a technique from the Fortran library QUADPACK implemented in the scipy.integrate subpackage. Returns: error : Float. Value of the error calculated for a 95% confidence. -------------------------------------------------------------------------------------------- Reference: <NAME>., & <NAME>. (2000). Estimating realistic confidence intervals for the activation energy determined from thermoanalytical measurements. Analytical chemistry, 72(14), 3171-3175. 
""" Beta = self.Beta TempIsoDF = self.TempIsoDF #F values for a 95% confidence interval for (n-1) and (n-1) degreees of freedom F = [161.4, 19.00, 9.277, 6.388, 5.050, 4.284, 3.787, 3.438, 3.179,2.978,2.687] #F value for the n-1 degrees of freedom. #Subtracts 1 to n (len(B)) because of degrees of freedom and 1 because of python indexation f = F[len(Beta)-1-1] #quadrature method from parameter "method" method = method #Psi evaluation interval E_p = np.linspace(1,E+50,50) #'True' value of the activation energy in kJ/mol for a given conversion (row_i) E_min = E #Variance of the 'True' activation energy s_min = self.variance_Vy(E_min, row_i, method) #Variance of the activation energy array E_p s = np.array([self.variance_Vy(E_p[i], row_i, method) for i in range(len(E_p))]) #Psi function moved towards negative values (f-1) in order #to set the confidence limits such that \psy = 0 for those values Psy_to_cero = (s/s_min)-f-1 #Interpolation function of \Psy vs E to find the roots #which are the confidence limits inter_func = interp1d(E_p, Psy_to_cero, kind='cubic', bounds_error=False, fill_value="extrapolate") #Finding the confidence limits zeros = np.array([fsolve(inter_func, E-150)[0], fsolve(inter_func, E+150)[0]]) error = np.mean(np.array([abs(E-zeros[0]), abs(E-zeros[1])])) return error #----------------------------------------------------------------------------------------------------------- def error_Vy(self,E, method): """ Method to calculate the distribution to minimize for the Vyazovkin method. Parameters: bounds : Tuple object containing the lower and upper limit values for E, to evaluate omega. method : Method to compute the integral temperature. The available methods are: 'senum-yang' for the Senum-Yang approximation, 'trapezoid' for the the trapezoid rule of numerical integration, 'simpson' for the Simpson rule and 'quad' for using a technique from the Fortran library QUADPACK implemented in the scipy.integrate subpackage. Returns: error_Vy : Array of error values associated to the array of activation energies obtained by the Vyazovkin method. """ error_Vy = np.array([self.psi_Vy(E[i], i, method) for i in range(len(E))]) return error_Vy #----------------------------------------------------------------------------------------------------------- def Vy(self, bounds, method='senum-yang'): """ Method to compute the Activation Energy based on the Vyazovkin treatment. \Omega(E_{\alpha})= min[ sum_{i}^{n}\sum_{j}^{n-1}[J(E,T_{i})]/[J(E,T_{j})] ] Parameters: bounds : Tuple object containing the lower and upper limit values for E, to evaluate omega. method : Method to evaluate the temperature integral. The available methods are: 'senum-yang' for the Senum-Yang approximation, 'trapezoid' for the the trapezoid rule of numerical integration, 'simpson' for the Simpson rule and 'quad' for using a technique from the Fortran library QUADPACK implemented in the scipy.integrate subpackage. Returns : Tuple of arrays: E_Vy : numpy array containing the activation energy values obtained by the first Vyazovkin method. error : numpy array containing the error associated to the activation energy within a 95% confidence interval. ------------------------------------------------------------------------------------------------ Reference: <NAME>, <NAME>, Linear and nonlinear procedures in isoconversional computations of the activation energy of nonisothermal reactions in solids, Journal of Chemical Information and Computer Sciences 36 (1) (1996) 42–45. 
""" E_Vy = [] Beta = self.Beta IsoDF = self.TempIsoDF print(f'Vyazovkin method: Computing activation energies...') for k in range(len(IsoDF.index)): E_Vy.append(minimize_scalar(self.omega, args=(k,method),bounds=bounds, method = 'bounded').x) E_Vy = np.array(E_Vy) error = self.error_Vy(E_Vy,method) self.E_Vy = (E_Vy, error) print(f'Done.') return self.E_Vy #----------------------------------------------------------------------------------------------------------- def J_Temp(self, E, inf, sup): """ Temperature integral for the Advanced Vyazovkin Treatment. Prameters: E : Float object. Value for the activation energy to evaluate the integral inf : Inferior integral evaluation limit. sup : Superior integral evaluation limit. Returns: J : Float. Value of the integral obtained by an analytic expression. Based on a linear heating rate. """ a = E/(self.R) b = inf c = sup #Computation of the intagral defined in terms of the exponential integral #calculated with scipy.special J = a*(sp.expi(-a/c)-sp.expi(-a/b)) + c*np.exp(-a/c) - b*np.exp(-a/b) return J #----------------------------------------------------------------------------------------------------------- def J_time(self, E, row_i, col_i, method = 'trapezoid'): """ Time integral for the Advanced Vyazovkin Treatment. Considering a linear heating rate. Prameters: E : Float object. Value for the activation energy to evaluate the integral row_i : Index value for the row of conversion in the pandas DataFrame containing the isoconversional times for evenly spaced conversion values. col_i : Index value for the column of heating rate in the pandas DataFrame containing the isoconversional times for evenly spaced conversion values. method : Numerical integration method. Can be 'trapezoid', 'simpson' or 'quad'. The method corresponds to those implemented in the scipy.integrate subpackage. Returns: J_t : Float. Value of the integral obtained by a numerical integration method. """ timeAdvIsoDF = self.timeAdvIsoDF #Heating rate for the computation B = self.Beta[col_i] #Initial experimental temperature T0 = self.T0[col_i] #Time values associated to the lower limit of the #Temperature range set with DataExtraction.Conversion t0 = timeAdvIsoDF[timeAdvIsoDF.columns[col_i]][timeAdvIsoDF.index.values[row_i]] #Time associated to the i-th conversion value t = timeAdvIsoDF[timeAdvIsoDF.columns[col_i]][timeAdvIsoDF.index.values[row_i+1]] #Value for the Arrhenius exponential for the time t0 and energy E y0 = np.exp(-E/(self.R*(T0+(B*t0)))) #Value for the Arrhenius exponential for the time t and energy E y = np.exp(-E/(self.R*(T0+(B*t)))) if method == 'trapezoid': J_t = integrate.trapezoid(y=[y0,y],x=[t0,t]) return J_t elif method == 'simpson': J_t = integrate.simpson(y=[y0,y],x=[t0,t]) return J_t elif method == 'quad': def time_int(t,T0,B,E): return np.exp(-E/(self.R*(T0+(B*t)))) J_t = integrate.quad(time_int,t0,t,args=(T0,B,E))[0] return J_t else: raise ValueError('method not recognized') #----------------------------------------------------------------------------------------------------------- def adv_omega(self,E, row, var = 'time', method='trapezoid'): """ Function to minimize according to the advanced Vyazovkin treatment: \Omega(Ea) = \sum_{i}^{n}\sum_{j}^{n-1}{[{J(E,T(t_{i}))]}/[B_{i}{J(E,T(t_{j}))}]} Parameters: E : Float object. Value for the activation energy to evaluate the integral row : Index value for the row of conversion in the pandas DataFrame containing the isoconversional times for evenly spaced conversion values. 
var : The variable to perform the integral with, it can be either 'time' or 'Temperature' method : Numerical integration method. Can be 'trapezoid', 'simpson' or 'quad'. The method correspond to those implemented in the scipy.integrate subpackage. Returns: O : Float. Value of the advanced omega function for a given E. """ TempAdvIsoDF = self.TempAdvIsoDF timeAdvIsoDF = self.timeAdvIsoDF Beta = self.Beta j = row #Array from a comprehension list of factors of \Omega(Ea) #The variable of integration depends on the parameter var if var == 'Temperature': I_x = np.array([self.J_Temp(E, TempAdvIsoDF[TempAdvIsoDF.columns[i]][TempAdvIsoDF.index[j]], TempAdvIsoDF[TempAdvIsoDF.columns[i]][TempAdvIsoDF.index[j+1]]) for i in range(len(TempAdvIsoDF.columns))]) #Dividing by beta to get the factor $I(E,T)/B$ I_B = I_x/Beta #Double sum omega_i = np.array([I_B[k]*((np.sum(1/(I_B)))-(1/I_B[k])) for k in range(len(Beta))]) O = np.array(np.sum((omega_i))) return O elif var == 'time': I_B = np.array([self.J_time(E, row, i, method) for i in range(len(timeAdvIsoDF.columns))]) #Double sum omega_i = np.array([I_B[k]*((np.sum(1/(I_B)))-(1/I_B[k])) for k in range(len(Beta))]) O = np.array(np.sum((omega_i))) return O #----------------------------------------------------------------------------------------------------------- def visualize_advomega(self,row,var='time',bounds=(1,300),N=1000, method='trapezoid'): """ Method to visualize adv_omega function. Parameters: row : Index value for the row of conversion in the pandas DataFrame containing the isoconversional times or temperatures. var : The variable to perform the integral with, it can be either 'time' or 'Temperature'. Default 'time'. bounds : Tuple object containing the lower limit and the upper limit values of E, for evaluating adv_omega. Default (1,300). N : Int. Number of points in the E array for the plot. Default 1000. method : Numerical integration method. Can be 'trapezoid', 'simpson' or 'quad'. The method correspond to those implemented in the scipy.integrate subpackage. Default 'trapezoid'. Returns: A matplotlib plot of adv_omega vs E """ #Temperature DataFrame TempAdvIsoDF = self.TempAdvIsoDF #time DataFrame timeAdvIsoDF = self.timeAdvIsoDF #Heating Rates Beta = self.Beta #Activation energy (independent variable) array E = np.linspace(bounds[0], bounds[1], N) #Evaluation of \Omega(E) O = np.array([float(self.adv_omega(E[i],row,var,method)) for i in range(len(E))]) plt.style.use('seaborn-whitegrid') plt.plot(E,O,color='teal',label=r'$\alpha$ = '+str(np.round(timeAdvIsoDF.index[row],decimals=3))) plt.ylabel(r'$\Omega\left(E_{\alpha}\right)$') plt.xlabel(r'$E_{\alpha}$') plt.legend() plt.grid(True) return plt.show() #----------------------------------------------------------------------------------------------------------- def variance_aVy(self, E, row_i, var = 'time', method = 'trapezoid'): """ Method to calculate the variance of the activation energy E obtained with the Vyazovkin treatment. The variance is computed as: S^{2}(E) = {1}/{n(n-1)}\sum_{i}^{n}\sum_{j}^{n-1}{[{J(E,T(t_{i}))]}/[{J(E,T(t_{j}))}]-1}^{2} Parameters: E : The activation energy value used to calculate the value of omega. row_i : index value for the row of conversion in the pandas DataFrame containing the isoconversional temperatures. var : The variable to perform the integral with, it can be either 'time' or 'Temperature' method : Numerical integration method. Can be 'trapezoid', 'simpson' or 'quad'. The method correspond to those implemented in the scipy.integrate subpackage. 
Default 'trapezoid'. Returns: Float object. Value of the variance associated to a given E. -------------------------------------------------------------------------------------------- Reference: <NAME>., & <NAME>. (2000). Estimating realistic confidence intervals for the activation energy determined from thermoanalytical measurements. Analytical chemistry, 72(14), 3171-3175. """ #Total number of addends N = len(self.Beta)*(len(self.Beta)-1) #Selection of the integral based on parameter "var" if var == 'time': #lower limit inf = self.timeAdvIsoDF.index.values[row_i] #upper limit sup = self.timeAdvIsoDF.index.values[row_i+1] #initial temperature T0 = self.T0 #time integrals into a list comprehension J = np.array([self.J_time(E, row_i, i, method) for i in range(len(self.Beta))]) #Each value to be compared with one (s-1) to compute the variance s = np.array([J[i]/J for i in range(len(J))]) return
np.sum((s-1)**2)
numpy.sum
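# A minimal, self-contained sketch of the isoconversional pattern described in the docstrings
# above: for a trial activation energy E, compute one time integral J per heating rate, combine
# the pairwise ratios into the omega function to be minimized, and score the spread of those
# ratios as the variance S^2(E) = 1/(n(n-1)) * sum over i != j of (J_i/J_j - 1)^2.
# This is only an illustration under assumed units and synthetic data; R, T0, betas and the
# linear heating schedules below are hypothetical and not taken from the source.
import numpy as np
from scipy import integrate
from scipy.optimize import minimize_scalar

R = 8.314e-3  # gas constant in kJ/(mol K); assumed units for this sketch

def J_time(E, t, T):
    # Time integral of the Arrhenius exponential exp(-E/(R*T(t))) over one experiment
    return integrate.trapezoid(np.exp(-E / (R * T)), t)

def omega(E, experiments):
    # Double sum of pairwise J ratios, as in the advanced Vyazovkin treatment
    J = np.array([J_time(E, t, T) for t, T in experiments])
    return float(np.sum([J[k] * (np.sum(1.0 / J) - 1.0 / J[k]) for k in range(len(J))]))

def variance(E, experiments):
    # S^2(E): mean squared deviation of the pairwise ratios J_i/J_j from one
    J = np.array([J_time(E, t, T) for t, T in experiments])
    n = len(J)
    s = J[:, None] / J[None, :]          # diagonal terms equal one and contribute zero
    return np.sum((s - 1.0) ** 2) / (n * (n - 1))

# Hypothetical linear heating schedules T(t) = T0 + B*t for three heating rates
T0, betas = 300.0, [5.0, 10.0, 20.0]
t = np.linspace(0.0, 10.0, 200)
experiments = [(t, T0 + B * t) for B in betas]

E_best = minimize_scalar(omega, args=(experiments,), bounds=(1, 300), method='bounded').x
print(E_best, variance(E_best, experiments))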
# <NAME> import os import sys import numpy as np from get_dataset import get_scan, scan_pading, save_seg_imgs from keras.models import model_from_json def predict(model, scans): section_size = scans.shape[-1] X, _ = scan_pading(scans, None, section_size = 128) pad_size = X.shape[-1]-section_size # For splitting: splitted_scans = [] for i in range(0, X.shape[-1]-127, 128): splitted_scans.append(X[:,:,i:i+128]) X = np.array(splitted_scans, dtype='float32') X = ((X-np.min(X))/(np.max(X)-
np.min(X)
numpy.min
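# A short sketch of the two numpy patterns used in the prediction snippet above: zero-padding
# and splitting a scan volume into fixed-size sections along the last axis, then min-max scaling
# the result to [0, 1] with np.min / np.max. The array shape and the small epsilon in the
# denominator are assumptions for this illustration, not taken from the source.
import numpy as np

def split_and_normalize(volume, section=128):
    # Zero-pad the last axis up to a multiple of `section`
    pad = (-volume.shape[-1]) % section
    padded = np.pad(volume, [(0, 0), (0, 0), (0, pad)])
    # Split into consecutive chunks of `section` slices
    chunks = np.stack([padded[:, :, i:i + section]
                       for i in range(0, padded.shape[-1], section)]).astype('float32')
    # Min-max scaling to [0, 1]; the epsilon guards against a constant volume
    return (chunks - np.min(chunks)) / (np.max(chunks) - np.min(chunks) + 1e-8)

scans = np.random.rand(64, 64, 300)              # hypothetical scan volume (H, W, n_slices)
X = split_and_normalize(scans)
print(X.shape, float(X.min()), float(X.max()))   # (3, 64, 64, 128), ~0.0, ~1.0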
import numpy as np import torch from torch.utils.data import DataLoader from torch.utils.data.dataloader import default_collate from torch.utils.data.sampler import SubsetRandomSampler from transformers import AutoTokenizer class AlignCollate: # if use phobert embedding def __init__(self, max_length=50): self.tokenizer = AutoTokenizer.from_pretrained('vinai/phobert-base') self.max_length = max_length def __call__(self, batch): new_img = [] new_label = [] new_len = [] result = [] for data in batch: label = data['label'] input_ids = [1] * (self.max_length + 2) ids = self.tokenizer.encode(label) input_ids[:len(ids)] = ids input_ids = torch.tensor(input_ids) new_img.append(data['img']) new_label.append(input_ids) new_len.append(data['len']) result.append(torch.stack(new_img)) result.append(torch.stack(new_label)) result.append(torch.tensor(new_len)) return result class BaseDataLoader(DataLoader): """ Base class for all data loaders """ def __init__(self, dataset, batch_size, shuffle, validation_split, num_workers, collate_fn=default_collate): self.validation_split = validation_split self.shuffle = shuffle self.batch_idx = 0 self.n_samples = len(dataset) self.sampler, self.valid_sampler = self._split_sampler(self.validation_split) self.init_kwargs = { 'dataset': dataset, 'batch_size': batch_size, 'shuffle': self.shuffle, 'num_workers': num_workers, 'collate_fn': default_collate } super().__init__(sampler=self.sampler, **self.init_kwargs) def _split_sampler(self, split): if split == 0.0: return None, None idx_full = np.arange(self.n_samples) np.random.seed(0)
np.random.shuffle(idx_full)
numpy.random.shuffle
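# A minimal sketch of the sampler-splitting pattern used by _split_sampler above: shuffle an
# index array with a fixed seed, carve off a validation fraction, and wrap both halves in
# SubsetRandomSampler. The toy dataset, batch size and split fraction are hypothetical values
# chosen only for the illustration.
import numpy as np
import torch
from torch.utils.data import DataLoader, TensorDataset, SubsetRandomSampler

def make_samplers(n_samples, validation_split, seed=0):
    idx = np.arange(n_samples)
    np.random.seed(seed)           # fixed seed keeps the split reproducible across runs
    np.random.shuffle(idx)
    n_valid = int(n_samples * validation_split)
    valid_idx, train_idx = idx[:n_valid], idx[n_valid:]
    return SubsetRandomSampler(train_idx), SubsetRandomSampler(valid_idx)

dataset = TensorDataset(torch.arange(100, dtype=torch.float32).unsqueeze(1))
train_sampler, valid_sampler = make_samplers(len(dataset), validation_split=0.2)
train_loader = DataLoader(dataset, batch_size=16, sampler=train_sampler)
valid_loader = DataLoader(dataset, batch_size=16, sampler=valid_sampler)
print(len(train_sampler), len(valid_sampler))    # 80 20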
import numpy as np from pynet.history import History from pynet.plotting.image import plot_losses, linear_reg_plots import matplotlib.pyplot as plt import matplotlib.ticker as ticker from json_config import CONFIG from pynet.utils import * from sklearn.linear_model import LinearRegression from sklearn.calibration import calibration_curve from sklearn.metrics import * from pynet.metrics import ECE_score, AUCE_score, get_binary_classification_metrics, get_regression_metrics from pynet.models.densenet import * from pynet.cca import CCAHook, svcca_distance from pynet.datasets.core import DataManager from pynet.transforms import * from tqdm import tqdm import pickle import seaborn from matplotlib.ticker import FixedLocator, FixedFormatter from scipy.special import expit seaborn.set_style("darkgrid") ## Plots the metrics during the optimization root = '/neurospin/psy_sbox/bd261576/checkpoints/regression_age_sex/Benchmark_IXI_HCP' nets = ['ResNet18', 'ResNet34', 'ResNet50', 'ResNeXt', 'DenseNet', 'ColeNet', 'VGG11', 'TinyDenseNet_Exp9', 'SFCN'] net_names = ['ResNet18', 'ResNet34', 'ResNet50', 'ResNeXt', 'DenseNet', 'tiny-VGG', 'VGG11', 'tiny-DenseNet', 'SFCN'] path_nets = ['ResNet/ResNet18', 'ResNet/ResNet34', 'ResNet/ResNet50', 'ResNeXt', 'DenseNet', 'ColeNet', 'VGG/VGG11', 'TinyDenseNet', 'SFCN'] problem = "Age" files = ['Train_{net}_{pb}_{db}_%s_epoch_{e}.pkl'.format(net=n, pb=problem, db='HCP_IXI', e=299) for n in nets] val_files = ['Validation_{net}_{pb}_{db}_%s_epoch_{e}.pkl'.format(net=n, pb=problem, db='HCP_IXI', e=299) for n in nets] test_files = ['Test_{net}_{pb}_{db}_fold%s_epoch{e}.pkl'.format(net=n, pb=problem, db='HCP_IXI', e=299) for n in nets] h = [History.load(os.path.join(root, net, 'N_500', problem, file),folds=range(5)) for (net, file) in zip(path_nets, files)] h_val = [History.load(os.path.join(root, net, 'N_500', problem, file),folds=range(5)) for (net, file) in zip(path_nets, val_files)] tests = [get_pickle_obj(os.path.join(root, net, 'N_500', problem, file)%0) for (net, file) in zip(path_nets, test_files)] metrics = None#['loss_prop'] plot_losses(h, h_val, patterns_to_del=['validation_', ' on validation set'], metrics=metrics, experiment_names=net_names, #titles={'loss': 'Age prediction'}, ylabels={'loss': 'MAE'}, ylim={'loss': [0, 20]}, figsize=(15,15), same_plot=True, saving_path="age_N_500_cnn_convergence.png", ) fig, axes = plt.subplots(3, 3, figsize=(15, 15)) for i, net in enumerate(nets): linear_reg_plots(np.array(tests[i]['y_pred']).reshape(-1, 1), np.array(tests[i]['y_true']).reshape(-1,1), axes=axes[i%3, i//3], title=net_names[i]) plt.tight_layout() plt.savefig('linear_reg_age_benchmark.png') ## Visualization of random MRI pictures with both CAT12 and QUASI-RAW preproc from nibabel import Nifti1Image from nilearn.plotting import plot_anat import pandas as pd data_quasi_raw = np.load(CONFIG['quasi_raw']['input_path'], mmap_mode='r') df_quasi_raw = pd.read_csv(CONFIG['quasi_raw']['metadata_path'], sep='\t') data_cat12 = np.load(CONFIG['cat12']['input_path'], mmap_mode='r') df_cat12 = pd.read_csv(CONFIG['cat12']['metadata_path'], sep='\t') img_quasi_raw = data_quasi_raw[0,0] cat12_index = np.where(df_cat12.participant_id.eq(str(df_quasi_raw.participant_id[0])))[0][0] img_cat12 = data_cat12[cat12_index,0] # get the same picture img_names = ['Quasi-Raw', 'VBM'] fig, axes = plt.subplots(2, 3, figsize=(15, 10)) for i, (img, name) in enumerate(zip([img_quasi_raw, img_cat12], img_names)): current_image = Nifti1Image((img-img.mean())/img.std(), np.eye(4)) for j, direction 
in enumerate(['x', 'y', 'z']): plot_anat(current_image, cut_coords=[50], display_mode=direction, axes=axes[i][j], annotate=True, draw_cross=False, black_bg='auto', vmax=3 if i==0 else 5, vmin=0) if j == 1: axes[i][j].set_title(name, fontweight='bold') axes[-1, -1].axis('off') plt.subplots_adjust(wspace=0) plt.savefig('cat12_quasi_raw_examples.png') ## Plots the convergence curves for all the networks: nb of iter steps until convergence function of ## samples size root = '/neurospin/psy_sbox/bd261576/checkpoints/regression_age_sex/Benchmark_IXI_HCP' nets = ['ResNet34', 'DenseNet', 'ColeNet', 'TinyDenseNet_Exp9'] net_names = ['ResNet34', 'DenseNet', 'tiny-VGG', 'tiny-DenseNet'] path_nets = ['ResNet/ResNet34', 'DenseNet', 'ColeNet', 'TinyDenseNet'] pbs = ["Age", "Sex", "Dx"] dbs = ["HCP_IXI", "HCP_IXI", "SCZ_VIP"] metrics = {"quasi_raw": ['validation_loss', 'validation_loss', 'validation_loss'], "": ['validation_loss', 'validation_loss', 'validation_loss' ]} modes = ['Validation', 'Validation', 'Validation'] preprocessings = ["", "quasi_raw"] nb_folds = [[10, 10, 5, 5, 5, 3], [10, 10, 5, 5, 5, 3], [10, 10, 5]] epochs = [299, 299, 299] sliding_window_size = 20 thresholds = {"quasi_raw": [0.65, 0.04, 0.05], "": [0.3, 0.06, 0.03]} N = {"Age": [100, 300, 500, 1000, 1600, 10000], "Sex": [100, 300, 500, 1000, 1600, 10000], "Dx": [100, 300, 500]} def get_stability_errors(loss, type='consecutive'): import pandas as pd if type == 'consecutive': return np.convolve(np.abs(loss[1:]-loss[:-1]), 1./sliding_window_size*np.ones(sliding_window_size, dtype=int), 'valid') if type == "std": s = pd.Series(loss).rolling(window=sliding_window_size).std().values[sliding_window_size:] return s def get_stable_step(errors, threshold, offset=0): step = len(errors) - 1 for i, err in enumerate(errors): if err <= threshold and np.all(errors[i:]<=threshold): return i + offset return step + offset conv_fig, conv_axes = plt.subplots(1, len(pbs), figsize=(len(pbs)*5, 5)) for preproc in preprocessings: for i, (pb, db, nb_f, epoch, metric, threshold, mode) in enumerate(zip(pbs, dbs, nb_folds, epochs, metrics[preproc], thresholds[preproc], modes)): hyperparams = len(N[pb])*[''] # if preproc == 'quasi_raw' and pb == "Age": # hyperparams = ["_step_size_scheduler_10" if n < 1000 else "" for n in N[pb]] h_val = [[History.load(os.path.join(root, preproc, path_net, 'N_%s'%(str(n) if n<10**4 else '10K'), pb, 'Validation_{net}_{pb}_{db}{hyper}_{fold}_epoch_{epoch}.pkl'. format(net=net, pb=pb, db=db if n<10**4 else 'Big_Healthy', hyper=hyperparams[l],fold=0, epoch=epoch))) for (path_net, net) in zip(path_nets, nets)] for l, n in enumerate(N[pb])] h = [[History.load(os.path.join(root, preproc, path_net, 'N_%s'%(str(n) if n<10**4 else '10K'), pb, 'Train_{net}_{pb}_{db}{hyper}_{fold}_epoch_{epoch}.pkl'. format(net=net, pb=pb, db=db if n<10**4 else 'Big_Healthy', hyper=hyperparams[l],fold=0, epoch=epoch))) for (path_net, net) in zip(path_nets, nets)] for l, n in enumerate(N[pb])] losses = [[[np.array(History.load(os.path.join(root, preproc, path_net, 'N_%s'%(str(n) if n<10**4 else '10K'), pb, '{mode}_{net}_{pb}_{db}{hyper}_{fold}_epoch_{epoch}.pkl'. format(mode=mode, net=net, pb=pb, db=db if n<10**4 else 'Big_Healthy', hyper=hyperparams[l],fold=f, epoch=epoch))). 
to_dict(patterns_to_del=' on validation set')[metric][-1]) for f in range(nb_f[l])] for l,n in enumerate(N[pb])] for (path_net, net) in zip(path_nets, nets)] sum_diff_errors = [[[get_stability_errors(val, 'std') for val in h_val_per_n] for h_val_per_n in h_val] for h_val in losses] nb_epochs_after_conv = [[[get_stable_step(errors, threshold, offset=sliding_window_size) for errors in sum_diff_errors_per_n] for sum_diff_errors_per_n in sum_diff_errors_per_net] for sum_diff_errors_per_net in sum_diff_errors] for l, net in enumerate(net_names): seaborn.lineplot(x=[n for i, n in enumerate(N[pb]) for _ in range(nb_f[i])], y=[e*n for epochs,n in zip(nb_epochs_after_conv[l], N[pb]) for e in epochs], marker='o', label=net, ax=conv_axes[i]) conv_axes[i].legend() conv_axes[i].set_xlabel('Number of training samples') conv_axes[i].set_title('%s Prediction'%pb.upper(), fontweight='bold') conv_axes[i].set_xticks(N[pb]) conv_axes[i].set_xticklabels(N[pb]) conv_axes[i].set_ylabel('# iterations until convergence') conv_axes[i].set_xscale('log') if pb == "Dx": for k, n in enumerate(N[pb]): fig, axes = plot_losses(h[k], h_val[k], patterns_to_del=['validation_', ' on validation set'], metrics=None, experiment_names=[name+ ' N=%i'%n for name in net_names], figsize=(15, 15), same_plot=True) for l, net in enumerate(nets): axes[l%len(axes), l//len(axes)].axvline(nb_epochs_after_conv[l][k][0], color='red', linestyle='--') conv_fig.tight_layout() conv_fig.savefig('%s_convergence_speed_networks.png'%preproc) ## Robustness plots fig, axes = plt.subplots(1, 3, figsize=(15, 5), squeeze=False) for k, pb in enumerate(['Age', 'Sex', 'Dx']): robustness_data = [get_pickle_obj( os.path.join(root, net, pb, 'Robustness_{net}_{pb}_{db}.pkl'.format(net=n, pb=pb, db=('SCZ_VIP' if pb=='Dx' else 'HCP_IXI')))) for net, n in zip(path_nets, nets)] for i, net in enumerate(net_names): std_noises = [std for std in robustness_data[i].keys() for _ in robustness_data[i][std]] if pb == 'Age': #score = [np.mean(np.abs(np.array(Y[0])-np.array(Y[1]))) for std in robustness_data[i] # for Y in robustness_data[i][std]] score = [LinearRegression().fit(np.array(Y[1]).reshape(-1, 1), np.array(Y[0]).reshape(-1, 1)). 
score(np.array(Y[1]).reshape(-1, 1), np.array(Y[0]).reshape(-1, 1)) for std in robustness_data[i] for Y in robustness_data[i][std]] elif pb in ['Sex', 'Dx']: score = [roc_auc_score(Y[1], np.array(Y[0])) for std in robustness_data[i] for Y in robustness_data[i][std]] seaborn.lineplot(x=std_noises, y=score, marker='x', label=net, ax=axes[0,k]) if pb in ['Sex', 'Dx']: axes[0,k].set_ylim([0.4, 1]) axes[0,k].set_xlabel('$\sigma$') axes[0,k].set_ylabel('$R^2$' if pb == 'Age' else 'AUC') axes[0,k].set_title('Robustness of various networks\n on {pb} Prediction problem'.format(pb=pb)) plt.savefig('robustness_curves_auc.png') plt.show() ## Losses plots of the benchmark problem = "Sex" files = ['Train_{net}_{pb}_{db}_%s_epoch_{e}.pkl'.format(net=n, pb=problem, db='HCP_IXI', e=299) for n in nets] val_files = ['Validation_{net}_{pb}_{db}_%s_epoch_{e}.pkl'.format(net=n, pb=problem, db='HCP_IXI', e=299) for n in nets] test_files = ['Test_{net}_{pb}_{db}_fold{fold}_epoch{e}.pkl'.format(net=n, pb=problem, db='HCP_IXI', fold=0, e=299) for n in nets] h = [History.load(os.path.join(root, net, 'N_500', problem, file),folds=range(5)) for (net, file) in zip(path_nets, files)] h_val = [History.load(os.path.join(root, net, 'N_500', problem, file),folds=range(5)) for (net, file) in zip(path_nets, val_files)] tests = [get_pickle_obj(os.path.join(root, net, 'N_500', problem, file)) for (net, file) in zip(path_nets, test_files)] metrics = ['roc_auc', 'balanced_accuracy'] plot_losses(h, h_val, patterns_to_del=['validation_', ' on validation set'], metrics=metrics, experiment_names=net_names, #titles={'roc_auc': 'Gender prediction', 'balanced_accuracy': 'Gender Prediction'}, ylabels={'roc_auc': 'AUC', 'balanced_accuracy': 'Balanced Accuracy'}, ylim={'roc_auc': [0, 1], 'balanced_accuracy': [0, 1]}, figsize=(15,15), same_plot=True, saving_path="sex_N_500_cnn_convergence.png") problem = "Dx" special_nets = ['ResNet34', 'DenseNet', 'ColeNet', 'TinyDenseNet_Exp9'] files = ['Train_{net}_{pb}_{db}_%s_epoch_{e}.pkl'.format(net=n, pb=problem, db='SCZ_VIP', e=99 if n not in special_nets else 100) for n in nets] val_files = ['Validation_{net}_{pb}_{db}_%s_epoch_{e}.pkl'.format(net=n, pb=problem, db='SCZ_VIP', e=99 if n not in special_nets else 100) for n in nets] test_files = ['Test_{net}_{pb}_{db}_fold%s_epoch{e}.pkl'.format(net=n, pb=problem, db='SCZ_VIP', e=99 if n not in special_nets else 100) for n in nets] h = [History.load(os.path.join(root, net, 'N_500', problem, file), folds=range(5)) for (net, file) in zip(path_nets, files)] h_val = [History.load(os.path.join(root, net, 'N_500', problem, file), folds=range(5)) for (net, file) in zip(path_nets, val_files)] metrics = ['roc_auc', 'balanced_accuracy'] plot_losses(h, h_val, patterns_to_del=['validation_', ' on validation set'], metrics=metrics, experiment_names=net_names, #titles={'roc_auc': 'Gender prediction', 'balanced_accuracy': 'Gender Prediction'}, ylabels={'roc_auc': 'AUC', 'balanced_accuracy': 'Balanced Accuracy'}, ylim={'roc_auc': [0, 1], 'balanced_accuracy': [0, 1]}, figsize=(15,15), same_plot=True, saving_path="dx_N_500_cnn_convergence.png") # delta_age as predictor of the clinical status from scipy.stats import ks_2samp test_densenet = [get_pickle_obj(os.path.join(root, 'DenseNet', 'Age', 'Test_DenseNet_Age_HCP_IXI_fold0_epoch99.pkl')), get_pickle_obj(os.path.join(root, 'DenseNet', 'Age', 'Test_DenseNet_Age_BSNIP_SCZ_fold0_epoch99.pkl'))] mask = [np.array(test_densenet[i]['y_true']) < 30 for i in range(2)] absolute_error_min_age = 
[np.abs(np.array(test_densenet[i]['y_pred'])-np.array(test_densenet[i]['y_true']))[mask[i]] for i in range(2)] absolute_error = [np.abs(np.array(test_densenet[i]['y_pred'])-np.array(test_densenet[i]['y_true'])) for i in range(2)] # Significant KS-test for population with age < 30 ks_test_min_age = ks_2samp(absolute_error_min_age[0], absolute_error_min_age[1]) # ... But not after ks_test = ks_2samp(absolute_error[0], absolute_error[1]) fig, axes = plt.subplots(2, 2, figsize=(10, 10), squeeze=False) seaborn.distplot(np.array(test_densenet[0]['y_pred'])[mask[0]], ax=axes[0,0], norm_hist=True, label='Predicted Age') seaborn.distplot(np.array(test_densenet[0]['y_true'])[mask[0]], ax=axes[0,0], norm_hist=True, label='True Age') seaborn.distplot(np.array(test_densenet[1]['y_pred'])[mask[1]], ax=axes[0,1], norm_hist=True, label='Predicted Age') seaborn.distplot(np.array(test_densenet[1]['y_true'])[mask[1]], ax=axes[0,1], norm_hist=True, label='True Age') seaborn.distplot(np.array(test_densenet[1]['y_pred']), ax=axes[1,0], norm_hist=True, label='Predicted Age') seaborn.distplot(np.array(test_densenet[1]['y_true']), ax=axes[1,0], norm_hist=True, label='True Age') seaborn.distplot(np.array(test_densenet[1]['y_pred']), ax=axes[1,1], norm_hist=True, label='Predicted Age') seaborn.distplot(np.array(test_densenet[1]['y_true']), ax=axes[1,1], norm_hist=True, label='True Age') axes[0,0].set_title('Age Prediction on BSNIP for HC \nwith Age<30 (N=%i)'%mask[0].sum()) axes[0,1].set_title('Age Prediction on BSNIP for SCZ \nwith Age<30 (N=%i)'%mask[1].sum()) axes[1,0].set_title('Age Prediction on BSNIP for HC (N=200)') axes[1,1].set_title('Age Prediction on BSNIP for SCZ (N=194)') axes[0,0].legend() axes[0,1].legend() axes[1,0].legend() axes[1,1].legend() plt.savefig('delta_age_hist_analysis.png') fig, axes = plt.subplots(1, 2, figsize=(10, 5), squeeze=False) axes[0,0].boxplot(absolute_error_min_age, notch=True, labels=['HC (N=%i)'%mask[0].sum(), 'SCZ (N=%i)'%mask[1].sum()]) axes[0,0].text(1, 22, 'KS Statistic=%1.2e\np-value=%1.2e'% (ks_test_min_age.statistic, ks_test_min_age.pvalue), bbox=dict(facecolor='none', edgecolor='black', boxstyle='round,pad=1')) axes[0,0].set_title('Absolute Error for Age Prediction on BSNIP\n with Age<30 (N=%i)'%(mask[0].sum()+mask[1].sum())) axes[0,1].boxplot(absolute_error, notch=True, labels=['HC (N=200)', 'SCZ (N=194)']) axes[0,1].text(1, 22, 'KS Statistic=%1.2e\np-value=%1.2e'% (ks_test.statistic, ks_test.pvalue), bbox=dict(facecolor='none', edgecolor='black', boxstyle='round,pad=1')) axes[0,1].set_title('Absolute Error for Age Prediction on BSNIP (N=394)') plt.savefig('delta_age_err_analysis.png') ### Learning curves root = '/neurospin/psy_sbox/bd261576/checkpoints/regression_age_sex/Benchmark_IXI_HCP' net_names = ['ResNet34', 'DenseNet', 'tiny-VGG', 'tiny-DenseNet']#,'Linear Model'] nets = ['ResNet34', 'DenseNet', 'ColeNet', 'TinyDenseNet_Exp9']#, "LinearModel"] path_nets = ['ResNet/ResNet34', 'DenseNet', 'ColeNet', 'TinyDenseNet']#, 'LinearModel'] preprocessings = ['quasi_raw', ''] preproc_names = ['Quasi-Raw', 'VBM'] sites = ['intra', 'inter'] site_names = ['Test on Same Sites', 'Test on Different Sites'] pbs = ["Age", "Sex", "Dx"] metrics = ['MAE $\\downarrow$', 'AUC $\\uparrow$', 'AUC $\\uparrow$'] all_metrics = {s: {preproc: {pb: dict() for pb in pbs} for preproc in preprocessings} for s in sites} nb_training_samples = [[100, 300, 500, 1000, 1600, 10000],[100, 300, 500, 1000, 1600, 10000], [100, 300, 500]] nb_epochs = [299] nb_folds_10K = 3 X = [[[n for k in 
range(nb_folds_10K+(5-nb_folds_10K)*(n<10000)+5*(n<500))] for n in training] for i,training in enumerate(nb_training_samples)] all_results = {s:{preproc: {pb: {net if net!="LinearModel" else ('Ridge' if pb=='Age' else 'LogisticRegression'): [[[0 for k in range(nb_folds_10K+(5-nb_folds_10K)*(n<10000)+5*(n<500))] for n in nb_training_samples[n_pb]] for e in nb_epochs] for net in nets} for n_pb, pb in enumerate(pbs)} for preproc in preprocessings} for s in sites} seaborn.set_style('darkgrid') for s in sites: fig, axes = plt.subplots(len(nb_epochs)*len(preprocessings), len(pbs), sharex='col', squeeze=False, figsize=(4.5*len(pbs), 3.5*len(nb_epochs)*len(preprocessings))) for p, (preproc, preproc_name) in enumerate(zip(preprocessings, preproc_names)): for n_pb, pb in enumerate(pbs): db = "HCP_IXI" if pb != "Dx" else "SCZ_VIP" for (name, net, path_net) in zip(net_names, nets, path_nets): if net == 'LinearModel': net = "Ridge" if pb == "Age" else "LogisticRegression" if pb == "Age" and preproc == "quasi_raw": break for i, e in enumerate(nb_epochs): if name == "Linear Model": e = 100 for j, n in enumerate(nb_training_samples[n_pb]): for k in range(nb_folds_10K+(5-nb_folds_10K)*(n<10000)+5*(n<500)): hyperparams = "_step_size_scheduler_10_gamma_0.7" \ if (net == "TinyDenseNet_Exp9" and pb == "Age" and n > 100 and (n < 1000 if s=='inter' else n<=1000)) else "_step_size_scheduler_10" try: path = os.path.join(root, preproc, path_net, 'N_{n}', pb, 'Test_{s}{net}_{pb}_{db}{hyper}_fold{k}_epoch{e}.pkl') all_results[s][preproc][pb][net][i][j][k] = get_pickle_obj( path.format(s='CV_' if s == 'intra' else '', net=net, pb=pb, db=db if n != 10000 else "Big_Healthy", hyper=hyperparams, k=k, n=n if n < 10000 else '10K', e=e)) except FileNotFoundError: path = os.path.join(root, preproc, path_net, 'N_{n}', pb, 'Test_{s}{net}_{pb}_{db}_fold{k}_epoch{e}.pkl') all_results[s][preproc][pb][net][i][j][k] = get_pickle_obj( path.format(s='CV_' if s == 'intra' else '', net=net, pb=pb, db=db if n != 10000 else "Big_Healthy", k=k, n=n if n < 10000 else '10K', e=e)) if pb == 'Age': # Compute MAE all_metrics[s][preproc][pb][net] = [ [[np.mean(np.abs(np.array(all_results[s][preproc][pb][net][e][i][k]['y_true']).ravel() - np.array(all_results[s][preproc][pb][net][e][i][k]['y_pred']).ravel())) for k in range(nb_folds_10K + (5 - nb_folds_10K) * (n < 10000) + 5 * (n < 500))] for i, n in enumerate(nb_training_samples[n_pb])] for e in range(len(nb_epochs))] if pb == 'Sex' or pb == "Dx": # Compute AUC all_metrics[s][preproc][pb][net] = [[[roc_auc_score(all_results[s][preproc][pb][net][e][i][k]['y_true'], all_results[s][preproc][pb][net][e][i][k]['y_pred']) for k in range( nb_folds_10K + (5 - nb_folds_10K) * (n < 10000) + 5 * (n < 500))] for i, n in enumerate(nb_training_samples[n_pb])] for e in range(len(nb_epochs))] for k, epoch in enumerate(nb_epochs): ax = seaborn.lineplot(x=sum(X[n_pb], []), y=sum(all_metrics[s][preproc][pb][net][k], []), marker='o', ax=axes[k*len(preprocessings)+p, n_pb], label=name) ax.get_legend().remove() if pb != "Dx": axes[k*len(preprocessings)+p, n_pb].set_xscale('log') axes[0,0].set_ylim(bottom=1) axes[0,1].set_ylim(top=1) # axes[1,1].tick_params(labelleft=True) # axes[1,0].tick_params(labelleft=True) # axes[2,1].tick_params(labelleft=True) # axes[2,0].tick_params(labelleft=True) for k, _epoch in enumerate(nb_epochs): left_ax = axes[k*len(preprocessings)+p, 0] left_ax.annotate(preproc_name, xy=(0, 0.5), xytext=(-left_ax.yaxis.labelpad - 5, 0), xycoords=left_ax.yaxis.label, textcoords='offset points', 
size='large', ha='right', va='center', fontweight='bold', bbox=dict(facecolor='none', edgecolor='black', boxstyle='round')) for j, _pb in enumerate(pbs): axes[k*len(preprocessings)+p,j].set_ylabel(metrics[j]) axes[k*len(preprocessings)+p,j].set_xticks(nb_training_samples[j]) if k*len(preprocessings)+p == 0: axes[k*len(preprocessings)+p,j].set_title("{pb}".format(pb=_pb, n=_epoch), fontweight='bold', fontsize='x-large') if p == len(preprocessings)-1 and k == len(nb_epochs)-1: axes[k*len(preprocessings)+p,j].set_xlabel('# Training Samples') handles, _ = axes[0,1].get_legend_handles_labels() fig.legend(handles, net_names, ncol=len(nets), loc='lower center', fontsize='large', bbox_to_anchor=(0,0,1,0)) fig.tight_layout(w_pad=0.2, h_pad=0.2, rect=(0.02, 0.05, 1, 1)) fig.savefig('learning_curves_{site}.png'.format(site=s), format='png') plt.show() # Error between Quasi-Raw and CAT12 pre-processing as a function of # training samples acoss models + Difference between # In-Site and Out-Site training seaborn.set_style('ticks') colors = ['red', 'blue'] markers = ['o', '+', '*', 'v'] pbs = ['Age', 'Sex'] l_scatter = [] x_across_preproc_net, y_across_preproc_net = {s: dict() for s in sites}, {s: dict() for s in sites} #get_best_point = [np.min, np.max] #best_points = [1e8,-1e8] # for the first "site" for k, (s, s_name) in enumerate(zip(sites, site_names)): # Plots perf across networks for each preproc fig, axes = plt.subplots(1, len(pbs), figsize=(5.5 * len(pbs), 4.5), sharex=True, sharey='col') for i, (pb, metric) in enumerate(zip(pbs, metrics)): ax = axes[i] x_across_preproc_net[s][pb] = [n for net in nets for _ in preprocessings for n in nb_training_samples[i]] y_across_preproc_net[s][pb] = [np.mean(met) for net in nets for p in preprocessings for met in all_metrics[s][p][pb][net][0]] for j, (preproc, preproc_name, color) in enumerate(zip(preprocessings, preproc_names, colors)): for (net, net_name, m) in zip(nets, net_names, markers): h = ax.scatter(nb_training_samples[i], [np.mean(met) for met in all_metrics[s][preproc][pb][net][0]], c=color, marker=m) if i==0 and j==0 and k==0: l_scatter.append((h, net_name)) x = [n for net in nets for n in nb_training_samples[i]] y = [np.mean(met) for net in nets for met in all_metrics[s][preproc][pb][net][0]] seaborn.lineplot(x, y, ax=ax, label=preproc_name, color=color) ax.set_xscale('log') ax.set_xlim([0.9*10**2, 10**4]) ax.set_ylabel(metric, rotation=0, labelpad=16) ax.tick_params(axis='x', labelbottom=True) #axes[k,i].axhline(best_points[i], linewidth=1.5, color='black', ls='--') ax.set_title(pb, fontweight='bold', fontsize=13) # if i ==0: ax.annotate(s_name, xy=(0, 0.5), xytext=(-ax.yaxis.labelpad - 5, 0), # xycoords=ax.yaxis.label, textcoords='offset points', # size='large', ha='right', va='center', fontweight='bold', # bbox=dict(facecolor='none', edgecolor='black', boxstyle='round')) if i > 0: ax.yaxis.tick_right() ax.yaxis.set_label_position("right") ax.set_xlabel("# Training Samples") if k > 0: seaborn.lineplot(x_across_preproc_net[sites[0]][pb], y_across_preproc_net[sites[0]][pb], ax=ax, label="Same Site", color="gray") ax.lines[-1].set_linestyle("--") ax.legend() fig.suptitle(s_name, fontweight="bold", fontsize=14, bbox=dict(facecolor='none', edgecolor='black', boxstyle='round')) fig.subplots_adjust(bottom=0.15, left=0.18, wspace=0.05, top=0.85) if k == 0: fig.savefig("intra_site_learning_curves.png") leg = fig.legend([l[0] for l in l_scatter], [l[1] for l in l_scatter], ncol=4, loc='lower center', bbox_to_anchor=(0,0, 1, 0), fontsize='large') for 
handler in leg.legendHandles: handler.set_color('black') fig.subplots_adjust(bottom=0.2, left=0.18, wspace=0.05, top=0.85) fig.savefig("inter_site_learning_curves.png") ## Plots in-site/out-site perf across networks and pre-processing fig_in_out_site, axes_pbs = plt.subplots(1, len(pbs), figsize=(4.5*len(pbs), 4.5)) for i, (pb, metric) in enumerate(zip(pbs, metrics)): for k, (s, s_name) in enumerate(zip(sites, site_names)): seaborn.lineplot(x_across_preproc_net[s][pb], y_across_preproc_net[s][pb], ax=axes_pbs[i], label=s_name) axes_pbs[i].set_xscale('log') axes_pbs[i].set_xlim([0.9 * 10 ** 2, 10 ** 4]) axes_pbs[i].set_ylabel(metric, rotation=0, labelpad=16) if i > 0: axes_pbs[i].yaxis.tick_right() axes_pbs[i].yaxis.set_label_position("right") axes_pbs[i].tick_params(axis='x', labelbottom=True) axes_pbs[i].set_xlabel("# Training Samples") axes_pbs[i].legend() axes_pbs[i].set_title(pb, fontweight='bold', fontsize=12) fig_in_out_site.suptitle("Site Effect in Brain MRI", fontweight="bold", fontsize=14) fig_in_out_site.subplots_adjust(wspace=0.1, top=0.85) fig_in_out_site.savefig("site_effect_learning_curve.png") ## Calibration Curves at N=500 root = '/neurospin/psy_sbox/bd261576/checkpoints/regression_age_sex/Benchmark_IXI_HCP' nets = ['ResNet18', 'ResNet34', 'ResNet50', 'ResNeXt', 'DenseNet', 'ColeNet', 'VGG11', 'TinyDenseNet_Exp9', 'SFCN'] net_names = ['ResNet18', 'ResNet34', 'ResNet50', 'ResNeXt', 'DenseNet', 'tiny-VGG', 'VGG11', 'tiny-DenseNet', 'SFCN'] path_nets = ['ResNet/ResNet18', 'ResNet/ResNet34', 'ResNet/ResNet50', 'ResNeXt', 'DenseNet', 'ColeNet', 'VGG/VGG11', 'TinyDenseNet', 'SFCN'] N = 500 problems = ['Dx', 'Sex'] epochs = [99, 299] dbs = ["SCZ_VIP", "HCP_IXI"] for i, (pb, db, e) in enumerate(zip(problems, dbs, epochs)): fig, axes = plt.subplots(3, 3, figsize=(15, 15)) for j, (net, name, path) in enumerate(zip(nets, net_names, path_nets)): res = [get_pickle_obj(os.path.join(root, path, 'N_%i'%N, pb, "Test_{net}_{pb}_{db}_fold{fold}_epoch{e}.pkl". 
format(net=net,pb=pb,db=db,fold=k, e=e+(pb == "Dx" and net in ["ResNet34", "DenseNet", "ColeNet", "TinyDenseNet_Exp9"])))) for k in range(5)] frac_pos, mean_pred_proba = calibration_curve(res[0]['y_true'], expit(res[0]['y_pred'])) hist, bins = np.histogram(expit(res[0]['y_pred']), bins=5) axes[j%3, j//3].bar(bins[:-1], hist/len(res[0]['y_pred']), np.diff(bins), ls='--', fill=False, edgecolor='blue', align='edge') axes[j%3, j//3].plot(mean_pred_proba, frac_pos, 's-', color='red') axes[j%3, j//3].set_ylabel('Accuracy', color='red') axes[j%3, j//3].tick_params(axis='y', colors='red') sec_ax = axes[j%3,j//3].secondary_yaxis('right') sec_ax.tick_params(axis='y', colors='blue') sec_ax.set_ylabel('Fraction of Samples', color='blue') axes[j%3, j//3].set_xlabel('Confidence') axes[j%3, j//3].plot([0,1], [0,1], 'k:') axes[j%3, j//3].set_title(name, fontweight='bold') fig.tight_layout(pad=3.0) fig.savefig('%s_calibration_plots.png'%pb) plt.show() ## Performance + Calibration of Ensemble models/MC-Dropout (DenseNet and tiny-DenseNet) at N=500 root = '/neurospin/psy_sbox/bd261576/checkpoints/regression_age_sex/Benchmark_IXI_HCP' nets = ['DenseNet', 'TinyDenseNet_Exp9'] # TODO: add tiny-DenseNet net_names = ['DenseNet', 'tiny-DenseNet'] path_nets = ['DenseNet', 'TinyDenseNet'] epochs = {'Ensemble': [[99, 299, 299], [99, 299, 299]], 'MC': [[99, 299, 299], [99, 299, 299]]} colors = ['green', 'red'] T = list(range(1, 11)) N = 500 problems = ['Dx', 'Sex', 'Age'] l1_score = lambda x,y : np.mean(np.abs(np.array(x)-np.array(y))) metrics = {'Dx': [ECE_score, roc_auc_score], 'Sex': [ECE_score, roc_auc_score], 'Age': [AUCE_score, l1_score]} metric_names = {'Dx': ['ECE', 'AUC'], 'Sex': ['ECE', 'AUC'], 'Age': ['AUCE', 'MAE']} baselines = {'DenseNet': {'Dx': [0.172, 0.782], 'Sex': [0.171, 0.852], 'Age': [np.nan, 6.318]},\ 'TinyDenseNet_Exp9': {'Dx': [0.122, 0.791], 'Sex': [0.078, 0.879], 'Age': [np.nan, 6.97]}} row_names = ['Calibration Error', 'Performance'] dbs = ["SCZ_VIP", "HCP_IXI", "HCP_IXI"] bayesian_tests = [('Ensemble', 'Ensembling'), ('MC', 'Dropout/Concrete_Dropout')] bayesian_names = ["Deep Ensemble Learning", "MC-Dropout"] nb_folds = 5 # Calibration's improvement in terms of ECE/AUCE + Performance improvement for l, ((test, dir_test), test_name) in enumerate(zip(bayesian_tests, bayesian_names)): fig, axes = plt.subplots(2, len(problems), figsize=(len(problems)*4, 2*4), sharex=True, squeeze=False) for j, (pb, db) in enumerate(zip(problems, dbs)): for k, (metric, metric_name, row_name) in enumerate(zip(metrics[pb], metric_names[pb],row_names)): for i, (net, name, path) in enumerate(zip(nets, net_names, path_nets)): ax = axes[k,j] hyper = ""#"_GaussianLkd" if pb == "Age" and test == "MC" else "" res = [get_pickle_obj(os.path.join(root, path, 'N_%i'%N, pb, dir_test, "{test}Test_{net}_{pb}_{db}{hyper}_fold{fold}_epoch{e}.pkl". 
format(test=test,net=net,pb=pb,db=db,hyper=hyper,fold=k, e=epochs[test][i][j]))) for k in range(nb_folds)] y_pred, y_true = np.array([res[f]['y'] for f in range(nb_folds)]),\ np.array([res[f]['y_true'] for f in range(nb_folds)])[:, :,0] if pb != "Age": y_pred = expit(y_pred) if metric_name == "AUCE": scores = [[metric(y_pred[k, :, :t].mean(axis=1), y_pred[k, :, :t].std(axis=1), y_true[k]) for k in range(nb_folds)] for t in T[1:]] # scores = [[metric(y_pred[k, :, :t, 0].mean(axis=1), # (y_pred[k, :, :t, 1]+y_pred[k, :, :t, 0]**2).mean(axis=1) - # y_pred[k, :, :t, 0].mean(axis=1)**2, y_true[k]) # for k in range(nb_folds)] for t in T[1:]] elif metric_name == "AUC": scores = [[metric(y_true[k], y_pred[k, :, :t].mean(axis=1)) for k in range(nb_folds)] for t in T] else: scores = [[metric(y_pred[k, :, :t].mean(axis=1), y_true[k]) for k in range(nb_folds)] for t in T] if metric_name != "AUCE": ax.axhline(baselines[net][pb][k], linewidth = 1, color = colors[i], ls = '--', label=name) ax.errorbar(T if metric_name != "AUCE" else T[1:], [np.mean(s) for s in scores], yerr=[np.std(s) for s in scores], capsize=3, ecolor=colors[i], color=colors[i], label=name+"+Epistemic Uncertainty") arrow = "\\uparrow" if metric_name == "AUC" else "\\downarrow" ax.set_ylabel('%s $%s$'%(metric_name, arrow), color='black') if j == 0: ax.annotate(row_name, xy=(0, 0.5), xytext=(-ax.yaxis.labelpad - 5, 0), xycoords=ax.yaxis.label, textcoords='offset points', size='large', ha='right', va='center', fontweight='bold', bbox=dict(facecolor='none', edgecolor='black', boxstyle='round')) ax.tick_params(axis='y', colors='black') if k ==0: ax.set_title("{pb}".format(pb=pb), fontsize="large", fontweight='bold') ax.xaxis.set_tick_params(which='both', labelbottom=True) ax.set_xticks(ticks=T) ax.set_xlabel('# Samples T') handles, names = axes[0, 0].get_legend_handles_labels() fig.legend(handles, names, loc='lower center', ncol=2, bbox_to_anchor=(0, 0, 1, 0), fontsize='medium') fig.suptitle(test_name, fontsize='xx-large', fontweight='bold') fig.subplots_adjust(left=0.2, top=0.9, bottom=0.15, wspace=0.3) fig.savefig('results/%s_calibration_performance.png'%test) # TODO: Calibration curves for DenseNet/tiny-DenseNet # fig_cal_curves, big_axes = plt.subplots(2*len(problems), 1, figsize=(2 * 5, len(problems) * 5), # sharey=True, squeeze=False, gridspec_kw={}) # for row, (big_ax, pb_name) in enumerate(zip(big_axes[:, 0], problems), start=1): # big_ax.set_title('{pb} Prediction'.format(pb=pb_name), fontweight='bold', fontsize=16) # big_ax.axis('off') # big_ax._frameon = False # big_ax.title.set_position([.5, 1.08]) # for j, (pb, db, e) in enumerate(zip(problems, dbs, epochs)): # for l, t in enumerate([T[0], T[-1]], start=1): # calibration_curves_axis.append(fig.add_subplot(len(problems), 2, 2 * j + 1 + l)) # for i, (net, name, path) in enumerate(zip(nets, net_names, path_nets)): # res = [get_pickle_obj(os.path.join(root, path, 'N_%i'%N, pb,'Ensembling', # "EnsembleTest_{net}_{pb}_{db}_fold{fold}_epoch{e}.pkl". 
# format(net=net,pb=pb,db=db,fold=k, e=e))) for k in range(5)] # y_pred, y_true = np.array([res[f]['y'] for f in range(5)]), np.array([res[f]['y_true'] for f in range(5)])[:, :,0] # y_pred = expit(y_pred) # AUC = [[roc_auc_score(y_true[k], y_pred[k,:,:t].mean(axis=1)) for k in range(5)] for t in T] # ECE = [[ECE_score(y_pred[k,:,:t].mean(axis=1), y_true[k]) for k in range(5)] for t in T] # ax.errorbar(T, [np.mean(ece) for ece in ECE], yerr=[np.std(ece) for ece in ECE], capsize=3, ecolor=colors[i], # color=colors[i], label=name) # # ax2 = ax.twinx() # # ax2.errorbar(T, [np.mean(auc) for auc in AUC], yerr=[np.std(auc) for auc in AUC], capsize=3, ecolor='blue', color='blue') # # ax2.set_ylabel('AUC', color='blue') # # ax2.tick_params(axis='y', colors='blue') # # ax2.set_ylim([0.5,0.95]) # # for l, t in enumerate([T[0], T[-1]], start=0): # frac_pos_and_mean_pred_proba = [calibration_curve(y_true[fold], y_pred[fold,:,:t].mean(axis=1)) # for fold in range(5)] # hist, bins = np.histogram(y_pred[0,:,:t].mean(axis=1), bins=5) # we assume they are all the same across the folds... # calibration_curves_axis[l].bar(bins[:-1], hist/len(y_true[0]), np.diff(bins), ls='--', # fill=False, edgecolor=colors[i], align='edge') # seaborn.lineplot(x=[mean_pred_prob for _ in range(5) for mean_pred_prob in # np.mean([frac_mean_k[1] for frac_mean_k in frac_pos_and_mean_pred_proba], axis=0)], # y=[m for frac_mean_k in frac_pos_and_mean_pred_proba for m in frac_mean_k[0]], # marker='o', ax=calibration_curves_axis[l], color=colors[i], label=name) # #ax.plot(mean_pred_proba, frac_pos, 's-', color='red') # calibration_curves_axis[l].set_ylabel('Fraction of samples / Accuracy', color='black') # calibration_curves_axis[l].tick_params(axis='y', colors='black') # #sec_ax = calibration_curves_axis[l].secondary_yaxis('right') # #sec_ax.tick_params(axis='y', colors='black') # #sec_ax.set_ylabel('Fraction of Samples', color='black') # calibration_curves_axis[l].set_xlabel('Confidence') # calibration_curves_axis[l].plot([0,1], [0,1], 'k:') # calibration_curves_axis[l].set_title('Calibration curve at T=%i'%t, fontsize=13) # calibration_curves_axis[l].legend() # ax.legend() # # fig.tight_layout(pad=2) # fig.savefig('ensemble_calibration_plots.png') # Predictive uncertainty quality improvement with Deep Ensemble for both low and high capacity models entropy_func = lambda sigma: - ((1-sigma) * np.log(1-sigma+1e-8) + sigma * np.log(sigma+1e-8)) colors = ['blue', 'green'] markers = ['o', '+'] T_models = [1, 10] data_retained = np.arange(0.1, 1.01, 0.1) fig, big_axes = plt.subplots(len(problems), 1, figsize=(7*len(nets), 7*len(problems)), sharex=True, squeeze=False) for row, (big_ax, pb_name) in enumerate(zip(big_axes[:,0], problems), start=1): big_ax.set_title('{pb} Prediction'.format(pb=pb_name), fontweight='bold', fontsize=16) big_ax.axis('off') big_ax._frameon=False big_ax.title.set_position([.5, 1.08]) for k, (pb, db, e ) in enumerate(zip(problems, dbs, epochs)): for i, (name, net, path) in enumerate(zip(net_names, nets, path_nets)): ax = fig.add_subplot(len(problems), len(nets), len(nets)*k+i+1) res = [get_pickle_obj(os.path.join(root, path, 'N_%i' % 500, pb, 'Ensembling', "EnsembleTest_{net}_{pb}_{db}_fold{fold}_epoch{e}.pkl". 
format(net=net, pb=pb, db=db, fold=k, e=e))) for k in range(5)] y_pred_ensemble, y_true = np.array([res[f]['y'] for f in range(5)]), np.array([res[f]['y_true'] for f in range(5)])[:, :, 0] for it_t, t in enumerate(T_models): y_pred = expit(y_pred_ensemble[:,:,:t]).mean(axis=2) # take the mean prob of Ensemble # Get the uncertainty (entropy) for correct/wrong predictions H_pred = entropy_func(y_pred) #MI = H_pred - entropy_func(expit(y_pred)).mean(axis=2) mask_corr = [(pred>0.5)==true for (pred, true) in zip(y_pred, y_true)] # Plot the performance (AUC, bAcc) as a function of the data retained based on the entropy H_pred_sorted = np.sort([H for H in H_pred]) threshold = [[H[int(th*(len(y_pred[m])-1))] for th in data_retained] for m, H in enumerate(H_pred_sorted)] # Threshold based on the entropy directly #threshold = [data_retained for _ in range(5)] y_pred_thresholded = [pred[H<=th] for m, (pred, H) in enumerate(zip(y_pred, H_pred)) for th in threshold[m]] y_true_thresholded = [true[H<=th] for m, (true, H) in enumerate(zip(y_true, H_pred)) for th in threshold[m]] auc = [roc_auc_score(true, pred) for (pred, true) in zip(y_pred_thresholded, y_true_thresholded)] seaborn.lineplot(x=[th*100 for _ in y_pred for th in data_retained], y=auc, marker=markers[it_t], label=(t>1)*'Ensemble '+'{net} (T={t})'.format(net=name,t=t), ax=ax, color=colors[i]) if it_t == 0: auc_random = [roc_auc_score(true, pred) for (pred, true) in zip(y_pred, y_true) for th in data_retained] seaborn.lineplot(x=[th * 100 for _ in y_pred for th in data_retained], y=auc_random, marker='.', label='Random case', ax=ax, color='black') ax.set_ylabel('AUC') ax.set_xlabel('% Data Retained based on $\mathcal{H}$') if pb == "Dx": ax.set_ylim([0.76, 0.86]) if pb == "Sex": ax.set_ylim([0.80, 0.95]) ax.legend() ax.set_title(name, fontweight='bold', fontsize=14) fig.tight_layout(pad=2) fig.savefig('models_uncertainty_estimation_ensemble.png', format='png') ### Computes the entropy as a measure of (epistemic+aleatoric) uncertainty for wrong predictions and correct predictions ### + True Class Probability (TCP) as a histogram for well-classified/mis-classified examples root = '/neurospin/psy_sbox/bd261576/checkpoints/regression_age_sex/Benchmark_IXI_HCP' nets = ['TinyDenseNet_Exp9', 'DenseNet', 'Full_ColeNet', 'Full_ResNet34', 'DenseNet', 'ColeNet', 'ResNet34', 'DenseNet', 'ColeNet', 'ResNet34'] net_names = ['tiny-DenseNet', 'DenseNet', 'tiny-VGG', 'ResNet34', 'MC-Dropout DenseNet', 'MC-Dropout tiny-VGG', 'MC-Dropout ResNet34', 'Ensemble DenseNet', 'Ensemble tiny-VGG', 'Ensemble ResNet34'] path_nets = ['TinyDenseNet', 'DenseNet', 'ColeNet', 'ResNet/ResNet34', 'DenseNet/Dx/Dropout/Concrete_Dropout', 'ColeNet/Dx/Dropout/Concrete_Dropout', 'ResNet/ResNet34/Dx/Dropout/Concrete_Dropout', 'DenseNet/Dx/Ensembling', 'ColeNet/Dx/Ensembling', 'ResNet/ResNet34/Dx/Ensembling'] problem = "Dx" epochs = [49, 49, 49, 49, 49] entropy_func = lambda sigma: - ((1-sigma) * np.log(1-sigma+1e-8) + sigma * np.log(sigma+1e-8)) colors = ['blue', 'green', 'orange'] markers = ['o', '+', '^'] fig, axes = plt.subplots(1, 1, squeeze=False, figsize=(7, 7)) fig2, axes2 = plt.subplots(3, 3, squeeze=False, sharey='row', figsize=(15, 15)) for i, (name, net, path) in enumerate(zip(net_names, nets, path_nets)): if 'Concrete_Dropout' in path or 'Ensembling' in path: test = "MC" if "Concrete_Dropout" in path else "Ensemble" res = [get_pickle_obj(os.path.join(root, path, "{t}Test_{net}_Dx_SCZ_VIP_fold{k}_epoch{e}.pkl". 
format(t=test,net=net,k=k,e=e))) for (k,e) in enumerate(epochs)] y_pred, y_true = np.array([res[f]['y'] for f in range(5)]), np.array([res[f]['y_true'] for f in range(5)])[:,:, 0] y_pred = expit(y_pred).mean(axis=2) # take the mean prob of the MC-sampling or Ensemble else: res = [get_pickle_obj(os.path.join(root, path, problem, "Test_{net}_Dx_SCZ_VIP_fold{k}_epoch{e}.pkl". format(net=net,k=k,e=e))) for (k,e) in enumerate(epochs)] y_pred, y_true = expit(np.array([res[f]['y_pred'] for f in range(5)])), np.array([res[f]['y_true'] for f in range(5)]) # Get the uncertainty (entropy) for correct/wrong predictions H_pred = entropy_func(y_pred) #MI = H_pred - entropy_func(expit(y_pred)).mean(axis=2) mask_corr = [(pred>0.5)==true for (pred, true) in zip(y_pred, y_true)] # Plot the performance (AUC, bAcc) as a function of the data retained based on the entropy data_retained = np.arange(0.5, 1.01, 0.1) H_pred_sorted = np.sort([H for H in H_pred]) threshold = [[H[int(th*(len(y_pred[m])-1))] for th in data_retained] for m, H in enumerate(H_pred_sorted)] y_pred_thresholded = [pred[H<=th] for m, (pred, H) in enumerate(zip(y_pred, H_pred)) for th in threshold[m]] y_true_thresholded = [true[H<=th] for m, (true, H) in enumerate(zip(y_true, H_pred)) for th in threshold[m]] b_acc = [balanced_accuracy_score(true, pred>0.5) for (pred, true) in zip(y_pred_thresholded, y_true_thresholded)] auc = [roc_auc_score(true, pred) for (pred, true) in zip(y_pred_thresholded, y_true_thresholded)] TCP_err = [pred[~corr] * (pred[~corr]<=0.5) + (1-pred[~corr]) * (pred[~corr]>0.5) for (pred, corr) in zip(y_pred, mask_corr)] TCP_true = [pred[corr] * (pred[corr]>0.5) + (1-pred[corr]) * (pred[corr]<=0.5) for (pred, corr) in zip(y_pred, mask_corr)] seaborn.distplot(TCP_true[1], kde=False, label="Successes", ax=axes2[i%3,i//3], color='green') seaborn.distplot(TCP_err[1], kde=False, label="Errors", ax=axes2[i%3,i//3], color='red') axes2[i%3,i//3].set_title(format(name)) axes2[i%3,i//3].set_ylabel('True Class Probability') axes2[i%3,i//3].legend() seaborn.lineplot(x=[th for _ in y_pred for th in data_retained], y=auc, marker=markers[i//3], label=name, ax=axes[0,0], color=colors[i%3]) axes[0,0].set_ylabel('AUC') axes[0,0].set_xlabel('Data Retained based on $\mathcal{H}$') axes[0,0].set_ylim([0.7, 0.9]) axes[0,0].legend() fig.savefig('models_uncertainty_curves.png', format='png') fig2.savefig('true_class_probability_dx.png', format='png') plt.show() ## Demonstration of the effectiveness of Concrete Dropout h = [History.load('/neurospin/psy_sbox/bd261576/checkpoints/regression_age_sex/Benchmark_IXI_HCP/DenseNet/Dx/Dropout/p_0.2/Train_DenseNet_Dx_SCZ_VIP_4_epoch_49.pkl'), History.load('/neurospin/psy_sbox/bd261576/checkpoints/regression_age_sex/Benchmark_IXI_HCP/DenseNet/Dx/Dropout/p_0.5/Train_DenseNet_Dx_SCZ_VIP_4_epoch_49.pkl'), History.load('/neurospin/psy_sbox/bd261576/checkpoints/regression_age_sex/Benchmark_IXI_HCP/DenseNet/Dx/Dropout/Concrete_Dropout/Train_DenseNet_Dx_SCZ_VIP_4_epoch_49.pkl'), History.load('/neurospin/psy_sbox/bd261576/checkpoints/regression_age_sex/Benchmark_IXI_HCP/DenseNet/Dx/Train_DenseNet_Dx_SCZ_VIP_4_epoch_49.pkl')] h_val = [History.load('/neurospin/psy_sbox/bd261576/checkpoints/regression_age_sex/Benchmark_IXI_HCP/DenseNet/Dx/Dropout/p_0.2/Validation_DenseNet_Dx_SCZ_VIP_4_epoch_49.pkl'), History.load('/neurospin/psy_sbox/bd261576/checkpoints/regression_age_sex/Benchmark_IXI_HCP/DenseNet/Dx/Dropout/p_0.5/Validation_DenseNet_Dx_SCZ_VIP_4_epoch_49.pkl'), 
History.load('/neurospin/psy_sbox/bd261576/checkpoints/regression_age_sex/Benchmark_IXI_HCP/DenseNet/Dx/Dropout/Concrete_Dropout/Validation_DenseNet_Dx_SCZ_VIP_4_epoch_49.pkl'), History.load('/neurospin/psy_sbox/bd261576/checkpoints/regression_age_sex/Benchmark_IXI_HCP/DenseNet/Dx/Validation_DenseNet_Dx_SCZ_VIP_4_epoch_49.pkl')] plot_losses(h, h_val, patterns_to_del=['validation_', ' on validation set'], metrics=['roc_auc', 'balanced_accuracy'], experiment_names=['Dropout p=0.2', 'Dropout p=0.5', 'Concrete Dropout', 'Deterministic'], ylabels={'roc_auc': 'AUC', 'balanced_accuracy': 'Balanced Accuracy'}, ylim={'roc_auc': [0, 1], 'balanced_accuracy': [0, 1]}, figsize=(15,15), same_plot=True, saving_path='MCDropout_DenseNet_Dx.png') ## Feature re-using inside DenseNet: when does it occur ? ## Output: a dict {Block: {(layer_0, layer_1): SVCCA(layer_0, layer_1)}} for each block B of DenseNet and a pair of layers # inside B stratif = {'train': {}, 'test': {'study': ['BSNIP'], 'diagnosis': ['control', 'schizophrenia']}} ## DenseNet121 # pretrained_path = "/neurospin/psy_sbox/bd261576/checkpoints/regression_age_sex/Benchmark_IXI_HCP/DenseNet/Dx/" \ # "DenseNet_Dx_SCZ_VIP_4_epoch_49.pth" # output_file = "/neurospin/psy_sbox/bd261576/checkpoints/regression_age_sex/Benchmark_IXI_HCP/DenseNet/Dx/" \ # "neurons_output_densenet121_fold4_epoch49.pkl" # output_distances_file = "/neurospin/psy_sbox/bd261576/checkpoints/regression_age_sex/Benchmark_IXI_HCP/DenseNet/Dx/" \ # "svcca_output_densenet121_fold4_epoch49.pkl" #model = densenet121(num_classes=1, in_channels=1) # blocks_config = [6, 12, 24, 16] ## tiny-DenseNet pretrained_path = "/neurospin/psy_sbox/bd261576/checkpoints/regression_age_sex/Benchmark_IXI_HCP/TinyDenseNet/Dx/" \ "TinyDenseNet_Exp9_Dx_SCZ_VIP_4_epoch_49.pth" output_file = "/neurospin/psy_sbox/bd261576/checkpoints/regression_age_sex/Benchmark_IXI_HCP/TinyDenseNet/Dx/" \ "neurons_output_tiny_densenet_exp9_fold4_epoch49.pkl" output_distances_file = "/neurospin/psy_sbox/bd261576/checkpoints/regression_age_sex/Benchmark_IXI_HCP/TinyDenseNet/Dx/" \ "svcca_output_tiny_densenet_exp9_fold4_epoch49.pkl" model = _densenet('exp9', 16, (6, 12, 16), 64, False, False, num_classes=1) blocks_config = [6, 12, 16] target_layers = [['features.denseblock{i}.denselayer{j}.conv1'.format(i=i,j=j) for j in range(1,blocks_config[i-1]+1)] for i in range(1,len(blocks_config)+1)] target_layers_flatten = [("block%i" % (i + 1), "layer%i" % (j + 1)) for i, b in enumerate(target_layers) for j, l in enumerate(b)] N = len(target_layers_flatten) compute_outputs, compute_svcca = True, False if compute_outputs: device='cuda' dx_mapping = LabelMapping(schizophrenia=1, control=0) input_transforms = [Crop((1, 121, 128, 121)), Padding([1, 128, 128, 128]), Normalize()] manager = DataManager(CONFIG['input_path'], CONFIG['metadata_path'], batch_size=4, number_of_folds=1, labels=["diagnosis"], labels_transforms=[dx_mapping], custom_stratification=stratif, input_transforms=input_transforms, pin_memory=True, drop_last=False) loaders = manager.get_dataloader(test=True) net = model.to(device) net.load_state_dict(torch.load(pretrained_path)['model']) net.eval() hooks = [[CCAHook(net, l, cca_distance="svcca", svd_device=device) for l in block] for block in target_layers] ## Computes and stores the outputs of each network for all the test set outputs = {'block{}'.format(i): {'layer{}'.format(j): [] for j in range(1,blocks_config[i-1]+1)} for i in range(1,len(blocks_config)+1)} labels = [] pbar = tqdm(total=len(loaders.test), 
desc="Mini-Batch") for it, dataitem in enumerate(loaders.test): pbar.update() inputs = dataitem.inputs.to(device) labels.extend(dataitem.labels.detach().cpu().numpy()) out = net(inputs) for i, block in enumerate(target_layers): for j, layer in enumerate(block): outputs["block%i"%(i+1)]["layer%i"%(j+1)].extend(hooks[i][j].get_hooked_value().cpu().detach().numpy()) with open(output_file, 'wb') as f: pickle.dump(outputs, f) else: outputs = get_pickle_obj(output_file) if compute_svcca: device = 'cpu' ## Loads the outputs and computes the distances between all layers and store them distances_matrix = np.zeros((N, N)) print('Transforming all npy arrays to torch tensors...', flush=True) output_tensors = {b: {l: torch.tensor(outputs[b][l], device=device) for l in outputs[b]} for b in outputs} sizes = [16, 8, 8, 4] pbar = tqdm(total=N * (N + 1) / 2, desc="Nb couples done") for i in range(N): for j in range(i, N): pbar.update() (blocki, layeri), (blockj, layerj) = target_layers_flatten[i], target_layers_flatten[j] n_blocki, n_blockj = int(blocki[5:]), int(blockj[5:]) # Computes the distances between the 2 representations distances_matrix[i, j] = 1 - CCAHook._conv3d(output_tensors[blocki][layeri], output_tensors[blockj][layerj], svcca_distance, sizes[n_blocki - 1], sizes[n_blockj - 1], same_layer=False, accept_rate=0.5)['distance'] with open(output_distances_file, 'wb') as f: pickle.dump({"target_layers_flatten": target_layers_flatten, "svcca_matrix": distances_matrix}, f) else: svcca_results = get_pickle_obj(output_distances_file) distances = np.array(svcca_results['svcca_matrix']) distances =
np.maximum(distances, distances.T)
numpy.maximum
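# The SVCCA distance matrix above is only filled for j >= i, so it is upper triangular.
# A tiny sketch of why np.maximum(distances, distances.T) symmetrizes it: the element-wise
# maximum with the transpose mirrors each computed entry across the diagonal without touching
# the zero diagonal. The toy matrix below is hypothetical.
import numpy as np

D = np.array([[0.0, 0.3, 0.7],
              [0.0, 0.0, 0.4],
              [0.0, 0.0, 0.0]])    # only the upper triangle was computed

D_sym = np.maximum(D, D.T)         # mirror the upper triangle into the lower one
print(D_sym)
# [[0.  0.3 0.7]
#  [0.3 0.  0.4]
#  [0.7 0.4 0. ]]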
""" Original Author: <NAME> """ import numpy as np import ctypes as ct import cv2 import sys import os BASE_DIR = os.path.dirname(os.path.abspath(__file__)) showsz=800 mousex,mousey=0.5,0.5 zoom=1.0 changed=True def onmouse(*args): global mousex,mousey,changed y=args[1] x=args[2] mousex=x/float(showsz) mousey=y/float(showsz) changed=True cv2.namedWindow('show3d') cv2.moveWindow('show3d',0,0) cv2.setMouseCallback('show3d',onmouse) dll=np.ctypeslib.load_library(os.path.join(BASE_DIR, 'render_balls_so.so'),'.') def showpoints(xyz,c_gt=None, c_pred = None ,waittime=0,showrot=False,magnifyBlue=0,freezerot=False,background=(0,0,0), normalizecolor=True, ballradius=10, legend_gt=None, legend_pred=None): global showsz,mousex,mousey,zoom,changed xyz=xyz-xyz.mean(axis=0) radius=((xyz**2).sum(axis=-1)**0.5).max() xyz/=(radius*2.2)/showsz if c_gt is None: c0=np.zeros((len(xyz),),dtype='float32')+255 c1=np.zeros((len(xyz),),dtype='float32')+255 c2=np.zeros((len(xyz),),dtype='float32')+255 else: c0=c_gt[:,0] c1=c_gt[:,1] c2=c_gt[:,2] legend_tmp = legend_gt legend = [] if normalizecolor: c0/=(np.max(c0)+1e-14)/255.0 c1/=(np.max(c1)+1e-14)/255.0 c2/=(np.max(c2)+1e-14)/255.0 max_red = 0 max_gre = 0 max_blu = 0 for legend_item in legend_tmp: if legend_item[2][0] > max_red: max_red = legend_item[2][0] if legend_item[2][1] > max_gre: max_gre = legend_item[2][1] if legend_item[2][2] > max_blu: max_blu = legend_item[2][2] for legend_item in legend_tmp: red = legend_item[2][0] / ((max_red+1e-14)/255.0) gre = legend_item[2][1] / ((max_gre+1e-14)/255.0) blu = legend_item[2][2] / ((max_blu+1e-14)/255.0) norm_color = (red, gre, blu) legend.append((legend_item[0], legend_item[1], norm_color)) else: for legend_item in legend_tmp: legend.append((legend_item[0], legend_item[1], legend_item[2])) c0=np.require(c0,'float32','C') c1=np.require(c1,'float32','C') c2=np.require(c2,'float32','C') show=np.zeros((showsz,showsz,3),dtype='uint8') def render(): rotmat=np.eye(3) if not freezerot: xangle=(mousey-0.5)*np.pi*1.2 else: xangle=0 rotmat=rotmat.dot(np.array([ [1.0,0.0,0.0], [0.0,np.cos(xangle),-np.sin(xangle)], [0.0,np.sin(xangle),np.cos(xangle)], ])) if not freezerot: yangle=(mousex-0.5)*np.pi*1.2 else: yangle=0 rotmat=rotmat.dot(np.array([ [np.cos(yangle),0.0,-np.sin(yangle)], [0.0,1.0,0.0], [
np.sin(yangle)
numpy.sin
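# A compact sketch of the rotation logic in the render() closure above: two angles (driven by
# the mouse position) define rotations about the x- and y-axes, which are composed and applied
# to the centered point cloud. The angle values and the toy point cloud are hypothetical; only
# the matrix layout mirrors the code above.
import numpy as np

def rotation_matrix(xangle, yangle):
    # Rotation about the x-axis followed by a rotation about the y-axis (same layout as above)
    rx = np.array([[1.0, 0.0, 0.0],
                   [0.0, np.cos(xangle), -np.sin(xangle)],
                   [0.0, np.sin(xangle),  np.cos(xangle)]])
    ry = np.array([[np.cos(yangle), 0.0, -np.sin(yangle)],
                   [0.0,            1.0,  0.0],
                   [np.sin(yangle), 0.0,  np.cos(yangle)]])
    return rx.dot(ry)

xyz = np.random.rand(1000, 3) - 0.5                 # hypothetical centered point cloud
rotated = xyz.dot(rotation_matrix(0.2, -0.4).T)     # rotate every point
print(rotated.shape)                                # (1000, 3)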
import unittest import qteasy as qt import pandas as pd from pandas import Timestamp import numpy as np import math from numpy import int64 import itertools import datetime from qteasy.utilfuncs import list_to_str_format, regulate_date_format, time_str_format, str_to_list from qteasy.utilfuncs import maybe_trade_day, is_market_trade_day, prev_trade_day, next_trade_day from qteasy.utilfuncs import next_market_trade_day, unify, mask_to_signal, list_or_slice, labels_to_dict from qteasy.utilfuncs import weekday_name, prev_market_trade_day, is_number_like, list_truncate, input_to_list from qteasy.space import Space, Axis, space_around_centre, ResultPool from qteasy.core import apply_loop from qteasy.built_in import SelectingFinanceIndicator, TimingDMA, TimingMACD, TimingCDL, TimingTRIX from qteasy.tsfuncs import income, indicators, name_change, get_bar from qteasy.tsfuncs import stock_basic, trade_calendar, new_share, get_index from qteasy.tsfuncs import balance, cashflow, top_list, index_indicators, composite from qteasy.tsfuncs import future_basic, future_daily, options_basic, options_daily from qteasy.tsfuncs import fund_basic, fund_net_value, index_basic, stock_company from qteasy.evaluate import eval_alpha, eval_benchmark, eval_beta, eval_fv from qteasy.evaluate import eval_info_ratio, eval_max_drawdown, eval_sharp from qteasy.evaluate import eval_volatility from qteasy.tafuncs import bbands, dema, ema, ht, kama, ma, mama, mavp, mid_point from qteasy.tafuncs import mid_price, sar, sarext, sma, t3, tema, trima, wma, adx, adxr from qteasy.tafuncs import apo, bop, cci, cmo, dx, macd, macdext, aroon, aroonosc from qteasy.tafuncs import macdfix, mfi, minus_di, minus_dm, mom, plus_di, plus_dm from qteasy.tafuncs import ppo, roc, rocp, rocr, rocr100, rsi, stoch, stochf, stochrsi from qteasy.tafuncs import trix, ultosc, willr, ad, adosc, obv, atr, natr, trange from qteasy.tafuncs import avgprice, medprice, typprice, wclprice, ht_dcperiod from qteasy.tafuncs import ht_dcphase, ht_phasor, ht_sine, ht_trendmode, cdl2crows from qteasy.tafuncs import cdl3blackcrows, cdl3inside, cdl3linestrike, cdl3outside from qteasy.tafuncs import cdl3starsinsouth, cdl3whitesoldiers, cdlabandonedbaby from qteasy.tafuncs import cdladvanceblock, cdlbelthold, cdlbreakaway, cdlclosingmarubozu from qteasy.tafuncs import cdlconcealbabyswall, cdlcounterattack, cdldarkcloudcover from qteasy.tafuncs import cdldoji, cdldojistar, cdldragonflydoji, cdlengulfing from qteasy.tafuncs import cdleveningdojistar, cdleveningstar, cdlgapsidesidewhite from qteasy.tafuncs import cdlgravestonedoji, cdlhammer, cdlhangingman, cdlharami from qteasy.tafuncs import cdlharamicross, cdlhighwave, cdlhikkake, cdlhikkakemod from qteasy.tafuncs import cdlhomingpigeon, cdlidentical3crows, cdlinneck from qteasy.tafuncs import cdlinvertedhammer, cdlkicking, cdlkickingbylength from qteasy.tafuncs import cdlladderbottom, cdllongleggeddoji, cdllongline, cdlmarubozu from qteasy.tafuncs import cdlmatchinglow, cdlmathold, cdlmorningdojistar, cdlmorningstar from qteasy.tafuncs import cdlonneck, cdlpiercing, cdlrickshawman, cdlrisefall3methods from qteasy.tafuncs import cdlseparatinglines, cdlshootingstar, cdlshortline, cdlspinningtop from qteasy.tafuncs import cdlstalledpattern, cdlsticksandwich, cdltakuri, cdltasukigap from qteasy.tafuncs import cdlthrusting, cdltristar, cdlunique3river, cdlupsidegap2crows from qteasy.tafuncs import cdlxsidegap3methods, beta, correl, linearreg, linearreg_angle from qteasy.tafuncs import linearreg_intercept, linearreg_slope, 
stddev, tsf, var, acos from qteasy.tafuncs import asin, atan, ceil, cos, cosh, exp, floor, ln, log10, sin, sinh from qteasy.tafuncs import sqrt, tan, tanh, add, div, max, maxindex, min, minindex, minmax from qteasy.tafuncs import minmaxindex, mult, sub, sum from qteasy.history import get_financial_report_type_raw_data, get_price_type_raw_data from qteasy.history import stack_dataframes, dataframe_to_hp, HistoryPanel from qteasy.database import DataSource from qteasy.strategy import Strategy, SimpleTiming, RollingTiming, SimpleSelecting, FactoralSelecting from qteasy._arg_validators import _parse_string_kwargs, _valid_qt_kwargs from qteasy.blender import _exp_to_token, blender_parser, signal_blend class TestCost(unittest.TestCase): def setUp(self): self.amounts = np.array([10000., 20000., 10000.]) self.op = np.array([0., 1., -0.33333333]) self.amounts_to_sell = np.array([0., 0., -3333.3333]) self.cash_to_spend = np.array([0., 20000., 0.]) self.prices = np.array([10., 20., 10.]) self.r = qt.Cost(0.0) def test_rate_creation(self): """测试对象生成""" print('testing rates objects\n') self.assertIsInstance(self.r, qt.Cost, 'Type should be Rate') self.assertEqual(self.r.buy_fix, 0) self.assertEqual(self.r.sell_fix, 0) def test_rate_operations(self): """测试交易费率对象""" self.assertEqual(self.r['buy_fix'], 0.0, 'Item got is incorrect') self.assertEqual(self.r['sell_fix'], 0.0, 'Item got is wrong') self.assertEqual(self.r['buy_rate'], 0.003, 'Item got is incorrect') self.assertEqual(self.r['sell_rate'], 0.001, 'Item got is incorrect') self.assertEqual(self.r['buy_min'], 5., 'Item got is incorrect') self.assertEqual(self.r['sell_min'], 0.0, 'Item got is incorrect') self.assertEqual(self.r['slipage'], 0.0, 'Item got is incorrect') self.assertEqual(np.allclose(self.r.calculate(self.amounts), [0.003, 0.003, 0.003]), True, 'fee calculation wrong') def test_rate_fee(self): """测试买卖交易费率""" self.r.buy_rate = 0.003 self.r.sell_rate = 0.001 self.r.buy_fix = 0. self.r.sell_fix = 0. self.r.buy_min = 0. self.r.sell_min = 0. self.r.slipage = 0. 
        print('\nSell result with fixed rate = 0.001 and moq = 0:')
        print(self.r.get_selling_result(self.prices, self.amounts_to_sell))
        test_rate_fee_result = self.r.get_selling_result(self.prices, self.amounts_to_sell)
        self.assertIs(np.allclose(test_rate_fee_result[0], [0., 0., -3333.3333]), True, 'result incorrect')
        self.assertAlmostEqual(test_rate_fee_result[1], 33299.999667, msg='result incorrect')
        self.assertAlmostEqual(test_rate_fee_result[2], 33.333332999999996, msg='result incorrect')

        print('\nSell result with fixed rate = 0.001 and moq = 1:')
        print(self.r.get_selling_result(self.prices, self.amounts_to_sell, 1.))
        test_rate_fee_result = self.r.get_selling_result(self.prices, self.amounts_to_sell, 1)
        self.assertIs(np.allclose(test_rate_fee_result[0], [0., 0., -3333]), True, 'result incorrect')
        self.assertAlmostEqual(test_rate_fee_result[1], 33296.67, msg='result incorrect')
        self.assertAlmostEqual(test_rate_fee_result[2], 33.33, msg='result incorrect')

        print('\nSell result with fixed rate = 0.001 and moq = 100:')
        print(self.r.get_selling_result(self.prices, self.amounts_to_sell, 100))
        test_rate_fee_result = self.r.get_selling_result(self.prices, self.amounts_to_sell, 100)
        self.assertIs(np.allclose(test_rate_fee_result[0], [0., 0., -3300]), True, 'result incorrect')
        self.assertAlmostEqual(test_rate_fee_result[1], 32967.0, msg='result incorrect')
        self.assertAlmostEqual(test_rate_fee_result[2], 33, msg='result incorrect')

        print('\nPurchase result with fixed rate = 0.003 and moq = 0:')
        print(self.r.get_purchase_result(self.prices, self.cash_to_spend, 0))
        test_rate_fee_result = self.r.get_purchase_result(self.prices, self.cash_to_spend, 0)
        self.assertIs(np.allclose(test_rate_fee_result[0], [0., 997.00897308, 0.]), True, 'result incorrect')
        self.assertAlmostEqual(test_rate_fee_result[1], -20000.0, msg='result incorrect')
        self.assertAlmostEqual(test_rate_fee_result[2], 59.82053838484547, msg='result incorrect')

        print('\nPurchase result with fixed rate = 0.003 and moq = 1:')
        print(self.r.get_purchase_result(self.prices, self.cash_to_spend, 1))
        test_rate_fee_result = self.r.get_purchase_result(self.prices, self.cash_to_spend, 1)
        self.assertIs(np.allclose(test_rate_fee_result[0], [0., 997., 0.]), True, 'result incorrect')
        self.assertAlmostEqual(test_rate_fee_result[1], -19999.82, msg='result incorrect')
        self.assertAlmostEqual(test_rate_fee_result[2], 59.82, msg='result incorrect')

        print('\nPurchase result with fixed rate = 0.003 and moq = 100:')
        print(self.r.get_purchase_result(self.prices, self.cash_to_spend, 100))
        test_rate_fee_result = self.r.get_purchase_result(self.prices, self.cash_to_spend, 100)
        self.assertIs(np.allclose(test_rate_fee_result[0], [0., 900., 0.]), True, 'result incorrect')
        self.assertAlmostEqual(test_rate_fee_result[1], -18054., msg='result incorrect')
        self.assertAlmostEqual(test_rate_fee_result[2], 54.0, msg='result incorrect')

    def test_min_fee(self):
        """Test the minimum transaction fee"""
        self.r.buy_rate = 0.
        self.r.sell_rate = 0.
        self.r.buy_fix = 0.
        self.r.sell_fix = 0.
        self.r.buy_min = 300
        self.r.sell_min = 300
        self.r.slipage = 0.
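        # With all rates zeroed, each trade should be charged exactly the minimum fee
        # of 300 (assumed arithmetic, shown only to make the expected values below
        # easier to follow): buying with 20000 cash at price 20 leaves
        # 20000 - 300 = 19700 for shares, i.e. 985 shares at moq = 0.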
        print('\npurchase result with fixed cost rate with min fee = 300 and moq = 0:')
        print(self.r.get_purchase_result(self.prices, self.cash_to_spend, 0))
        test_min_fee_result = self.r.get_purchase_result(self.prices, self.cash_to_spend, 0)
        self.assertIs(np.allclose(test_min_fee_result[0], [0., 985, 0.]), True, 'result incorrect')
        self.assertAlmostEqual(test_min_fee_result[1], -20000.0, msg='result incorrect')
        self.assertAlmostEqual(test_min_fee_result[2], 300.0, msg='result incorrect')

        print('\npurchase result with fixed cost rate with min fee = 300 and moq = 10:')
        print(self.r.get_purchase_result(self.prices, self.cash_to_spend, 10))
        test_min_fee_result = self.r.get_purchase_result(self.prices, self.cash_to_spend, 10)
        self.assertIs(np.allclose(test_min_fee_result[0], [0., 980, 0.]), True, 'result incorrect')
        self.assertAlmostEqual(test_min_fee_result[1], -19900.0, msg='result incorrect')
        self.assertAlmostEqual(test_min_fee_result[2], 300.0, msg='result incorrect')

        print('\npurchase result with fixed cost rate with min fee = 300 and moq = 100:')
        print(self.r.get_purchase_result(self.prices, self.cash_to_spend, 100))
        test_min_fee_result = self.r.get_purchase_result(self.prices, self.cash_to_spend, 100)
        self.assertIs(np.allclose(test_min_fee_result[0], [0., 900, 0.]), True, 'result incorrect')
        self.assertAlmostEqual(test_min_fee_result[1], -18300.0, msg='result incorrect')
        self.assertAlmostEqual(test_min_fee_result[2], 300.0, msg='result incorrect')

        print('\nselling result with fixed cost rate with min fee = 300 and moq = 0:')
        print(self.r.get_selling_result(self.prices, self.amounts_to_sell))
        test_min_fee_result = self.r.get_selling_result(self.prices, self.amounts_to_sell)
        self.assertIs(np.allclose(test_min_fee_result[0], [0, 0, -3333.3333]), True, 'result incorrect')
        self.assertAlmostEqual(test_min_fee_result[1], 33033.333)
        self.assertAlmostEqual(test_min_fee_result[2], 300.0)

        print('\nselling result with fixed cost rate with min fee = 300 and moq = 1:')
        print(self.r.get_selling_result(self.prices, self.amounts_to_sell, 1))
        test_min_fee_result = self.r.get_selling_result(self.prices, self.amounts_to_sell, 1)
        self.assertIs(np.allclose(test_min_fee_result[0], [0, 0, -3333]), True, 'result incorrect')
        self.assertAlmostEqual(test_min_fee_result[1], 33030)
        self.assertAlmostEqual(test_min_fee_result[2], 300.0)

        print('\nselling result with fixed cost rate with min fee = 300 and moq = 100:')
        print(self.r.get_selling_result(self.prices, self.amounts_to_sell, 100))
        test_min_fee_result = self.r.get_selling_result(self.prices, self.amounts_to_sell, 100)
        self.assertIs(np.allclose(test_min_fee_result[0], [0, 0, -3300]), True, 'result incorrect')
        self.assertAlmostEqual(test_min_fee_result[1], 32700)
        self.assertAlmostEqual(test_min_fee_result[2], 300.0)

    def test_rate_with_min(self):
        """Test how the minimum fee interacts with the other fee-rate parameters"""
        self.r.buy_rate = 0.0153
        self.r.sell_rate = 0.01
        self.r.buy_fix = 0.
        self.r.sell_fix = 0.
        self.r.buy_min = 300
        self.r.sell_min = 333
        self.r.slipage = 0.
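        # Here a proportional rate and a minimum fee are combined; the expected values
        # below are consistent with charging max(rate-based fee, minimum fee) (an
        # assumed model, not a documented contract): at moq = 0 the buy-side rate fee
        # is about 20000 * 0.0153 / 1.0153 = 301.39 > 300, so the rate applies, while
        # the coarser moq = 10 and moq = 100 cases fall back to the 300 minimum.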
        print('\npurchase result with fixed cost rate with buy_rate = 0.0153, min fee = 300 and moq = 0:')
        print(self.r.get_purchase_result(self.prices, self.cash_to_spend, 0))
        test_rate_with_min_result = self.r.get_purchase_result(self.prices, self.cash_to_spend, 0)
        self.assertIs(np.allclose(test_rate_with_min_result[0], [0., 984.9305624, 0.]), True, 'result incorrect')
        self.assertAlmostEqual(test_rate_with_min_result[1], -20000.0, msg='result incorrect')
        self.assertAlmostEqual(test_rate_with_min_result[2], 301.3887520929774, msg='result incorrect')

        print('\npurchase result with fixed cost rate with buy_rate = 0.0153, min fee = 300 and moq = 10:')
        print(self.r.get_purchase_result(self.prices, self.cash_to_spend, 10))
        test_rate_with_min_result = self.r.get_purchase_result(self.prices, self.cash_to_spend, 10)
        self.assertIs(np.allclose(test_rate_with_min_result[0], [0., 980, 0.]), True, 'result incorrect')
        self.assertAlmostEqual(test_rate_with_min_result[1], -19900.0, msg='result incorrect')
        self.assertAlmostEqual(test_rate_with_min_result[2], 300.0, msg='result incorrect')

        print('\npurchase result with fixed cost rate with buy_rate = 0.0153, min fee = 300 and moq = 100:')
        print(self.r.get_purchase_result(self.prices, self.cash_to_spend, 100))
        test_rate_with_min_result = self.r.get_purchase_result(self.prices, self.cash_to_spend, 100)
        self.assertIs(np.allclose(test_rate_with_min_result[0], [0., 900, 0.]), True, 'result incorrect')
        self.assertAlmostEqual(test_rate_with_min_result[1], -18300.0, msg='result incorrect')
        self.assertAlmostEqual(test_rate_with_min_result[2], 300.0, msg='result incorrect')

        print('\nselling result with fixed cost rate with sell_rate = 0.01, min fee = 333 and moq = 0:')
        print(self.r.get_selling_result(self.prices, self.amounts_to_sell))
        test_rate_with_min_result = self.r.get_selling_result(self.prices, self.amounts_to_sell)
        self.assertIs(np.allclose(test_rate_with_min_result[0], [0, 0, -3333.3333]), True, 'result incorrect')
        self.assertAlmostEqual(test_rate_with_min_result[1], 32999.99967)
        self.assertAlmostEqual(test_rate_with_min_result[2], 333.33333)

        print('\nselling result with fixed cost rate with sell_rate = 0.01, min fee = 333 and moq = 1:')
        print(self.r.get_selling_result(self.prices, self.amounts_to_sell, 1))
        test_rate_with_min_result = self.r.get_selling_result(self.prices, self.amounts_to_sell, 1)
        self.assertIs(np.allclose(test_rate_with_min_result[0], [0, 0, -3333]), True, 'result incorrect')
        self.assertAlmostEqual(test_rate_with_min_result[1], 32996.7)
        self.assertAlmostEqual(test_rate_with_min_result[2], 333.3)

        print('\nselling result with fixed cost rate with sell_rate = 0.01, min fee = 333 and moq = 100:')
        print(self.r.get_selling_result(self.prices, self.amounts_to_sell, 100))
        test_rate_with_min_result = self.r.get_selling_result(self.prices, self.amounts_to_sell, 100)
        self.assertIs(np.allclose(test_rate_with_min_result[0], [0, 0, -3300]), True, 'result incorrect')
        self.assertAlmostEqual(test_rate_with_min_result[1], 32667.0)
        self.assertAlmostEqual(test_rate_with_min_result[2], 333.0)

    def test_fixed_fee(self):
        """Test fixed transaction fees"""
        self.r.buy_rate = 0.
        self.r.sell_rate = 0.
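        # Fixed fees are charged per transaction regardless of trade size (assumed
        # model, for reference only): selling 3333.3333 shares at 10 should return
        # about 33333.333 - 150 = 33183.333 cash, and buying with 20000 cash at 20
        # should leave (20000 - 200) / 20 = 990 shares, matching the assertions below.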
        self.r.buy_fix = 200
        self.r.sell_fix = 150
        self.r.buy_min = 0
        self.r.sell_min = 0
        self.r.slipage = 0

        print('\nselling result of fixed cost with fixed fee = 150 and moq=0:')
        print(self.r.get_selling_result(self.prices, self.amounts_to_sell, 0))
        test_fixed_fee_result = self.r.get_selling_result(self.prices, self.amounts_to_sell)
        self.assertIs(np.allclose(test_fixed_fee_result[0], [0, 0, -3333.3333]), True, 'result incorrect')
        self.assertAlmostEqual(test_fixed_fee_result[1], 33183.333, msg='result incorrect')
        self.assertAlmostEqual(test_fixed_fee_result[2], 150.0, msg='result incorrect')

        print('\nselling result of fixed cost with fixed fee = 150 and moq=100:')
        print(self.r.get_selling_result(self.prices, self.amounts_to_sell, 100))
        test_fixed_fee_result = self.r.get_selling_result(self.prices, self.amounts_to_sell, 100)
        self.assertIs(np.allclose(test_fixed_fee_result[0], [0, 0, -3300.]), True,
                      f'result incorrect, {test_fixed_fee_result[0]} does not equal to [0, 0, -3300]')
        self.assertAlmostEqual(test_fixed_fee_result[1], 32850., msg='result incorrect')
        self.assertAlmostEqual(test_fixed_fee_result[2], 150., msg='result incorrect')

        print('\npurchase result of fixed cost with fixed fee = 200 and moq=0:')
        print(self.r.get_purchase_result(self.prices, self.cash_to_spend, 0))
        test_fixed_fee_result = self.r.get_purchase_result(self.prices, self.cash_to_spend, 0)
        self.assertIs(np.allclose(test_fixed_fee_result[0], [0., 990., 0.]), True, 'result incorrect')
        self.assertAlmostEqual(test_fixed_fee_result[1], -20000.0, msg='result incorrect')
        self.assertAlmostEqual(test_fixed_fee_result[2], 200.0, msg='result incorrect')

        print('\npurchase result of fixed cost with fixed fee = 200 and moq=100:')
        print(self.r.get_purchase_result(self.prices, self.cash_to_spend, 100))
        test_fixed_fee_result = self.r.get_purchase_result(self.prices, self.cash_to_spend, 100)
        self.assertIs(np.allclose(test_fixed_fee_result[0], [0., 900., 0.]), True, 'result incorrect')
        self.assertAlmostEqual(test_fixed_fee_result[1], -18200.0, msg='result incorrect')
        self.assertAlmostEqual(test_fixed_fee_result[2], 200.0, msg='result incorrect')

    def test_slipage(self):
        """Test trade slippage"""
        self.r.buy_fix = 0
        self.r.sell_fix = 0
        self.r.buy_min = 0
        self.r.sell_min = 0
        self.r.buy_rate = 0.003
        self.r.sell_rate = 0.001
        self.r.slipage = 1E-9

        print('\npurchase result of fixed rate = 0.003 and slipage = 1E-9 and moq = 0:')
        print(self.r.get_purchase_result(self.prices, self.cash_to_spend, 0))
        print('\npurchase result of fixed rate = 0.003 and slipage = 1E-9 and moq = 100:')
        print(self.r.get_purchase_result(self.prices, self.cash_to_spend, 100))
        print('\nselling result with fixed rate = 0.001 and slipage = 1E-9:')
        print(self.r.get_selling_result(self.prices, self.amounts_to_sell))

        test_fixed_fee_result = self.r.get_selling_result(self.prices, self.amounts_to_sell)
        self.assertIs(np.allclose(test_fixed_fee_result[0], [0, 0, -3333.3333]), True,
                      f'{test_fixed_fee_result[0]} does not equal to [0, 0, -3333.3333]')
        self.assertAlmostEqual(test_fixed_fee_result[1], 33298.88855591,
                               msg=f'{test_fixed_fee_result[1]} does not equal to 33298.88855591.')
        self.assertAlmostEqual(test_fixed_fee_result[2], 34.44444409,
                               msg=f'{test_fixed_fee_result[2]} does not equal to 34.44444409.')

        test_fixed_fee_result = self.r.get_purchase_result(self.prices, self.cash_to_spend, 0)
        self.assertIs(np.allclose(test_fixed_fee_result[0], [0., 996.98909294, 0.]), True, 'result incorrect')
        self.assertAlmostEqual(test_fixed_fee_result[1], -20000.0, msg='result incorrect')
        self.assertAlmostEqual(test_fixed_fee_result[2],
60.21814121353513, msg='result incorrect') test_fixed_fee_result = self.r.get_purchase_result(self.prices, self.cash_to_spend, 100) self.assertIs(np.allclose(test_fixed_fee_result[0], [0., 900., 0.]), True, 'result incorrect') self.assertAlmostEqual(test_fixed_fee_result[1], -18054.36, msg='result incorrect') self.assertAlmostEqual(test_fixed_fee_result[2], 54.36, msg='result incorrect') class TestSpace(unittest.TestCase): def test_creation(self): """ test if creation of space object is fine """ # first group of inputs, output Space with two discr axis from [0,10] print('testing space objects\n') # pars_list = [[(0, 10), (0, 10)], # [[0, 10], [0, 10]]] # # types_list = ['discr', # ['discr', 'discr']] # # input_pars = itertools.product(pars_list, types_list) # for p in input_pars: # # print(p) # s = qt.Space(*p) # b = s.boes # t = s.types # # print(s, t) # self.assertIsInstance(s, qt.Space) # self.assertEqual(b, [(0, 10), (0, 10)], 'boes incorrect!') # self.assertEqual(t, ['discr', 'discr'], 'types incorrect') # pars_list = [[(0, 10), (0, 10)], [[0, 10], [0, 10]]] types_list = ['foo, bar', ['foo', 'bar']] input_pars = itertools.product(pars_list, types_list) for p in input_pars: # print(p) s = Space(*p) b = s.boes t = s.types # print(s, t) self.assertEqual(b, [(0, 10), (0, 10)], 'boes incorrect!') self.assertEqual(t, ['enum', 'enum'], 'types incorrect') pars_list = [[(0, 10), (0, 10)], [[0, 10], [0, 10]]] types_list = [['discr', 'foobar']] input_pars = itertools.product(pars_list, types_list) for p in input_pars: # print(p) s = Space(*p) b = s.boes t = s.types # print(s, t) self.assertEqual(b, [(0, 10), (0, 10)], 'boes incorrect!') self.assertEqual(t, ['discr', 'enum'], 'types incorrect') pars_list = [(0., 10), (0, 10)] s = Space(pars=pars_list, par_types=None) self.assertEqual(s.types, ['conti', 'discr']) self.assertEqual(s.dim, 2) self.assertEqual(s.size, (10.0, 11)) self.assertEqual(s.shape, (np.inf, 11)) self.assertEqual(s.count, np.inf) self.assertEqual(s.boes, [(0., 10), (0, 10)]) pars_list = [(0., 10), (0, 10)] s = Space(pars=pars_list, par_types='conti, enum') self.assertEqual(s.types, ['conti', 'enum']) self.assertEqual(s.dim, 2) self.assertEqual(s.size, (10.0, 2)) self.assertEqual(s.shape, (np.inf, 2)) self.assertEqual(s.count, np.inf) self.assertEqual(s.boes, [(0., 10), (0, 10)]) pars_list = [(1, 2), (2, 3), (3, 4)] s = Space(pars=pars_list) self.assertEqual(s.types, ['discr', 'discr', 'discr']) self.assertEqual(s.dim, 3) self.assertEqual(s.size, (2, 2, 2)) self.assertEqual(s.shape, (2, 2, 2)) self.assertEqual(s.count, 8) self.assertEqual(s.boes, [(1, 2), (2, 3), (3, 4)]) pars_list = [(1, 2, 3), (2, 3, 4), (3, 4, 5)] s = Space(pars=pars_list) self.assertEqual(s.types, ['enum', 'enum', 'enum']) self.assertEqual(s.dim, 3) self.assertEqual(s.size, (3, 3, 3)) self.assertEqual(s.shape, (3, 3, 3)) self.assertEqual(s.count, 27) self.assertEqual(s.boes, [(1, 2, 3), (2, 3, 4), (3, 4, 5)]) pars_list = [((1, 2, 3), (2, 3, 4), (3, 4, 5))] s = Space(pars=pars_list) self.assertEqual(s.types, ['enum']) self.assertEqual(s.dim, 1) self.assertEqual(s.size, (3,)) self.assertEqual(s.shape, (3,)) self.assertEqual(s.count, 3) pars_list = ((1, 2, 3), (2, 3, 4), (3, 4, 5)) s = Space(pars=pars_list) self.assertEqual(s.types, ['enum', 'enum', 'enum']) self.assertEqual(s.dim, 3) self.assertEqual(s.size, (3, 3, 3)) self.assertEqual(s.shape, (3, 3, 3)) self.assertEqual(s.count, 27) self.assertEqual(s.boes, [(1, 2, 3), (2, 3, 4), (3, 4, 5)]) def test_extract(self): """ :return: """ pars_list = [(0, 10), (0, 
10)] types_list = ['discr', 'discr'] s = Space(pars=pars_list, par_types=types_list) extracted_int, count = s.extract(3, 'interval') extracted_int_list = list(extracted_int) print('extracted int\n', extracted_int_list) self.assertEqual(count, 16, 'extraction count wrong!') self.assertEqual(extracted_int_list, [(0, 0), (0, 3), (0, 6), (0, 9), (3, 0), (3, 3), (3, 6), (3, 9), (6, 0), (6, 3), (6, 6), (6, 9), (9, 0), (9, 3), (9, 6), (9, 9)], 'space extraction wrong!') extracted_rand, count = s.extract(10, 'rand') extracted_rand_list = list(extracted_rand) self.assertEqual(count, 10, 'extraction count wrong!') print('extracted rand\n', extracted_rand_list) for point in list(extracted_rand_list): self.assertEqual(len(point), 2) self.assertLessEqual(point[0], 10) self.assertGreaterEqual(point[0], 0) self.assertLessEqual(point[1], 10) self.assertGreaterEqual(point[1], 0) pars_list = [(0., 10), (0, 10)] s = Space(pars=pars_list, par_types=None) extracted_int2, count = s.extract(3, 'interval') self.assertEqual(count, 16, 'extraction count wrong!') extracted_int_list2 = list(extracted_int2) self.assertEqual(extracted_int_list2, [(0, 0), (0, 3), (0, 6), (0, 9), (3, 0), (3, 3), (3, 6), (3, 9), (6, 0), (6, 3), (6, 6), (6, 9), (9, 0), (9, 3), (9, 6), (9, 9)], 'space extraction wrong!') print('extracted int list 2\n', extracted_int_list2) self.assertIsInstance(extracted_int_list2[0][0], float) self.assertIsInstance(extracted_int_list2[0][1], (int, int64)) extracted_rand2, count = s.extract(10, 'rand') self.assertEqual(count, 10, 'extraction count wrong!') extracted_rand_list2 = list(extracted_rand2) print('extracted rand list 2:\n', extracted_rand_list2) for point in extracted_rand_list2: self.assertEqual(len(point), 2) self.assertIsInstance(point[0], float) self.assertLessEqual(point[0], 10) self.assertGreaterEqual(point[0], 0) self.assertIsInstance(point[1], (int, int64)) self.assertLessEqual(point[1], 10) self.assertGreaterEqual(point[1], 0) pars_list = [(0., 10), ('a', 'b')] s = Space(pars=pars_list, par_types='enum, enum') extracted_int3, count = s.extract(1, 'interval') self.assertEqual(count, 4, 'extraction count wrong!') extracted_int_list3 = list(extracted_int3) self.assertEqual(extracted_int_list3, [(0., 'a'), (0., 'b'), (10, 'a'), (10, 'b')], 'space extraction wrong!') print('extracted int list 3\n', extracted_int_list3) self.assertIsInstance(extracted_int_list3[0][0], float) self.assertIsInstance(extracted_int_list3[0][1], str) extracted_rand3, count = s.extract(3, 'rand') self.assertEqual(count, 3, 'extraction count wrong!') extracted_rand_list3 = list(extracted_rand3) print('extracted rand list 3:\n', extracted_rand_list3) for point in extracted_rand_list3: self.assertEqual(len(point), 2) self.assertIsInstance(point[0], (float, int)) self.assertLessEqual(point[0], 10) self.assertGreaterEqual(point[0], 0) self.assertIsInstance(point[1], str) self.assertIn(point[1], ['a', 'b']) pars_list = [((0, 10), (1, 'c'), ('a', 'b'), (1, 14))] s = Space(pars=pars_list, par_types='enum') extracted_int4, count = s.extract(1, 'interval') self.assertEqual(count, 4, 'extraction count wrong!') extracted_int_list4 = list(extracted_int4) it = zip(extracted_int_list4, [(0, 10), (1, 'c'), (0, 'b'), (1, 14)]) for item, item2 in it: print(item, item2) self.assertTrue(all([tuple(ext_item) == item for ext_item, item in it])) print('extracted int list 4\n', extracted_int_list4) self.assertIsInstance(extracted_int_list4[0], tuple) extracted_rand4, count = s.extract(3, 'rand') self.assertEqual(count, 3, 'extraction count 
wrong!') extracted_rand_list4 = list(extracted_rand4) print('extracted rand list 4:\n', extracted_rand_list4) for point in extracted_rand_list4: self.assertEqual(len(point), 2) self.assertIsInstance(point[0], (int, str)) self.assertIn(point[0], [0, 1, 'a']) self.assertIsInstance(point[1], (int, str)) self.assertIn(point[1], [10, 14, 'b', 'c']) self.assertIn(point, [(0., 10), (1, 'c'), ('a', 'b'), (1, 14)]) pars_list = [((0, 10), (1, 'c'), ('a', 'b'), (1, 14)), (1, 4)] s = Space(pars=pars_list, par_types='enum, discr') extracted_int5, count = s.extract(1, 'interval') self.assertEqual(count, 16, 'extraction count wrong!') extracted_int_list5 = list(extracted_int5) for item, item2 in extracted_int_list5: print(item, item2) self.assertTrue(all([tuple(ext_item) == item for ext_item, item in it])) print('extracted int list 5\n', extracted_int_list5) self.assertIsInstance(extracted_int_list5[0], tuple) extracted_rand5, count = s.extract(5, 'rand') self.assertEqual(count, 5, 'extraction count wrong!') extracted_rand_list5 = list(extracted_rand5) print('extracted rand list 5:\n', extracted_rand_list5) for point in extracted_rand_list5: self.assertEqual(len(point), 2) self.assertIsInstance(point[0], tuple) print(f'type of point[1] is {type(point[1])}') self.assertIsInstance(point[1], (int, np.int64)) self.assertIn(point[0], [(0., 10), (1, 'c'), ('a', 'b'), (1, 14)]) print(f'test incremental extraction') pars_list = [(10., 250), (10., 250), (10., 250), (10., 250), (10., 250), (10., 250)] s = Space(pars_list) ext, count = s.extract(64, 'interval') self.assertEqual(count, 4096) points = list(ext) # 已经取出所有的点,围绕其中10个点生成十个subspaces # 检查是否每个subspace都为Space,是否都在s范围内,使用32生成点集,检查生成数量是否正确 for point in points[1000:1010]: subspace = s.from_point(point, 64) self.assertIsInstance(subspace, Space) self.assertTrue(subspace in s) self.assertEqual(subspace.dim, 6) self.assertEqual(subspace.types, ['conti', 'conti', 'conti', 'conti', 'conti', 'conti']) ext, count = subspace.extract(32) points = list(ext) self.assertGreaterEqual(count, 512) self.assertLessEqual(count, 4096) print(f'\n---------------------------------' f'\nthe space created around point <{point}> is' f'\n{subspace.boes}' f'\nand extracted {count} points, the first 5 are:' f'\n{points[:5]}') def test_axis_extract(self): # test axis object with conti type axis = Axis((0., 5)) self.assertIsInstance(axis, Axis) self.assertEqual(axis.axis_type, 'conti') self.assertEqual(axis.axis_boe, (0., 5.)) self.assertEqual(axis.count, np.inf) self.assertEqual(axis.size, 5.0) self.assertTrue(np.allclose(axis.extract(1, 'int'), [0., 1., 2., 3., 4.])) self.assertTrue(np.allclose(axis.extract(0.5, 'int'), [0., 0.5, 1., 1.5, 2., 2.5, 3., 3.5, 4., 4.5])) extracted = axis.extract(8, 'rand') self.assertEqual(len(extracted), 8) self.assertTrue(all([(0 <= item <= 5) for item in extracted])) # test axis object with discrete type axis = Axis((1, 5)) self.assertIsInstance(axis, Axis) self.assertEqual(axis.axis_type, 'discr') self.assertEqual(axis.axis_boe, (1, 5)) self.assertEqual(axis.count, 5) self.assertEqual(axis.size, 5) self.assertTrue(np.allclose(axis.extract(1, 'int'), [1, 2, 3, 4, 5])) self.assertRaises(ValueError, axis.extract, 0.5, 'int') extracted = axis.extract(8, 'rand') self.assertEqual(len(extracted), 8) self.assertTrue(all([(item in [1, 2, 3, 4, 5]) for item in extracted])) # test axis object with enumerate type axis = Axis((1, 5, 7, 10, 'A', 'F')) self.assertIsInstance(axis, Axis) self.assertEqual(axis.axis_type, 'enum') self.assertEqual(axis.axis_boe, (1, 5, 7, 10, 
'A', 'F')) self.assertEqual(axis.count, 6) self.assertEqual(axis.size, 6) self.assertEqual(axis.extract(1, 'int'), [1, 5, 7, 10, 'A', 'F']) self.assertRaises(ValueError, axis.extract, 0.5, 'int') extracted = axis.extract(8, 'rand') self.assertEqual(len(extracted), 8) self.assertTrue(all([(item in [1, 5, 7, 10, 'A', 'F']) for item in extracted])) def test_from_point(self): """测试从一个点生成一个space""" # 生成一个space,指定space中的一个点以及distance,生成一个sub-space pars_list = [(0., 10), (0, 10)] s = Space(pars=pars_list, par_types=None) self.assertEqual(s.types, ['conti', 'discr']) self.assertEqual(s.dim, 2) self.assertEqual(s.size, (10., 11)) self.assertEqual(s.shape, (np.inf, 11)) self.assertEqual(s.count, np.inf) self.assertEqual(s.boes, [(0., 10), (0, 10)]) print('create subspace from a point in space') p = (3, 3) distance = 2 subspace = s.from_point(p, distance) self.assertIsInstance(subspace, Space) self.assertEqual(subspace.types, ['conti', 'discr']) self.assertEqual(subspace.dim, 2) self.assertEqual(subspace.size, (4.0, 5)) self.assertEqual(subspace.shape, (np.inf, 5)) self.assertEqual(subspace.count, np.inf) self.assertEqual(subspace.boes, [(1, 5), (1, 5)]) print('create subspace from a 6 dimensional discrete space') s = Space(pars=[(10, 250), (10, 250), (10, 250), (10, 250), (10, 250), (10, 250)]) p = (15, 200, 150, 150, 150, 150) d = 10 subspace = s.from_point(p, d) self.assertIsInstance(subspace, Space) self.assertEqual(subspace.types, ['discr', 'discr', 'discr', 'discr', 'discr', 'discr']) self.assertEqual(subspace.dim, 6) self.assertEqual(subspace.volume, 65345616) self.assertEqual(subspace.size, (16, 21, 21, 21, 21, 21)) self.assertEqual(subspace.shape, (16, 21, 21, 21, 21, 21)) self.assertEqual(subspace.count, 65345616) self.assertEqual(subspace.boes, [(10, 25), (190, 210), (140, 160), (140, 160), (140, 160), (140, 160)]) print('create subspace from a 6 dimensional continuous space') s = Space(pars=[(10., 250), (10., 250), (10., 250), (10., 250), (10., 250), (10., 250)]) p = (15, 200, 150, 150, 150, 150) d = 10 subspace = s.from_point(p, d) self.assertIsInstance(subspace, Space) self.assertEqual(subspace.types, ['conti', 'conti', 'conti', 'conti', 'conti', 'conti']) self.assertEqual(subspace.dim, 6) self.assertEqual(subspace.volume, 48000000) self.assertEqual(subspace.size, (15.0, 20.0, 20.0, 20.0, 20.0, 20.0)) self.assertEqual(subspace.shape, (np.inf, np.inf, np.inf, np.inf, np.inf, np.inf)) self.assertEqual(subspace.count, np.inf) self.assertEqual(subspace.boes, [(10, 25), (190, 210), (140, 160), (140, 160), (140, 160), (140, 160)]) print('create subspace with different distances on each dimension') s = Space(pars=[(10., 250), (10., 250), (10., 250), (10., 250), (10., 250), (10., 250)]) p = (15, 200, 150, 150, 150, 150) d = [10, 5, 5, 10, 10, 5] subspace = s.from_point(p, d) self.assertIsInstance(subspace, Space) self.assertEqual(subspace.types, ['conti', 'conti', 'conti', 'conti', 'conti', 'conti']) self.assertEqual(subspace.dim, 6) self.assertEqual(subspace.volume, 6000000) self.assertEqual(subspace.size, (15.0, 10.0, 10.0, 20.0, 20.0, 10.0)) self.assertEqual(subspace.shape, (np.inf, np.inf, np.inf, np.inf, np.inf, np.inf)) self.assertEqual(subspace.count, np.inf) self.assertEqual(subspace.boes, [(10, 25), (195, 205), (145, 155), (140, 160), (140, 160), (145, 155)]) class TestCashPlan(unittest.TestCase): def setUp(self): self.cp1 = qt.CashPlan(['2012-01-01', '2010-01-01'], [10000, 20000], 0.1) self.cp1.info() self.cp2 = qt.CashPlan(['20100501'], 10000) self.cp2.info() self.cp3 = 
qt.CashPlan(pd.date_range(start='2019-01-01', freq='Y', periods=12), [i * 1000 + 10000 for i in range(12)], 0.035) self.cp3.info() def test_creation(self): self.assertIsInstance(self.cp1, qt.CashPlan, 'CashPlan object creation wrong') self.assertIsInstance(self.cp2, qt.CashPlan, 'CashPlan object creation wrong') self.assertIsInstance(self.cp3, qt.CashPlan, 'CashPlan object creation wrong') # test __repr__() print(self.cp1) print(self.cp2) print(self.cp3) # test __str__() self.cp1.info() self.cp2.info() self.cp3.info() # test assersion errors self.assertRaises(AssertionError, qt.CashPlan, '2016-01-01', [10000, 10000]) self.assertRaises(KeyError, qt.CashPlan, '2020-20-20', 10000) def test_properties(self): self.assertEqual(self.cp1.amounts, [20000, 10000], 'property wrong') self.assertEqual(self.cp1.first_day, Timestamp('2010-01-01')) self.assertEqual(self.cp1.last_day, Timestamp('2012-01-01')) self.assertEqual(self.cp1.investment_count, 2) self.assertEqual(self.cp1.period, 730) self.assertEqual(self.cp1.dates, [Timestamp('2010-01-01'), Timestamp('2012-01-01')]) self.assertEqual(self.cp1.ir, 0.1) self.assertAlmostEqual(self.cp1.closing_value, 34200) self.assertAlmostEqual(self.cp2.closing_value, 10000) self.assertAlmostEqual(self.cp3.closing_value, 220385.3483685) self.assertIsInstance(self.cp1.plan, pd.DataFrame) self.assertIsInstance(self.cp2.plan, pd.DataFrame) self.assertIsInstance(self.cp3.plan, pd.DataFrame) def test_operation(self): cp_self_add = self.cp1 + self.cp1 cp_add = self.cp1 + self.cp2 cp_add_int = self.cp1 + 10000 cp_mul_int = self.cp1 * 2 cp_mul_float = self.cp2 * 1.5 cp_mul_time = 3 * self.cp2 cp_mul_time2 = 2 * self.cp1 cp_mul_time3 = 2 * self.cp3 cp_mul_float2 = 2. * self.cp3 self.assertIsInstance(cp_self_add, qt.CashPlan) self.assertEqual(cp_self_add.amounts, [40000, 20000]) self.assertEqual(cp_add.amounts, [20000, 10000, 10000]) self.assertEqual(cp_add_int.amounts, [30000, 20000]) self.assertEqual(cp_mul_int.amounts, [40000, 20000]) self.assertEqual(cp_mul_float.amounts, [15000]) self.assertEqual(cp_mul_float.dates, [Timestamp('2010-05-01')]) self.assertEqual(cp_mul_time.amounts, [10000, 10000, 10000]) self.assertEqual(cp_mul_time.dates, [Timestamp('2010-05-01'), Timestamp('2011-05-01'), Timestamp('2012-04-30')]) self.assertEqual(cp_mul_time2.amounts, [20000, 10000, 20000, 10000]) self.assertEqual(cp_mul_time2.dates, [Timestamp('2010-01-01'), Timestamp('2012-01-01'), Timestamp('2014-01-01'), Timestamp('2016-01-01')]) self.assertEqual(cp_mul_time3.dates, [Timestamp('2019-12-31'), Timestamp('2020-12-31'), Timestamp('2021-12-31'), Timestamp('2022-12-31'), Timestamp('2023-12-31'), Timestamp('2024-12-31'), Timestamp('2025-12-31'), Timestamp('2026-12-31'), Timestamp('2027-12-31'), Timestamp('2028-12-31'), Timestamp('2029-12-31'), Timestamp('2030-12-31'), Timestamp('2031-12-29'), Timestamp('2032-12-29'), Timestamp('2033-12-29'), Timestamp('2034-12-29'), Timestamp('2035-12-29'), Timestamp('2036-12-29'), Timestamp('2037-12-29'), Timestamp('2038-12-29'), Timestamp('2039-12-29'), Timestamp('2040-12-29'), Timestamp('2041-12-29'), Timestamp('2042-12-29')]) self.assertEqual(cp_mul_float2.dates, [Timestamp('2019-12-31'), Timestamp('2020-12-31'), Timestamp('2021-12-31'), Timestamp('2022-12-31'), Timestamp('2023-12-31'), Timestamp('2024-12-31'), Timestamp('2025-12-31'), Timestamp('2026-12-31'), Timestamp('2027-12-31'), Timestamp('2028-12-31'), Timestamp('2029-12-31'), Timestamp('2030-12-31')]) self.assertEqual(cp_mul_float2.amounts, [20000.0, 22000.0, 24000.0, 26000.0, 28000.0, 
30000.0, 32000.0, 34000.0, 36000.0, 38000.0, 40000.0, 42000.0]) class TestPool(unittest.TestCase): def setUp(self): self.p = ResultPool(5) self.items = ['first', 'second', (1, 2, 3), 'this', 24] self.perfs = [1, 2, 3, 4, 5] self.additional_result1 = ('abc', 12) self.additional_result2 = ([1, 2], -1) self.additional_result3 = (12, 5) def test_create(self): self.assertIsInstance(self.p, ResultPool) def test_operation(self): self.p.in_pool(self.additional_result1[0], self.additional_result1[1]) self.p.cut() self.assertEqual(self.p.item_count, 1) self.assertEqual(self.p.items, ['abc']) for item, perf in zip(self.items, self.perfs): self.p.in_pool(item, perf) self.assertEqual(self.p.item_count, 6) self.assertEqual(self.p.items, ['abc', 'first', 'second', (1, 2, 3), 'this', 24]) self.p.cut() self.assertEqual(self.p.items, ['second', (1, 2, 3), 'this', 24, 'abc']) self.assertEqual(self.p.perfs, [2, 3, 4, 5, 12]) self.p.in_pool(self.additional_result2[0], self.additional_result2[1]) self.p.in_pool(self.additional_result3[0], self.additional_result3[1]) self.assertEqual(self.p.item_count, 7) self.p.cut(keep_largest=False) self.assertEqual(self.p.items, [[1, 2], 'second', (1, 2, 3), 'this', 24]) self.assertEqual(self.p.perfs, [-1, 2, 3, 4, 5]) class TestCoreSubFuncs(unittest.TestCase): """Test all functions in core.py""" def setUp(self): pass def test_input_to_list(self): print('Testing input_to_list() function') input_str = 'first' self.assertEqual(qt.utilfuncs.input_to_list(input_str, 3), ['first', 'first', 'first']) self.assertEqual(qt.utilfuncs.input_to_list(input_str, 4), ['first', 'first', 'first', 'first']) self.assertEqual(qt.utilfuncs.input_to_list(input_str, 2, None), ['first', 'first']) input_list = ['first', 'second'] self.assertEqual(qt.utilfuncs.input_to_list(input_list, 3), ['first', 'second', None]) self.assertEqual(qt.utilfuncs.input_to_list(input_list, 4, 'padder'), ['first', 'second', 'padder', 'padder']) self.assertEqual(qt.utilfuncs.input_to_list(input_list, 1), ['first', 'second']) self.assertEqual(qt.utilfuncs.input_to_list(input_list, -5), ['first', 'second']) def test_point_in_space(self): sp = Space([(0., 10.), (0., 10.), (0., 10.)]) p1 = (5.5, 3.2, 7) p2 = (-1, 3, 10) self.assertTrue(p1 in sp) print(f'point {p1} is in space {sp}') self.assertFalse(p2 in sp) print(f'point {p2} is not in space {sp}') sp = Space([(0., 10.), (0., 10.), range(40, 3, -2)], 'conti, conti, enum') p1 = (5.5, 3.2, 8) self.assertTrue(p1 in sp) print(f'point {p1} is in space {sp}') def test_space_in_space(self): print('test if a space is in another space') sp = Space([(0., 10.), (0., 10.), (0., 10.)]) sp2 = Space([(0., 10.), (0., 10.), (0., 10.)]) self.assertTrue(sp2 in sp) self.assertTrue(sp in sp2) print(f'space {sp2} is in space {sp}\n' f'and space {sp} is in space {sp2}\n' f'they are equal to each other\n') sp2 = Space([(0, 5.), (2, 7.), (3., 9.)]) self.assertTrue(sp2 in sp) self.assertFalse(sp in sp2) print(f'space {sp2} is in space {sp}\n' f'and space {sp} is not in space {sp2}\n' f'{sp2} is a sub space of {sp}\n') sp2 = Space([(0, 5), (2, 7), (3., 9)]) self.assertFalse(sp2 in sp) self.assertFalse(sp in sp2) print(f'space {sp2} is not in space {sp}\n' f'and space {sp} is not in space {sp2}\n' f'they have different types of axes\n') sp = Space([(0., 10.), (0., 10.), range(40, 3, -2)]) self.assertFalse(sp in sp2) self.assertFalse(sp2 in sp) print(f'space {sp2} is not in space {sp}\n' f'and space {sp} is not in space {sp2}\n' f'they have different types of axes\n') def 
test_space_around_centre(self): sp = Space([(0., 10.), (0., 10.), (0., 10.)]) p1 = (5.5, 3.2, 7) ssp = space_around_centre(space=sp, centre=p1, radius=1.2) print(ssp.boes) print('\ntest multiple diameters:') self.assertEqual(ssp.boes, [(4.3, 6.7), (2.0, 4.4), (5.8, 8.2)]) ssp = space_around_centre(space=sp, centre=p1, radius=[1, 2, 1]) print(ssp.boes) self.assertEqual(ssp.boes, [(4.5, 6.5), (1.2000000000000002, 5.2), (6.0, 8.0)]) print('\ntest points on edge:') p2 = (5.5, 3.2, 10) ssp = space_around_centre(space=sp, centre=p1, radius=3.9) print(ssp.boes) self.assertEqual(ssp.boes, [(1.6, 9.4), (0.0, 7.1), (3.1, 10.0)]) print('\ntest enum spaces') sp = Space([(0, 100), range(40, 3, -2)], 'discr, enum') p1 = [34, 12] ssp = space_around_centre(space=sp, centre=p1, radius=5, ignore_enums=False) self.assertEqual(ssp.boes, [(29, 39), (22, 20, 18, 16, 14, 12, 10, 8, 6, 4)]) print(ssp.boes) print('\ntest enum space and ignore enum axis') ssp = space_around_centre(space=sp, centre=p1, radius=5) self.assertEqual(ssp.boes, [(29, 39), (40, 38, 36, 34, 32, 30, 28, 26, 24, 22, 20, 18, 16, 14, 12, 10, 8, 6, 4)]) print(sp.boes) def test_get_stock_pool(self): print(f'start test building stock pool function\n') share_basics = stock_basic(fields='ts_code,symbol,name,area,industry,market,list_date,exchange') print(f'\nselect all stocks by area') stock_pool = qt.get_stock_pool(area='上海') print(f'{len(stock_pool)} shares selected, first 5 are: {stock_pool[0:5]}\n' f'check if all stock areas are "上海"\n' f'{share_basics[np.isin(share_basics.ts_code, stock_pool)].head()}') self.assertTrue(share_basics[np.isin(share_basics.ts_code, stock_pool)]['area'].eq('上海').all()) print(f'\nselect all stocks by multiple areas') stock_pool = qt.get_stock_pool(area='贵州,北京,天津') print(f'\n{len(stock_pool)} shares selected, first 5 are: {stock_pool[0:5]}\n' f'check if all stock areas are in list of ["贵州", "北京", "天津"]\n' f'{share_basics[np.isin(share_basics.ts_code, stock_pool)].head()}') self.assertTrue(share_basics[np.isin(share_basics.ts_code, stock_pool)]['area'].isin(['贵州', '北京', '天津']).all()) print(f'\nselect all stocks by area and industry') stock_pool = qt.get_stock_pool(area='四川', industry='银行, 金融') print(f'\n{len(stock_pool)} shares selected, first 5 are: {stock_pool[0:5]}\n' f'check if all stock areas are "四川", and industry in ["银行", "金融"]\n' f'{share_basics[np.isin(share_basics.ts_code, stock_pool)].head()}') self.assertTrue(share_basics[np.isin(share_basics.ts_code, stock_pool)]['industry'].isin(['银行', '金融']).all()) self.assertTrue(share_basics[np.isin(share_basics.ts_code, stock_pool)]['area'].isin(['四川']).all()) print(f'\nselect all stocks by industry') stock_pool = qt.get_stock_pool(industry='银行, 金融') print(f'\n{len(stock_pool)} shares selected, first 5 are: {stock_pool[0:5]}\n' f'check if all stocks industry in ["银行", "金融"]\n' f'{share_basics[np.isin(share_basics.ts_code, stock_pool)].head()}') self.assertTrue(share_basics[np.isin(share_basics.ts_code, stock_pool)]['industry'].isin(['银行', '金融']).all()) print(f'\nselect all stocks by market') stock_pool = qt.get_stock_pool(market='主板') print(f'\n{len(stock_pool)} shares selected, first 5 are: {stock_pool[0:5]}\n' f'check if all stock market is "主板"\n' f'{share_basics[np.isin(share_basics.ts_code, stock_pool)].head()}') self.assertTrue(share_basics[np.isin(share_basics.ts_code, stock_pool)]['market'].isin(['主板']).all()) print(f'\nselect all stocks by market and list date') stock_pool = qt.get_stock_pool(date='2000-01-01', market='主板') print(f'\n{len(stock_pool)} shares 
selected, first 5 are: {stock_pool[0:5]}\n' f'check if all stock market is "主板", and list date after "2000-01-01"\n' f'{share_basics[np.isin(share_basics.ts_code, stock_pool)].head()}') self.assertTrue(share_basics[np.isin(share_basics.ts_code, stock_pool)]['market'].isin(['主板']).all()) self.assertTrue(share_basics[np.isin(share_basics.ts_code, stock_pool)]['list_date'].le('2000-01-01').all()) print(f'\nselect all stocks by list date') stock_pool = qt.get_stock_pool(date='1997-01-01') print(f'\n{len(stock_pool)} shares selected, first 5 are: {stock_pool[0:5]}\n' f'check if all list date after "1997-01-01"\n' f'{share_basics[np.isin(share_basics.ts_code, stock_pool)].head()}') self.assertTrue(share_basics[np.isin(share_basics.ts_code, stock_pool)]['list_date'].le('1997-01-01').all()) print(f'\nselect all stocks by exchange') stock_pool = qt.get_stock_pool(exchange='SSE') print(f'\n{len(stock_pool)} shares selected, first 5 are: {stock_pool[0:5]}\n' f'check if all exchanges are "SSE"\n' f'{share_basics[np.isin(share_basics.ts_code, stock_pool)].head()}') self.assertTrue(share_basics[np.isin(share_basics.ts_code, stock_pool)]['exchange'].eq('SSE').all()) print(f'\nselect all stocks by industry, area and list date') industry_list = ['银行', '全国地产', '互联网', '环境保护', '区域地产', '酒店餐饮', '运输设备', '综合类', '建筑工程', '玻璃', '家用电器', '文教休闲', '其他商业', '元器件', 'IT设备', '其他建材', '汽车服务', '火力发电', '医药商业', '汽车配件', '广告包装', '轻工机械', '新型电力', '多元金融', '饲料'] area_list = ['深圳', '北京', '吉林', '江苏', '辽宁', '广东', '安徽', '四川', '浙江', '湖南', '河北', '新疆', '山东', '河南', '山西', '江西', '青海', '湖北', '内蒙', '海南', '重庆', '陕西', '福建', '广西', '上海'] stock_pool = qt.get_stock_pool(date='19980101', industry=industry_list, area=area_list) print(f'\n{len(stock_pool)} shares selected, first 5 are: {stock_pool[0:5]}\n' f'check if all exchanges are "SSE"\n' f'{share_basics[np.isin(share_basics.ts_code, stock_pool)].head()}') self.assertTrue(share_basics[np.isin(share_basics.ts_code, stock_pool)]['list_date'].le('1998-01-01').all()) self.assertTrue(share_basics[np.isin(share_basics.ts_code, stock_pool)]['industry'].isin(industry_list).all()) self.assertTrue(share_basics[np.isin(share_basics.ts_code, stock_pool)]['area'].isin(area_list).all()) self.assertRaises(KeyError, qt.get_stock_pool, industry=25) self.assertRaises(KeyError, qt.get_stock_pool, share_name='000300.SH') self.assertRaises(KeyError, qt.get_stock_pool, markets='SSE') class TestEvaluations(unittest.TestCase): """Test all evaluation functions in core.py""" # 以下手动计算结果在Excel文件中 def setUp(self): """用np.random生成测试用数据,使用cumsum()模拟股票走势""" self.test_data1 = pd.DataFrame([5.34892759, 5.65768696, 5.79227076, 5.56266871, 5.88189632, 6.24795001, 5.92755558, 6.38748165, 6.31331899, 5.86001665, 5.61048472, 5.30696736, 5.40406792, 5.03180571, 5.37886353, 5.78608307, 6.26540339, 6.59348026, 6.90943801, 6.70911677, 6.33015954, 6.06697417, 5.9752499, 6.45786408, 6.95273763, 6.7691991, 6.70355481, 6.28048969, 6.61344541, 6.24620003, 6.47409983, 6.4522311, 6.8773094, 6.99727832, 6.59262674, 6.59014938, 6.63758237, 6.38331869, 6.09902105, 6.35390109, 6.51993567, 6.87244592, 6.83963485, 7.08797815, 6.88003144, 6.83657323, 6.97819483, 7.01600276, 7.12554256, 7.58941523, 7.61014457, 7.21224091, 7.48174399, 7.66490854, 7.51371968, 7.11586198, 6.97147399, 6.67453301, 6.2042138, 6.33967015, 6.22187938, 5.98426993, 6.37096079, 6.55897161, 6.26422645, 6.69363762, 7.12668015, 6.83232926, 7.30524081, 7.4262041, 7.54031383, 7.17545919, 7.20659257, 7.44886016, 7.37094393, 6.88011022, 7.08142491, 6.74992833, 6.5967097, 6.21336693, 
6.35565105, 6.82347596, 6.44773408, 6.84538053, 6.47966466, 6.09699528, 5.63927014, 6.01081024, 6.20585303, 6.60528206, 7.01594726, 7.03684251, 6.76574977, 7.08740846, 6.65336462, 7.07126686, 6.80058956, 6.79241977, 6.47843472, 6.39245474], columns=['value']) self.test_data2 = pd.DataFrame([5.09276527, 4.83828592, 4.6000911, 4.63170487, 4.63566451, 4.50546921, 4.96390044, 4.64557907, 4.25787855, 3.76585551, 3.38826334, 3.76243422, 4.06365426, 3.87084726, 3.91400935, 4.13438822, 4.27064542, 4.56776104, 5.03800296, 5.31070529, 5.39902276, 5.21186286, 5.05683114, 4.68842046, 5.11895168, 5.27151571, 5.72294993, 6.09961056, 6.26569635, 6.48806151, 6.16058885, 6.2582459, 6.38934791, 6.57831057, 6.19508831, 5.70155153, 5.20435735, 5.36538825, 5.40450056, 5.2227697, 5.37828693, 5.53058991, 6.02996797, 5.76802181, 5.66166713, 6.07988994, 5.61794367, 5.63218151, 6.10728013, 6.0324168, 6.27164431, 6.27551239, 6.52329665, 7.00470007, 7.34163113, 7.33699083, 7.67661334, 8.09395749, 7.68086668, 7.58341161, 7.46219819, 7.58671899, 7.19348298, 7.40088323, 7.47562005, 7.93342043, 8.2286081, 8.3521632, 8.43590025, 8.34977395, 8.57563095, 8.81586328, 9.08738649, 9.01542031, 8.8653815, 9.21763111, 9.04233017, 8.59533999, 8.47590075, 8.70857222, 8.78890756, 8.92697606, 9.35743773, 9.68280866, 10.15622021, 10.55908549, 10.6337894, 10.55197128, 10.65435176, 10.54611045, 10.19432562, 10.48320884, 10.36176768, 10.03186854, 10.23656092, 10.0062843, 10.13669686, 10.30758958, 9.87904176, 10.05126375], columns=['value']) self.test_data3 = pd.DataFrame([5.02851874, 5.20700348, 5.02410709, 5.49836387, 5.06834371, 5.10956737, 5.15314979, 5.02256472, 5.09746382, 5.23909247, 4.93410336, 4.96316186, 5.40026682, 5.7353255, 5.53438319, 5.79092139, 5.67528173, 5.89840855, 5.75379463, 6.10855386, 5.77322365, 5.84538021, 5.6103973, 5.7518655, 5.49729695, 5.13610628, 5.30524121, 5.68093462, 5.73251319, 6.04420783, 6.26929843, 6.59610234, 6.09872345, 6.25475121, 6.72927396, 6.91395783, 7.00693283, 7.36217783, 7.71516676, 7.67580263, 7.62477511, 7.73600568, 7.53457914, 7.46170277, 7.83658014, 8.11481319, 8.03705544, 7.64948845, 7.52043731, 7.67247943, 7.46511982, 7.43541798, 7.58856517, 7.9392717, 8.25406287, 7.77031632, 8.03223447, 7.86799055, 7.57630999, 7.33230519, 7.22378732, 6.85972264, 7.17548456, 7.5387846, 7.2392632, 6.8455644, 6.59557185, 6.6496796, 6.73685623, 7.18598015, 7.13619128, 6.88060157, 7.1399681, 7.30308077, 6.94942434, 7.0247815, 7.37567798, 7.50080197, 7.59719284, 7.14520561, 7.29913484, 7.79551341, 8.15497781, 8.40456095, 8.86516528, 8.53042688, 8.94268762, 8.52048006, 8.80036284, 8.91602364, 9.19953385, 8.70828953, 8.24613093, 8.18770453, 7.79548389, 7.68627967, 7.23205036, 6.98302636, 7.06515819, 6.95068113], columns=['value']) self.test_data4 = pd.DataFrame([4.97926539, 5.44016005, 5.45122915, 5.74485615, 5.45600553, 5.44858945, 5.2435413, 5.47315161, 5.58464303, 5.36179749, 5.38236326, 5.29614981, 5.76523508, 5.75102892, 6.15316618, 6.03852528, 6.01442228, 5.70510182, 5.22748133, 5.46762379, 5.78926267, 5.8221362, 5.61236849, 5.30615725, 5.24200611, 5.41042642, 5.59940342, 5.28306781, 4.99451932, 5.08799266, 5.38865647, 5.58229139, 5.33492845, 5.48206276, 5.09721379, 5.39190493, 5.29965087, 5.0374415, 5.50798022, 5.43107577, 5.22759507, 4.991809, 5.43153084, 5.39966868, 5.59916352, 5.66412137, 6.00611838, 5.63564902, 5.66723484, 5.29863863, 4.91115153, 5.3749929, 5.75082334, 6.08308148, 6.58091182, 6.77848803, 7.19588758, 7.64862286, 7.99818347, 7.91824794, 8.30341071, 8.45984973, 7.98700002, 8.18924931, 
8.60755649, 8.66233396, 8.91018407, 9.0782739, 9.33515448, 8.95870245, 8.98426422, 8.50340317, 8.64916085, 8.93592407, 8.63145745, 8.65322862, 8.39543204, 8.37969997, 8.23394504, 8.04062872, 7.91259763, 7.57252171, 7.72670114, 7.74486117, 8.06908188, 7.99166889, 7.92155906, 8.39956136, 8.80181323, 8.47464091, 8.06557064, 7.87145573, 8.0237959, 8.39481998, 8.68525692, 8.81185461, 8.98632237, 9.0989835, 8.89787405, 8.86508591], columns=['value']) self.test_data5 = pd.DataFrame([4.50258923, 4.35142568, 4.07459514, 3.87791297, 3.73715985, 3.98455684, 4.07587908, 4.00042472, 4.28276612, 4.01362051, 4.13713565, 4.49312372, 4.48633159, 4.4641207, 4.13444605, 3.79107217, 4.22941629, 4.56548511, 4.92472163, 5.27723158, 5.67409193, 6.00176917, 5.88889928, 5.55256103, 5.39308314, 5.2610492, 5.30738908, 5.22222408, 4.90332238, 4.57499908, 4.96097146, 4.81531011, 4.39115442, 4.63200662, 5.04588813, 4.67866025, 5.01705123, 4.83562258, 4.60381702, 4.66187576, 4.41292828, 4.86604507, 4.42280124, 4.07517294, 4.16317319, 4.10316596, 4.42913598, 4.06609666, 3.96725913, 4.15965746, 4.12379564, 4.04054068, 3.84342851, 3.45902867, 3.17649855, 3.09773586, 3.5502119, 3.66396995, 3.66306483, 3.29131401, 2.79558533, 2.88319542, 3.03671098, 3.44645857, 3.88167161, 3.57961874, 3.60180276, 3.96702102, 4.05429995, 4.40056979, 4.05653231, 3.59600456, 3.60792477, 4.09989922, 3.73503663, 4.01892626, 3.94597242, 3.81466605, 3.71417992, 3.93767156, 4.42806557, 4.06988106, 4.03713636, 4.34408673, 4.79810156, 5.18115011, 4.89798406, 5.3960077, 5.72504875, 5.61894017, 5.1958197, 4.85275896, 5.17550207, 4.71548987, 4.62408567, 4.55488535, 4.36532649, 4.26031979, 4.25225607, 4.58627048], columns=['value']) self.test_data6 = pd.DataFrame([5.08639513, 5.05761083, 4.76160923, 4.62166504, 4.62923183, 4.25070173, 4.13447513, 3.90890013, 3.76687608, 3.43342482, 3.67648224, 3.6274775, 3.9385404, 4.39771627, 4.03199346, 3.93265288, 3.50059789, 3.3851961, 3.29743973, 3.2544872, 2.93692949, 2.70893003, 2.55461976, 2.20922332, 2.29054475, 2.2144714, 2.03726827, 2.39007617, 2.29866155, 2.40607111, 2.40440444, 2.79374649, 2.66541922, 2.27018079, 2.08505127, 2.55478864, 2.22415625, 2.58517923, 2.58802256, 2.94870959, 2.69301739, 2.19991535, 2.69473146, 2.64704637, 2.62753542, 2.14240825, 2.38565154, 1.94592117, 2.32243877, 2.69337246, 2.51283854, 2.62484451, 2.15559054, 2.35410875, 2.31219177, 1.96018265, 2.34711266, 2.58083322, 2.40290041, 2.20439791, 2.31472425, 2.16228248, 2.16439749, 2.20080737, 1.73293206, 1.9264407, 2.25089861, 2.69269101, 2.59296687, 2.1420998, 1.67819153, 1.98419023, 2.14479494, 1.89055376, 1.96720648, 1.9916694, 2.37227761, 2.14446036, 2.34573903, 1.86162546, 2.1410721, 2.39204939, 2.52529064, 2.47079939, 2.9299031, 3.09452923, 2.93276708, 3.21731309, 3.06248964, 2.90413406, 2.67844632, 2.45621213, 2.41463398, 2.7373913, 3.14917045, 3.4033949, 3.82283446, 4.02285451, 3.7619638, 4.10346795], columns=['value']) self.test_data7 = pd.DataFrame([4.75233583, 4.47668283, 4.55894263, 4.61765848, 4.622892, 4.58941116, 4.32535872, 3.88112797, 3.47237806, 3.50898953, 3.82530406, 3.6718017, 3.78918195, 4.1800752, 4.01818557, 4.40822582, 4.65474654, 4.89287256, 4.40879274, 4.65505126, 4.36876403, 4.58418934, 4.75687172, 4.3689799, 4.16126498, 4.0203982, 3.77148242, 3.38198096, 3.07261764, 2.9014741, 2.5049543, 2.756105, 2.28779058, 2.16986991, 1.8415962, 1.83319008, 2.20898291, 2.00128981, 1.75747025, 1.26676663, 1.40316876, 1.11126484, 1.60376367, 1.22523829, 1.58816681, 1.49705679, 1.80244138, 1.55128293, 1.35339409, 
1.50985759, 1.0808451, 1.05892796, 1.43414812, 1.43039101, 1.73631655, 1.43940867, 1.82864425, 1.71088265, 2.12015154, 2.45417128, 2.84777618, 2.7925612, 2.90975121, 3.25920745, 3.13801182, 3.52733677, 3.65468491, 3.69395211, 3.49862035, 3.24786017, 3.64463138, 4.00331929, 3.62509565, 3.78013949, 3.4174012, 3.76312271, 3.62054004, 3.67206716, 3.60596058, 3.38636199, 3.42580676, 3.32921095, 3.02976759, 3.28258676, 3.45760838, 3.24917528, 2.94618304, 2.86980011, 2.63191259, 2.39566759, 2.53159917, 2.96273967, 3.25626185, 2.97425402, 3.16412191, 3.58280763, 3.23257727, 3.62353556, 3.12806399, 2.92532313], columns=['value']) # 建立一个长度为 500 个数据点的测试数据, 用于测试数据点多于250个的情况下的评价过程 self.long_data = pd.DataFrame([9.879, 9.916, 10.109, 10.214, 10.361, 10.768, 10.594, 10.288, 10.082, 9.994, 10.125, 10.126, 10.384, 10.734, 10.4, 10.87, 11.338, 11.061, 11.415, 11.724, 12.077, 12.196, 12.064, 12.423, 12.19, 11.729, 11.677, 11.448, 11.485, 10.989, 11.242, 11.239, 11.113, 11.075, 11.471, 11.745, 11.754, 11.782, 12.079, 11.97, 12.178, 11.95, 12.438, 12.612, 12.804, 12.952, 12.612, 12.867, 12.832, 12.832, 13.015, 13.315, 13.249, 12.904, 12.776, 12.64, 12.543, 12.287, 12.225, 11.844, 11.985, 11.945, 11.542, 11.871, 12.245, 12.228, 12.362, 11.899, 11.962, 12.374, 12.816, 12.649, 12.252, 12.579, 12.3, 11.988, 12.177, 12.312, 12.744, 12.599, 12.524, 12.82, 12.67, 12.876, 12.986, 13.271, 13.606, 13.82, 14.161, 13.833, 13.831, 14.137, 13.705, 13.414, 13.037, 12.759, 12.642, 12.948, 13.297, 13.483, 13.836, 14.179, 13.709, 13.655, 13.198, 13.508, 13.953, 14.387, 14.043, 13.987, 13.561, 13.391, 12.923, 12.555, 12.503, 12.292, 11.877, 12.34, 12.141, 11.687, 11.992, 12.458, 12.131, 11.75, 11.739, 11.263, 11.762, 11.976, 11.578, 11.854, 12.136, 12.422, 12.311, 12.56, 12.879, 12.861, 12.973, 13.235, 13.53, 13.531, 13.137, 13.166, 13.31, 13.103, 13.007, 12.643, 12.69, 12.216, 12.385, 12.046, 12.321, 11.9, 11.772, 11.816, 11.871, 11.59, 11.518, 11.94, 11.803, 11.924, 12.183, 12.136, 12.361, 12.406, 11.932, 11.684, 11.292, 11.388, 11.874, 12.184, 12.002, 12.16, 11.741, 11.26, 11.123, 11.534, 11.777, 11.407, 11.275, 11.679, 11.62, 11.218, 11.235, 11.352, 11.366, 11.061, 10.661, 10.582, 10.899, 11.352, 11.792, 11.475, 11.263, 11.538, 11.183, 10.936, 11.399, 11.171, 11.214, 10.89, 10.728, 11.191, 11.646, 11.62, 11.195, 11.178, 11.18, 10.956, 11.205, 10.87, 11.098, 10.639, 10.487, 10.507, 10.92, 10.558, 10.119, 9.882, 9.573, 9.515, 9.845, 9.852, 9.495, 9.726, 10.116, 10.452, 10.77, 11.225, 10.92, 10.824, 11.096, 11.542, 11.06, 10.568, 10.585, 10.884, 10.401, 10.068, 9.964, 10.285, 10.239, 10.036, 10.417, 10.132, 9.839, 9.556, 9.084, 9.239, 9.304, 9.067, 8.587, 8.471, 8.007, 8.321, 8.55, 9.008, 9.138, 9.088, 9.434, 9.156, 9.65, 9.431, 9.654, 10.079, 10.411, 10.865, 10.51, 10.205, 10.519, 10.367, 10.855, 10.642, 10.298, 10.622, 10.173, 9.792, 9.995, 9.904, 9.771, 9.597, 9.506, 9.212, 9.688, 10.032, 9.723, 9.839, 9.918, 10.332, 10.236, 9.989, 10.192, 10.685, 10.908, 11.275, 11.72, 12.158, 12.045, 12.244, 12.333, 12.246, 12.552, 12.958, 13.11, 13.53, 13.123, 13.138, 13.57, 13.389, 13.511, 13.759, 13.698, 13.744, 13.467, 13.795, 13.665, 13.377, 13.423, 13.772, 13.295, 13.073, 12.718, 12.388, 12.399, 12.185, 11.941, 11.818, 11.465, 11.811, 12.163, 11.86, 11.935, 11.809, 12.145, 12.624, 12.768, 12.321, 12.277, 11.889, 12.11, 12.606, 12.943, 12.945, 13.112, 13.199, 13.664, 14.051, 14.189, 14.339, 14.611, 14.656, 15.112, 15.086, 15.263, 15.021, 15.346, 15.572, 15.607, 15.983, 16.151, 16.215, 16.096, 16.089, 16.32, 16.59, 16.657, 16.752, 
16.583, 16.743, 16.373, 16.662, 16.243, 16.163, 16.491, 16.958, 16.977, 17.225, 17.637, 17.344, 17.684, 17.892, 18.036, 18.182, 17.803, 17.588, 17.101, 17.538, 17.124, 16.787, 17.167, 17.138, 16.955, 17.148, 17.135, 17.635, 17.718, 17.675, 17.622, 17.358, 17.754, 17.729, 17.576, 17.772, 18.239, 18.441, 18.729, 18.319, 18.608, 18.493, 18.069, 18.122, 18.314, 18.423, 18.709, 18.548, 18.384, 18.391, 17.988, 17.986, 17.653, 17.249, 17.298, 17.06, 17.36, 17.108, 17.348, 17.596, 17.46, 17.635, 17.275, 17.291, 16.933, 17.337, 17.231, 17.146, 17.148, 16.751, 16.891, 17.038, 16.735, 16.64, 16.231, 15.957, 15.977, 16.077, 16.054, 15.797, 15.67, 15.911, 16.077, 16.17, 15.722, 15.258, 14.877, 15.138, 15., 14.811, 14.698, 14.407, 14.583, 14.704, 15.153, 15.436, 15.634, 15.453, 15.877, 15.696, 15.563, 15.927, 16.255, 16.696, 16.266, 16.698, 16.365, 16.493, 16.973, 16.71, 16.327, 16.605, 16.486, 16.846, 16.935, 17.21, 17.389, 17.546, 17.773, 17.641, 17.485, 17.794, 17.354, 16.904, 16.675, 16.43, 16.898, 16.819, 16.921, 17.201, 17.617, 17.368, 17.864, 17.484], columns=['value']) self.long_bench = pd.DataFrame([9.7, 10.179, 10.321, 9.855, 9.936, 10.096, 10.331, 10.662, 10.59, 11.031, 11.154, 10.945, 10.625, 10.233, 10.284, 10.252, 10.221, 10.352, 10.444, 10.773, 10.904, 11.104, 10.797, 10.55, 10.943, 11.352, 11.641, 11.983, 11.696, 12.138, 12.365, 12.379, 11.969, 12.454, 12.947, 13.119, 13.013, 12.763, 12.632, 13.034, 12.681, 12.561, 12.938, 12.867, 13.202, 13.132, 13.539, 13.91, 13.456, 13.692, 13.771, 13.904, 14.069, 13.728, 13.97, 14.228, 13.84, 14.041, 13.963, 13.689, 13.543, 13.858, 14.118, 13.987, 13.611, 14.028, 14.229, 14.41, 14.74, 15.03, 14.915, 15.207, 15.354, 15.665, 15.877, 15.682, 15.625, 15.175, 15.105, 14.893, 14.86, 15.097, 15.178, 15.293, 15.238, 15., 15.283, 14.994, 14.907, 14.664, 14.888, 15.297, 15.313, 15.368, 14.956, 14.802, 14.506, 14.257, 14.619, 15.019, 15.049, 14.625, 14.894, 14.978, 15.434, 15.578, 16.038, 16.107, 16.277, 16.365, 16.204, 16.465, 16.401, 16.895, 17.057, 16.621, 16.225, 16.075, 15.863, 16.292, 16.551, 16.724, 16.817, 16.81, 17.192, 16.86, 16.745, 16.707, 16.552, 16.133, 16.301, 16.08, 15.81, 15.75, 15.909, 16.127, 16.457, 16.204, 16.329, 16.748, 16.624, 17.011, 16.548, 16.831, 16.653, 16.791, 16.57, 16.778, 16.928, 16.932, 17.22, 16.876, 17.301, 17.422, 17.689, 17.316, 17.547, 17.534, 17.409, 17.669, 17.416, 17.859, 17.477, 17.307, 17.245, 17.352, 17.851, 17.412, 17.144, 17.138, 17.085, 16.926, 16.674, 16.854, 17.064, 16.95, 16.609, 16.957, 16.498, 16.552, 16.175, 15.858, 15.697, 15.781, 15.583, 15.36, 15.558, 16.046, 15.968, 15.905, 16.358, 16.783, 17.048, 16.762, 17.224, 17.363, 17.246, 16.79, 16.608, 16.423, 15.991, 15.527, 15.147, 14.759, 14.792, 15.206, 15.148, 15.046, 15.429, 14.999, 15.407, 15.124, 14.72, 14.713, 15.022, 15.092, 14.982, 15.001, 14.734, 14.713, 14.841, 14.562, 15.005, 15.483, 15.472, 15.277, 15.503, 15.116, 15.12, 15.442, 15.476, 15.789, 15.36, 15.764, 16.218, 16.493, 16.642, 17.088, 16.816, 16.645, 16.336, 16.511, 16.2, 15.994, 15.86, 15.929, 16.316, 16.416, 16.746, 17.173, 17.531, 17.627, 17.407, 17.49, 17.768, 17.509, 17.795, 18.147, 18.63, 18.945, 19.021, 19.518, 19.6, 19.744, 19.63, 19.32, 18.933, 19.297, 19.598, 19.446, 19.236, 19.198, 19.144, 19.159, 19.065, 19.032, 18.586, 18.272, 18.119, 18.3, 17.894, 17.744, 17.5, 17.083, 17.092, 16.864, 16.453, 16.31, 16.681, 16.342, 16.447, 16.715, 17.068, 17.067, 16.822, 16.673, 16.675, 16.592, 16.686, 16.397, 15.902, 15.597, 15.357, 15.162, 15.348, 15.603, 15.283, 15.257, 15.082, 14.621, 
14.366, 14.039, 13.957, 14.141, 13.854, 14.243, 14.414, 14.033, 13.93, 14.104, 14.461, 14.249, 14.053, 14.165, 14.035, 14.408, 14.501, 14.019, 14.265, 14.67, 14.797, 14.42, 14.681, 15.16, 14.715, 14.292, 14.411, 14.656, 15.094, 15.366, 15.055, 15.198, 14.762, 14.294, 13.854, 13.811, 13.549, 13.927, 13.897, 13.421, 13.037, 13.32, 13.721, 13.511, 13.999, 13.529, 13.418, 13.881, 14.326, 14.362, 13.987, 14.015, 13.599, 13.343, 13.307, 13.689, 13.851, 13.404, 13.577, 13.395, 13.619, 13.195, 12.904, 12.553, 12.294, 12.649, 12.425, 11.967, 12.062, 11.71, 11.645, 12.058, 12.136, 11.749, 11.953, 12.401, 12.044, 11.901, 11.631, 11.396, 11.036, 11.244, 10.864, 11.207, 11.135, 11.39, 11.723, 12.084, 11.8, 11.471, 11.33, 11.504, 11.295, 11.3, 10.901, 10.494, 10.825, 11.054, 10.866, 10.713, 10.875, 10.846, 10.947, 11.422, 11.158, 10.94, 10.521, 10.36, 10.411, 10.792, 10.472, 10.305, 10.525, 10.853, 10.556, 10.72, 10.54, 10.583, 10.299, 10.061, 10.004, 9.903, 9.796, 9.472, 9.246, 9.54, 9.456, 9.177, 9.484, 9.557, 9.493, 9.968, 9.536, 9.39, 8.922, 8.423, 8.518, 8.686, 8.771, 9.098, 9.281, 8.858, 9.027, 8.553, 8.784, 8.996, 9.379, 9.846, 9.855, 9.502, 9.608, 9.761, 9.409, 9.4, 9.332, 9.34, 9.284, 8.844, 8.722, 8.376, 8.775, 8.293, 8.144, 8.63, 8.831, 8.957, 9.18, 9.601, 9.695, 10.018, 9.841, 9.743, 9.292, 8.85, 9.316, 9.288, 9.519, 9.738, 9.289, 9.785, 9.804, 10.06, 10.188, 10.095, 9.739, 9.881, 9.7, 9.991, 10.391, 10.002], columns=['value']) def test_performance_stats(self): """test the function performance_statistics() """ pass def test_fv(self): print(f'test with test data and empty DataFrame') self.assertAlmostEqual(eval_fv(self.test_data1), 6.39245474) self.assertAlmostEqual(eval_fv(self.test_data2), 10.05126375) self.assertAlmostEqual(eval_fv(self.test_data3), 6.95068113) self.assertAlmostEqual(eval_fv(self.test_data4), 8.86508591) self.assertAlmostEqual(eval_fv(self.test_data5), 4.58627048) self.assertAlmostEqual(eval_fv(self.test_data6), 4.10346795) self.assertAlmostEqual(eval_fv(self.test_data7), 2.92532313) self.assertAlmostEqual(eval_fv(pd.DataFrame()), -np.inf) print(f'Error testing') self.assertRaises(AssertionError, eval_fv, 15) self.assertRaises(KeyError, eval_fv, pd.DataFrame([1, 2, 3], columns=['non_value'])) def test_max_drawdown(self): print(f'test with test data and empty DataFrame') self.assertAlmostEqual(eval_max_drawdown(self.test_data1)[0], 0.264274308) self.assertEqual(eval_max_drawdown(self.test_data1)[1], 53) self.assertEqual(eval_max_drawdown(self.test_data1)[2], 86) self.assertTrue(np.isnan(eval_max_drawdown(self.test_data1)[3])) self.assertAlmostEqual(eval_max_drawdown(self.test_data2)[0], 0.334690849) self.assertEqual(eval_max_drawdown(self.test_data2)[1], 0) self.assertEqual(eval_max_drawdown(self.test_data2)[2], 10) self.assertEqual(eval_max_drawdown(self.test_data2)[3], 19) self.assertAlmostEqual(eval_max_drawdown(self.test_data3)[0], 0.244452899) self.assertEqual(eval_max_drawdown(self.test_data3)[1], 90) self.assertEqual(eval_max_drawdown(self.test_data3)[2], 99) self.assertTrue(np.isnan(eval_max_drawdown(self.test_data3)[3])) self.assertAlmostEqual(eval_max_drawdown(self.test_data4)[0], 0.201849684) self.assertEqual(eval_max_drawdown(self.test_data4)[1], 14) self.assertEqual(eval_max_drawdown(self.test_data4)[2], 50) self.assertEqual(eval_max_drawdown(self.test_data4)[3], 54) self.assertAlmostEqual(eval_max_drawdown(self.test_data5)[0], 0.534206456) self.assertEqual(eval_max_drawdown(self.test_data5)[1], 21) self.assertEqual(eval_max_drawdown(self.test_data5)[2], 60) 
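        # Note (an inference from the assertions in this test, not a documented
        # contract): eval_max_drawdown() appears to return a tuple of
        # (drawdown_ratio, peak_index, valley_index, recovery_index), with the
        # recovery index reported as NaN when the series never regains its prior
        # peak, hence the np.isnan() checks on element [3] for several test series.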
self.assertTrue(np.isnan(eval_max_drawdown(self.test_data5)[3])) self.assertAlmostEqual(eval_max_drawdown(self.test_data6)[0], 0.670062689) self.assertEqual(eval_max_drawdown(self.test_data6)[1], 0) self.assertEqual(eval_max_drawdown(self.test_data6)[2], 70) self.assertTrue(np.isnan(eval_max_drawdown(self.test_data6)[3])) self.assertAlmostEqual(eval_max_drawdown(self.test_data7)[0], 0.783577449) self.assertEqual(eval_max_drawdown(self.test_data7)[1], 17) self.assertEqual(eval_max_drawdown(self.test_data7)[2], 51) self.assertTrue(np.isnan(eval_max_drawdown(self.test_data7)[3])) self.assertEqual(eval_max_drawdown(pd.DataFrame()), -np.inf) print(f'Error testing') self.assertRaises(AssertionError, eval_fv, 15) self.assertRaises(KeyError, eval_fv, pd.DataFrame([1, 2, 3], columns=['non_value'])) # test max drawdown == 0: # TODO: investigate: how does divide by zero change? self.assertAlmostEqual(eval_max_drawdown(self.test_data4 - 5)[0], 1.0770474121951792) self.assertEqual(eval_max_drawdown(self.test_data4 - 5)[1], 14) self.assertEqual(eval_max_drawdown(self.test_data4 - 5)[2], 50) def test_info_ratio(self): reference = self.test_data1 self.assertAlmostEqual(eval_info_ratio(self.test_data2, reference, 'value'), 0.075553316) self.assertAlmostEqual(eval_info_ratio(self.test_data3, reference, 'value'), 0.018949457) self.assertAlmostEqual(eval_info_ratio(self.test_data4, reference, 'value'), 0.056328143) self.assertAlmostEqual(eval_info_ratio(self.test_data5, reference, 'value'), -0.004270068) self.assertAlmostEqual(eval_info_ratio(self.test_data6, reference, 'value'), 0.009198027) self.assertAlmostEqual(eval_info_ratio(self.test_data7, reference, 'value'), -0.000890283) def test_volatility(self): self.assertAlmostEqual(eval_volatility(self.test_data1), 0.748646166) self.assertAlmostEqual(eval_volatility(self.test_data2), 0.75527442) self.assertAlmostEqual(eval_volatility(self.test_data3), 0.654188853) self.assertAlmostEqual(eval_volatility(self.test_data4), 0.688375814) self.assertAlmostEqual(eval_volatility(self.test_data5), 1.089989522) self.assertAlmostEqual(eval_volatility(self.test_data6), 1.775419308) self.assertAlmostEqual(eval_volatility(self.test_data7), 1.962758406) self.assertAlmostEqual(eval_volatility(self.test_data1, logarithm=False), 0.750993311) self.assertAlmostEqual(eval_volatility(self.test_data2, logarithm=False), 0.75571473) self.assertAlmostEqual(eval_volatility(self.test_data3, logarithm=False), 0.655331424) self.assertAlmostEqual(eval_volatility(self.test_data4, logarithm=False), 0.692683021) self.assertAlmostEqual(eval_volatility(self.test_data5, logarithm=False), 1.09602969) self.assertAlmostEqual(eval_volatility(self.test_data6, logarithm=False), 1.774789504) self.assertAlmostEqual(eval_volatility(self.test_data7, logarithm=False), 2.003329156) self.assertEqual(eval_volatility(pd.DataFrame()), -np.inf) self.assertRaises(AssertionError, eval_volatility, [1, 2, 3]) # 测试长数据的Volatility计算 expected_volatility = np.array([np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, 
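        # The long leading run of NaN values in expected_volatility is presumably the warm-up
        # of a rolling window; once the window is full, the rolling volatility is assumed to be
        # the annualized standard deviation of the daily (log by default, given the
        # logarithm=False variants above) returns, which is why the values further down hover
        # around 0.4.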
np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, 0.39955371, 0.39974258, 0.40309866, 0.40486593, 0.4055514, 0.40710639, 0.40708157, 0.40609006, 0.4073625, 0.40835305, 0.41155304, 0.41218193, 0.41207489, 0.41300276, 0.41308415, 0.41292392, 0.41207645, 0.41238397, 0.41229291, 0.41164056, 0.41316317, 0.41348842, 0.41462249, 0.41474574, 0.41652625, 0.41649176, 0.41701556, 0.4166593, 0.41684221, 0.41491689, 0.41435209, 0.41549087, 0.41849338, 0.41998049, 0.41959106, 0.41907311, 0.41916103, 0.42120773, 0.42052391, 0.42111225, 0.42124589, 0.42356445, 0.42214672, 0.42324022, 0.42476639, 0.42621689, 0.42549439, 0.42533678, 0.42539414, 0.42545038, 0.42593637, 0.42652095, 0.42665489, 0.42699563, 0.42798159, 0.42784512, 0.42898006, 0.42868781, 0.42874188, 0.42789631, 0.4277768, 0.42776827, 0.42685216, 0.42660989, 0.42563155, 0.42618281, 0.42606281, 0.42505222, 0.42653242, 0.42555378, 0.42500842, 0.42561939, 0.42442059, 0.42395414, 0.42384356, 0.42319135, 0.42397497, 0.42488579, 0.42449729, 0.42508766, 0.42509878, 0.42456616, 0.42535577, 0.42681884, 0.42688552, 0.42779918, 0.42706058, 0.42792887, 0.42762114, 0.42894045, 0.42977398, 0.42919859, 0.42829041, 0.42780946, 0.42825318, 0.42858952, 0.42858315, 0.42805601, 0.42764751, 0.42744107, 0.42775518, 0.42707283, 0.4258592, 0.42615335, 0.42526286, 0.4248906, 0.42368986, 0.4232565, 0.42265079, 0.42263954, 0.42153046, 0.42132051, 0.41995353, 0.41916605, 0.41914271, 0.41876945, 0.41740175, 0.41583884, 0.41614026, 0.41457908, 0.41472411, 0.41310876, 0.41261041, 0.41212369, 0.41211677, 0.4100645, 0.40852504, 0.40860297, 0.40745338, 0.40698661, 0.40644546, 0.40591375, 0.40640744, 0.40620663, 0.40656649, 0.40727154, 0.40797605, 0.40807137, 0.40808913, 0.40809676, 0.40711767, 0.40724628, 0.40713077, 0.40772698, 0.40765157, 0.40658297, 0.4065991, 0.405011, 0.40537645, 0.40432626, 0.40390177, 0.40237701, 0.40291623, 0.40301797, 0.40324145, 0.40312864, 0.40328316, 0.40190955, 0.40246506, 0.40237663, 0.40198407, 0.401969, 0.40185623, 0.40198313, 0.40005643, 0.39940743, 0.39850438, 0.39845398, 0.39695093, 0.39697295, 0.39663201, 0.39675444, 0.39538699, 0.39331959, 0.39326074, 0.39193287, 0.39157266, 
0.39021327, 0.39062591, 0.38917591, 0.38976991, 0.38864187, 0.38872158, 0.38868096, 0.38868377, 0.38842057, 0.38654784, 0.38649517, 0.38600464, 0.38408115, 0.38323049, 0.38260215, 0.38207663, 0.38142669, 0.38003262, 0.37969367, 0.37768092, 0.37732108, 0.37741991, 0.37617779, 0.37698504, 0.37606784, 0.37499276, 0.37533731, 0.37350437, 0.37375172, 0.37385382, 0.37384003, 0.37338938, 0.37212288, 0.37273075, 0.370559, 0.37038506, 0.37062153, 0.36964661, 0.36818564, 0.3656634, 0.36539259, 0.36428672, 0.36502487, 0.3647148, 0.36551435, 0.36409919, 0.36348181, 0.36254383, 0.36166601, 0.36142665, 0.35954942, 0.35846915, 0.35886759, 0.35813867, 0.35642888, 0.35375231, 0.35061783, 0.35078463, 0.34995508, 0.34688918, 0.34548257, 0.34633158, 0.34622833, 0.34652111, 0.34622774, 0.34540951, 0.34418809, 0.34276593, 0.34160916, 0.33811193, 0.33822709, 0.3391685, 0.33883381]) test_volatility = eval_volatility(self.long_data) test_volatility_roll = self.long_data['volatility'].values self.assertAlmostEqual(test_volatility, np.nanmean(expected_volatility)) self.assertTrue(np.allclose(expected_volatility, test_volatility_roll, equal_nan=True)) def test_sharp(self): self.assertAlmostEqual(eval_sharp(self.test_data1, 5, 0), 0.06135557) self.assertAlmostEqual(eval_sharp(self.test_data2, 5, 0), 0.167858667) self.assertAlmostEqual(eval_sharp(self.test_data3, 5, 0), 0.09950547) self.assertAlmostEqual(eval_sharp(self.test_data4, 5, 0), 0.154928241) self.assertAlmostEqual(eval_sharp(self.test_data5, 5, 0.002), 0.007868673) self.assertAlmostEqual(eval_sharp(self.test_data6, 5, 0.002), 0.018306537) self.assertAlmostEqual(eval_sharp(self.test_data7, 5, 0.002), 0.006259971) # 测试长数据的sharp率计算 expected_sharp = np.array([np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, 
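        # Rough sketch of the assumed Sharpe calculation (not the library's documented
        # formula): per rolling window, sharp ~ (mean daily return - risk-free rate) / return
        # volatility; the long-data call below passes 0.00035 as what is presumably the
        # per-day risk-free rate, while the short-data cases above use 0 or 0.002.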
np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, -0.02346815, -0.02618783, -0.03763912, -0.03296276, -0.03085698, -0.02851101, -0.02375842, -0.02016746, -0.01107885, -0.01426613, -0.00787204, -0.01135784, -0.01164232, -0.01003481, -0.00022512, -0.00046792, -0.01209378, -0.01278892, -0.01298135, -0.01938214, -0.01671044, -0.02120509, -0.0244281, -0.02416067, -0.02763238, -0.027579, -0.02372774, -0.02215294, -0.02467094, -0.02091266, -0.02590194, -0.03049876, -0.02077131, -0.01483653, -0.02488144, -0.02671638, -0.02561547, -0.01957986, -0.02479803, -0.02703162, -0.02658087, -0.01641755, -0.01946472, -0.01647757, -0.01280889, -0.00893643, -0.00643275, -0.00698457, -0.00549962, -0.00654677, -0.00494757, -0.0035633, -0.00109037, 0.00750654, 0.00451208, 0.00625502, 0.01221367, 0.01326454, 0.01535037, 0.02269538, 0.02028715, 0.02127712, 0.02333264, 0.02273159, 0.01670643, 0.01376513, 0.01265342, 0.02211647, 0.01612449, 0.00856706, -0.00077147, -0.00268848, 0.00210993, -0.00443934, -0.00411912, -0.0018756, -0.00867461, -0.00581601, -0.00660835, -0.00861137, -0.00678614, -0.01188408, -0.00589617, -0.00244323, -0.00201891, -0.01042846, -0.01471016, -0.02167034, -0.02258554, -0.01306809, -0.00909086, -0.01233746, -0.00595166, -0.00184208, 0.00750497, 0.01481886, 0.01761972, 0.01562886, 0.01446414, 0.01285826, 0.01357719, 0.00967613, 0.01636272, 0.01458437, 0.02280183, 0.02151903, 0.01700276, 0.01597368, 0.02114336, 0.02233297, 0.02585631, 0.02768459, 0.03519235, 0.04204535, 0.04328161, 0.04672855, 0.05046191, 0.04619848, 0.04525853, 0.05381529, 0.04598861, 0.03947394, 0.04665006, 0.05586077, 0.05617728, 0.06495018, 0.06205172, 0.05665466, 0.06500615, 0.0632062, 0.06084328, 0.05851466, 0.05659229, 0.05159347, 0.0432977, 0.0474047, 0.04231723, 0.03613176, 0.03618391, 0.03591012, 0.03885674, 0.0402686, 0.03846423, 0.04534014, 0.04721458, 0.05130912, 0.05026281, 0.05394312, 0.05529349, 0.05949243, 0.05463304, 0.06195165, 0.06767606, 0.06880985, 0.07048996, 0.07078815, 0.07420767, 0.06773439, 0.0658441, 0.06470875, 0.06302349, 0.06456876, 0.06411282, 0.06216669, 0.067094, 0.07055075, 0.07254976, 0.07119253, 0.06173308, 0.05393352, 0.05681246, 0.05250643, 0.06099845, 0.0655544, 0.06977334, 0.06636514, 0.06177949, 0.06869908, 0.06719767, 0.06178738, 0.05915714, 0.06882277, 0.06756821, 0.06507994, 0.06489791, 0.06553941, 0.073123, 0.07576757, 0.06805446, 0.06063571, 0.05033801, 0.05206971, 0.05540306, 0.05249118, 0.05755587, 0.0586174, 0.05051288, 0.0564852, 0.05757284, 0.06358355, 0.06130082, 0.04925482, 0.03834472, 0.04163981, 0.04648316, 0.04457858, 0.04324626, 0.04328791, 0.04156207, 0.04818652, 0.04972634, 0.06024123, 0.06489556, 0.06255485, 0.06069815, 0.06466389, 0.07081163, 0.07895358, 0.0881782, 0.09374151, 0.08336506, 0.08764795, 0.09080174, 0.08808926, 0.08641158, 0.07811943, 0.06885318, 0.06479503, 0.06851185, 0.07382819, 0.07047903, 0.06658251, 0.07638379, 0.08667974, 0.08867918, 0.08245323, 0.08961866, 0.09905298, 0.0961908, 0.08562706, 0.0839014, 0.0849072, 0.08338395, 0.08783487, 0.09463609, 0.10332336, 0.11806497, 0.11220297, 0.11589097, 0.11678405]) test_sharp = eval_sharp(self.long_data, 5, 0.00035) self.assertAlmostEqual(np.nanmean(expected_sharp), test_sharp) self.assertTrue(np.allclose(self.long_data['sharp'].values, expected_sharp, equal_nan=True)) def test_beta(self): reference = self.test_data1 self.assertAlmostEqual(eval_beta(self.test_data2, reference, 'value'), 
-0.017148939) self.assertAlmostEqual(eval_beta(self.test_data3, reference, 'value'), -0.042204233) self.assertAlmostEqual(eval_beta(self.test_data4, reference, 'value'), -0.15652986) self.assertAlmostEqual(eval_beta(self.test_data5, reference, 'value'), -0.049195532) self.assertAlmostEqual(eval_beta(self.test_data6, reference, 'value'), -0.026995082) self.assertAlmostEqual(eval_beta(self.test_data7, reference, 'value'), -0.01147809) self.assertRaises(TypeError, eval_beta, [1, 2, 3], reference, 'value') self.assertRaises(TypeError, eval_beta, self.test_data3, [1, 2, 3], 'value') self.assertRaises(KeyError, eval_beta, self.test_data3, reference, 'not_found_value') # 测试长数据的beta计算 expected_beta = np.array([np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, -0.04988841, -0.05127618, -0.04692104, -0.04272652, -0.04080598, -0.0493347, -0.0460858, -0.0416761, -0.03691527, -0.03724924, -0.03678865, -0.03987324, -0.03488321, -0.02567672, -0.02690303, -0.03010128, -0.02437967, -0.02571932, -0.02455681, -0.02839811, -0.03358653, -0.03396697, -0.03466321, -0.03050966, -0.0247583, -0.01629325, -0.01880895, -0.01480403, -0.01348783, -0.00544294, -0.00648176, -0.00467036, -0.01135331, -0.0156841, -0.02340763, -0.02615705, -0.02730771, -0.02906174, -0.02860664, -0.02412914, -0.02066416, -0.01744816, -0.02185133, -0.02145285, -0.02681765, -0.02827694, -0.02394581, -0.02744096, -0.02778825, -0.02703065, -0.03160023, -0.03615371, -0.03681072, -0.04265126, -0.04344738, -0.04232421, -0.04705272, -0.04533344, -0.04605934, -0.05272737, -0.05156463, -0.05134196, -0.04730733, -0.04425352, -0.03869831, 
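        # Assumed beta definition: beta is presumably computed per rolling window as
        # cov(portfolio returns, reference returns) / var(reference returns); eval_beta()
        # then returns the mean of this rolling series, which the test compares against
        # np.nanmean(expected_beta) below.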
-0.04159571, -0.04223998, -0.04346747, -0.04229844, -0.04740093, -0.04992507, -0.04621232, -0.04477644, -0.0486915, -0.04598224, -0.04943463, -0.05006391, -0.05362256, -0.04994067, -0.05464769, -0.05443275, -0.05513493, -0.05173594, -0.04500994, -0.04662891, -0.03903505, -0.0419592, -0.04307773, -0.03925718, -0.03711574, -0.03992631, -0.0433058, -0.04533641, -0.0461183, -0.05600344, -0.05758377, -0.05959874, -0.05605942, -0.06002859, -0.06253002, -0.06747014, -0.06427915, -0.05931947, -0.05769974, -0.04791515, -0.05175088, -0.05748039, -0.05385232, -0.05072975, -0.05052637, -0.05125567, -0.05005785, -0.05325104, -0.04977727, -0.04947867, -0.05148544, -0.05739156, -0.05742069, -0.06047279, -0.0558414, -0.06086126, -0.06265151, -0.06411129, -0.06828052, -0.06781762, -0.07083409, -0.07211207, -0.06799162, -0.06913295, -0.06775162, -0.0696265, -0.06678248, -0.06867502, -0.06581961, -0.07055823, -0.06448184, -0.06097973, -0.05795587, -0.0618383, -0.06130145, -0.06050652, -0.05936661, -0.05749424, -0.0499, -0.05050495, -0.04962687, -0.05033439, -0.05070116, -0.05422009, -0.05369759, -0.05548943, -0.05907353, -0.05933035, -0.05927918, -0.06227663, -0.06011455, -0.05650432, -0.05828134, -0.05620949, -0.05715323, -0.05482478, -0.05387113, -0.05095559, -0.05377999, -0.05334267, -0.05220438, -0.04001521, -0.03892434, -0.03660782, -0.04282708, -0.04324623, -0.04127048, -0.04227559, -0.04275226, -0.04347049, -0.04125853, -0.03806295, -0.0330632, -0.03155531, -0.03277152, -0.03304518, -0.03878731, -0.03830672, -0.03727434, -0.0370571, -0.04509224, -0.04207632, -0.04116198, -0.04545179, -0.04584584, -0.05287341, -0.05417433, -0.05175836, -0.05005509, -0.04268674, -0.03442321, -0.03457309, -0.03613426, -0.03524391, -0.03629479, -0.04361312, -0.02626705, -0.02406115, -0.03046384, -0.03181044, -0.03375164, -0.03661673, -0.04520779, -0.04926951, -0.05726738, -0.0584486, -0.06220608, -0.06800563, -0.06797431, -0.07562211, -0.07481996, -0.07731229, -0.08413381, -0.09031826, -0.09691925, -0.11018071, -0.11952675, -0.10826026, -0.11173895, -0.10756359, -0.10775916, -0.11664559, -0.10505051, -0.10606547, -0.09855355, -0.10004159, -0.10857084, -0.12209301, -0.11605758, -0.11105113, -0.1155195, -0.11569505, -0.10513348, -0.09611072, -0.10719791, -0.10843965, -0.11025856, -0.10247839, -0.10554044, -0.10927647, -0.10645088, -0.09982498, -0.10542734, -0.09631372, -0.08229695]) test_beta_mean = eval_beta(self.long_data, self.long_bench, 'value') test_beta_roll = self.long_data['beta'].values self.assertAlmostEqual(test_beta_mean, np.nanmean(expected_beta)) self.assertTrue(np.allclose(test_beta_roll, expected_beta, equal_nan=True)) def test_alpha(self): reference = self.test_data1 self.assertAlmostEqual(eval_alpha(self.test_data2, 5, reference, 'value', 0.5), 11.63072977) self.assertAlmostEqual(eval_alpha(self.test_data3, 5, reference, 'value', 0.5), 1.886590071) self.assertAlmostEqual(eval_alpha(self.test_data4, 5, reference, 'value', 0.5), 6.827021872) self.assertAlmostEqual(eval_alpha(self.test_data5, 5, reference, 'value', 0.92), -1.192265168) self.assertAlmostEqual(eval_alpha(self.test_data6, 5, reference, 'value', 0.92), -1.437142359) self.assertAlmostEqual(eval_alpha(self.test_data7, 5, reference, 'value', 0.92), -1.781311545) # 测试长数据的alpha计算 expected_alpha = np.array([np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, 
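        # Assumed alpha definition (a CAPM-style excess return, a hedged guess rather than the
        # documented formula): alpha ~ portfolio return - [riskfree + beta * (benchmark return
        # - riskfree)], evaluated on a rolling basis; the long-data call below is
        # eval_alpha(self.long_data, 100, self.long_bench, 'value').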
np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, -0.09418119, -0.11188463, -0.17938358, -0.15588172, -0.1462678, -0.13089586, -0.10780125, -0.09102891, -0.03987585, -0.06075686, -0.02459503, -0.04104284, -0.0444565, -0.04074585, 0.02191275, 0.02255955, -0.05583375, -0.05875539, -0.06055551, -0.09648245, -0.07913737, -0.10627829, -0.12320965, -0.12368335, -0.1506743, -0.15768033, -0.13638829, -0.13065298, -0.14537834, -0.127428, -0.15504529, -0.18184636, -0.12652146, -0.09190138, -0.14847221, -0.15840648, -0.1525789, -0.11859418, -0.14700954, -0.16295761, -0.16051645, -0.10364859, -0.11961134, -0.10258267, -0.08090148, -0.05727746, -0.0429945, -0.04672356, -0.03581408, -0.0439215, -0.03429495, -0.0260362, -0.01075022, 0.04931808, 0.02779388, 0.03984083, 0.08311951, 0.08995566, 0.10522428, 0.16159058, 0.14238174, 0.14759783, 0.16257712, 0.158908, 0.11302115, 0.0909566, 0.08272888, 0.15261884, 0.10546376, 0.04990313, -0.01284111, -0.02720704, 0.00454725, -0.03965491, -0.03818265, -0.02186992, -0.06574751, -0.04846454, -0.05204211, -0.06316498, -0.05095099, -0.08502656, -0.04681162, -0.02362027, -0.02205091, -0.07706374, -0.10371841, -0.14434688, -0.14797935, -0.09055402, -0.06739549, -0.08824959, -0.04855888, -0.02291244, 0.04027138, 0.09370505, 0.11472939, 0.10243593, 0.0921445, 0.07662648, 0.07946651, 0.05450718, 0.10497677, 0.09068334, 0.15462924, 0.14231034, 0.10544952, 0.09980256, 0.14035223, 0.14942974, 0.17624102, 0.19035477, 0.2500807, 0.30724652, 0.31768915, 0.35007521, 0.38412975, 0.34356521, 0.33614463, 0.41206165, 0.33999177, 0.28045963, 0.34076789, 0.42220356, 0.42314636, 0.50790423, 0.47713348, 0.42520169, 0.50488411, 0.48705211, 0.46252601, 0.44325578, 0.42640573, 0.37986783, 0.30652822, 0.34503393, 0.2999069, 0.24928617, 0.24730218, 0.24326897, 0.26657905, 0.27861168, 0.26392824, 0.32552649, 0.34177792, 
0.37837011, 0.37025267, 0.4030612, 0.41339361, 0.45076809, 0.40383354, 0.47093422, 0.52505036, 0.53614256, 0.5500943, 0.55319293, 0.59021451, 0.52358459, 0.50605947, 0.49359168, 0.47895956, 0.49320243, 0.4908336, 0.47310767, 0.51821564, 0.55105932, 0.57291504, 0.5599809, 0.46868842, 0.39620087, 0.42086934, 0.38317217, 0.45934108, 0.50048866, 0.53941991, 0.50676751, 0.46500915, 0.52993663, 0.51668366, 0.46405428, 0.44100603, 0.52726147, 0.51565458, 0.49186248, 0.49001081, 0.49367648, 0.56422294, 0.58882785, 0.51334664, 0.44386256, 0.35056709, 0.36490029, 0.39205071, 0.3677061, 0.41134736, 0.42315067, 0.35356394, 0.40324562, 0.41340007, 0.46503322, 0.44355762, 0.34854314, 0.26412842, 0.28633753, 0.32335224, 0.30761141, 0.29709569, 0.29570487, 0.28000063, 0.32802547, 0.33967726, 0.42511212, 0.46252357, 0.44244974, 0.42152907, 0.45436727, 0.50482359, 0.57339198, 0.6573356, 0.70912003, 0.60328917, 0.6395092, 0.67015805, 0.64241557, 0.62779142, 0.55028063, 0.46448736, 0.43709245, 0.46777983, 0.51789439, 0.48594916, 0.4456216, 0.52008189, 0.60548684, 0.62792473, 0.56645031, 0.62766439, 0.71829315, 0.69481356, 0.59550329, 0.58133754, 0.59014148, 0.58026655, 0.61719273, 0.67373203, 0.75573056, 0.89501633, 0.8347253, 0.87964685, 0.89015835]) test_alpha_mean = eval_alpha(self.long_data, 100, self.long_bench, 'value') test_alpha_roll = self.long_data['alpha'].values self.assertAlmostEqual(test_alpha_mean, np.nanmean(expected_alpha)) self.assertTrue(np.allclose(test_alpha_roll, expected_alpha, equal_nan=True)) def test_calmar(self): """test evaluate function eval_calmar()""" pass def test_benchmark(self): reference = self.test_data1 tr, yr = eval_benchmark(self.test_data2, reference, 'value') self.assertAlmostEqual(tr, 0.19509091) self.assertAlmostEqual(yr, 0.929154957) tr, yr = eval_benchmark(self.test_data3, reference, 'value') self.assertAlmostEqual(tr, 0.19509091) self.assertAlmostEqual(yr, 0.929154957) tr, yr = eval_benchmark(self.test_data4, reference, 'value') self.assertAlmostEqual(tr, 0.19509091) self.assertAlmostEqual(yr, 0.929154957) tr, yr = eval_benchmark(self.test_data5, reference, 'value') self.assertAlmostEqual(tr, 0.19509091) self.assertAlmostEqual(yr, 0.929154957) tr, yr = eval_benchmark(self.test_data6, reference, 'value') self.assertAlmostEqual(tr, 0.19509091) self.assertAlmostEqual(yr, 0.929154957) tr, yr = eval_benchmark(self.test_data7, reference, 'value') self.assertAlmostEqual(tr, 0.19509091) self.assertAlmostEqual(yr, 0.929154957) def test_evaluate(self): pass class TestLoop(unittest.TestCase): """通过一个假设但精心设计的例子来测试loop_step以及loop方法的正确性""" def setUp(self): # 精心设计的模拟股票名称、交易日期、以及股票价格 self.shares = ['share1', 'share2', 'share3', 'share4', 'share5', 'share6', 'share7'] self.dates = ['2016/07/01', '2016/07/04', '2016/07/05', '2016/07/06', '2016/07/07', '2016/07/08', '2016/07/11', '2016/07/12', '2016/07/13', '2016/07/14', '2016/07/15', '2016/07/18', '2016/07/19', '2016/07/20', '2016/07/21', '2016/07/22', '2016/07/25', '2016/07/26', '2016/07/27', '2016/07/28', '2016/07/29', '2016/08/01', '2016/08/02', '2016/08/03', '2016/08/04', '2016/08/05', '2016/08/08', '2016/08/09', '2016/08/10', '2016/08/11', '2016/08/12', '2016/08/15', '2016/08/16', '2016/08/17', '2016/08/18', '2016/08/19', '2016/08/22', '2016/08/23', '2016/08/24', '2016/08/25', '2016/08/26', '2016/08/29', '2016/08/30', '2016/08/31', '2016/09/01', '2016/09/02', '2016/09/05', '2016/09/06', '2016/09/07', '2016/09/08', '2016/09/09', '2016/09/12', '2016/09/13', '2016/09/14', '2016/09/15', '2016/09/16', '2016/09/19', '2016/09/20', 
'2016/09/21', '2016/09/22', '2016/09/23', '2016/09/26', '2016/09/27', '2016/09/28', '2016/09/29', '2016/09/30', '2016/10/10', '2016/10/11', '2016/10/12', '2016/10/13', '2016/10/14', '2016/10/17', '2016/10/18', '2016/10/19', '2016/10/20', '2016/10/21', '2016/10/23', '2016/10/24', '2016/10/25', '2016/10/26', '2016/10/27', '2016/10/29', '2016/10/30', '2016/10/31', '2016/11/01', '2016/11/02', '2016/11/05', '2016/11/06', '2016/11/07', '2016/11/08', '2016/11/09', '2016/11/12', '2016/11/13', '2016/11/14', '2016/11/15', '2016/11/16', '2016/11/19', '2016/11/20', '2016/11/21', '2016/11/22'] self.dates = [pd.Timestamp(date_text) for date_text in self.dates] self.prices = np.array([[5.35, 5.09, 5.03, 4.98, 4.50, 5.09, 4.75], [5.66, 4.84, 5.21, 5.44, 4.35, 5.06, 4.48], [5.79, 4.60, 5.02, 5.45, 4.07, 4.76, 4.56], [5.56, 4.63, 5.50, 5.74, 3.88, 4.62, 4.62], [5.88, 4.64, 5.07, 5.46, 3.74, 4.63, 4.62], [6.25, 4.51, 5.11, 5.45, 3.98, 4.25, 4.59], [5.93, 4.96, 5.15, 5.24, 4.08, 4.13, 4.33], [6.39, 4.65, 5.02, 5.47, 4.00, 3.91, 3.88], [6.31, 4.26, 5.10, 5.58, 4.28, 3.77, 3.47], [5.86, 3.77, 5.24, 5.36, 4.01, 3.43, 3.51], [5.61, 3.39, 4.93, 5.38, 4.14, 3.68, 3.83], [5.31, 3.76, 4.96, 5.30, 4.49, 3.63, 3.67], [5.40, 4.06, 5.40, 5.77, 4.49, 3.94, 3.79], [5.03, 3.87, 5.74, 5.75, 4.46, 4.40, 4.18], [5.38, 3.91, 5.53, 6.15, 4.13, 4.03, 4.02], [5.79, 4.13, 5.79, 6.04, 3.79, 3.93, 4.41], [6.27, 4.27, 5.68, 6.01, 4.23, 3.50, 4.65], [6.59, 4.57, 5.90, 5.71, 4.57, 3.39, 4.89], [6.91, 5.04, 5.75, 5.23, 4.92, 3.30, 4.41], [6.71, 5.31, 6.11, 5.47, 5.28, 3.25, 4.66], [6.33, 5.40, 5.77, 5.79, 5.67, 2.94, 4.37], [6.07, 5.21, 5.85, 5.82, 6.00, 2.71, 4.58], [5.98, 5.06, 5.61, 5.61, 5.89, 2.55, 4.76], [6.46, 4.69, 5.75, 5.31, 5.55, 2.21, 4.37], [6.95, 5.12, 5.50, 5.24, 5.39, 2.29, 4.16], [6.77, 5.27, 5.14, 5.41, 5.26, 2.21, 4.02], [6.70, 5.72, 5.31, 5.60, 5.31, 2.04, 3.77], [6.28, 6.10, 5.68, 5.28, 5.22, 2.39, 3.38], [6.61, 6.27, 5.73, 4.99, 4.90, 2.30, 3.07], [6.25, 6.49, 6.04, 5.09, 4.57, 2.41, 2.90], [6.47, 6.16, 6.27, 5.39, 4.96, 2.40, 2.50], [6.45, 6.26, 6.60, 5.58, 4.82, 2.79, 2.76], [6.88, 6.39, 6.10, 5.33, 4.39, 2.67, 2.29], [7.00, 6.58, 6.25, 5.48, 4.63, 2.27, 2.17], [6.59, 6.20, 6.73, 5.10, 5.05, 2.09, 1.84], [6.59, 5.70, 6.91, 5.39, 4.68, 2.55, 1.83], [6.64, 5.20, 7.01, 5.30, 5.02, 2.22, 2.21], [6.38, 5.37, 7.36, 5.04, 4.84, 2.59, 2.00], [6.10, 5.40, 7.72, 5.51, 4.60, 2.59, 1.76], [6.35, 5.22, 7.68, 5.43, 4.66, 2.95, 1.27], [6.52, 5.38, 7.62, 5.23, 4.41, 2.69, 1.40], [6.87, 5.53, 7.74, 4.99, 4.87, 2.20, 1.11], [6.84, 6.03, 7.53, 5.43, 4.42, 2.69, 1.60], [7.09, 5.77, 7.46, 5.40, 4.08, 2.65, 1.23], [6.88, 5.66, 7.84, 5.60, 4.16, 2.63, 1.59], [6.84, 6.08, 8.11, 5.66, 4.10, 2.14, 1.50], [6.98, 5.62, 8.04, 6.01, 4.43, 2.39, 1.80], [7.02, 5.63, 7.65, 5.64, 4.07, 1.95, 1.55], [7.13, 6.11, 7.52, 5.67, 3.97, 2.32, 1.35], [7.59, 6.03, 7.67, 5.30, 4.16, 2.69, 1.51], [7.61, 6.27, 7.47, 4.91, 4.12, 2.51, 1.08], [7.21, 6.28, 7.44, 5.37, 4.04, 2.62, 1.06], [7.48, 6.52, 7.59, 5.75, 3.84, 2.16, 1.43], [7.66, 7.00, 7.94, 6.08, 3.46, 2.35, 1.43], [7.51, 7.34, 8.25, 6.58, 3.18, 2.31, 1.74], [7.12, 7.34, 7.77, 6.78, 3.10, 1.96, 1.44], [6.97, 7.68, 8.03, 7.20, 3.55, 2.35, 1.83], [6.67, 8.09, 7.87, 7.65, 3.66, 2.58, 1.71], [6.20, 7.68, 7.58, 8.00, 3.66, 2.40, 2.12], [6.34, 7.58, 7.33, 7.92, 3.29, 2.20, 2.45], [6.22, 7.46, 7.22, 8.30, 2.80, 2.31, 2.85], [5.98, 7.59, 6.86, 8.46, 2.88, 2.16, 2.79], [6.37, 7.19, 7.18, 7.99, 3.04, 2.16, 2.91], [6.56, 7.40, 7.54, 8.19, 3.45, 2.20, 3.26], [6.26, 7.48, 7.24, 8.61, 3.88, 1.73, 3.14], [6.69, 7.93, 
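        # (Translation of the setUp comment above: carefully designed simulated share names,
        # trading dates and share prices.) self.prices holds one row of simulated close prices
        # per trading day for the seven shares listed in self.shares.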
6.85, 8.66, 3.58, 1.93, 3.53], [7.13, 8.23, 6.60, 8.91, 3.60, 2.25, 3.65], [6.83, 8.35, 6.65, 9.08, 3.97, 2.69, 3.69], [7.31, 8.44, 6.74, 9.34, 4.05, 2.59, 3.50], [7.43, 8.35, 7.19, 8.96, 4.40, 2.14, 3.25], [7.54, 8.58, 7.14, 8.98, 4.06, 1.68, 3.64], [7.18, 8.82, 6.88, 8.50, 3.60, 1.98, 4.00], [7.21, 9.09, 7.14, 8.65, 3.61, 2.14, 3.63], [7.45, 9.02, 7.30, 8.94, 4.10, 1.89, 3.78], [7.37, 8.87, 6.95, 8.63, 3.74, 1.97, 3.42], [6.88, 9.22, 7.02, 8.65, 4.02, 1.99, 3.76], [7.08, 9.04, 7.38, 8.40, 3.95, 2.37, 3.62], [6.75, 8.60, 7.50, 8.38, 3.81, 2.14, 3.67], [6.60, 8.48, 7.60, 8.23, 3.71, 2.35, 3.61], [6.21, 8.71, 7.15, 8.04, 3.94, 1.86, 3.39], [6.36, 8.79, 7.30, 7.91, 4.43, 2.14, 3.43], [6.82, 8.93, 7.80, 7.57, 4.07, 2.39, 3.33], [6.45, 9.36, 8.15, 7.73, 4.04, 2.53, 3.03], [6.85, 9.68, 8.40, 7.74, 4.34, 2.47, 3.28], [6.48, 10.16, 8.87, 8.07, 4.80, 2.93, 3.46], [6.10, 10.56, 8.53, 7.99, 5.18, 3.09, 3.25], [5.64, 10.63, 8.94, 7.92, 4.90, 2.93, 2.95], [6.01, 10.55, 8.52, 8.40, 5.40, 3.22, 2.87], [6.21, 10.65, 8.80, 8.80, 5.73, 3.06, 2.63], [6.61, 10.55, 8.92, 8.47, 5.62, 2.90, 2.40], [7.02, 10.19, 9.20, 8.07, 5.20, 2.68, 2.53], [7.04, 10.48, 8.71, 7.87, 4.85, 2.46, 2.96], [6.77, 10.36, 8.25, 8.02, 5.18, 2.41, 3.26], [7.09, 10.03, 8.19, 8.39, 4.72, 2.74, 2.97], [6.65, 10.24, 7.80, 8.69, 4.62, 3.15, 3.16], [7.07, 10.01, 7.69, 8.81, 4.55, 3.40, 3.58], [6.80, 10.14, 7.23, 8.99, 4.37, 3.82, 3.23], [6.79, 10.31, 6.98, 9.10, 4.26, 4.02, 3.62], [6.48, 9.88, 7.07, 8.90, 4.25, 3.76, 3.13], [6.39, 10.05, 6.95, 8.87, 4.59, 4.10, 2.93]]) # 精心设计的模拟PT持股仓位目标信号: self.pt_signals = np.array([[0.000, 0.000, 0.000, 0.000, 0.250, 0.000, 0.000], [0.000, 0.000, 0.000, 0.000, 0.250, 0.000, 0.000], [0.000, 0.000, 0.000, 0.000, 0.250, 0.100, 0.150], [0.200, 0.200, 0.000, 0.000, 0.250, 0.100, 0.150], [0.200, 0.200, 0.100, 0.000, 0.250, 0.100, 0.150], [0.200, 0.200, 0.100, 0.000, 0.062, 0.100, 0.150], [0.200, 0.200, 0.100, 0.000, 0.062, 0.100, 0.150], [0.200, 0.200, 0.100, 0.000, 0.062, 0.100, 0.150], [0.200, 0.200, 0.100, 0.000, 0.062, 0.100, 0.150], [0.200, 0.200, 0.100, 0.000, 0.062, 0.100, 0.150], [0.200, 0.200, 0.100, 0.000, 0.062, 0.100, 0.150], [0.200, 0.200, 0.100, 0.000, 0.062, 0.100, 0.150], [0.200, 0.200, 0.100, 0.000, 0.062, 0.100, 0.150], [0.133, 0.200, 0.100, 0.000, 0.062, 0.100, 0.150], [0.133, 0.200, 0.100, 0.000, 0.062, 0.100, 0.150], [0.133, 0.200, 0.100, 0.000, 0.062, 0.100, 0.150], [0.133, 0.200, 0.100, 0.000, 0.062, 0.100, 0.150], [0.133, 0.200, 0.050, 0.000, 0.062, 0.100, 0.150], [0.133, 0.200, 0.050, 0.000, 0.062, 0.100, 0.150], [0.133, 0.200, 0.050, 0.000, 0.062, 0.100, 0.150], [0.133, 0.200, 0.050, 0.000, 0.062, 0.100, 0.150], [0.133, 0.200, 0.050, 0.000, 0.062, 0.100, 0.000], [0.133, 0.200, 0.050, 0.000, 0.262, 0.100, 0.000], [0.133, 0.200, 0.050, 0.000, 0.262, 0.100, 0.000], [0.133, 0.200, 0.050, 0.000, 0.262, 0.100, 0.000], [0.066, 0.200, 0.050, 0.150, 0.262, 0.100, 0.000], [0.066, 0.200, 0.050, 0.150, 0.262, 0.100, 0.000], [0.066, 0.200, 0.050, 0.150, 0.262, 0.100, 0.000], [0.066, 0.200, 0.050, 0.150, 0.262, 0.100, 0.000], [0.066, 0.200, 0.050, 0.150, 0.262, 0.100, 0.000], [0.066, 0.200, 0.250, 0.150, 0.000, 0.300, 0.000], [0.386, 0.136, 0.170, 0.102, 0.000, 0.204, 0.000], [0.386, 0.136, 0.170, 0.102, 0.000, 0.204, 0.000], [0.386, 0.136, 0.170, 0.102, 0.000, 0.204, 0.000], [0.386, 0.136, 0.170, 0.102, 0.000, 0.204, 0.000], [0.386, 0.136, 0.170, 0.102, 0.000, 0.204, 0.000], [0.386, 0.136, 0.170, 0.102, 0.000, 0.204, 0.000], [0.386, 0.136, 0.170, 0.102, 0.000, 0.204, 0.000], [0.386, 0.136, 0.170, 
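        # (Translation of the comment above: carefully designed simulated PT position-target
        # signals.) Each row appears to give the target portfolio weight of every share for
        # one trading day; consecutive identical rows correspond to days without a rebalancing
        # signal.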
0.102, 0.000, 0.204, 0.000], [0.386, 0.136, 0.170, 0.102, 0.000, 0.204, 0.000], [0.386, 0.136, 0.170, 0.102, 0.000, 0.204, 0.000], [0.386, 0.136, 0.170, 0.102, 0.000, 0.204, 0.000], [0.460, 0.119, 0.149, 0.089, 0.000, 0.179, 0.000], [0.460, 0.119, 0.149, 0.089, 0.000, 0.179, 0.000], [0.460, 0.119, 0.149, 0.089, 0.000, 0.179, 0.000], [0.446, 0.116, 0.145, 0.087, 0.000, 0.087, 0.116], [0.446, 0.116, 0.145, 0.087, 0.000, 0.087, 0.116], [0.446, 0.116, 0.145, 0.087, 0.000, 0.087, 0.116], [0.446, 0.116, 0.145, 0.087, 0.000, 0.087, 0.116], [0.446, 0.116, 0.145, 0.087, 0.000, 0.087, 0.116], [0.400, 0.208, 0.130, 0.078, 0.000, 0.078, 0.104], [0.400, 0.208, 0.130, 0.078, 0.000, 0.078, 0.104], [0.400, 0.208, 0.130, 0.078, 0.000, 0.078, 0.104], [0.400, 0.208, 0.130, 0.078, 0.000, 0.078, 0.104], [0.400, 0.208, 0.130, 0.078, 0.000, 0.078, 0.104], [0.400, 0.208, 0.130, 0.078, 0.000, 0.078, 0.104], [0.400, 0.208, 0.130, 0.078, 0.000, 0.078, 0.104], [0.400, 0.208, 0.130, 0.078, 0.000, 0.078, 0.104], [0.400, 0.208, 0.130, 0.078, 0.000, 0.078, 0.104], [0.400, 0.208, 0.130, 0.078, 0.000, 0.078, 0.104], [0.370, 0.193, 0.120, 0.072, 0.072, 0.072, 0.096], [0.000, 0.222, 0.138, 0.222, 0.083, 0.222, 0.111], [0.000, 0.222, 0.138, 0.222, 0.083, 0.222, 0.111], [0.121, 0.195, 0.121, 0.195, 0.073, 0.195, 0.097], [0.121, 0.195, 0.121, 0.195, 0.073, 0.195, 0.097], [0.121, 0.195, 0.121, 0.195, 0.073, 0.195, 0.097], [0.121, 0.195, 0.121, 0.195, 0.073, 0.195, 0.097], [0.121, 0.195, 0.121, 0.195, 0.073, 0.195, 0.097], [0.121, 0.195, 0.121, 0.195, 0.073, 0.195, 0.097], [0.121, 0.195, 0.121, 0.195, 0.073, 0.195, 0.097], [0.121, 0.195, 0.121, 0.195, 0.073, 0.195, 0.097], [0.200, 0.320, 0.200, 0.000, 0.120, 0.000, 0.160], [0.200, 0.320, 0.200, 0.000, 0.120, 0.000, 0.160], [0.200, 0.320, 0.200, 0.000, 0.120, 0.000, 0.160], [0.200, 0.320, 0.200, 0.000, 0.120, 0.000, 0.160], [0.200, 0.320, 0.200, 0.000, 0.120, 0.000, 0.160], [0.200, 0.320, 0.200, 0.000, 0.120, 0.000, 0.160], [0.200, 0.320, 0.200, 0.000, 0.120, 0.000, 0.160], [0.200, 0.320, 0.200, 0.000, 0.120, 0.000, 0.160], [0.047, 0.380, 0.238, 0.000, 0.142, 0.000, 0.190], [0.047, 0.380, 0.238, 0.000, 0.142, 0.000, 0.190], [0.043, 0.434, 0.217, 0.000, 0.130, 0.000, 0.173], [0.043, 0.434, 0.217, 0.000, 0.130, 0.000, 0.173], [0.043, 0.434, 0.217, 0.000, 0.130, 0.000, 0.173], [0.043, 0.434, 0.217, 0.000, 0.130, 0.000, 0.173], [0.043, 0.434, 0.217, 0.000, 0.130, 0.000, 0.173], [0.043, 0.434, 0.217, 0.000, 0.130, 0.000, 0.173], [0.045, 0.454, 0.227, 0.000, 0.000, 0.000, 0.272], [0.045, 0.454, 0.227, 0.000, 0.000, 0.000, 0.272], [0.050, 0.000, 0.250, 0.000, 0.000, 0.000, 0.300], [0.050, 0.000, 0.250, 0.000, 0.000, 0.000, 0.300], [0.050, 0.000, 0.250, 0.000, 0.000, 0.000, 0.300], [0.050, 0.000, 0.250, 0.000, 0.000, 0.000, 0.300], [0.050, 0.000, 0.250, 0.000, 0.000, 0.000, 0.300], [0.050, 0.000, 0.250, 0.000, 0.000, 0.000, 0.300], [0.050, 0.000, 0.250, 0.000, 0.000, 0.000, 0.300], [0.050, 0.000, 0.250, 0.000, 0.000, 0.000, 0.300], [0.000, 0.000, 0.400, 0.000, 0.000, 0.000, 0.300], [0.000, 0.000, 0.400, 0.000, 0.000, 0.000, 0.300], [0.000, 0.000, 0.400, 0.000, 0.000, 0.000, 0.300]]) # 精心设计的模拟PS比例交易信号,与模拟PT信号高度相似 self.ps_signals = np.array([[0.000, 0.000, 0.000, 0.000, 0.250, 0.000, 0.000], [0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000], [0.000, 0.000, 0.000, 0.000, 0.000, 0.100, 0.150], [0.200, 0.200, 0.000, 0.000, 0.000, 0.000, 0.000], [0.000, 0.000, 0.100, 0.000, 0.000, 0.000, 0.000], [0.000, 0.000, 0.000, 0.000, -0.750, 0.000, 0.000], [0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 
0.000], [0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000], [0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000], [0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000], [0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000], [0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000], [0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000], [-0.333, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000], [0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000], [0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000], [0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000], [0.000, 0.000, -0.500, 0.000, 0.000, 0.000, 0.000], [0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000], [0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000], [0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000], [0.000, 0.000, 0.000, 0.000, 0.000, 0.000, -1.000], [0.000, 0.000, 0.000, 0.000, 0.200, 0.000, 0.000], [0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000], [0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000], [-0.500, 0.000, 0.000, 0.150, 0.000, 0.000, 0.000], [0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000], [0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000], [0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000], [0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000], [0.000, 0.000, 0.200, 0.000, -1.000, 0.200, 0.000], [0.500, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000], [0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000], [0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000], [0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000], [0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000], [0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000], [0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000], [0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000], [0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000], [0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000], [0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000], [0.200, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000], [0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000], [0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000], [0.000, 0.000, 0.000, 0.000, 0.000, -0.500, 0.200], [0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000], [0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000], [0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000], [0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000], [0.000, 0.200, 0.000, 0.000, 0.000, 0.000, 0.000], [0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000], [0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000], [0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000], [0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000], [0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000], [0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000], [0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000], [0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000], [0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000], [0.000, 0.000, 0.000, 0.000, 0.150, 0.000, 0.000], [-1.000, 0.000, 0.000, 0.250, 0.000, 0.250, 0.000], [0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000], [0.250, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000], [0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000], [0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000], [0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000], [0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000], [0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000], [0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000], [0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000], [0.000, 0.000, 0.000, -1.000, 0.000, -1.000, 0.000], [0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000], [0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000], [0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000], [0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000], [0.000, 0.000, 
0.000, 0.000, 0.000, 0.000, 0.000], [0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000], [0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000], [-0.800, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000], [0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000], [0.000, 0.100, 0.000, 0.000, 0.000, 0.000, 0.000], [0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000], [0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000], [0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000], [0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000], [0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000], [0.000, 0.000, 0.000, 0.000, -1.000, 0.000, 0.100], [0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000], [0.000, -1.000, 0.000, 0.000, 0.000, 0.000, 0.000], [0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000], [0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000], [0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000], [0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000], [0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000], [0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000], [0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000], [-1.000, 0.000, 0.150, 0.000, 0.000, 0.000, 0.000], [0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000], [0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000]]) # 精心设计的模拟VS股票交易信号,与模拟PS信号类似 self.vs_signals = np.array([[000, 000, 000, 000, 500, 000, 000], [000, 000, 000, 000, 000, 000, 000], [000, 000, 000, 000, 000, 300, 300], [400, 400, 000, 000, 000, 000, 000], [000, 000, 250, 000, 000, 000, 000], [000, 000, 000, 000, -400, 000, 000], [000, 000, 000, 000, 000, 000, 000], [000, 000, 000, 000, 000, 000, 000], [000, 000, 000, 000, 000, 000, 000], [000, 000, 000, 000, 000, 000, 000], [000, 000, 000, 000, 000, 000, 000], [000, 000, 000, 000, 000, 000, 000], [000, 000, 000, 000, 000, 000, 000], [-200, 000, 000, 000, 000, 000, 000], [000, 000, 000, 000, 000, 000, 000], [000, 000, 000, 000, 000, 000, 000], [000, 000, 000, 000, 000, 000, 000], [000, 000, -200, 000, 000, 000, 000], [000, 000, 000, 000, 000, 000, 000], [000, 000, 000, 000, 000, 000, 000], [000, 000, 000, 000, 000, 000, 000], [000, 000, 000, 000, 000, 000, -300], [000, 000, 000, 000, 500, 000, 000], [000, 000, 000, 000, 000, 000, 000], [000, 000, 000, 000, 000, 000, 000], [-200, 000, 000, 300, 000, 000, 000], [000, 000, 000, 000, 000, 000, 000], [000, 000, 000, 000, 000, 000, 000], [000, 000, 000, 000, 000, 000, 000], [000, 000, 000, 000, 000, 000, 000], [000, 000, 400, 000, -300, 600, 000], [500, 000, 000, 000, 000, 000, 000], [000, 000, 000, 000, 000, 000, 000], [000, 000, 000, 000, 000, 000, 000], [000, 000, 000, 000, 000, 000, 000], [000, 000, 000, 000, 000, 000, 000], [000, 000, 000, 000, 000, 000, 000], [000, 000, 000, 000, 000, 000, 000], [000, 000, 000, 000, 000, 000, 000], [000, 000, 000, 000, 000, 000, 000], [000, 000, 000, 000, 000, 000, 000], [000, 000, 000, 000, 000, 000, 000], [600, 000, 000, 000, 000, 000, 000], [000, 000, 000, 000, 000, 000, 000], [000, 000, 000, 000, 000, 000, 000], [000, 000, 000, 000, 000, -400, 600], [000, 000, 000, 000, 000, 000, 000], [000, 000, 000, 000, 000, 000, 000], [000, 000, 000, 000, 000, 000, 000], [000, 000, 000, 000, 000, 000, 000], [000, 500, 000, 000, 000, 000, 000], [000, 000, 000, 000, 000, 000, 000], [000, 000, 000, 000, 000, 000, 000], [000, 000, 000, 000, 000, 000, 000], [000, 000, 000, 000, 000, 000, 000], [000, 000, 000, 000, 000, 000, 000], [000, 000, 000, 000, 000, 000, 000], [000, 000, 000, 000, 000, 000, 000], [000, 000, 000, 000, 000, 000, 000], [000, 000, 000, 000, 000, 000, 000], [000, 000, 000, 000, 300, 000, 000], [-500, 000, 000, 
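        # (Translation of the comment above: carefully designed simulated VS share-volume
        # trading signals, similar in structure to the PS signals.) Unlike the PT weights and
        # PS proportions, the VS entries appear to be integer share quantities, positive to
        # buy and negative to sell.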
500, 000, 200, 000], [000, 000, 000, 000, 000, 000, 000], [500, 000, 000, 000, 000, 000, 000], [000, 000, 000, 000, 000, 000, 000], [000, 000, 000, 000, 000, 000, 000], [000, 000, 000, 000, 000, 000, 000], [000, 000, 000, 000, 000, 000, 000], [000, 000, 000, 000, 000, 000, 000], [000, 000, 000, 000, 000, 000, 000], [000, 000, 000, 000, 000, 000, 000], [000, 000, 000, -700, 000, -600, 000], [000, 000, 000, 000, 000, 000, 000], [000, 000, 000, 000, 000, 000, 000], [000, 000, 000, 000, 000, 000, 000], [000, 000, 000, 000, 000, 000, 000], [000, 000, 000, 000, 000, 000, 000], [000, 000, 000, 000, 000, 000, 000], [000, 000, 000, 000, 000, 000, 000], [-400, 000, 000, 000, 000, 000, 000], [000, 000, 000, 000, 000, 000, 000], [000, 300, 000, 000, 000, 000, 000], [000, 000, 000, 000, 000, 000, 000], [000, 000, 000, 000, 000, 000, 000], [000, 000, 000, 000, 000, 000, 000], [000, 000, 000, 000, 000, 000, 000], [000, 000, 000, 000, 000, 000, 000], [000, 000, 000, 000, -600, 000, 300], [000, 000, 000, 000, 000, 000, 000], [000, -300, 000, 000, 000, 000, 000], [000, 000, 000, 000, 000, 000, 000], [000, 000, 000, 000, 000, 000, 000], [000, 000, 000, 000, 000, 000, 000], [000, 000, 000, 000, 000, 000, 000], [000, 000, 000, 000, 000, 000, 000], [000, 000, 000, 000, 000, 000, 000], [000, 000, 000, 000, 000, 000, 000], [-200, 000, 700, 000, 000, 000, 000], [000, 000, 000, 000, 000, 000, 000], [000, 000, 000, 000, 000, 000, 000]]) # 精心设计的模拟多价格交易信号,模拟50个交易日对三只股票的操作 self.multi_shares = ['000010', '000030', '000039'] self.multi_dates = ['2016/07/01', '2016/07/04', '2016/07/05', '2016/07/06', '2016/07/07', '2016/07/08', '2016/07/11', '2016/07/12', '2016/07/13', '2016/07/14', '2016/07/15', '2016/07/18', '2016/07/19', '2016/07/20', '2016/07/21', '2016/07/22', '2016/07/25', '2016/07/26', '2016/07/27', '2016/07/28', '2016/07/29', '2016/08/01', '2016/08/02', '2016/08/03', '2016/08/04', '2016/08/05', '2016/08/08', '2016/08/09', '2016/08/10', '2016/08/11', '2016/08/12', '2016/08/15', '2016/08/16', '2016/08/17', '2016/08/18', '2016/08/19', '2016/08/22', '2016/08/23', '2016/08/24', '2016/08/25', '2016/08/26', '2016/08/29', '2016/08/30', '2016/08/31', '2016/09/01', '2016/09/02', '2016/09/05', '2016/09/06', '2016/09/07', '2016/09/08'] self.multi_dates = [pd.Timestamp(date_text) for date_text in self.multi_dates] # 操作的交易价格包括开盘价、最高价和收盘价 self.multi_prices_open = np.array([[10.02, 9.88, 7.26], [10.00, 9.88, 7.00], [9.98, 9.89, 6.88], [9.97, 9.75, 6.91], [9.99, 9.74, np.nan], [10.01, 9.80, 6.81], [10.04, 9.62, 6.63], [10.06, 9.65, 6.45], [10.06, 9.58, 6.16], [10.11, 9.67, 6.24], [10.11, 9.81, 5.96], [10.07, 9.80, 5.97], [10.06, 10.00, 5.96], [10.09, 9.95, 6.20], [10.03, 10.10, 6.35], [10.02, 10.06, 6.11], [10.06, 10.14, 6.37], [10.08, 9.90, 5.58], [9.99, 10.20, 5.65], [10.00, 10.29, 5.65], [10.03, 9.86, 5.19], [10.02, 9.48, 5.42], [10.06, 10.01, 6.30], [10.03, 10.24, 6.15], [9.97, 10.26, 6.05], [9.94, 10.24, 5.89], [9.83, 10.12, 5.22], [9.78, 10.65, 5.20], [9.77, 10.64, 5.07], [9.91, 10.56, 6.04], [9.92, 10.42, 6.12], [9.97, 10.43, 5.85], [9.91, 10.29, 5.67], [9.90, 10.30, 6.02], [9.88, 10.44, 6.04], [9.91, 10.60, 7.07], [9.63, 10.67, 7.64], [9.64, 10.46, 7.99], [9.57, 10.39, 7.59], [9.55, 10.90, 8.73], [9.58, 11.01, 8.72], [9.61, 11.01, 8.97], [9.62, np.nan, 8.58], [9.55, np.nan, 8.71], [9.57, 10.82, 8.77], [9.61, 11.02, 8.40], [9.63, 10.96, 7.95], [9.64, 11.55, 7.76], [9.61, 11.74, 8.25], [9.56, 11.80, 7.51]]) self.multi_prices_high = np.array([[10.07, 9.91, 7.41], [10.00, 10.04, 7.31], [10.00, 9.93, 7.14], [10.00, 10.04, 
7.00], [10.03, 9.84, np.nan], [10.03, 9.88, 6.82], [10.04, 9.99, 6.96], [10.09, 9.70, 6.85], [10.10, 9.67, 6.50], [10.14, 9.71, 6.34], [10.11, 9.85, 6.04], [10.10, 9.90, 6.02], [10.09, 10.00, 6.12], [10.09, 10.20, 6.38], [10.10, 10.11, 6.43], [10.05, 10.18, 6.46], [10.07, 10.21, 6.43], [10.09, 10.26, 6.27], [10.10, 10.38, 5.77], [10.00, 10.47, 6.01], [10.04, 10.42, 5.67], [10.04, 10.07, 5.67], [10.06, 10.24, 6.35], [10.09, 10.27, 6.32], [10.05, 10.38, 6.43], [9.97, 10.43, 6.36], [9.96, 10.39, 5.79], [9.86, 10.65, 5.47], [9.77, 10.84, 5.65], [9.92, 10.65, 6.04], [9.94, 10.73, 6.14], [9.97, 10.63, 6.23], [9.97, 10.51, 5.83], [9.92, 10.35, 6.25], [9.92, 10.46, 6.27], [9.92, 10.63, 7.12], [9.93, 10.74, 7.82], [9.64, 10.76, 8.14], [9.58, 10.54, 8.27], [9.60, 11.02, 8.92], [9.58, 11.12, 8.76], [9.62, 11.17, 9.15], [9.62, np.nan, 8.90], [9.64, np.nan, 9.01], [9.59, 10.92, 9.16], [9.62, 11.15, 9.00], [9.63, 11.11, 8.27], [9.70, 11.55, 7.99], [9.66, 11.95, 8.33], [9.64, 11.93, 8.25]]) self.multi_prices_close = np.array([[10.04, 9.68, 6.64], [10.00, 9.87, 7.26], [10.00, 9.86, 7.03], [9.99, 9.87, 6.87], [9.97, 9.79, np.nan], [9.99, 9.82, 6.64], [10.03, 9.80, 6.85], [10.03, 9.66, 6.70], [10.06, 9.62, 6.39], [10.06, 9.58, 6.22], [10.11, 9.69, 5.92], [10.09, 9.78, 5.91], [10.07, 9.75, 6.11], [10.06, 9.96, 5.91], [10.09, 9.90, 6.23], [10.03, 10.04, 6.28], [10.03, 10.06, 6.28], [10.06, 10.08, 6.27], [10.08, 10.24, 5.70], [10.00, 10.24, 5.56], [9.99, 10.24, 5.67], [10.03, 9.86, 5.16], [10.03, 10.13, 5.69], [10.06, 10.12, 6.32], [10.03, 10.10, 6.14], [9.97, 10.25, 6.25], [9.94, 10.24, 5.79], [9.83, 10.22, 5.26], [9.77, 10.75, 5.05], [9.84, 10.64, 5.45], [9.91, 10.56, 6.06], [9.93, 10.60, 6.21], [9.96, 10.42, 5.69], [9.91, 10.25, 5.46], [9.91, 10.24, 6.02], [9.88, 10.49, 6.69], [9.91, 10.57, 7.43], [9.64, 10.63, 7.72], [9.56, 10.48, 8.16], [9.57, 10.37, 7.83], [9.55, 10.96, 8.70], [9.57, 11.02, 8.71], [9.61, np.nan, 8.88], [9.61, np.nan, 8.54], [9.55, 10.88, 8.87], [9.57, 10.87, 8.87], [9.63, 11.01, 8.18], [9.64, 11.01, 7.80], [9.65, 11.58, 7.97], [9.62, 11.80, 8.25]]) # 交易信号包括三组,分别作用与开盘价、最高价和收盘价 # 此时的关键是股票交割期的处理,交割期不为0时,以交易日为单位交割 self.multi_signals = [] # multisignal的第一组信号为开盘价信号 self.multi_signals.append( pd.DataFrame(np.array([[0.000, 0.000, 0.000], [0.000, -0.500, 0.000], [0.000, -0.500, 0.000], [0.000, 0.000, 0.000], [0.150, 0.000, 0.000], [0.000, 0.000, 0.000], [0.000, 0.000, 0.000], [0.000, 0.000, 0.000], [0.000, 0.000, 0.000], [0.000, 0.300, 0.000], [0.000, 0.000, 0.000], [0.000, 0.000, 0.000], [0.000, 0.000, 0.300], [0.000, 0.000, 0.000], [0.000, 0.000, 0.000], [0.000, 0.000, 0.000], [0.000, 0.000, 0.000], [0.000, 0.000, 0.000], [0.000, 0.000, 0.000], [0.000, 0.000, 0.000], [0.000, 0.000, 0.000], [0.000, 0.350, 0.250], [0.000, 0.000, 0.000], [0.000, 0.000, 0.000], [0.000, 0.000, 0.000], [0.000, 0.000, 0.000], [0.000, 0.000, 0.000], [0.000, 0.000, 0.000], [0.100, 0.000, 0.350], [0.000, 0.000, 0.000], [0.000, 0.000, 0.000], [0.000, 0.000, 0.000], [0.000, 0.000, 0.000], [0.000, 0.000, 0.000], [0.000, 0.000, 0.000], [0.000, 0.000, 0.000], [0.000, 0.000, 0.000], [0.000, 0.000, 0.000], [0.000, 0.000, 0.000], [0.000, 0.200, 0.000], [0.000, 0.000, 0.000], [0.000, 0.000, 0.000], [0.000, 0.000, 0.000], [0.000, 0.000, 0.000], [0.050, 0.000, 0.000], [0.000, 0.000, 0.000], [0.000, 0.000, 0.000], [0.000, 0.000, 0.000], [0.000, 0.000, 0.000], [0.000, 0.000, 0.000]]), columns=self.multi_shares, index=self.multi_dates ) ) # 第二组信号为最高价信号 self.multi_signals.append( pd.DataFrame(np.array([[0.000, 0.150, 0.000], [0.000, 
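        # (Translation of the section comments above: the trading signals come in three groups
        # that act on the open, high and close prices respectively; the key point being tested
        # is the settlement period - when it is not zero, settlement happens in units of
        # trading days after the signal day.)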
0.000, 0.000], [0.000, 0.000, 0.000], [0.000, 0.000, 0.000], [0.000, 0.000, 0.000], [0.000, 0.000, 0.000], [0.000, 0.000, 0.000], [0.000, 0.000, 0.000], [0.000, 0.000, 0.000], [0.000, 0.000, 0.000], [0.000, -0.200, 0.000], [0.000, 0.000, 0.000], [0.000, 0.000, 0.200], [0.000, 0.000, 0.000], [0.000, 0.000, 0.000], [0.000, 0.000, 0.000], [0.000, 0.000, 0.000], [0.000, 0.000, 0.000], [0.000, 0.000, 0.000], [0.000, 0.000, 0.000], [0.000, 0.000, 0.000], [0.000, 0.000, 0.000], [0.000, 0.000, 0.000], [0.000, 0.000, 0.000], [0.000, 0.000, 0.000], [0.000, 0.000, 0.000], [0.000, 0.000, 0.000], [0.000, 0.000, 0.000], [0.000, 0.000, 0.000], [0.000, 0.000, 0.000], [0.000, 0.000, 0.000], [0.000, 0.000, 0.000], [0.000, 0.000, 0.000], [0.000, 0.000, 0.000], [0.000, 0.000, 0.000], [0.000, 0.000, 0.000], [0.000, 0.000, 0.000], [0.000, 0.000, 0.000], [0.000, 0.000, 0.000], [0.000, 0.000, 0.000], [0.000, 0.000, 0.000], [0.000, 0.000, 0.000], [0.000, 0.000, 0.000], [0.000, 0.000, 0.000], [0.000, 0.000, 0.000], [0.000, 0.000, 0.000], [0.000, 0.000, 0.000], [0.000, 0.000, 0.000], [0.000, 0.000, 0.000], [0.000, 0.000, 0.000]]), columns=self.multi_shares, index=self.multi_dates ) ) # 第三组信号为收盘价信号 self.multi_signals.append( pd.DataFrame(np.array([[0.000, 0.200, 0.000], [0.000, 0.000, 0.000], [0.000, 0.000, 0.000], [0.000, 0.000, 0.000], [0.000, 0.000, 0.000], [0.000, 0.000, 0.000], [0.000, 0.000, 0.000], [0.000, 0.000, 0.000], [0.000, 0.000, 0.000], [0.000, 0.000, 0.000], [-0.500, 0.000, 0.000], [0.000, 0.000, 0.000], [0.000, 0.000, 0.000], [0.000, 0.000, 0.000], [0.000, 0.000, 0.000], [0.000, 0.000, 0.000], [0.000, 0.000, -0.800], [0.000, 0.000, 0.000], [0.000, -1.000, 0.000], [0.000, 0.000, 0.000], [0.000, 0.000, 0.000], [0.000, 0.000, 0.000], [-0.750, 0.000, 0.000], [0.000, 0.000, -0.850], [0.000, 0.000, 0.000], [0.000, -0.700, 0.000], [0.000, 0.000, 0.000], [0.000, 0.000, 0.000], [0.000, 0.000, 0.000], [0.000, 0.000, 0.000], [0.000, 0.000, 0.000], [0.000, 0.000, 0.000], [0.000, 0.000, 0.000], [0.000, 0.000, 0.000], [0.000, 0.000, 0.000], [0.000, 0.000, 0.000], [0.000, 0.000, 0.000], [0.000, 0.000, 0.000], [0.000, 0.000, 0.000], [0.000, 0.000, 0.000], [0.000, 0.000, 0.000], [0.000, 0.000, 0.000], [0.000, 0.000, 0.000], [0.000, 0.000, 0.000], [0.000, 0.000, -1.000], [0.000, 0.000, 0.000], [0.000, 0.000, 0.000], [-1.000, 0.000, 0.000], [0.000, -1.000, 0.000], [0.000, 0.000, 0.000]]), columns=self.multi_shares, index=self.multi_dates ) ) # 交易回测所需的价格也有三组,分别是开盘价、最高价和收盘价 self.multi_histories = [] # multisignal的第一组信号为开盘价信号 self.multi_histories.append( pd.DataFrame(self.multi_prices_open, columns=self.multi_shares, index=self.multi_dates ) ) # 第二组信号为最高价信号 self.multi_histories.append( pd.DataFrame(self.multi_prices_high, columns=self.multi_shares, index=self.multi_dates ) ) # 第三组信号为收盘价信号 self.multi_histories.append( pd.DataFrame(self.multi_prices_close, columns=self.multi_shares, index=self.multi_dates ) ) # 设置回测参数 self.cash = qt.CashPlan(['2016/07/01', '2016/08/12', '2016/09/23'], [10000, 10000, 10000]) self.rate = qt.Cost(buy_fix=0, sell_fix=0, buy_rate=0, sell_rate=0, buy_min=0, sell_min=0, slipage=0) self.rate2 = qt.Cost(buy_fix=0, sell_fix=0, buy_rate=0, sell_rate=0, buy_min=10, sell_min=5, slipage=0) self.pt_signal_hp = dataframe_to_hp( pd.DataFrame(self.pt_signals, index=self.dates, columns=self.shares), htypes='close' ) self.ps_signal_hp = dataframe_to_hp( pd.DataFrame(self.ps_signals, index=self.dates, columns=self.shares), htypes='close' ) self.vs_signal_hp = dataframe_to_hp( pd.DataFrame(self.vs_signals, 
index=self.dates, columns=self.shares), htypes='close' ) self.multi_signal_hp = stack_dataframes( self.multi_signals, stack_along='htypes', htypes='open, high, close' ) self.history_list = dataframe_to_hp( pd.DataFrame(self.prices, index=self.dates, columns=self.shares), htypes='close' ) self.multi_history_list = stack_dataframes( self.multi_histories, stack_along='htypes', htypes='open, high, close' ) # 模拟PT信号回测结果 # PT信号,先卖后买,交割期为0 self.pt_res_sb00 = np.array( [[0.0000, 0.0000, 0.0000, 0.0000, 555.5556, 0.0000, 0.0000, 7500.0000, 0.0000, 10000.0000], [0.0000, 0.0000, 0.0000, 0.0000, 555.5556, 0.0000, 0.0000, 7500.0000, 0.0000, 9916.6667], [0.0000, 0.0000, 0.0000, 0.0000, 555.5556, 0.0000, 321.0892, 6035.8333, 0.0000, 9761.1111], [348.0151, 417.9188, 0.0000, 0.0000, 555.5556, 0.0000, 321.0892, 2165.9050, 0.0000, 9674.8209], [348.0151, 417.9188, 0.0000, 0.0000, 555.5556, 0.0000, 321.0892, 2165.9050, 0.0000, 9712.5872], [348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9910.7240], [348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9919.3782], [348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9793.0692], [348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9513.8217], [348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9123.5935], [348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9000.5995], [348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9053.4865], [348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9248.7142], [348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9161.1372], [348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9197.3369], [348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9504.6981], [348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9875.2461], [348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 10241.5400], [348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 10449.2398], [348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 10628.3269], [348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 10500.7893], [348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 0.0000, 5233.1396, 0.0000, 10449.2776], [348.0151, 417.9188, 0.0000, 0.0000, 459.8694, 0.0000, 0.0000, 3433.8551, 0.0000, 10338.2857], [348.0151, 417.9188, 0.0000, 0.0000, 459.8694, 0.0000, 0.0000, 3433.8551, 0.0000, 10194.3474], [348.0151, 417.9188, 0.0000, 0.0000, 459.8694, 0.0000, 0.0000, 3433.8551, 0.0000, 10471.0008], [101.4983, 417.9188, 0.0000, 288.6672, 459.8694, 0.0000, 0.0000, 3541.0848, 0.0000, 10411.2629], [101.4983, 417.9188, 0.0000, 288.6672, 459.8694, 0.0000, 0.0000, 3541.0848, 0.0000, 10670.0618], [101.4983, 417.9188, 0.0000, 288.6672, 459.8694, 0.0000, 0.0000, 3541.0848, 0.0000, 10652.4799], [101.4983, 417.9188, 0.0000, 288.6672, 459.8694, 0.0000, 0.0000, 3541.0848, 0.0000, 10526.1488], [101.4983, 417.9188, 0.0000, 288.6672, 459.8694, 0.0000, 0.0000, 3541.0848, 0.0000, 10458.6614], [101.4983, 417.9188, 821.7315, 288.6672, 0.0000, 2576.1284, 0.0000, 4487.0722, 0.0000, 20609.0270], [1216.3282, 417.9188, 821.7315, 288.6672, 0.0000, 1607.1030, 0.0000, 0.0000, 0.0000, 21979.4972], [1216.3282, 
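        # (Translation of the comments above: simulated backtest results for PT signals, sell
        # before buy, settlement period 0.) The column layout is inferred from the data rather
        # than documented here: the first seven values look like the holdings of
        # share1..share7, followed by cash, an always-zero column (presumably accumulated fee,
        # consistent with the zero-cost rate), and the total portfolio value; e.g. the first
        # row is 555.5556 shares of share5 at 4.50 plus 7500 cash, totalling 10000.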
417.9188, 821.7315, 288.6672, 0.0000, 1607.1030, 0.0000, 0.0000, 0.0000, 21880.9628], [1216.3282, 417.9188, 821.7315, 288.6672, 0.0000, 1607.1030, 0.0000, 0.0000, 0.0000, 21630.0454], [1216.3282, 417.9188, 821.7315, 288.6672, 0.0000, 1607.1030, 0.0000, 0.0000, 0.0000, 20968.0007], [1216.3282, 417.9188, 821.7315, 288.6672, 0.0000, 1607.1030, 0.0000, 0.0000, 0.0000, 21729.9339], [1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 1607.1030, 0.0000, 2172.0393, 0.0000, 21107.6400], [1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 1607.1030, 0.0000, 2172.0393, 0.0000, 21561.1745], [1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 1607.1030, 0.0000, 2172.0393, 0.0000, 21553.0916], [1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 1607.1030, 0.0000, 2172.0393, 0.0000, 22316.9366], [1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 1607.1030, 0.0000, 2172.0393, 0.0000, 22084.2862], [1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 1607.1030, 0.0000, 2172.0393, 0.0000, 21777.3543], [1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 1607.1030, 0.0000, 2172.0393, 0.0000, 22756.8225], [1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 1607.1030, 0.0000, 2172.0393, 0.0000, 22843.4697], [1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 1607.1030, 0.0000, 2172.0393, 0.0000, 22762.1766], [1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 1607.1030, 1448.0262, 0.0000, 0.0000, 22257.0973], [1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 1607.1030, 1448.0262, 0.0000, 0.0000, 23136.5259], [1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 1607.1030, 1448.0262, 0.0000, 0.0000, 21813.7852], [1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 1607.1030, 1448.0262, 0.0000, 0.0000, 22395.3204], [1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 1607.1030, 1448.0262, 0.0000, 0.0000, 23717.6858], [1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 1607.1030, 1448.0262, 0.0000, 0.0000, 22715.4263], [1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 669.7975, 1448.0262, 2455.7405, 0.0000, 22498.3254], [1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 669.7975, 1448.0262, 2455.7405, 0.0000, 23341.1733], [1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 669.7975, 1448.0262, 2455.7405, 0.0000, 24162.3941], [1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 669.7975, 1448.0262, 2455.7405, 0.0000, 24847.1508], [1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 669.7975, 1448.0262, 2455.7405, 0.0000, 23515.9755], [1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 669.7975, 1448.0262, 2455.7405, 0.0000, 24555.8997], [1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 669.7975, 1448.0262, 2455.7405, 0.0000, 24390.6372], [1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 669.7975, 1448.0262, 2455.7405, 0.0000, 24073.3309], [1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 669.7975, 1448.0262, 2455.7405, 0.0000, 24394.6500], [2076.3314, 903.0334, 511.8829, 288.6672, 0.0000, 669.7975, 1448.0262, 3487.5655, 0.0000, 34904.8150], [0.0000, 903.0334, 511.8829, 897.4061, 0.0000, 3514.8404, 1448.0262, 4608.8037, 0.0000, 34198.4475], [0.0000, 903.0334, 511.8829, 897.4061, 0.0000, 3514.8404, 1448.0262, 4608.8037, 0.0000, 33753.0190], [644.7274, 903.0334, 511.8829, 897.4061, 0.0000, 3514.8404, 1448.0262, 379.3918, 0.0000, 34953.8178], [644.7274, 903.0334, 511.8829, 897.4061, 0.0000, 3514.8404, 1448.0262, 379.3918, 0.0000, 33230.2498], [644.7274, 903.0334, 511.8829, 897.4061, 0.0000, 3514.8404, 1448.0262, 379.3918, 0.0000, 35026.7819], [644.7274, 903.0334, 511.8829, 897.4061, 0.0000, 3514.8404, 1448.0262, 379.3918, 0.0000, 36976.2649], [644.7274, 
903.0334, 511.8829, 897.4061, 0.0000, 3514.8404, 1448.0262, 379.3918, 0.0000, 38673.8147], [644.7274, 903.0334, 511.8829, 897.4061, 0.0000, 3514.8404, 1448.0262, 379.3918, 0.0000, 38717.3429], [644.7274, 903.0334, 511.8829, 897.4061, 0.0000, 3514.8404, 1448.0262, 379.3918, 0.0000, 36659.0854], [644.7274, 903.0334, 511.8829, 897.4061, 0.0000, 3514.8404, 1448.0262, 379.3918, 0.0000, 35877.9607], [644.7274, 1337.8498, 1071.9327, 0.0000, 1229.1495, 0.0000, 1448.0262, 2853.5665, 0.0000, 36874.4840], [644.7274, 1337.8498, 1071.9327, 0.0000, 1229.1495, 0.0000, 1448.0262, 2853.5665, 0.0000, 37010.2695], [644.7274, 1337.8498, 1071.9327, 0.0000, 1229.1495, 0.0000, 1448.0262, 2853.5665, 0.0000, 38062.3510], [644.7274, 1337.8498, 1071.9327, 0.0000, 1229.1495, 0.0000, 1448.0262, 2853.5665, 0.0000, 36471.1357], [644.7274, 1337.8498, 1071.9327, 0.0000, 1229.1495, 0.0000, 1448.0262, 2853.5665, 0.0000, 37534.9927], [644.7274, 1337.8498, 1071.9327, 0.0000, 1229.1495, 0.0000, 1448.0262, 2853.5665, 0.0000, 37520.2569], [644.7274, 1337.8498, 1071.9327, 0.0000, 1229.1495, 0.0000, 1448.0262, 2853.5665, 0.0000, 36747.7952], [644.7274, 1337.8498, 1071.9327, 0.0000, 1229.1495, 0.0000, 1448.0262, 2853.5665, 0.0000, 36387.9409], [644.7274, 1337.8498, 1071.9327, 0.0000, 1229.1495, 0.0000, 1448.0262, 2853.5665, 0.0000, 35925.9715], [644.7274, 1337.8498, 1071.9327, 0.0000, 1229.1495, 0.0000, 1448.0262, 2853.5665, 0.0000, 36950.7028], [644.7274, 1657.3981, 1071.9327, 0.0000, 1229.1495, 0.0000, 1448.0262, 0.0000, 0.0000, 37383.2463], [644.7274, 1657.3981, 1071.9327, 0.0000, 1229.1495, 0.0000, 1448.0262, 0.0000, 0.0000, 37761.2724], [644.7274, 1657.3981, 1071.9327, 0.0000, 1229.1495, 0.0000, 1448.0262, 0.0000, 0.0000, 39548.2653], [644.7274, 1657.3981, 1071.9327, 0.0000, 1229.1495, 0.0000, 1448.0262, 0.0000, 0.0000, 41435.1291], [644.7274, 1657.3981, 1071.9327, 0.0000, 1229.1495, 0.0000, 1448.0262, 0.0000, 0.0000, 41651.6261], [644.7274, 1657.3981, 1071.9327, 0.0000, 1229.1495, 0.0000, 1448.0262, 0.0000, 0.0000, 41131.9920], [644.7274, 1657.3981, 1071.9327, 0.0000, 0.0000, 0.0000, 3760.7116, 0.0000, 0.0000, 41286.4702], [644.7274, 1657.3981, 1071.9327, 0.0000, 0.0000, 0.0000, 3760.7116, 0.0000, 0.0000, 40978.7259], [644.7274, 0.0000, 1071.9327, 0.0000, 0.0000, 0.0000, 3760.7116, 17485.5497, 0.0000, 40334.5453], [644.7274, 0.0000, 1071.9327, 0.0000, 0.0000, 0.0000, 3760.7116, 17485.5497, 0.0000, 41387.9172], [644.7274, 0.0000, 1071.9327, 0.0000, 0.0000, 0.0000, 3760.7116, 17485.5497, 0.0000, 42492.6707], [644.7274, 0.0000, 1071.9327, 0.0000, 0.0000, 0.0000, 3760.7116, 17485.5497, 0.0000, 42953.7188], [644.7274, 0.0000, 1071.9327, 0.0000, 0.0000, 0.0000, 3760.7116, 17485.5497, 0.0000, 42005.1092], [644.7274, 0.0000, 1071.9327, 0.0000, 0.0000, 0.0000, 3760.7116, 17485.5497, 0.0000, 42017.9106], [644.7274, 0.0000, 1071.9327, 0.0000, 0.0000, 0.0000, 3760.7116, 17485.5497, 0.0000, 43750.2824], [644.7274, 0.0000, 1071.9327, 0.0000, 0.0000, 0.0000, 3760.7116, 17485.5497, 0.0000, 41766.8679], [0.0000, 0.0000, 2461.8404, 0.0000, 0.0000, 0.0000, 3760.7116, 12161.6930, 0.0000, 42959.1150], [0.0000, 0.0000, 2461.8404, 0.0000, 0.0000, 0.0000, 3760.7116, 12161.6930, 0.0000, 41337.9320], [0.0000, 0.0000, 2461.8404, 0.0000, 0.0000, 0.0000, 3760.7116, 12161.6930, 0.0000, 40290.3688]]) # PT信号,先买后卖,交割期为0 self.pt_res_bs00 = np.array( [[0.0000, 0.0000, 0.0000, 0.0000, 555.5556, 0.0000, 0.0000, 7500.0000, 0.0000, 10000.0000], [0.0000, 0.0000, 0.0000, 0.0000, 555.5556, 0.0000, 0.0000, 7500.0000, 0.0000, 9916.6667], [0.0000, 0.0000, 0.0000, 
0.0000, 555.5556, 0.0000, 321.0892, 6035.8333, 0.0000, 9761.1111], [348.0151, 417.9188, 0.0000, 0.0000, 555.5556, 0.0000, 321.0892, 2165.9050, 0.0000, 9674.8209], [348.0151, 417.9188, 0.0000, 0.0000, 555.5556, 0.0000, 321.0892, 2165.9050, 0.0000, 9712.5872], [348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9910.7240], [348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9919.3782], [348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9793.0692], [348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9513.8217], [348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9123.5935], [348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9000.5995], [348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9053.4865], [348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9248.7142], [348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9161.1372], [348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9197.3369], [348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9504.6981], [348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9875.2461], [348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 10241.5400], [348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 10449.2398], [348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 10628.3269], [348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 10500.7893], [348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 0.0000, 5233.1396, 0.0000, 10449.2776], [348.0151, 417.9188, 0.0000, 0.0000, 459.8694, 0.0000, 0.0000, 3433.8551, 0.0000, 10338.2857], [348.0151, 417.9188, 0.0000, 0.0000, 459.8694, 0.0000, 0.0000, 3433.8551, 0.0000, 10194.3474], [348.0151, 417.9188, 0.0000, 0.0000, 459.8694, 0.0000, 0.0000, 3433.8551, 0.0000, 10471.0008], [101.4983, 417.9188, 0.0000, 288.6672, 459.8694, 0.0000, 0.0000, 3541.0848, 0.0000, 10411.2629], [101.4983, 417.9188, 0.0000, 288.6672, 459.8694, 0.0000, 0.0000, 3541.0848, 0.0000, 10670.0618], [101.4983, 417.9188, 0.0000, 288.6672, 459.8694, 0.0000, 0.0000, 3541.0848, 0.0000, 10652.4799], [101.4983, 417.9188, 0.0000, 288.6672, 459.8694, 0.0000, 0.0000, 3541.0848, 0.0000, 10526.1488], [101.4983, 417.9188, 0.0000, 288.6672, 459.8694, 0.0000, 0.0000, 3541.0848, 0.0000, 10458.6614], [101.4983, 417.9188, 821.7315, 288.6672, 0.0000, 2576.1284, 0.0000, 4487.0722, 0.0000, 20609.0270], [797.1684, 417.9188, 821.7315, 288.6672, 0.0000, 1607.1030, 0.0000, 2703.5808, 0.0000, 21979.4972], [1190.1307, 417.9188, 821.7315, 288.6672, 0.0000, 1607.1030, 0.0000, 0.0000, 0.0000, 21700.7241], [1190.1307, 417.9188, 821.7315, 288.6672, 0.0000, 1607.1030, 0.0000, 0.0000, 0.0000, 21446.6630], [1190.1307, 417.9188, 821.7315, 288.6672, 0.0000, 1607.1030, 0.0000, 0.0000, 0.0000, 20795.3593], [1190.1307, 417.9188, 821.7315, 288.6672, 0.0000, 1607.1030, 0.0000, 0.0000, 0.0000, 21557.2924], [1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 1607.1030, 0.0000, 2201.6110, 0.0000, 20933.6887], [1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 1607.1030, 0.0000, 2201.6110, 0.0000, 21392.5581], [1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 1607.1030, 0.0000, 2201.6110, 0.0000, 
21390.2918], [1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 1607.1030, 0.0000, 2201.6110, 0.0000, 22147.7562], [1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 1607.1030, 0.0000, 2201.6110, 0.0000, 21910.9053], [1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 1607.1030, 0.0000, 2201.6110, 0.0000, 21594.2980], [1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 1607.1030, 0.0000, 2201.6110, 0.0000, 22575.4380], [1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 1607.1030, 0.0000, 2201.6110, 0.0000, 22655.8312], [1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 1607.1030, 0.0000, 2201.6110, 0.0000, 22578.4365], [1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 1607.1030, 1467.7407, 0.0000, 0.0000, 22073.2661], [1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 1607.1030, 1467.7407, 0.0000, 0.0000, 22955.2367], [1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 1607.1030, 1467.7407, 0.0000, 0.0000, 21628.1647], [1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 1607.1030, 1467.7407, 0.0000, 0.0000, 22203.4237], [1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 1607.1030, 1467.7407, 0.0000, 0.0000, 23516.2598], [1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 699.3848, 1467.7407, 2278.3728, 0.0000, 22505.8428], [1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 699.3848, 1467.7407, 2278.3728, 0.0000, 22199.1042], [1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 699.3848, 1467.7407, 2278.3728, 0.0000, 23027.9302], [1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 699.3848, 1467.7407, 2278.3728, 0.0000, 23848.5806], [1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 699.3848, 1467.7407, 2278.3728, 0.0000, 24540.8871], [1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 699.3848, 1467.7407, 2278.3728, 0.0000, 23205.6838], [1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 699.3848, 1467.7407, 2278.3728, 0.0000, 24267.6685], [1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 699.3848, 1467.7407, 2278.3728, 0.0000, 24115.3796], [1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 699.3848, 1467.7407, 2278.3728, 0.0000, 23814.3667], [1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 699.3848, 1467.7407, 2278.3728, 0.0000, 24133.6611], [2061.6837, 896.6628, 507.6643, 288.6672, 0.0000, 699.3848, 1467.7407, 3285.8830, 0.0000, 34658.5742], [0.0000, 896.6628, 507.6643, 466.6033, 0.0000, 1523.7106, 1467.7407, 12328.8684, 0.0000, 33950.7917], [0.0000, 896.6628, 507.6643, 936.6623, 0.0000, 3464.7832, 1467.7407, 4380.3797, 0.0000, 33711.4045], [644.1423, 896.6628, 507.6643, 936.6623, 0.0000, 3464.7832, 1467.7407, 154.8061, 0.0000, 34922.0959], [644.1423, 896.6628, 507.6643, 936.6623, 0.0000, 3464.7832, 1467.7407, 154.8061, 0.0000, 33237.1081], [644.1423, 896.6628, 507.6643, 936.6623, 0.0000, 3464.7832, 1467.7407, 154.8061, 0.0000, 35031.8071], [644.1423, 896.6628, 507.6643, 936.6623, 0.0000, 3464.7832, 1467.7407, 154.8061, 0.0000, 36976.3376], [644.1423, 896.6628, 507.6643, 936.6623, 0.0000, 3464.7832, 1467.7407, 154.8061, 0.0000, 38658.5245], [644.1423, 896.6628, 507.6643, 936.6623, 0.0000, 3464.7832, 1467.7407, 154.8061, 0.0000, 38712.2854], [644.1423, 896.6628, 507.6643, 936.6623, 0.0000, 3464.7832, 1467.7407, 154.8061, 0.0000, 36655.3125], [644.1423, 896.6628, 507.6643, 936.6623, 0.0000, 3464.7832, 1467.7407, 154.8061, 0.0000, 35904.3692], [644.1423, 902.2617, 514.8253, 0.0000, 15.5990, 0.0000, 1467.7407, 14821.9004, 0.0000, 36873.9080], [644.1423, 902.2617, 514.8253, 0.0000, 1220.8683, 0.0000, 1467.7407, 10470.8781, 0.0000, 36727.7895], [644.1423, 1338.1812, 1033.4242, 0.0000, 1220.8683, 0.0000, 1467.7407, 
2753.1120, 0.0000, 37719.9840], [644.1423, 1338.1812, 1033.4242, 0.0000, 1220.8683, 0.0000, 1467.7407, 2753.1120, 0.0000, 36138.1277], [644.1423, 1338.1812, 1033.4242, 0.0000, 1220.8683, 0.0000, 1467.7407, 2753.1120, 0.0000, 37204.0760], [644.1423, 1338.1812, 1033.4242, 0.0000, 1220.8683, 0.0000, 1467.7407, 2753.1120, 0.0000, 37173.1201], [644.1423, 1338.1812, 1033.4242, 0.0000, 1220.8683, 0.0000, 1467.7407, 2753.1120, 0.0000, 36398.2298], [644.1423, 1338.1812, 1033.4242, 0.0000, 1220.8683, 0.0000, 1467.7407, 2753.1120, 0.0000, 36034.2178], [644.1423, 1338.1812, 1033.4242, 0.0000, 1220.8683, 0.0000, 1467.7407, 2753.1120, 0.0000, 35583.6399], [644.1423, 1338.1812, 1033.4242, 0.0000, 1220.8683, 0.0000, 1467.7407, 2753.1120, 0.0000, 36599.2645], [644.1423, 1646.4805, 1033.4242, 0.0000, 1220.8683, 0.0000, 1467.7407, 0.0000, 0.0000, 37013.3408], [644.1423, 1646.4805, 1033.4242, 0.0000, 1220.8683, 0.0000, 1467.7407, 0.0000, 0.0000, 37367.7449], [644.1423, 1646.4805, 1033.4242, 0.0000, 1220.8683, 0.0000, 1467.7407, 0.0000, 0.0000, 39143.8273], [644.1423, 1646.4805, 1033.4242, 0.0000, 1220.8683, 0.0000, 1467.7407, 0.0000, 0.0000, 41007.3074], [644.1423, 1646.4805, 1033.4242, 0.0000, 1220.8683, 0.0000, 1467.7407, 0.0000, 0.0000, 41225.4657], [644.1423, 1646.4805, 1033.4242, 0.0000, 1220.8683, 0.0000, 1467.7407, 0.0000, 0.0000, 40685.9525], [644.1423, 1646.4805, 1033.4242, 0.0000, 0.0000, 0.0000, 1467.7407, 6592.6891, 0.0000, 40851.5435], [644.1423, 1646.4805, 1033.4242, 0.0000, 0.0000, 0.0000, 3974.4666, 0.0000, 0.0000, 41082.1210], [644.1423, 0.0000, 1033.4242, 0.0000, 0.0000, 0.0000, 3974.4666, 17370.3689, 0.0000, 40385.0135], [644.1423, 0.0000, 1033.4242, 0.0000, 0.0000, 0.0000, 3974.4666, 17370.3689, 0.0000, 41455.1513], [644.1423, 0.0000, 1033.4242, 0.0000, 0.0000, 0.0000, 3974.4666, 17370.3689, 0.0000, 42670.6769], [644.1423, 0.0000, 1033.4242, 0.0000, 0.0000, 0.0000, 3974.4666, 17370.3689, 0.0000, 43213.7233], [644.1423, 0.0000, 1033.4242, 0.0000, 0.0000, 0.0000, 3974.4666, 17370.3689, 0.0000, 42205.2480], [644.1423, 0.0000, 1033.4242, 0.0000, 0.0000, 0.0000, 3974.4666, 17370.3689, 0.0000, 42273.9386], [644.1423, 0.0000, 1033.4242, 0.0000, 0.0000, 0.0000, 3974.4666, 17370.3689, 0.0000, 44100.0777], [644.1423, 0.0000, 1033.4242, 0.0000, 0.0000, 0.0000, 3974.4666, 17370.3689, 0.0000, 42059.7208], [0.0000, 0.0000, 2483.9522, 0.0000, 0.0000, 0.0000, 3974.4666, 11619.4102, 0.0000, 43344.9653], [0.0000, 0.0000, 2483.9522, 0.0000, 0.0000, 0.0000, 3974.4666, 11619.4102, 0.0000, 41621.0324], [0.0000, 0.0000, 2483.9522, 0.0000, 0.0000, 0.0000, 3974.4666, 11619.4102, 0.0000, 40528.0648]]) # PT信号,先卖后买,交割期为2天(股票)0天(现金)以便利用先卖的现金继续买入 self.pt_res_sb20 = np.array( [[0.000, 0.000, 0.000, 0.000, 555.556, 0.000, 0.000, 7500.000, 0.000, 10000.000], [0.000, 0.000, 0.000, 0.000, 555.556, 0.000, 0.000, 7500.000, 0.000, 9916.667], [0.000, 0.000, 0.000, 0.000, 555.556, 0.000, 321.089, 6035.833, 0.000, 9761.111], [348.015, 417.919, 0.000, 0.000, 555.556, 0.000, 321.089, 2165.905, 0.000, 9674.821], [348.015, 417.919, 0.000, 0.000, 555.556, 0.000, 321.089, 2165.905, 0.000, 9712.587], [348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9910.724], [348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9919.378], [348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9793.069], [348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9513.822], [348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9123.593], [348.015, 
417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9000.600], [348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9053.487], [348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9248.714], [348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9161.137], [348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9197.337], [348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9504.698], [348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9875.246], [348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 10241.540], [348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 10449.240], [348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 10628.327], [348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 10500.789], [348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 0.000, 5233.140, 0.000, 10449.278], [348.015, 417.919, 0.000, 0.000, 459.869, 0.000, 0.000, 3433.855, 0.000, 10338.286], [348.015, 417.919, 0.000, 0.000, 459.869, 0.000, 0.000, 3433.855, 0.000, 10194.347], [348.015, 417.919, 0.000, 0.000, 459.869, 0.000, 0.000, 3433.855, 0.000, 10471.001], [101.498, 417.919, 0.000, 288.667, 459.869, 0.000, 0.000, 3541.085, 0.000, 10411.263], [101.498, 417.919, 0.000, 288.667, 459.869, 0.000, 0.000, 3541.085, 0.000, 10670.062], [101.498, 417.919, 0.000, 288.667, 459.869, 0.000, 0.000, 3541.085, 0.000, 10652.480], [101.498, 417.919, 0.000, 288.667, 459.869, 0.000, 0.000, 3541.085, 0.000, 10526.149], [101.498, 417.919, 0.000, 288.667, 459.869, 0.000, 0.000, 3541.085, 0.000, 10458.661], [101.498, 417.919, 821.732, 288.667, 0.000, 2576.128, 0.000, 4487.072, 0.000, 20609.027], [797.168, 417.919, 821.732, 288.667, 0.000, 2576.128, 0.000, 0.000, 0.000, 21979.497], [1156.912, 417.919, 821.732, 288.667, 0.000, 1649.148, 0.000, 0.000, 0.000, 21584.441], [1156.912, 417.919, 821.732, 288.667, 0.000, 1649.148, 0.000, 0.000, 0.000, 21309.576], [1156.912, 417.919, 821.732, 288.667, 0.000, 1649.148, 0.000, 0.000, 0.000, 20664.323], [1156.912, 417.919, 821.732, 288.667, 0.000, 1649.148, 0.000, 0.000, 0.000, 21445.597], [1156.912, 417.919, 504.579, 288.667, 0.000, 1649.148, 0.000, 2223.240, 0.000, 20806.458], [1156.912, 417.919, 504.579, 288.667, 0.000, 1649.148, 0.000, 2223.240, 0.000, 21288.441], [1156.912, 417.919, 504.579, 288.667, 0.000, 1649.148, 0.000, 2223.240, 0.000, 21294.365], [1156.912, 417.919, 504.579, 288.667, 0.000, 1649.148, 0.000, 2223.240, 0.000, 22058.784], [1156.912, 417.919, 504.579, 288.667, 0.000, 1649.148, 0.000, 2223.240, 0.000, 21805.540], [1156.912, 417.919, 504.579, 288.667, 0.000, 1649.148, 0.000, 2223.240, 0.000, 21456.333], [1481.947, 417.919, 504.579, 288.667, 0.000, 1649.148, 0.000, 0.000, 0.000, 22459.720], [1481.947, 417.919, 504.579, 288.667, 0.000, 1649.148, 0.000, 0.000, 0.000, 22611.602], [1481.947, 417.919, 504.579, 288.667, 0.000, 1649.148, 0.000, 0.000, 0.000, 22470.912], [1481.947, 417.919, 504.579, 288.667, 0.000, 1649.148, 0.000, 0.000, 0.000, 21932.634], [1481.947, 417.919, 504.579, 288.667, 0.000, 1649.148, 0.000, 0.000, 0.000, 22425.864], [1481.947, 417.919, 504.579, 288.667, 0.000, 1649.148, 0.000, 0.000, 0.000, 21460.103], [1481.947, 417.919, 504.579, 288.667, 0.000, 1649.148, 0.000, 0.000, 0.000, 22376.968], [1481.947, 417.919, 504.579, 288.667, 0.000, 763.410, 1577.904, 0.000, 0.000, 23604.295], [1481.947, 417.919, 504.579, 288.667, 0.000, 763.410, 
1577.904, 0.000, 0.000, 22704.826], [1481.947, 417.919, 504.579, 288.667, 0.000, 763.410, 1577.904, 0.000, 0.000, 22286.293], [1481.947, 417.919, 504.579, 288.667, 0.000, 763.410, 1577.904, 0.000, 0.000, 23204.755], [1481.947, 417.919, 504.579, 288.667, 0.000, 763.410, 1577.904, 0.000, 0.000, 24089.017], [1481.947, 417.919, 504.579, 288.667, 0.000, 763.410, 1577.904, 0.000, 0.000, 24768.185], [1481.947, 417.919, 504.579, 288.667, 0.000, 763.410, 1577.904, 0.000, 0.000, 23265.196], [1481.947, 417.919, 504.579, 288.667, 0.000, 763.410, 1577.904, 0.000, 0.000, 24350.540], [1481.947, 417.919, 504.579, 288.667, 0.000, 763.410, 1577.904, 0.000, 0.000, 24112.706], [1481.947, 417.919, 504.579, 288.667, 0.000, 763.410, 1577.904, 0.000, 0.000, 23709.076], [1481.947, 417.919, 504.579, 288.667, 0.000, 763.410, 1577.904, 0.000, 0.000, 24093.545], [2060.275, 896.050, 504.579, 288.667, 0.000, 763.410, 1577.904, 2835.944, 0.000, 34634.888], [578.327, 896.050, 504.579, 889.896, 0.000, 3485.427, 1577.904, 732.036, 0.000, 33912.261], [0.000, 896.050, 504.579, 889.896, 0.000, 3485.427, 1577.904, 4415.981, 0.000, 33711.951], [644.683, 896.050, 504.579, 889.896, 0.000, 3485.427, 1577.904, 186.858, 0.000, 34951.433], [644.683, 896.050, 504.579, 889.896, 0.000, 3485.427, 1577.904, 186.858, 0.000, 33224.596], [644.683, 896.050, 504.579, 889.896, 0.000, 3485.427, 1577.904, 186.858, 0.000, 35065.209], [644.683, 896.050, 504.579, 889.896, 0.000, 3485.427, 1577.904, 186.858, 0.000, 37018.699], [644.683, 896.050, 504.579, 889.896, 0.000, 3485.427, 1577.904, 186.858, 0.000, 38706.035], [644.683, 896.050, 504.579, 889.896, 0.000, 3485.427, 1577.904, 186.858, 0.000, 38724.569], [644.683, 896.050, 504.579, 889.896, 0.000, 3485.427, 1577.904, 186.858, 0.000, 36647.268], [644.683, 896.050, 504.579, 889.896, 0.000, 3485.427, 1577.904, 186.858, 0.000, 35928.930], [644.683, 1341.215, 1074.629, 0.000, 1232.241, 0.000, 1577.904, 2367.759, 0.000, 36967.229], [644.683, 1341.215, 1074.629, 0.000, 1232.241, 0.000, 1577.904, 2367.759, 0.000, 37056.598], [644.683, 1341.215, 1074.629, 0.000, 1232.241, 0.000, 1577.904, 2367.759, 0.000, 38129.862], [644.683, 1341.215, 1074.629, 0.000, 1232.241, 0.000, 1577.904, 2367.759, 0.000, 36489.333], [644.683, 1341.215, 1074.629, 0.000, 1232.241, 0.000, 1577.904, 2367.759, 0.000, 37599.602], [644.683, 1341.215, 1074.629, 0.000, 1232.241, 0.000, 1577.904, 2367.759, 0.000, 37566.823], [644.683, 1341.215, 1074.629, 0.000, 1232.241, 0.000, 1577.904, 2367.759, 0.000, 36799.280], [644.683, 1341.215, 1074.629, 0.000, 1232.241, 0.000, 1577.904, 2367.759, 0.000, 36431.196], [644.683, 1341.215, 1074.629, 0.000, 1232.241, 0.000, 1577.904, 2367.759, 0.000, 35940.942], [644.683, 1341.215, 1074.629, 0.000, 1232.241, 0.000, 1577.904, 2367.759, 0.000, 36973.050], [644.683, 1606.361, 1074.629, 0.000, 1232.241, 0.000, 1577.904, 0.000, 0.000, 37393.292], [644.683, 1606.361, 1074.629, 0.000, 1232.241, 0.000, 1577.904, 0.000, 0.000, 37711.276], [644.683, 1606.361, 1074.629, 0.000, 1232.241, 0.000, 1577.904, 0.000, 0.000, 39515.991], [644.683, 1606.361, 1074.629, 0.000, 1232.241, 0.000, 1577.904, 0.000, 0.000, 41404.440], [644.683, 1606.361, 1074.629, 0.000, 1232.241, 0.000, 1577.904, 0.000, 0.000, 41573.523], [644.683, 1606.361, 1074.629, 0.000, 1232.241, 0.000, 1577.904, 0.000, 0.000, 41011.613], [644.683, 1606.361, 1074.629, 0.000, 0.000, 0.000, 3896.406, 0.000, 0.000, 41160.181], [644.683, 1606.361, 1074.629, 0.000, 0.000, 0.000, 3896.406, 0.000, 0.000, 40815.512], [644.683, 0.000, 1074.629, 0.000, 0.000, 0.000, 
3896.406, 16947.110, 0.000, 40145.531], [644.683, 0.000, 1074.629, 0.000, 0.000, 0.000, 3896.406, 16947.110, 0.000, 41217.281], [644.683, 0.000, 1074.629, 0.000, 0.000, 0.000, 3896.406, 16947.110, 0.000, 42379.061], [644.683, 0.000, 1074.629, 0.000, 0.000, 0.000, 3896.406, 16947.110, 0.000, 42879.589], [644.683, 0.000, 1074.629, 0.000, 0.000, 0.000, 3896.406, 16947.110, 0.000, 41891.452], [644.683, 0.000, 1074.629, 0.000, 0.000, 0.000, 3896.406, 16947.110, 0.000, 41929.003], [644.683, 0.000, 1074.629, 0.000, 0.000, 0.000, 3896.406, 16947.110, 0.000, 43718.052], [644.683, 0.000, 1074.629, 0.000, 0.000, 0.000, 3896.406, 16947.110, 0.000, 41685.916], [0.000, 0.000, 2460.195, 0.000, 0.000, 0.000, 3896.406, 11653.255, 0.000, 42930.410], [0.000, 0.000, 2460.195, 0.000, 0.000, 0.000, 3896.406, 11653.255, 0.000, 41242.589], [0.000, 0.000, 2460.195, 0.000, 0.000, 0.000, 3896.406, 11653.255, 0.000, 40168.084]]) # PT信号,先买后卖,交割期为2天(股票)1天(现金) self.pt_res_bs21 = np.array([ [0.000, 0.000, 0.000, 0.000, 555.556, 0.000, 0.000, 7500.000, 0.000, 10000.000], [0.000, 0.000, 0.000, 0.000, 555.556, 0.000, 0.000, 7500.000, 0.000, 9916.667], [0.000, 0.000, 0.000, 0.000, 555.556, 0.000, 321.089, 6035.833, 0.000, 9761.111], [348.015, 417.919, 0.000, 0.000, 555.556, 0.000, 321.089, 2165.905, 0.000, 9674.821], [348.015, 417.919, 0.000, 0.000, 555.556, 0.000, 321.089, 2165.905, 0.000, 9712.587], [348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9910.724], [348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9919.378], [348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9793.069], [348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9513.822], [348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9123.593], [348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9000.600], [348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9053.487], [348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9248.714], [348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9161.137], [348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9197.337], [348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9504.698], [348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9875.246], [348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 10241.540], [348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 10449.240], [348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 10628.327], [348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 10500.789], [348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 0.000, 5233.140, 0.000, 10449.278], [348.015, 417.919, 0.000, 0.000, 459.869, 0.000, 0.000, 3433.855, 0.000, 10338.286], [348.015, 417.919, 0.000, 0.000, 459.869, 0.000, 0.000, 3433.855, 0.000, 10194.347], [348.015, 417.919, 0.000, 0.000, 459.869, 0.000, 0.000, 3433.855, 0.000, 10471.001], [101.498, 417.919, 0.000, 288.667, 459.869, 0.000, 0.000, 3541.085, 0.000, 10411.263], [101.498, 417.919, 0.000, 288.667, 459.869, 0.000, 0.000, 3541.085, 0.000, 10670.062], [101.498, 417.919, 0.000, 288.667, 459.869, 0.000, 0.000, 3541.085, 0.000, 10652.480], [101.498, 417.919, 0.000, 288.667, 459.869, 0.000, 0.000, 3541.085, 0.000, 10526.149], [101.498, 417.919, 0.000, 288.667, 459.869, 0.000, 0.000, 3541.085, 0.000, 10458.661], [101.498, 
417.919, 821.732, 288.667, 0.000, 2576.128, 0.000, 4487.072, 0.000, 20609.027], [797.168, 417.919, 821.732, 288.667, 0.000, 2576.128, 0.000, 0.000, 0.000, 21979.497], [797.168, 417.919, 821.732, 288.667, 0.000, 1649.148, 0.000, 2475.037, 0.000, 21584.441], [1150.745, 417.919, 821.732, 288.667, 0.000, 1649.148, 0.000, 0.000, 0.000, 21266.406], [1150.745, 417.919, 821.732, 288.667, 0.000, 1649.148, 0.000, 0.000, 0.000, 20623.683], [1150.745, 417.919, 821.732, 288.667, 0.000, 1649.148, 0.000, 0.000, 0.000, 21404.957], [1150.745, 417.919, 503.586, 288.667, 0.000, 1649.148, 0.000, 2230.202, 0.000, 20765.509], [1150.745, 417.919, 503.586, 288.667, 0.000, 1649.148, 0.000, 2230.202, 0.000, 21248.748], [1150.745, 417.919, 503.586, 288.667, 0.000, 1649.148, 0.000, 2230.202, 0.000, 21256.041], [1150.745, 417.919, 503.586, 288.667, 0.000, 1649.148, 0.000, 2230.202, 0.000, 22018.958], [1150.745, 417.919, 503.586, 288.667, 0.000, 1649.148, 0.000, 2230.202, 0.000, 21764.725], [1150.745, 417.919, 503.586, 288.667, 0.000, 1649.148, 0.000, 2230.202, 0.000, 21413.241], [1476.798, 417.919, 503.586, 288.667, 0.000, 1649.148, 0.000, 0.000, 0.000, 22417.021], [1476.798, 417.919, 503.586, 288.667, 0.000, 1649.148, 0.000, 0.000, 0.000, 22567.685], [1476.798, 417.919, 503.586, 288.667, 0.000, 1649.148, 0.000, 0.000, 0.000, 22427.699], [1476.798, 417.919, 503.586, 288.667, 0.000, 1649.148, 0.000, 0.000, 0.000, 21889.359], [1476.798, 417.919, 503.586, 288.667, 0.000, 1649.148, 0.000, 0.000, 0.000, 22381.938], [1476.798, 417.919, 503.586, 288.667, 0.000, 1649.148, 0.000, 0.000, 0.000, 21416.358], [1476.798, 417.919, 503.586, 288.667, 0.000, 1649.148, 0.000, 0.000, 0.000, 22332.786], [1476.798, 417.919, 503.586, 288.667, 0.000, 761.900, 0.000, 2386.698, 0.000, 23557.595], [1476.798, 417.919, 503.586, 288.667, 0.000, 761.900, 2209.906, 0.000, 0.000, 23336.992], [1476.798, 417.919, 503.586, 288.667, 0.000, 761.900, 2209.906, 0.000, 0.000, 22907.742], [1476.798, 417.919, 503.586, 288.667, 0.000, 761.900, 2209.906, 0.000, 0.000, 24059.201], [1476.798, 417.919, 503.586, 288.667, 0.000, 761.900, 2209.906, 0.000, 0.000, 24941.902], [1476.798, 417.919, 503.586, 288.667, 0.000, 761.900, 2209.906, 0.000, 0.000, 25817.514], [1476.798, 417.919, 503.586, 288.667, 0.000, 761.900, 2209.906, 0.000, 0.000, 24127.939], [1476.798, 417.919, 503.586, 288.667, 0.000, 761.900, 2209.906, 0.000, 0.000, 25459.688], [1476.798, 417.919, 503.586, 288.667, 0.000, 761.900, 2209.906, 0.000, 0.000, 25147.370], [1476.798, 417.919, 503.586, 288.667, 0.000, 761.900, 2209.906, 0.000, 0.000, 25005.842], [1476.798, 417.919, 503.586, 288.667, 0.000, 761.900, 1086.639, 2752.004, 0.000, 25598.700], [2138.154, 929.921, 503.586, 288.667, 0.000, 761.900, 1086.639, 4818.835, 0.000, 35944.098], [661.356, 929.921, 503.586, 553.843, 0.000, 1954.237, 1086.639, 8831.252, 0.000, 35237.243], [0.000, 929.921, 503.586, 553.843, 0.000, 3613.095, 1086.639, 9460.955, 0.000, 35154.442], [667.098, 929.921, 503.586, 553.843, 0.000, 3613.095, 1086.639, 5084.792, 0.000, 36166.632], [667.098, 929.921, 503.586, 553.843, 0.000, 3613.095, 1086.639, 5084.792, 0.000, 34293.883], [667.098, 929.921, 503.586, 553.843, 0.000, 3613.095, 1086.639, 5084.792, 0.000, 35976.901], [667.098, 929.921, 503.586, 553.843, 0.000, 3613.095, 1086.639, 5084.792, 0.000, 37848.552], [667.098, 929.921, 503.586, 553.843, 0.000, 3613.095, 1086.639, 5084.792, 0.000, 39512.574], [667.098, 929.921, 503.586, 553.843, 0.000, 3613.095, 1086.639, 5084.792, 0.000, 39538.024], [667.098, 929.921, 503.586, 553.843, 0.000, 
3613.095, 1086.639, 5084.792, 0.000, 37652.984], [667.098, 929.921, 503.586, 553.843, 0.000, 3613.095, 1086.639, 5084.792, 0.000, 36687.909], [667.098, 1108.871, 745.260, 0.000, 512.148, 0.000, 1086.639, 11861.593, 0.000, 37749.277], [667.098, 1108.871, 745.260, 0.000, 512.148, 0.000, 1086.639, 11861.593, 0.000, 37865.518], [667.098, 1108.871, 745.260, 0.000, 512.148, 0.000, 1086.639, 11861.593, 0.000, 38481.190], [667.098, 1108.871, 745.260, 0.000, 512.148, 0.000, 1086.639, 11861.593, 0.000, 37425.087], [667.098, 1108.871, 745.260, 0.000, 512.148, 0.000, 1086.639, 11861.593, 0.000, 38051.341], [667.098, 1108.871, 745.260, 0.000, 512.148, 0.000, 1086.639, 11861.593, 0.000, 38065.478], [667.098, 1108.871, 745.260, 0.000, 512.148, 0.000, 1086.639, 11861.593, 0.000, 37429.495], [667.098, 1108.871, 745.260, 0.000, 512.148, 0.000, 1086.639, 11861.593, 0.000, 37154.479], [667.098, 1600.830, 745.260, 0.000, 512.148, 0.000, 1086.639, 7576.628, 0.000, 36692.717], [667.098, 1600.830, 745.260, 0.000, 512.148, 0.000, 1086.639, 7576.628, 0.000, 37327.055], [667.098, 1600.830, 745.260, 0.000, 512.148, 0.000, 1086.639, 7576.628, 0.000, 37937.630], [667.098, 1600.830, 745.260, 0.000, 512.148, 0.000, 1086.639, 7576.628, 0.000, 38298.645], [667.098, 1600.830, 745.260, 0.000, 512.148, 0.000, 1086.639, 7576.628, 0.000, 39689.369], [667.098, 1600.830, 745.260, 0.000, 512.148, 0.000, 1086.639, 7576.628, 0.000, 40992.397], [667.098, 1600.830, 745.260, 0.000, 512.148, 0.000, 1086.639, 7576.628, 0.000, 41092.265], [667.098, 1600.830, 745.260, 0.000, 512.148, 0.000, 1086.639, 7576.628, 0.000, 40733.622], [667.098, 1600.830, 745.260, 0.000, 512.148, 0.000, 3726.579, 0.000, 0.000, 40708.515], [667.098, 1600.830, 745.260, 0.000, 512.148, 0.000, 3726.579, 0.000, 0.000, 40485.321], [667.098, 0.000, 745.260, 0.000, 512.148, 0.000, 3726.579, 16888.760, 0.000, 39768.059], [667.098, 0.000, 745.260, 0.000, 512.148, 0.000, 3726.579, 16888.760, 0.000, 40519.595], [667.098, 0.000, 745.260, 0.000, 512.148, 0.000, 3726.579, 16888.760, 0.000, 41590.937], [667.098, 0.000, 1283.484, 0.000, 512.148, 0.000, 3726.579, 12448.413, 0.000, 42354.983], [667.098, 0.000, 1283.484, 0.000, 512.148, 0.000, 3726.579, 12448.413, 0.000, 41175.149], [667.098, 0.000, 1283.484, 0.000, 512.148, 0.000, 3726.579, 12448.413, 0.000, 41037.902], [667.098, 0.000, 1283.484, 0.000, 512.148, 0.000, 3726.579, 12448.413, 0.000, 42706.213], [667.098, 0.000, 1283.484, 0.000, 512.148, 0.000, 3726.579, 12448.413, 0.000, 40539.205], [0.000, 0.000, 2384.452, 0.000, 512.148, 0.000, 3726.579, 9293.252, 0.000, 41608.692], [0.000, 0.000, 2384.452, 0.000, 512.148, 0.000, 3726.579, 9293.252, 0.000, 39992.148], [0.000, 0.000, 2384.452, 0.000, 512.148, 0.000, 3726.579, 9293.252, 0.000, 39134.828]]) # 模拟PS信号回测结果 # PS信号,先卖后买,交割期为0 self.ps_res_sb00 = np.array( [[0.0000, 0.0000, 0.0000, 0.0000, 555.5556, 0.0000, 0.0000, 7500.0000, 0.0000, 10000.0000], [0.0000, 0.0000, 0.0000, 0.0000, 555.5556, 0.0000, 0.0000, 7500.0000, 0.0000, 9916.6667], [0.0000, 0.0000, 0.0000, 0.0000, 555.5556, 205.0654, 321.0892, 5059.7222, 0.0000, 9761.1111], [346.9824, 416.6787, 0.0000, 0.0000, 555.5556, 205.0654, 321.0892, 1201.2775, 0.0000, 9646.1118], [346.9824, 416.6787, 191.0372, 0.0000, 555.5556, 205.0654, 321.0892, 232.7189, 0.0000, 9685.5858], [346.9824, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 1891.0523, 0.0000, 9813.2184], [346.9824, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 1891.0523, 0.0000, 9803.1288], [346.9824, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 
321.0892, 1891.0523, 0.0000, 9608.0198], [346.9824, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 1891.0523, 0.0000, 9311.5727], [346.9824, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 1891.0523, 0.0000, 8883.6246], [346.9824, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 1891.0523, 0.0000, 8751.3900], [346.9824, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 1891.0523, 0.0000, 8794.1811], [346.9824, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 1891.0523, 0.0000, 9136.5704], [231.4373, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 2472.2444, 0.0000, 9209.3588], [231.4373, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 2472.2444, 0.0000, 9093.8294], [231.4373, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 2472.2444, 0.0000, 9387.5537], [231.4373, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 2472.2444, 0.0000, 9585.9589], [231.4373, 416.6787, 95.5186, 0.0000, 138.8889, 205.0654, 321.0892, 3035.8041, 0.0000, 9928.7771], [231.4373, 416.6787, 95.5186, 0.0000, 138.8889, 205.0654, 321.0892, 3035.8041, 0.0000, 10060.3806], [231.4373, 416.6787, 95.5186, 0.0000, 138.8889, 205.0654, 321.0892, 3035.8041, 0.0000, 10281.0021], [231.4373, 416.6787, 95.5186, 0.0000, 138.8889, 205.0654, 321.0892, 3035.8041, 0.0000, 10095.5613], [231.4373, 416.6787, 95.5186, 0.0000, 138.8889, 205.0654, 0.0000, 4506.3926, 0.0000, 10029.9571], [231.4373, 416.6787, 95.5186, 0.0000, 474.2238, 205.0654, 0.0000, 2531.2699, 0.0000, 9875.6133], [231.4373, 416.6787, 95.5186, 0.0000, 474.2238, 205.0654, 0.0000, 2531.2699, 0.0000, 9614.9463], [231.4373, 416.6787, 95.5186, 0.0000, 474.2238, 205.0654, 0.0000, 2531.2699, 0.0000, 9824.1722], [115.7186, 416.6787, 95.5186, 269.8496, 474.2238, 205.0654, 0.0000, 1854.7990, 0.0000, 9732.5743], [115.7186, 416.6787, 95.5186, 269.8496, 474.2238, 205.0654, 0.0000, 1854.7990, 0.0000, 9968.3391], [115.7186, 416.6787, 95.5186, 269.8496, 474.2238, 205.0654, 0.0000, 1854.7990, 0.0000, 10056.1579], [115.7186, 416.6787, 95.5186, 269.8496, 474.2238, 205.0654, 0.0000, 1854.7990, 0.0000, 9921.4925], [115.7186, 416.6787, 95.5186, 269.8496, 474.2238, 205.0654, 0.0000, 1854.7990, 0.0000, 9894.1621], [115.7186, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 6179.7742, 0.0000, 20067.9370], [1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 21133.5080], [1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 20988.8485], [1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 20596.7429], [1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 19910.7730], [1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 20776.7070], [1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 20051.7969], [1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 20725.3884], [1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 20828.8795], [1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 21647.1811], [1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 21310.1687], [1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 20852.0993], [1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 21912.3952], [1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 
1877.3934, 0.0000, 0.0000, 0.0000, 21937.8282], [1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 21962.4576], [1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 938.6967, 1339.2073, 0.0000, 0.0000, 21389.4018], [1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 938.6967, 1339.2073, 0.0000, 0.0000, 22027.4535], [1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 938.6967, 1339.2073, 0.0000, 0.0000, 20939.9992], [1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 938.6967, 1339.2073, 0.0000, 0.0000, 21250.0636], [1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 938.6967, 1339.2073, 0.0000, 0.0000, 22282.7812], [1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 938.6967, 1339.2073, 0.0000, 0.0000, 21407.0658], [1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 938.6967, 1339.2073, 0.0000, 0.0000, 21160.2373], [1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 938.6967, 1339.2073, 0.0000, 0.0000, 21826.7682], [1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 938.6967, 1339.2073, 0.0000, 0.0000, 22744.9403], [1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 938.6967, 1339.2073, 0.0000, 0.0000, 23466.1185], [1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 938.6967, 1339.2073, 0.0000, 0.0000, 22017.8821], [1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 938.6967, 1339.2073, 0.0000, 0.0000, 23191.4662], [1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 938.6967, 1339.2073, 0.0000, 0.0000, 23099.0822], [1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 938.6967, 1339.2073, 0.0000, 0.0000, 22684.7671], [1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 938.6967, 1339.2073, 0.0000, 0.0000, 22842.1346], [1073.8232, 416.6787, 735.6442, 269.8496, 1785.2055, 938.6967, 1339.2073, 5001.4246, 0.0000, 33323.8359], [0.0000, 416.6787, 735.6442, 944.9611, 1785.2055, 3582.8836, 1339.2073, 0.0000, 0.0000, 32820.2901], [0.0000, 416.6787, 735.6442, 944.9611, 1785.2055, 3582.8836, 1339.2073, 0.0000, 0.0000, 32891.2308], [0.0000, 416.6787, 735.6442, 944.9611, 1785.2055, 3582.8836, 1339.2073, 0.0000, 0.0000, 34776.5296], [0.0000, 416.6787, 735.6442, 944.9611, 1785.2055, 3582.8836, 1339.2073, 0.0000, 0.0000, 33909.0325], [0.0000, 416.6787, 735.6442, 944.9611, 1785.2055, 3582.8836, 1339.2073, 0.0000, 0.0000, 34560.1906], [0.0000, 416.6787, 735.6442, 944.9611, 1785.2055, 3582.8836, 1339.2073, 0.0000, 0.0000, 36080.4552], [0.0000, 416.6787, 735.6442, 944.9611, 1785.2055, 3582.8836, 1339.2073, 0.0000, 0.0000, 38618.4454], [0.0000, 416.6787, 735.6442, 944.9611, 1785.2055, 3582.8836, 1339.2073, 0.0000, 0.0000, 38497.9230], [0.0000, 416.6787, 735.6442, 944.9611, 1785.2055, 3582.8836, 1339.2073, 0.0000, 0.0000, 37110.0991], [0.0000, 416.6787, 735.6442, 944.9611, 1785.2055, 3582.8836, 1339.2073, 0.0000, 0.0000, 35455.2467], [0.0000, 416.6787, 735.6442, 0.0000, 1785.2055, 0.0000, 1339.2073, 15126.2788, 0.0000, 35646.1860], [0.0000, 416.6787, 735.6442, 0.0000, 1785.2055, 0.0000, 1339.2073, 15126.2788, 0.0000, 35472.3020], [0.0000, 416.6787, 735.6442, 0.0000, 1785.2055, 0.0000, 1339.2073, 15126.2788, 0.0000, 36636.4694], [0.0000, 416.6787, 735.6442, 0.0000, 1785.2055, 0.0000, 1339.2073, 15126.2788, 0.0000, 35191.7035], [0.0000, 416.6787, 735.6442, 0.0000, 1785.2055, 0.0000, 1339.2073, 15126.2788, 0.0000, 36344.2242], [0.0000, 416.6787, 735.6442, 0.0000, 1785.2055, 0.0000, 1339.2073, 15126.2788, 0.0000, 36221.6005], [0.0000, 416.6787, 735.6442, 0.0000, 1785.2055, 0.0000, 1339.2073, 15126.2788, 0.0000, 35943.5708], [0.0000, 416.6787, 735.6442, 0.0000, 1785.2055, 0.0000, 1339.2073, 15126.2788, 0.0000, 
35708.2608], [0.0000, 416.6787, 735.6442, 0.0000, 1785.2055, 0.0000, 1339.2073, 15126.2788, 0.0000, 35589.0286], [0.0000, 416.6787, 735.6442, 0.0000, 1785.2055, 0.0000, 1339.2073, 15126.2788, 0.0000, 36661.0285], [0.0000, 823.2923, 735.6442, 0.0000, 1785.2055, 0.0000, 1339.2073, 11495.2197, 0.0000, 36310.5909], [0.0000, 823.2923, 735.6442, 0.0000, 1785.2055, 0.0000, 1339.2073, 11495.2197, 0.0000, 36466.7637], [0.0000, 823.2923, 735.6442, 0.0000, 1785.2055, 0.0000, 1339.2073, 11495.2197, 0.0000, 37784.4918], [0.0000, 823.2923, 735.6442, 0.0000, 1785.2055, 0.0000, 1339.2073, 11495.2197, 0.0000, 39587.6766], [0.0000, 823.2923, 735.6442, 0.0000, 1785.2055, 0.0000, 1339.2073, 11495.2197, 0.0000, 40064.0191], [0.0000, 823.2923, 735.6442, 0.0000, 1785.2055, 0.0000, 1339.2073, 11495.2197, 0.0000, 39521.6439], [0.0000, 823.2923, 735.6442, 0.0000, 0.0000, 0.0000, 2730.5758, 17142.1018, 0.0000, 39932.2761], [0.0000, 823.2923, 735.6442, 0.0000, 0.0000, 0.0000, 2730.5758, 17142.1018, 0.0000, 39565.2475], [0.0000, 0.0000, 735.6442, 0.0000, 0.0000, 0.0000, 2730.5758, 25827.8351, 0.0000, 38943.1632], [0.0000, 0.0000, 735.6442, 0.0000, 0.0000, 0.0000, 2730.5758, 25827.8351, 0.0000, 39504.1184], [0.0000, 0.0000, 735.6442, 0.0000, 0.0000, 0.0000, 2730.5758, 25827.8351, 0.0000, 40317.8004], [0.0000, 0.0000, 735.6442, 0.0000, 0.0000, 0.0000, 2730.5758, 25827.8351, 0.0000, 40798.5768], [0.0000, 0.0000, 735.6442, 0.0000, 0.0000, 0.0000, 2730.5758, 25827.8351, 0.0000, 39962.5711], [0.0000, 0.0000, 735.6442, 0.0000, 0.0000, 0.0000, 2730.5758, 25827.8351, 0.0000, 40194.4793], [0.0000, 0.0000, 735.6442, 0.0000, 0.0000, 0.0000, 2730.5758, 25827.8351, 0.0000, 41260.4003], [0.0000, 0.0000, 735.6442, 0.0000, 0.0000, 0.0000, 2730.5758, 25827.8351, 0.0000, 39966.3024], [0.0000, 0.0000, 1613.4518, 0.0000, 0.0000, 0.0000, 2730.5758, 19700.7377, 0.0000, 40847.3160], [0.0000, 0.0000, 1613.4518, 0.0000, 0.0000, 0.0000, 2730.5758, 19700.7377, 0.0000, 39654.5445], [0.0000, 0.0000, 1613.4518, 0.0000, 0.0000, 0.0000, 2730.5758, 19700.7377, 0.0000, 38914.8151]]) # PS信号,先买后卖,交割期为0 self.ps_res_bs00 = np.array( [[0.0000, 0.0000, 0.0000, 0.0000, 555.5556, 0.0000, 0.0000, 7500.0000, 0.0000, 10000.0000], [0.0000, 0.0000, 0.0000, 0.0000, 555.5556, 0.0000, 0.0000, 7500.0000, 0.0000, 9916.6667], [0.0000, 0.0000, 0.0000, 0.0000, 555.5556, 205.0654, 321.0892, 5059.7222, 0.0000, 9761.1111], [346.9824, 416.6787, 0.0000, 0.0000, 555.5556, 205.0654, 321.0892, 1201.2775, 0.0000, 9646.1118], [346.9824, 416.6787, 191.0372, 0.0000, 555.5556, 205.0654, 321.0892, 232.7189, 0.0000, 9685.5858], [346.9824, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 1891.0523, 0.0000, 9813.2184], [346.9824, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 1891.0523, 0.0000, 9803.1288], [346.9824, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 1891.0523, 0.0000, 9608.0198], [346.9824, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 1891.0523, 0.0000, 9311.5727], [346.9824, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 1891.0523, 0.0000, 8883.6246], [346.9824, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 1891.0523, 0.0000, 8751.3900], [346.9824, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 1891.0523, 0.0000, 8794.1811], [346.9824, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 1891.0523, 0.0000, 9136.5704], [231.4373, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 2472.2444, 0.0000, 9209.3588], [231.4373, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 
2472.2444, 0.0000, 9093.8294], [231.4373, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 2472.2444, 0.0000, 9387.5537], [231.4373, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 2472.2444, 0.0000, 9585.9589], [231.4373, 416.6787, 95.5186, 0.0000, 138.8889, 205.0654, 321.0892, 3035.8041, 0.0000, 9928.7771], [231.4373, 416.6787, 95.5186, 0.0000, 138.8889, 205.0654, 321.0892, 3035.8041, 0.0000, 10060.3806], [231.4373, 416.6787, 95.5186, 0.0000, 138.8889, 205.0654, 321.0892, 3035.8041, 0.0000, 10281.0021], [231.4373, 416.6787, 95.5186, 0.0000, 138.8889, 205.0654, 321.0892, 3035.8041, 0.0000, 10095.5613], [231.4373, 416.6787, 95.5186, 0.0000, 138.8889, 205.0654, 0.0000, 4506.3926, 0.0000, 10029.9571], [231.4373, 416.6787, 95.5186, 0.0000, 474.2238, 205.0654, 0.0000, 2531.2699, 0.0000, 9875.6133], [231.4373, 416.6787, 95.5186, 0.0000, 474.2238, 205.0654, 0.0000, 2531.2699, 0.0000, 9614.9463], [231.4373, 416.6787, 95.5186, 0.0000, 474.2238, 205.0654, 0.0000, 2531.2699, 0.0000, 9824.1722], [115.7186, 416.6787, 95.5186, 269.8496, 474.2238, 205.0654, 0.0000, 1854.7990, 0.0000, 9732.5743], [115.7186, 416.6787, 95.5186, 269.8496, 474.2238, 205.0654, 0.0000, 1854.7990, 0.0000, 9968.3391], [115.7186, 416.6787, 95.5186, 269.8496, 474.2238, 205.0654, 0.0000, 1854.7990, 0.0000, 10056.1579], [115.7186, 416.6787, 95.5186, 269.8496, 474.2238, 205.0654, 0.0000, 1854.7990, 0.0000, 9921.4925], [115.7186, 416.6787, 95.5186, 269.8496, 474.2238, 205.0654, 0.0000, 1854.7990, 0.0000, 9894.1621], [115.7186, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 6179.7742, 0.0000, 20067.9370], [1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 21133.5080], [1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 20988.8485], [1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 20596.7429], [1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 19910.7730], [1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 20776.7070], [1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 20051.7969], [1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 20725.3884], [1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 20828.8795], [1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 21647.1811], [1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 21310.1687], [1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 20852.0993], [1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 21912.3952], [1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 21937.8282], [1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 21962.4576], [1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 938.6967, 0.0000, 2008.8110, 0.0000, 21389.4018], [1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 938.6967, 0.0000, 2008.8110, 0.0000, 21625.6913], [1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 938.6967, 0.0000, 2008.8110, 0.0000, 20873.0389], [1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 938.6967, 0.0000, 2008.8110, 0.0000, 21450.9447], [1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 938.6967, 0.0000, 2008.8110, 0.0000, 22269.3892], [1073.8232, 737.0632, 735.6442, 269.8496, 0.0000, 938.6967, 0.0000, 
0.0000, 0.0000, 21969.5329], [1073.8232, 737.0632, 735.6442, 269.8496, 0.0000, 938.6967, 0.0000, 0.0000, 0.0000, 21752.6924], [1073.8232, 737.0632, 735.6442, 269.8496, 0.0000, 938.6967, 0.0000, 0.0000, 0.0000, 22000.6088], [1073.8232, 737.0632, 735.6442, 269.8496, 0.0000, 938.6967, 0.0000, 0.0000, 0.0000, 23072.5655], [1073.8232, 737.0632, 735.6442, 269.8496, 0.0000, 938.6967, 0.0000, 0.0000, 0.0000, 23487.5201], [1073.8232, 737.0632, 735.6442, 269.8496, 0.0000, 938.6967, 0.0000, 0.0000, 0.0000, 22441.0460], [1073.8232, 737.0632, 735.6442, 269.8496, 0.0000, 938.6967, 0.0000, 0.0000, 0.0000, 23201.2700], [1073.8232, 737.0632, 735.6442, 269.8496, 0.0000, 938.6967, 0.0000, 0.0000, 0.0000, 23400.9485], [1073.8232, 737.0632, 735.6442, 269.8496, 0.0000, 938.6967, 0.0000, 0.0000, 0.0000, 22306.2008], [1073.8232, 737.0632, 735.6442, 269.8496, 0.0000, 938.6967, 0.0000, 0.0000, 0.0000, 21989.5913], [1073.8232, 737.0632, 735.6442, 269.8496, 1708.7766, 938.6967, 0.0000, 5215.4255, 0.0000, 31897.1636], [0.0000, 737.0632, 735.6442, 578.0898, 1708.7766, 2145.9711, 0.0000, 6421.4626, 0.0000, 31509.5059], [0.0000, 737.0632, 735.6442, 578.0898, 1708.7766, 2145.9711, 0.0000, 6421.4626, 0.0000, 31451.7888], [978.8815, 737.0632, 735.6442, 578.0898, 1708.7766, 2145.9711, 0.0000, 0.0000, 0.0000, 32773.4592], [978.8815, 737.0632, 735.6442, 578.0898, 1708.7766, 2145.9711, 0.0000, 0.0000, 0.0000, 32287.0318], [978.8815, 737.0632, 735.6442, 578.0898, 1708.7766, 2145.9711, 0.0000, 0.0000, 0.0000, 32698.1938], [978.8815, 737.0632, 735.6442, 578.0898, 1708.7766, 2145.9711, 0.0000, 0.0000, 0.0000, 34031.5183], [978.8815, 737.0632, 735.6442, 578.0898, 1708.7766, 2145.9711, 0.0000, 0.0000, 0.0000, 35537.8336], [978.8815, 737.0632, 735.6442, 578.0898, 1708.7766, 2145.9711, 0.0000, 0.0000, 0.0000, 36212.6487], [978.8815, 737.0632, 735.6442, 578.0898, 1708.7766, 2145.9711, 0.0000, 0.0000, 0.0000, 36007.5294], [978.8815, 737.0632, 735.6442, 578.0898, 1708.7766, 2145.9711, 0.0000, 0.0000, 0.0000, 34691.3797], [978.8815, 737.0632, 735.6442, 0.0000, 1708.7766, 0.0000, 0.0000, 9162.7865, 0.0000, 33904.8810], [978.8815, 737.0632, 735.6442, 0.0000, 1708.7766, 0.0000, 0.0000, 9162.7865, 0.0000, 34341.6098], [978.8815, 737.0632, 735.6442, 0.0000, 1708.7766, 0.0000, 0.0000, 9162.7865, 0.0000, 35479.9505], [978.8815, 737.0632, 735.6442, 0.0000, 1708.7766, 0.0000, 0.0000, 9162.7865, 0.0000, 34418.4455], [978.8815, 737.0632, 735.6442, 0.0000, 1708.7766, 0.0000, 0.0000, 9162.7865, 0.0000, 34726.7182], [978.8815, 737.0632, 735.6442, 0.0000, 1708.7766, 0.0000, 0.0000, 9162.7865, 0.0000, 34935.0407], [978.8815, 737.0632, 735.6442, 0.0000, 1708.7766, 0.0000, 0.0000, 9162.7865, 0.0000, 34136.7505], [978.8815, 737.0632, 735.6442, 0.0000, 1708.7766, 0.0000, 0.0000, 9162.7865, 0.0000, 33804.1575], [195.7763, 737.0632, 735.6442, 0.0000, 1708.7766, 0.0000, 0.0000, 14025.8697, 0.0000, 33653.8970], [195.7763, 737.0632, 735.6442, 0.0000, 1708.7766, 0.0000, 0.0000, 14025.8697, 0.0000, 34689.8757], [195.7763, 1124.9219, 735.6442, 0.0000, 1708.7766, 0.0000, 0.0000, 10562.2913, 0.0000, 34635.7841], [195.7763, 1124.9219, 735.6442, 0.0000, 1708.7766, 0.0000, 0.0000, 10562.2913, 0.0000, 35253.2755], [195.7763, 1124.9219, 735.6442, 0.0000, 1708.7766, 0.0000, 0.0000, 10562.2913, 0.0000, 36388.1051], [195.7763, 1124.9219, 735.6442, 0.0000, 1708.7766, 0.0000, 0.0000, 10562.2913, 0.0000, 37987.4204], [195.7763, 1124.9219, 735.6442, 0.0000, 1708.7766, 0.0000, 0.0000, 10562.2913, 0.0000, 38762.2103], [195.7763, 1124.9219, 735.6442, 0.0000, 1708.7766, 0.0000, 
0.0000, 10562.2913, 0.0000, 38574.0544], [195.7763, 1124.9219, 735.6442, 0.0000, 0.0000, 0.0000, 1362.4361, 15879.4935, 0.0000, 39101.9156], [195.7763, 1124.9219, 735.6442, 0.0000, 0.0000, 0.0000, 1362.4361, 15879.4935, 0.0000, 39132.5587], [195.7763, 0.0000, 735.6442, 0.0000, 0.0000, 0.0000, 1362.4361, 27747.4200, 0.0000, 38873.2941], [195.7763, 0.0000, 735.6442, 0.0000, 0.0000, 0.0000, 1362.4361, 27747.4200, 0.0000, 39336.6594], [195.7763, 0.0000, 735.6442, 0.0000, 0.0000, 0.0000, 1362.4361, 27747.4200, 0.0000, 39565.9568], [195.7763, 0.0000, 735.6442, 0.0000, 0.0000, 0.0000, 1362.4361, 27747.4200, 0.0000, 39583.4317], [195.7763, 0.0000, 735.6442, 0.0000, 0.0000, 0.0000, 1362.4361, 27747.4200, 0.0000, 39206.8350], [195.7763, 0.0000, 735.6442, 0.0000, 0.0000, 0.0000, 1362.4361, 27747.4200, 0.0000, 39092.6551], [195.7763, 0.0000, 735.6442, 0.0000, 0.0000, 0.0000, 1362.4361, 27747.4200, 0.0000, 39666.1834], [195.7763, 0.0000, 735.6442, 0.0000, 0.0000, 0.0000, 1362.4361, 27747.4200, 0.0000, 38798.0749], [0.0000, 0.0000, 1576.8381, 0.0000, 0.0000, 0.0000, 1362.4361, 23205.2077, 0.0000, 39143.5561], [0.0000, 0.0000, 1576.8381, 0.0000, 0.0000, 0.0000, 1362.4361, 23205.2077, 0.0000, 38617.8779], [0.0000, 0.0000, 1576.8381, 0.0000, 0.0000, 0.0000, 1362.4361, 23205.2077, 0.0000, 38156.1701]]) # PS信号,先卖后买,交割期为2天(股票)1天(现金) self.ps_res_sb20 = np.array( [[0.000, 0.000, 0.000, 0.000, 555.556, 0.000, 0.000, 7500.000, 0.000, 10000.000], [0.000, 0.000, 0.000, 0.000, 555.556, 0.000, 0.000, 7500.000, 0.000, 9916.667], [0.000, 0.000, 0.000, 0.000, 555.556, 205.065, 321.089, 5059.722, 0.000, 9761.111], [346.982, 416.679, 0.000, 0.000, 555.556, 205.065, 321.089, 1201.278, 0.000, 9646.112], [346.982, 416.679, 191.037, 0.000, 555.556, 205.065, 321.089, 232.719, 0.000, 9685.586], [346.982, 416.679, 191.037, 0.000, 138.889, 205.065, 321.089, 1891.052, 0.000, 9813.218], [346.982, 416.679, 191.037, 0.000, 138.889, 205.065, 321.089, 1891.052, 0.000, 9803.129], [346.982, 416.679, 191.037, 0.000, 138.889, 205.065, 321.089, 1891.052, 0.000, 9608.020], [346.982, 416.679, 191.037, 0.000, 138.889, 205.065, 321.089, 1891.052, 0.000, 9311.573], [346.982, 416.679, 191.037, 0.000, 138.889, 205.065, 321.089, 1891.052, 0.000, 8883.625], [346.982, 416.679, 191.037, 0.000, 138.889, 205.065, 321.089, 1891.052, 0.000, 8751.390], [346.982, 416.679, 191.037, 0.000, 138.889, 205.065, 321.089, 1891.052, 0.000, 8794.181], [346.982, 416.679, 191.037, 0.000, 138.889, 205.065, 321.089, 1891.052, 0.000, 9136.570], [231.437, 416.679, 191.037, 0.000, 138.889, 205.065, 321.089, 2472.244, 0.000, 9209.359], [231.437, 416.679, 191.037, 0.000, 138.889, 205.065, 321.089, 2472.244, 0.000, 9093.829], [231.437, 416.679, 191.037, 0.000, 138.889, 205.065, 321.089, 2472.244, 0.000, 9387.554], [231.437, 416.679, 191.037, 0.000, 138.889, 205.065, 321.089, 2472.244, 0.000, 9585.959], [231.437, 416.679, 95.519, 0.000, 138.889, 205.065, 321.089, 3035.804, 0.000, 9928.777], [231.437, 416.679, 95.519, 0.000, 138.889, 205.065, 321.089, 3035.804, 0.000, 10060.381], [231.437, 416.679, 95.519, 0.000, 138.889, 205.065, 321.089, 3035.804, 0.000, 10281.002], [231.437, 416.679, 95.519, 0.000, 138.889, 205.065, 321.089, 3035.804, 0.000, 10095.561], [231.437, 416.679, 95.519, 0.000, 138.889, 205.065, 0.000, 4506.393, 0.000, 10029.957], [231.437, 416.679, 95.519, 0.000, 474.224, 205.065, 0.000, 2531.270, 0.000, 9875.613], [231.437, 416.679, 95.519, 0.000, 474.224, 205.065, 0.000, 2531.270, 0.000, 9614.946], [231.437, 416.679, 95.519, 0.000, 474.224, 205.065, 0.000, 
2531.270, 0.000, 9824.172], [115.719, 416.679, 95.519, 269.850, 474.224, 205.065, 0.000, 1854.799, 0.000, 9732.574], [115.719, 416.679, 95.519, 269.850, 474.224, 205.065, 0.000, 1854.799, 0.000, 9968.339], [115.719, 416.679, 95.519, 269.850, 474.224, 205.065, 0.000, 1854.799, 0.000, 10056.158], [115.719, 416.679, 95.519, 269.850, 474.224, 205.065, 0.000, 1854.799, 0.000, 9921.492], [115.719, 416.679, 95.519, 269.850, 474.224, 205.065, 0.000, 1854.799, 0.000, 9894.162], [115.719, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 6179.774, 0.000, 20067.937], [1073.823, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 0.000, 0.000, 21133.508], [1073.823, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 0.000, 0.000, 20988.848], [1073.823, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 0.000, 0.000, 20596.743], [1073.823, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 0.000, 0.000, 19910.773], [1073.823, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 0.000, 0.000, 20776.707], [1073.823, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 0.000, 0.000, 20051.797], [1073.823, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 0.000, 0.000, 20725.388], [1073.823, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 0.000, 0.000, 20828.880], [1073.823, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 0.000, 0.000, 21647.181], [1073.823, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 0.000, 0.000, 21310.169], [1073.823, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 0.000, 0.000, 20852.099], [1073.823, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 0.000, 0.000, 21912.395], [1073.823, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 0.000, 0.000, 21937.828], [1073.823, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 0.000, 0.000, 21962.458], [1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 21389.402], [1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 22027.453], [1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 20939.999], [1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 21250.064], [1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 22282.781], [1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 21407.066], [1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 21160.237], [1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 21826.768], [1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 22744.940], [1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 23466.118], [1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 22017.882], [1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 23191.466], [1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 23099.082], [1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 22684.767], [1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 22842.135], [1073.823, 416.679, 735.644, 269.850, 1785.205, 938.697, 1339.207, 5001.425, 0.000, 33323.836], [0.000, 416.679, 735.644, 944.961, 1785.205, 3582.884, 1339.207, 0.000, 0.000, 32820.290], [0.000, 416.679, 735.644, 944.961, 1785.205, 3582.884, 1339.207, 0.000, 0.000, 32891.231], [0.000, 416.679, 735.644, 944.961, 1785.205, 3582.884, 1339.207, 0.000, 0.000, 34776.530], [0.000, 416.679, 735.644, 944.961, 
1785.205, 3582.884, 1339.207, 0.000, 0.000, 33909.032], [0.000, 416.679, 735.644, 944.961, 1785.205, 3582.884, 1339.207, 0.000, 0.000, 34560.191], [0.000, 416.679, 735.644, 944.961, 1785.205, 3582.884, 1339.207, 0.000, 0.000, 36080.455], [0.000, 416.679, 735.644, 944.961, 1785.205, 3582.884, 1339.207, 0.000, 0.000, 38618.445], [0.000, 416.679, 735.644, 944.961, 1785.205, 3582.884, 1339.207, 0.000, 0.000, 38497.923], [0.000, 416.679, 735.644, 944.961, 1785.205, 3582.884, 1339.207, 0.000, 0.000, 37110.099], [0.000, 416.679, 735.644, 944.961, 1785.205, 3582.884, 1339.207, 0.000, 0.000, 35455.247], [0.000, 416.679, 735.644, 0.000, 1785.205, 0.000, 1339.207, 15126.279, 0.000, 35646.186], [0.000, 416.679, 735.644, 0.000, 1785.205, 0.000, 1339.207, 15126.279, 0.000, 35472.302], [0.000, 416.679, 735.644, 0.000, 1785.205, 0.000, 1339.207, 15126.279, 0.000, 36636.469], [0.000, 416.679, 735.644, 0.000, 1785.205, 0.000, 1339.207, 15126.279, 0.000, 35191.704], [0.000, 416.679, 735.644, 0.000, 1785.205, 0.000, 1339.207, 15126.279, 0.000, 36344.224], [0.000, 416.679, 735.644, 0.000, 1785.205, 0.000, 1339.207, 15126.279, 0.000, 36221.601], [0.000, 416.679, 735.644, 0.000, 1785.205, 0.000, 1339.207, 15126.279, 0.000, 35943.571], [0.000, 416.679, 735.644, 0.000, 1785.205, 0.000, 1339.207, 15126.279, 0.000, 35708.261], [0.000, 416.679, 735.644, 0.000, 1785.205, 0.000, 1339.207, 15126.279, 0.000, 35589.029], [0.000, 416.679, 735.644, 0.000, 1785.205, 0.000, 1339.207, 15126.279, 0.000, 36661.029], [0.000, 823.292, 735.644, 0.000, 1785.205, 0.000, 1339.207, 11495.220, 0.000, 36310.591], [0.000, 823.292, 735.644, 0.000, 1785.205, 0.000, 1339.207, 11495.220, 0.000, 36466.764], [0.000, 823.292, 735.644, 0.000, 1785.205, 0.000, 1339.207, 11495.220, 0.000, 37784.492], [0.000, 823.292, 735.644, 0.000, 1785.205, 0.000, 1339.207, 11495.220, 0.000, 39587.677], [0.000, 823.292, 735.644, 0.000, 1785.205, 0.000, 1339.207, 11495.220, 0.000, 40064.019], [0.000, 823.292, 735.644, 0.000, 1785.205, 0.000, 1339.207, 11495.220, 0.000, 39521.644], [0.000, 823.292, 735.644, 0.000, 0.000, 0.000, 2730.576, 17142.102, 0.000, 39932.276], [0.000, 823.292, 735.644, 0.000, 0.000, 0.000, 2730.576, 17142.102, 0.000, 39565.248], [0.000, 0.000, 735.644, 0.000, 0.000, 0.000, 2730.576, 25827.835, 0.000, 38943.163], [0.000, 0.000, 735.644, 0.000, 0.000, 0.000, 2730.576, 25827.835, 0.000, 39504.118], [0.000, 0.000, 735.644, 0.000, 0.000, 0.000, 2730.576, 25827.835, 0.000, 40317.800], [0.000, 0.000, 735.644, 0.000, 0.000, 0.000, 2730.576, 25827.835, 0.000, 40798.577], [0.000, 0.000, 735.644, 0.000, 0.000, 0.000, 2730.576, 25827.835, 0.000, 39962.571], [0.000, 0.000, 735.644, 0.000, 0.000, 0.000, 2730.576, 25827.835, 0.000, 40194.479], [0.000, 0.000, 735.644, 0.000, 0.000, 0.000, 2730.576, 25827.835, 0.000, 41260.400], [0.000, 0.000, 735.644, 0.000, 0.000, 0.000, 2730.576, 25827.835, 0.000, 39966.302], [0.000, 0.000, 1613.452, 0.000, 0.000, 0.000, 2730.576, 19700.738, 0.000, 40847.316], [0.000, 0.000, 1613.452, 0.000, 0.000, 0.000, 2730.576, 19700.738, 0.000, 39654.544], [0.000, 0.000, 1613.452, 0.000, 0.000, 0.000, 2730.576, 19700.738, 0.000, 38914.815]]) # PS信号,先买后卖,交割期为2天(股票)1天(现金) self.ps_res_bs21 = np.array( [[0.000, 0.000, 0.000, 0.000, 555.556, 0.000, 0.000, 7500.000, 0.000, 10000.000], [0.000, 0.000, 0.000, 0.000, 555.556, 0.000, 0.000, 7500.000, 0.000, 9916.667], [0.000, 0.000, 0.000, 0.000, 555.556, 208.333, 326.206, 5020.833, 0.000, 9761.111], [351.119, 421.646, 0.000, 0.000, 555.556, 208.333, 326.206, 1116.389, 0.000, 9645.961], 
[351.119, 421.646, 190.256, 0.000, 555.556, 208.333, 326.206, 151.793, 0.000, 9686.841], [351.119, 421.646, 190.256, 0.000, 138.889, 208.333, 326.206, 1810.126, 0.000, 9813.932], [351.119, 421.646, 190.256, 0.000, 138.889, 208.333, 326.206, 1810.126, 0.000, 9803.000], [351.119, 421.646, 190.256, 0.000, 138.889, 208.333, 326.206, 1810.126, 0.000, 9605.334], [351.119, 421.646, 190.256, 0.000, 138.889, 208.333, 326.206, 1810.126, 0.000, 9304.001], [351.119, 421.646, 190.256, 0.000, 138.889, 208.333, 326.206, 1810.126, 0.000, 8870.741], [351.119, 421.646, 190.256, 0.000, 138.889, 208.333, 326.206, 1810.126, 0.000, 8738.282], [351.119, 421.646, 190.256, 0.000, 138.889, 208.333, 326.206, 1810.126, 0.000, 8780.664], [351.119, 421.646, 190.256, 0.000, 138.889, 208.333, 326.206, 1810.126, 0.000, 9126.199], [234.196, 421.646, 190.256, 0.000, 138.889, 208.333, 326.206, 2398.247, 0.000, 9199.746], [234.196, 421.646, 190.256, 0.000, 138.889, 208.333, 326.206, 2398.247, 0.000, 9083.518], [234.196, 421.646, 190.256, 0.000, 138.889, 208.333, 326.206, 2398.247, 0.000, 9380.932], [234.196, 421.646, 190.256, 0.000, 138.889, 208.333, 326.206, 2398.247, 0.000, 9581.266], [234.196, 421.646, 95.128, 0.000, 138.889, 208.333, 326.206, 2959.501, 0.000, 9927.154], [234.196, 421.646, 95.128, 0.000, 138.889, 208.333, 326.206, 2959.501, 0.000, 10059.283], [234.196, 421.646, 95.128, 0.000, 138.889, 208.333, 326.206, 2959.501, 0.000, 10281.669], [234.196, 421.646, 95.128, 0.000, 138.889, 208.333, 326.206, 2959.501, 0.000, 10093.263], [234.196, 421.646, 95.128, 0.000, 138.889, 208.333, 0.000, 4453.525, 0.000, 10026.289], [234.196, 421.646, 95.128, 0.000, 479.340, 208.333, 0.000, 2448.268, 0.000, 9870.523], [234.196, 421.646, 95.128, 0.000, 479.340, 208.333, 0.000, 2448.268, 0.000, 9606.437], [234.196, 421.646, 95.128, 0.000, 479.340, 208.333, 0.000, 2448.268, 0.000, 9818.691], [117.098, 421.646, 95.128, 272.237, 479.340, 208.333, 0.000, 1768.219, 0.000, 9726.556], [117.098, 421.646, 95.128, 272.237, 479.340, 208.333, 0.000, 1768.219, 0.000, 9964.547], [117.098, 421.646, 95.128, 272.237, 479.340, 208.333, 0.000, 1768.219, 0.000, 10053.449], [117.098, 421.646, 95.128, 272.237, 479.340, 208.333, 0.000, 1768.219, 0.000, 9917.440], [117.098, 421.646, 95.128, 272.237, 479.340, 208.333, 0.000, 1768.219, 0.000, 9889.495], [117.098, 421.646, 729.561, 272.237, 0.000, 1865.791, 0.000, 6189.948, 0.000, 20064.523], [708.171, 421.646, 729.561, 272.237, 0.000, 1865.791, 0.000, 2377.527, 0.000, 21124.484], [708.171, 421.646, 729.561, 272.237, 0.000, 1865.791, 0.000, 2377.527, 0.000, 20827.077], [708.171, 421.646, 729.561, 272.237, 0.000, 1865.791, 0.000, 2377.527, 0.000, 20396.124], [708.171, 421.646, 729.561, 272.237, 0.000, 1865.791, 0.000, 2377.527, 0.000, 19856.445], [708.171, 421.646, 729.561, 272.237, 0.000, 1865.791, 0.000, 2377.527, 0.000, 20714.156], [708.171, 421.646, 729.561, 272.237, 0.000, 1865.791, 0.000, 2377.527, 0.000, 19971.485], [708.171, 421.646, 729.561, 272.237, 0.000, 1865.791, 0.000, 2377.527, 0.000, 20733.948], [708.171, 421.646, 729.561, 272.237, 0.000, 1865.791, 0.000, 2377.527, 0.000, 20938.903], [708.171, 421.646, 729.561, 272.237, 0.000, 1865.791, 0.000, 2377.527, 0.000, 21660.772], [708.171, 421.646, 729.561, 272.237, 0.000, 1865.791, 0.000, 2377.527, 0.000, 21265.298], [708.171, 421.646, 729.561, 272.237, 0.000, 1865.791, 0.000, 2377.527, 0.000, 20684.378], [1055.763, 421.646, 729.561, 272.237, 0.000, 1865.791, 0.000, 0.000, 0.000, 21754.770], [1055.763, 421.646, 729.561, 272.237, 0.000, 1865.791, 0.000, 
0.000, 0.000, 21775.215], [1055.763, 421.646, 729.561, 272.237, 0.000, 1865.791, 0.000, 0.000, 0.000, 21801.488], [1055.763, 421.646, 729.561, 272.237, 0.000, 932.896, 0.000, 1996.397, 0.000, 21235.427], [1055.763, 421.646, 729.561, 272.237, 0.000, 932.896, 0.000, 1996.397, 0.000, 21466.714], [1055.763, 421.646, 729.561, 272.237, 0.000, 932.896, 0.000, 1996.397, 0.000, 20717.431], [1055.763, 421.646, 729.561, 272.237, 0.000, 932.896, 0.000, 1996.397, 0.000, 21294.450], [1055.763, 421.646, 729.561, 272.237, 0.000, 932.896, 0.000, 1996.397, 0.000, 22100.247], [1055.763, 740.051, 729.561, 272.237, 0.000, 932.896, 0.000, 0.000, 0.000, 21802.552], [1055.763, 740.051, 729.561, 272.237, 0.000, 932.896, 0.000, 0.000, 0.000, 21593.608], [1055.763, 740.051, 729.561, 272.237, 0.000, 932.896, 0.000, 0.000, 0.000, 21840.028], [1055.763, 740.051, 729.561, 272.237, 0.000, 932.896, 0.000, 0.000, 0.000, 22907.725], [1055.763, 740.051, 729.561, 272.237, 0.000, 932.896, 0.000, 0.000, 0.000, 23325.945], [1055.763, 740.051, 729.561, 272.237, 0.000, 932.896, 0.000, 0.000, 0.000, 22291.942], [1055.763, 740.051, 729.561, 272.237, 0.000, 932.896, 0.000, 0.000, 0.000, 23053.050], [1055.763, 740.051, 729.561, 272.237, 0.000, 932.896, 0.000, 0.000, 0.000, 23260.084], [1055.763, 740.051, 729.561, 272.237, 0.000, 932.896, 0.000, 0.000, 0.000, 22176.244], [1055.763, 740.051, 729.561, 272.237, 0.000, 932.896, 0.000, 0.000, 0.000, 21859.297], [1055.763, 740.051, 729.561, 272.237, 1706.748, 932.896, 0.000, 5221.105, 0.000, 31769.617], [0.000, 740.051, 729.561, 580.813, 1706.748, 2141.485, 0.000, 6313.462, 0.000, 31389.961], [0.000, 740.051, 729.561, 580.813, 1706.748, 2141.485, 0.000, 6313.462, 0.000, 31327.498], [962.418, 740.051, 729.561, 580.813, 1706.748, 2141.485, 0.000, 0.000, 0.000, 32647.140], [962.418, 740.051, 729.561, 580.813, 1706.748, 2141.485, 0.000, 0.000, 0.000, 32170.095], [962.418, 740.051, 729.561, 580.813, 1706.748, 2141.485, 0.000, 0.000, 0.000, 32577.742], [962.418, 740.051, 729.561, 580.813, 1706.748, 2141.485, 0.000, 0.000, 0.000, 33905.444], [962.418, 740.051, 729.561, 580.813, 1706.748, 2141.485, 0.000, 0.000, 0.000, 35414.492], [962.418, 740.051, 729.561, 580.813, 1706.748, 2141.485, 0.000, 0.000, 0.000, 36082.120], [962.418, 740.051, 729.561, 580.813, 1706.748, 2141.485, 0.000, 0.000, 0.000, 35872.293], [962.418, 740.051, 729.561, 580.813, 1706.748, 2141.485, 0.000, 0.000, 0.000, 34558.132], [962.418, 740.051, 729.561, 0.000, 1706.748, 0.000, 0.000, 9177.053, 0.000, 33778.138], [962.418, 740.051, 729.561, 0.000, 1706.748, 0.000, 0.000, 9177.053, 0.000, 34213.578], [962.418, 740.051, 729.561, 0.000, 1706.748, 0.000, 0.000, 9177.053, 0.000, 35345.791], [962.418, 740.051, 729.561, 0.000, 1706.748, 0.000, 0.000, 9177.053, 0.000, 34288.014], [962.418, 740.051, 729.561, 0.000, 1706.748, 0.000, 0.000, 9177.053, 0.000, 34604.406], [962.418, 740.051, 729.561, 0.000, 1706.748, 0.000, 0.000, 9177.053, 0.000, 34806.850], [962.418, 740.051, 729.561, 0.000, 1706.748, 0.000, 0.000, 9177.053, 0.000, 34012.232], [962.418, 740.051, 729.561, 0.000, 1706.748, 0.000, 0.000, 9177.053, 0.000, 33681.345], [192.484, 740.051, 729.561, 0.000, 1706.748, 0.000, 0.000, 13958.345, 0.000, 33540.463], [192.484, 740.051, 729.561, 0.000, 1706.748, 0.000, 0.000, 13958.345, 0.000, 34574.280], [192.484, 1127.221, 729.561, 0.000, 1706.748, 0.000, 0.000, 10500.917, 0.000, 34516.781], [192.484, 1127.221, 729.561, 0.000, 1706.748, 0.000, 0.000, 10500.917, 0.000, 35134.412], [192.484, 1127.221, 729.561, 0.000, 1706.748, 0.000, 0.000, 
10500.917, 0.000, 36266.530], [192.484, 1127.221, 729.561, 0.000, 1706.748, 0.000, 0.000, 10500.917, 0.000, 37864.376], [192.484, 1127.221, 729.561, 0.000, 1706.748, 0.000, 0.000, 10500.917, 0.000, 38642.633], [192.484, 1127.221, 729.561, 0.000, 1706.748, 0.000, 0.000, 10500.917, 0.000, 38454.227], [192.484, 1127.221, 729.561, 0.000, 0.000, 0.000, 1339.869, 15871.934, 0.000, 38982.227], [192.484, 1127.221, 729.561, 0.000, 0.000, 0.000, 1339.869, 15871.934, 0.000, 39016.154], [192.484, 0.000, 729.561, 0.000, 0.000, 0.000, 1339.869, 27764.114, 0.000, 38759.803], [192.484, 0.000, 729.561, 0.000, 0.000, 0.000, 1339.869, 27764.114, 0.000, 39217.182], [192.484, 0.000, 729.561, 0.000, 0.000, 0.000, 1339.869, 27764.114, 0.000, 39439.690], [192.484, 0.000, 729.561, 0.000, 0.000, 0.000, 1339.869, 27764.114, 0.000, 39454.081], [192.484, 0.000, 729.561, 0.000, 0.000, 0.000, 1339.869, 27764.114, 0.000, 39083.341], [192.484, 0.000, 729.561, 0.000, 0.000, 0.000, 1339.869, 27764.114, 0.000, 38968.694], [192.484, 0.000, 729.561, 0.000, 0.000, 0.000, 1339.869, 27764.114, 0.000, 39532.030], [192.484, 0.000, 729.561, 0.000, 0.000, 0.000, 1339.869, 27764.114, 0.000, 38675.507], [0.000, 0.000, 1560.697, 0.000, 0.000, 0.000, 1339.869, 23269.751, 0.000, 39013.741], [0.000, 0.000, 1560.697, 0.000, 0.000, 0.000, 1339.869, 23269.751, 0.000, 38497.668], [0.000, 0.000, 1560.697, 0.000, 0.000, 0.000, 1339.869, 23269.751, 0.000, 38042.410]]) # 模拟VS信号回测结果 # VS信号,先卖后买,交割期为0 self.vs_res_sb00 = np.array( [[0.0000, 0.0000, 0.0000, 0.0000, 500.0000, 0.0000, 0.0000, 7750.0000, 0.0000, 10000.0000], [0.0000, 0.0000, 0.0000, 0.0000, 500.0000, 0.0000, 0.0000, 7750.0000, 0.0000, 9925.0000], [0.0000, 0.0000, 0.0000, 0.0000, 500.0000, 300.0000, 300.0000, 4954.0000, 0.0000, 9785.0000], [400.0000, 400.0000, 0.0000, 0.0000, 500.0000, 300.0000, 300.0000, 878.0000, 0.0000, 9666.0000], [400.0000, 400.0000, 173.1755, 0.0000, 500.0000, 300.0000, 300.0000, 0.0000, 0.0000, 9731.0000], [400.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 1592.0000, 0.0000, 9830.9270], [400.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 1592.0000, 0.0000, 9785.8540], [400.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 1592.0000, 0.0000, 9614.3412], [400.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 1592.0000, 0.0000, 9303.1953], [400.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 1592.0000, 0.0000, 8834.4398], [400.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 1592.0000, 0.0000, 8712.7554], [400.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 1592.0000, 0.0000, 8717.9507], [400.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 1592.0000, 0.0000, 9079.1479], [200.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 2598.0000, 0.0000, 9166.0276], [200.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 2598.0000, 0.0000, 9023.6607], [200.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 2598.0000, 0.0000, 9291.6864], [200.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 2598.0000, 0.0000, 9411.6371], [200.0000, 400.0000, 0.0000, 0.0000, 100.0000, 300.0000, 300.0000, 3619.7357, 0.0000, 9706.7357], [200.0000, 400.0000, 0.0000, 0.0000, 100.0000, 300.0000, 300.0000, 3619.7357, 0.0000, 9822.7357], [200.0000, 400.0000, 0.0000, 0.0000, 100.0000, 300.0000, 300.0000, 3619.7357, 0.0000, 9986.7357], [200.0000, 400.0000, 0.0000, 0.0000, 100.0000, 300.0000, 300.0000, 3619.7357, 0.0000, 
9805.7357], [200.0000, 400.0000, 0.0000, 0.0000, 100.0000, 300.0000, 0.0000, 4993.7357, 0.0000, 9704.7357], [200.0000, 400.0000, 0.0000, 0.0000, 600.0000, 300.0000, 0.0000, 2048.7357, 0.0000, 9567.7357], [200.0000, 400.0000, 0.0000, 0.0000, 600.0000, 300.0000, 0.0000, 2048.7357, 0.0000, 9209.7357], [200.0000, 400.0000, 0.0000, 0.0000, 600.0000, 300.0000, 0.0000, 2048.7357, 0.0000, 9407.7357], [0.0000, 400.0000, 0.0000, 300.0000, 600.0000, 300.0000, 0.0000, 1779.7357, 0.0000, 9329.7357], [0.0000, 400.0000, 0.0000, 300.0000, 600.0000, 300.0000, 0.0000, 1779.7357, 0.0000, 9545.7357], [0.0000, 400.0000, 0.0000, 300.0000, 600.0000, 300.0000, 0.0000, 1779.7357, 0.0000, 9652.7357], [0.0000, 400.0000, 0.0000, 300.0000, 600.0000, 300.0000, 0.0000, 1779.7357, 0.0000, 9414.7357], [0.0000, 400.0000, 0.0000, 300.0000, 600.0000, 300.0000, 0.0000, 1779.7357, 0.0000, 9367.7357], [0.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 9319.7357, 0.0000, 19556.7357], [500.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 6094.7357, 0.0000, 20094.7357], [500.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 6094.7357, 0.0000, 19849.7357], [500.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 6094.7357, 0.0000, 19802.7357], [500.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 6094.7357, 0.0000, 19487.7357], [500.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 6094.7357, 0.0000, 19749.7357], [500.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 6094.7357, 0.0000, 19392.7357], [500.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 6094.7357, 0.0000, 19671.7357], [500.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 6094.7357, 0.0000, 19756.7357], [500.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 6094.7357, 0.0000, 20111.7357], [500.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 6094.7357, 0.0000, 19867.7357], [500.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 6094.7357, 0.0000, 19775.7357], [1100.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 1990.7357, 0.0000, 20314.7357], [1100.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 1990.7357, 0.0000, 20310.7357], [1100.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 1990.7357, 0.0000, 20253.7357], [1100.0000, 400.0000, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 1946.7357, 0.0000, 20044.7357], [1100.0000, 400.0000, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 1946.7357, 0.0000, 20495.7357], [1100.0000, 400.0000, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 1946.7357, 0.0000, 19798.7357], [1100.0000, 400.0000, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 1946.7357, 0.0000, 20103.7357], [1100.0000, 400.0000, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 1946.7357, 0.0000, 20864.7357], [1100.0000, 710.4842, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 0.0000, 0.0000, 20425.7357], [1100.0000, 710.4842, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 0.0000, 0.0000, 20137.8405], [1100.0000, 710.4842, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 0.0000, 0.0000, 20711.3567], [1100.0000, 710.4842, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 0.0000, 0.0000, 21470.3891], [1100.0000, 710.4842, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 0.0000, 0.0000, 21902.9538], [1100.0000, 710.4842, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 0.0000, 0.0000, 20962.9538], [1100.0000, 710.4842, 400.0000, 
300.0000, 300.0000, 500.0000, 600.0000, 0.0000, 0.0000, 21833.5184], [1100.0000, 710.4842, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 0.0000, 0.0000, 21941.8169], [1100.0000, 710.4842, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 0.0000, 0.0000, 21278.5184], [1100.0000, 710.4842, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 0.0000, 0.0000, 21224.4700], [1100.0000, 710.4842, 400.0000, 300.0000, 600.0000, 500.0000, 600.0000, 9160.0000, 0.0000, 31225.2119], [600.0000, 710.4842, 400.0000, 800.0000, 600.0000, 700.0000, 600.0000, 7488.0000, 0.0000, 30894.5748], [600.0000, 710.4842, 400.0000, 800.0000, 600.0000, 700.0000, 600.0000, 7488.0000, 0.0000, 30764.3811], [1100.0000, 710.4842, 400.0000, 800.0000, 600.0000, 700.0000, 600.0000, 4208.0000, 0.0000, 31815.5828], [1100.0000, 710.4842, 400.0000, 800.0000, 600.0000, 700.0000, 600.0000, 4208.0000, 0.0000, 31615.4215], [1100.0000, 710.4842, 400.0000, 800.0000, 600.0000, 700.0000, 600.0000, 4208.0000, 0.0000, 32486.1394], [1100.0000, 710.4842, 400.0000, 800.0000, 600.0000, 700.0000, 600.0000, 4208.0000, 0.0000, 33591.2847], [1100.0000, 710.4842, 400.0000, 800.0000, 600.0000, 700.0000, 600.0000, 4208.0000, 0.0000, 34056.5428], [1100.0000, 710.4842, 400.0000, 800.0000, 600.0000, 700.0000, 600.0000, 4208.0000, 0.0000, 34756.4863], [1100.0000, 710.4842, 400.0000, 800.0000, 600.0000, 700.0000, 600.0000, 4208.0000, 0.0000, 34445.5428], [1100.0000, 710.4842, 400.0000, 800.0000, 600.0000, 700.0000, 600.0000, 4208.0000, 0.0000, 34433.9541], [1100.0000, 710.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11346.0000, 0.0000, 33870.4703], [1100.0000, 710.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11346.0000, 0.0000, 34014.3010], [1100.0000, 710.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11346.0000, 0.0000, 34680.5671], [1100.0000, 710.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11346.0000, 0.0000, 33890.9945], [1100.0000, 710.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11346.0000, 0.0000, 34004.6640], [1100.0000, 710.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11346.0000, 0.0000, 34127.7768], [1100.0000, 710.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11346.0000, 0.0000, 33421.1638], [1100.0000, 710.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11346.0000, 0.0000, 33120.9057], [700.0000, 710.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 13830.0000, 0.0000, 32613.3171], [700.0000, 710.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 13830.0000, 0.0000, 33168.1558], [700.0000, 1010.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11151.0000, 0.0000, 33504.6236], [700.0000, 1010.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11151.0000, 0.0000, 33652.1318], [700.0000, 1010.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11151.0000, 0.0000, 34680.4867], [700.0000, 1010.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11151.0000, 0.0000, 35557.5191], [700.0000, 1010.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11151.0000, 0.0000, 35669.7128], [700.0000, 1010.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11151.0000, 0.0000, 35211.4466], [700.0000, 1010.4842, 400.0000, 100.0000, 0.0000, 100.0000, 900.0000, 13530.0000, 0.0000, 35550.6079], [700.0000, 1010.4842, 400.0000, 100.0000, 0.0000, 100.0000, 900.0000, 13530.0000, 0.0000, 35711.6563], [700.0000, 710.4842, 400.0000, 100.0000, 0.0000, 100.0000, 900.0000, 16695.0000, 0.0000, 35682.6079], [700.0000, 710.4842, 400.0000, 100.0000, 0.0000, 
100.0000, 900.0000, 16695.0000, 0.0000, 35880.8336], [700.0000, 710.4842, 400.0000, 100.0000, 0.0000, 100.0000, 900.0000, 16695.0000, 0.0000, 36249.8740], [700.0000, 710.4842, 400.0000, 100.0000, 0.0000, 100.0000, 900.0000, 16695.0000, 0.0000, 36071.6159], [700.0000, 710.4842, 400.0000, 100.0000, 0.0000, 100.0000, 900.0000, 16695.0000, 0.0000, 35846.1562], [700.0000, 710.4842, 400.0000, 100.0000, 0.0000, 100.0000, 900.0000, 16695.0000, 0.0000, 35773.3578], [700.0000, 710.4842, 400.0000, 100.0000, 0.0000, 100.0000, 900.0000, 16695.0000, 0.0000, 36274.9465], [700.0000, 710.4842, 400.0000, 100.0000, 0.0000, 100.0000, 900.0000, 16695.0000, 0.0000, 35739.3094], [500.0000, 710.4842, 1100.0000, 100.0000, 0.0000, 100.0000, 900.0000, 13167.0000, 0.0000, 36135.0917], [500.0000, 710.4842, 1100.0000, 100.0000, 0.0000, 100.0000, 900.0000, 13167.0000, 0.0000, 35286.5835], [500.0000, 710.4842, 1100.0000, 100.0000, 0.0000, 100.0000, 900.0000, 13167.0000, 0.0000, 35081.3658]]) # VS信号,先买后卖,交割期为0 self.vs_res_bs00 = np.array( [[0.0000, 0.0000, 0.0000, 0.0000, 500.0000, 0.0000, 0.0000, 7750, 0.0000, 10000], [0.0000, 0.0000, 0.0000, 0.0000, 500.0000, 0.0000, 0.0000, 7750, 0.0000, 9925], [0.0000, 0.0000, 0.0000, 0.0000, 500.0000, 300.0000, 300.0000, 4954, 0.0000, 9785], [400.0000, 400.0000, 0.0000, 0.0000, 500.0000, 300.0000, 300.0000, 878, 0.0000, 9666], [400.0000, 400.0000, 173.1755, 0.0000, 500.0000, 300.0000, 300.0000, 0, 0.0000, 9731], [400.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 1592, 0.0000, 9830.927022], [400.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 1592, 0.0000, 9785.854043], [400.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 1592, 0.0000, 9614.341223], [400.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 1592, 0.0000, 9303.195266], [400.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 1592, 0.0000, 8834.439842], [400.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 1592, 0.0000, 8712.755424], [400.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 1592, 0.0000, 8717.95069], [400.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 1592, 0.0000, 9079.147929], [200.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 2598, 0.0000, 9166.027613], [200.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 2598, 0.0000, 9023.66075], [200.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 2598, 0.0000, 9291.686391], [200.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 2598, 0.0000, 9411.637081], [200.0000, 400.0000, 0.0000, 0.0000, 100.0000, 300.0000, 300.0000, 3619.7357, 0.0000, 9706.7357], [200.0000, 400.0000, 0.0000, 0.0000, 100.0000, 300.0000, 300.0000, 3619.7357, 0.0000, 9822.7357], [200.0000, 400.0000, 0.0000, 0.0000, 100.0000, 300.0000, 300.0000, 3619.7357, 0.0000, 9986.7357], [200.0000, 400.0000, 0.0000, 0.0000, 100.0000, 300.0000, 300.0000, 3619.7357, 0.0000, 9805.7357], [200.0000, 400.0000, 0.0000, 0.0000, 100.0000, 300.0000, 0.0000, 4993.7357, 0.0000, 9704.7357], [200.0000, 400.0000, 0.0000, 0.0000, 600.0000, 300.0000, 0.0000, 2048.7357, 0.0000, 9567.7357], [200.0000, 400.0000, 0.0000, 0.0000, 600.0000, 300.0000, 0.0000, 2048.7357, 0.0000, 9209.7357], [200.0000, 400.0000, 0.0000, 0.0000, 600.0000, 300.0000, 0.0000, 2048.7357, 0.0000, 9407.7357], [0.0000, 400.0000, 0.0000, 300.0000, 600.0000, 300.0000, 0.0000, 1779.7357, 0.0000, 9329.7357], [0.0000, 400.0000, 0.0000, 300.0000, 600.0000, 300.0000, 0.0000, 1779.7357, 
0.0000, 9545.7357], [0.0000, 400.0000, 0.0000, 300.0000, 600.0000, 300.0000, 0.0000, 1779.7357, 0.0000, 9652.7357], [0.0000, 400.0000, 0.0000, 300.0000, 600.0000, 300.0000, 0.0000, 1779.7357, 0.0000, 9414.7357], [0.0000, 400.0000, 0.0000, 300.0000, 600.0000, 300.0000, 0.0000, 1779.7357, 0.0000, 9367.7357], [0.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 9319.7357, 0.0000, 19556.7357], [500.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 6094.7357, 0.0000, 20094.7357], [500.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 6094.7357, 0.0000, 19849.7357], [500.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 6094.7357, 0.0000, 19802.7357], [500.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 6094.7357, 0.0000, 19487.7357], [500.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 6094.7357, 0.0000, 19749.7357], [500.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 6094.7357, 0.0000, 19392.7357], [500.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 6094.7357, 0.0000, 19671.7357], [500.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 6094.7357, 0.0000, 19756.7357], [500.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 6094.7357, 0.0000, 20111.7357], [500.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 6094.7357, 0.0000, 19867.7357], [500.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 6094.7357, 0.0000, 19775.7357], [1100.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 1990.7357, 0.0000, 20314.7357], [1100.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 1990.7357, 0.0000, 20310.7357], [1100.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 1990.7357, 0.0000, 20253.7357], [1100.0000, 400.0000, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 1946.7357, 0.0000, 20044.7357], [1100.0000, 400.0000, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 1946.7357, 0.0000, 20495.7357], [1100.0000, 400.0000, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 1946.7357, 0.0000, 19798.7357], [1100.0000, 400.0000, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 1946.7357, 0.0000, 20103.7357], [1100.0000, 400.0000, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 1946.7357, 0.0000, 20864.7357], [1100.0000, 710.4842, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 0, 0.0000, 20425.7357], [1100.0000, 710.4842, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 0, 0.0000, 20137.84054], [1100.0000, 710.4842, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 0, 0.0000, 20711.35674], [1100.0000, 710.4842, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 0, 0.0000, 21470.38914], [1100.0000, 710.4842, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 0, 0.0000, 21902.95375], [1100.0000, 710.4842, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 0, 0.0000, 20962.95375], [1100.0000, 710.4842, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 0, 0.0000, 21833.51837], [1100.0000, 710.4842, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 0, 0.0000, 21941.81688], [1100.0000, 710.4842, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 0, 0.0000, 21278.51837], [1100.0000, 710.4842, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 0, 0.0000, 21224.46995], [1100.0000, 710.4842, 400.0000, 300.0000, 600.0000, 500.0000, 600.0000, 9160, 0.0000, 31225.21185], [600.0000, 710.4842, 400.0000, 800.0000, 600.0000, 700.0000, 600.0000, 7488, 0.0000, 30894.57479], [600.0000, 710.4842, 400.0000, 800.0000, 
600.0000, 700.0000, 600.0000, 7488, 0.0000, 30764.38113], [1100.0000, 710.4842, 400.0000, 800.0000, 600.0000, 700.0000, 600.0000, 4208, 0.0000, 31815.5828], [1100.0000, 710.4842, 400.0000, 800.0000, 600.0000, 700.0000, 600.0000, 4208, 0.0000, 31615.42154], [1100.0000, 710.4842, 400.0000, 800.0000, 600.0000, 700.0000, 600.0000, 4208, 0.0000, 32486.13941], [1100.0000, 710.4842, 400.0000, 800.0000, 600.0000, 700.0000, 600.0000, 4208, 0.0000, 33591.28466], [1100.0000, 710.4842, 400.0000, 800.0000, 600.0000, 700.0000, 600.0000, 4208, 0.0000, 34056.54276], [1100.0000, 710.4842, 400.0000, 800.0000, 600.0000, 700.0000, 600.0000, 4208, 0.0000, 34756.48633], [1100.0000, 710.4842, 400.0000, 800.0000, 600.0000, 700.0000, 600.0000, 4208, 0.0000, 34445.54276], [1100.0000, 710.4842, 400.0000, 800.0000, 600.0000, 700.0000, 600.0000, 4208, 0.0000, 34433.95412], [1100.0000, 710.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11346, 0.0000, 33870.47032], [1100.0000, 710.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11346, 0.0000, 34014.30104], [1100.0000, 710.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11346, 0.0000, 34680.56715], [1100.0000, 710.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11346, 0.0000, 33890.99452], [1100.0000, 710.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11346, 0.0000, 34004.66398], [1100.0000, 710.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11346, 0.0000, 34127.77683], [1100.0000, 710.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11346, 0.0000, 33421.1638], [1100.0000, 710.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11346, 0.0000, 33120.9057], [700.0000, 710.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 13830, 0.0000, 32613.31706], [700.0000, 710.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 13830, 0.0000, 33168.15579], [700.0000, 1010.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11151, 0.0000, 33504.62357], [700.0000, 1010.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11151, 0.0000, 33652.13176], [700.0000, 1010.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11151, 0.0000, 34680.4867], [700.0000, 1010.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11151, 0.0000, 35557.51909], [700.0000, 1010.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11151, 0.0000, 35669.71276], [700.0000, 1010.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11151, 0.0000, 35211.44665], [700.0000, 1010.4842, 400.0000, 100.0000, 0.0000, 100.0000, 900.0000, 13530, 0.0000, 35550.60792], [700.0000, 1010.4842, 400.0000, 100.0000, 0.0000, 100.0000, 900.0000, 13530, 0.0000, 35711.65633], [700.0000, 710.4842, 400.0000, 100.0000, 0.0000, 100.0000, 900.0000, 16695, 0.0000, 35682.60792], [700.0000, 710.4842, 400.0000, 100.0000, 0.0000, 100.0000, 900.0000, 16695, 0.0000, 35880.83362], [700.0000, 710.4842, 400.0000, 100.0000, 0.0000, 100.0000, 900.0000, 16695, 0.0000, 36249.87403], [700.0000, 710.4842, 400.0000, 100.0000, 0.0000, 100.0000, 900.0000, 16695, 0.0000, 36071.61593], [700.0000, 710.4842, 400.0000, 100.0000, 0.0000, 100.0000, 900.0000, 16695, 0.0000, 35846.15615], [700.0000, 710.4842, 400.0000, 100.0000, 0.0000, 100.0000, 900.0000, 16695, 0.0000, 35773.35783], [700.0000, 710.4842, 400.0000, 100.0000, 0.0000, 100.0000, 900.0000, 16695, 0.0000, 36274.94647], [700.0000, 710.4842, 400.0000, 100.0000, 0.0000, 100.0000, 900.0000, 16695, 0.0000, 35739.30941], [500.0000, 710.4842, 1100.0000, 100.0000, 0.0000, 100.0000, 900.0000, 13167, 0.0000, 36135.09172], 
[500.0000, 710.4842, 1100.0000, 100.0000, 0.0000, 100.0000, 900.0000, 13167, 0.0000, 35286.58353], [500.0000, 710.4842, 1100.0000, 100.0000, 0.0000, 100.0000, 900.0000, 13167, 0.0000, 35081.36584]]) # VS信号,先卖后买,交割期为2天(股票)1天(现金) self.vs_res_sb20 = np.array( [[0.000, 0.000, 0.000, 0.000, 500.000, 0.000, 0.000, 7750.000, 0.000, 10000.000], [0.000, 0.000, 0.000, 0.000, 500.000, 0.000, 0.000, 7750.000, 0.000, 9925.000], [0.000, 0.000, 0.000, 0.000, 500.000, 300.000, 300.000, 4954.000, 0.000, 9785.000], [400.000, 400.000, 0.000, 0.000, 500.000, 300.000, 300.000, 878.000, 0.000, 9666.000], [400.000, 400.000, 173.176, 0.000, 500.000, 300.000, 300.000, 0.000, 0.000, 9731.000], [400.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 1592.000, 0.000, 9830.927], [400.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 1592.000, 0.000, 9785.854], [400.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 1592.000, 0.000, 9614.341], [400.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 1592.000, 0.000, 9303.195], [400.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 1592.000, 0.000, 8834.440], [400.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 1592.000, 0.000, 8712.755], [400.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 1592.000, 0.000, 8717.951], [400.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 1592.000, 0.000, 9079.148], [200.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 2598.000, 0.000, 9166.028], [200.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 2598.000, 0.000, 9023.661], [200.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 2598.000, 0.000, 9291.686], [200.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 2598.000, 0.000, 9411.637], [200.000, 400.000, 0.000, 0.000, 100.000, 300.000, 300.000, 3619.736, 0.000, 9706.736], [200.000, 400.000, 0.000, 0.000, 100.000, 300.000, 300.000, 3619.736, 0.000, 9822.736], [200.000, 400.000, 0.000, 0.000, 100.000, 300.000, 300.000, 3619.736, 0.000, 9986.736], [200.000, 400.000, 0.000, 0.000, 100.000, 300.000, 300.000, 3619.736, 0.000, 9805.736], [200.000, 400.000, 0.000, 0.000, 100.000, 300.000, 0.000, 4993.736, 0.000, 9704.736], [200.000, 400.000, 0.000, 0.000, 600.000, 300.000, 0.000, 2048.736, 0.000, 9567.736], [200.000, 400.000, 0.000, 0.000, 600.000, 300.000, 0.000, 2048.736, 0.000, 9209.736], [200.000, 400.000, 0.000, 0.000, 600.000, 300.000, 0.000, 2048.736, 0.000, 9407.736], [0.000, 400.000, 0.000, 300.000, 600.000, 300.000, 0.000, 1779.736, 0.000, 9329.736], [0.000, 400.000, 0.000, 300.000, 600.000, 300.000, 0.000, 1779.736, 0.000, 9545.736], [0.000, 400.000, 0.000, 300.000, 600.000, 300.000, 0.000, 1779.736, 0.000, 9652.736], [0.000, 400.000, 0.000, 300.000, 600.000, 300.000, 0.000, 1779.736, 0.000, 9414.736], [0.000, 400.000, 0.000, 300.000, 600.000, 300.000, 0.000, 1779.736, 0.000, 9367.736], [0.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 9319.736, 0.000, 19556.736], [500.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 6094.736, 0.000, 20094.736], [500.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 6094.736, 0.000, 19849.736], [500.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 6094.736, 0.000, 19802.736], [500.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 6094.736, 0.000, 19487.736], [500.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 6094.736, 0.000, 19749.736], [500.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 6094.736, 0.000, 19392.736], [500.000, 400.000, 400.000, 300.000, 
300.000, 900.000, 0.000, 6094.736, 0.000, 19671.736], [500.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 6094.736, 0.000, 19756.736], [500.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 6094.736, 0.000, 20111.736], [500.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 6094.736, 0.000, 19867.736], [500.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 6094.736, 0.000, 19775.736], [1100.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 1990.736, 0.000, 20314.736], [1100.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 1990.736, 0.000, 20310.736], [1100.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 1990.736, 0.000, 20253.736], [1100.000, 400.000, 400.000, 300.000, 300.000, 500.000, 600.000, 1946.736, 0.000, 20044.736], [1100.000, 400.000, 400.000, 300.000, 300.000, 500.000, 600.000, 1946.736, 0.000, 20495.736], [1100.000, 400.000, 400.000, 300.000, 300.000, 500.000, 600.000, 1946.736, 0.000, 19798.736], [1100.000, 400.000, 400.000, 300.000, 300.000, 500.000, 600.000, 1946.736, 0.000, 20103.736], [1100.000, 400.000, 400.000, 300.000, 300.000, 500.000, 600.000, 1946.736, 0.000, 20864.736], [1100.000, 710.484, 400.000, 300.000, 300.000, 500.000, 600.000, 0.000, 0.000, 20425.736], [1100.000, 710.484, 400.000, 300.000, 300.000, 500.000, 600.000, 0.000, 0.000, 20137.841], [1100.000, 710.484, 400.000, 300.000, 300.000, 500.000, 600.000, 0.000, 0.000, 20711.357], [1100.000, 710.484, 400.000, 300.000, 300.000, 500.000, 600.000, 0.000, 0.000, 21470.389], [1100.000, 710.484, 400.000, 300.000, 300.000, 500.000, 600.000, 0.000, 0.000, 21902.954], [1100.000, 710.484, 400.000, 300.000, 300.000, 500.000, 600.000, 0.000, 0.000, 20962.954], [1100.000, 710.484, 400.000, 300.000, 300.000, 500.000, 600.000, 0.000, 0.000, 21833.518], [1100.000, 710.484, 400.000, 300.000, 300.000, 500.000, 600.000, 0.000, 0.000, 21941.817], [1100.000, 710.484, 400.000, 300.000, 300.000, 500.000, 600.000, 0.000, 0.000, 21278.518], [1100.000, 710.484, 400.000, 300.000, 300.000, 500.000, 600.000, 0.000, 0.000, 21224.470], [1100.000, 710.484, 400.000, 300.000, 600.000, 500.000, 600.000, 9160.000, 0.000, 31225.212], [600.000, 710.484, 400.000, 800.000, 600.000, 700.000, 600.000, 7488.000, 0.000, 30894.575], [600.000, 710.484, 400.000, 800.000, 600.000, 700.000, 600.000, 7488.000, 0.000, 30764.381], [1100.000, 710.484, 400.000, 800.000, 600.000, 700.000, 600.000, 4208.000, 0.000, 31815.583], [1100.000, 710.484, 400.000, 800.000, 600.000, 700.000, 600.000, 4208.000, 0.000, 31615.422], [1100.000, 710.484, 400.000, 800.000, 600.000, 700.000, 600.000, 4208.000, 0.000, 32486.139], [1100.000, 710.484, 400.000, 800.000, 600.000, 700.000, 600.000, 4208.000, 0.000, 33591.285], [1100.000, 710.484, 400.000, 800.000, 600.000, 700.000, 600.000, 4208.000, 0.000, 34056.543], [1100.000, 710.484, 400.000, 800.000, 600.000, 700.000, 600.000, 4208.000, 0.000, 34756.486], [1100.000, 710.484, 400.000, 800.000, 600.000, 700.000, 600.000, 4208.000, 0.000, 34445.543], [1100.000, 710.484, 400.000, 800.000, 600.000, 700.000, 600.000, 4208.000, 0.000, 34433.954], [1100.000, 710.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11346.000, 0.000, 33870.470], [1100.000, 710.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11346.000, 0.000, 34014.301], [1100.000, 710.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11346.000, 0.000, 34680.567], [1100.000, 710.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11346.000, 0.000, 33890.995], [1100.000, 710.484, 400.000, 100.000, 600.000, 100.000, 600.000, 
11346.000, 0.000, 34004.664], [1100.000, 710.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11346.000, 0.000, 34127.777], [1100.000, 710.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11346.000, 0.000, 33421.164], [1100.000, 710.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11346.000, 0.000, 33120.906], [700.000, 710.484, 400.000, 100.000, 600.000, 100.000, 600.000, 13830.000, 0.000, 32613.317], [700.000, 710.484, 400.000, 100.000, 600.000, 100.000, 600.000, 13830.000, 0.000, 33168.156], [700.000, 1010.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11151.000, 0.000, 33504.624], [700.000, 1010.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11151.000, 0.000, 33652.132], [700.000, 1010.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11151.000, 0.000, 34680.487], [700.000, 1010.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11151.000, 0.000, 35557.519], [700.000, 1010.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11151.000, 0.000, 35669.713], [700.000, 1010.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11151.000, 0.000, 35211.447], [700.000, 1010.484, 400.000, 100.000, 0.000, 100.000, 900.000, 13530.000, 0.000, 35550.608], [700.000, 1010.484, 400.000, 100.000, 0.000, 100.000, 900.000, 13530.000, 0.000, 35711.656], [700.000, 710.484, 400.000, 100.000, 0.000, 100.000, 900.000, 16695.000, 0.000, 35682.608], [700.000, 710.484, 400.000, 100.000, 0.000, 100.000, 900.000, 16695.000, 0.000, 35880.834], [700.000, 710.484, 400.000, 100.000, 0.000, 100.000, 900.000, 16695.000, 0.000, 36249.874], [700.000, 710.484, 400.000, 100.000, 0.000, 100.000, 900.000, 16695.000, 0.000, 36071.616], [700.000, 710.484, 400.000, 100.000, 0.000, 100.000, 900.000, 16695.000, 0.000, 35846.156], [700.000, 710.484, 400.000, 100.000, 0.000, 100.000, 900.000, 16695.000, 0.000, 35773.358], [700.000, 710.484, 400.000, 100.000, 0.000, 100.000, 900.000, 16695.000, 0.000, 36274.946], [700.000, 710.484, 400.000, 100.000, 0.000, 100.000, 900.000, 16695.000, 0.000, 35739.309], [500.000, 710.484, 1100.000, 100.000, 0.000, 100.000, 900.000, 13167.000, 0.000, 36135.092], [500.000, 710.484, 1100.000, 100.000, 0.000, 100.000, 900.000, 13167.000, 0.000, 35286.584], [500.000, 710.484, 1100.000, 100.000, 0.000, 100.000, 900.000, 13167.000, 0.000, 35081.366]]) # VS信号,先买后卖,交割期为2天(股票)1天(现金) self.vs_res_bs21 = np.array( [[0.000, 0.000, 0.000, 0.000, 500.000, 0.000, 0.000, 7750.000, 0.000, 10000.000], [0.000, 0.000, 0.000, 0.000, 500.000, 0.000, 0.000, 7750.000, 0.000, 9925.000], [0.000, 0.000, 0.000, 0.000, 500.000, 300.000, 300.000, 4954.000, 0.000, 9785.000], [400.000, 400.000, 0.000, 0.000, 500.000, 300.000, 300.000, 878.000, 0.000, 9666.000], [400.000, 400.000, 173.176, 0.000, 500.000, 300.000, 300.000, 0.000, 0.000, 9731.000], [400.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 1592.000, 0.000, 9830.927], [400.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 1592.000, 0.000, 9785.854], [400.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 1592.000, 0.000, 9614.341], [400.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 1592.000, 0.000, 9303.195], [400.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 1592.000, 0.000, 8834.440], [400.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 1592.000, 0.000, 8712.755], [400.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 1592.000, 0.000, 8717.951], [400.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 1592.000, 0.000, 9079.148], [200.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 2598.000, 0.000, 
9166.028], [200.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 2598.000, 0.000, 9023.661], [200.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 2598.000, 0.000, 9291.686], [200.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 2598.000, 0.000, 9411.637], [200.000, 400.000, 0.000, 0.000, 100.000, 300.000, 300.000, 3619.736, 0.000, 9706.736], [200.000, 400.000, 0.000, 0.000, 100.000, 300.000, 300.000, 3619.736, 0.000, 9822.736], [200.000, 400.000, 0.000, 0.000, 100.000, 300.000, 300.000, 3619.736, 0.000, 9986.736], [200.000, 400.000, 0.000, 0.000, 100.000, 300.000, 300.000, 3619.736, 0.000, 9805.736], [200.000, 400.000, 0.000, 0.000, 100.000, 300.000, 0.000, 4993.736, 0.000, 9704.736], [200.000, 400.000, 0.000, 0.000, 600.000, 300.000, 0.000, 2048.736, 0.000, 9567.736], [200.000, 400.000, 0.000, 0.000, 600.000, 300.000, 0.000, 2048.736, 0.000, 9209.736], [200.000, 400.000, 0.000, 0.000, 600.000, 300.000, 0.000, 2048.736, 0.000, 9407.736], [0.000, 400.000, 0.000, 300.000, 600.000, 300.000, 0.000, 1779.736, 0.000, 9329.736], [0.000, 400.000, 0.000, 300.000, 600.000, 300.000, 0.000, 1779.736, 0.000, 9545.736], [0.000, 400.000, 0.000, 300.000, 600.000, 300.000, 0.000, 1779.736, 0.000, 9652.736], [0.000, 400.000, 0.000, 300.000, 600.000, 300.000, 0.000, 1779.736, 0.000, 9414.736], [0.000, 400.000, 0.000, 300.000, 600.000, 300.000, 0.000, 1779.736, 0.000, 9367.736], [0.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 9319.736, 0.000, 19556.736], [500.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 6094.736, 0.000, 20094.736], [500.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 6094.736, 0.000, 19849.736], [500.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 6094.736, 0.000, 19802.736], [500.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 6094.736, 0.000, 19487.736], [500.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 6094.736, 0.000, 19749.736], [500.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 6094.736, 0.000, 19392.736], [500.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 6094.736, 0.000, 19671.736], [500.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 6094.736, 0.000, 19756.736], [500.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 6094.736, 0.000, 20111.736], [500.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 6094.736, 0.000, 19867.736], [500.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 6094.736, 0.000, 19775.736], [1100.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 1990.736, 0.000, 20314.736], [1100.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 1990.736, 0.000, 20310.736], [1100.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 1990.736, 0.000, 20253.736], [1100.000, 400.000, 400.000, 300.000, 300.000, 500.000, 600.000, 1946.736, 0.000, 20044.736], [1100.000, 400.000, 400.000, 300.000, 300.000, 500.000, 600.000, 1946.736, 0.000, 20495.736], [1100.000, 400.000, 400.000, 300.000, 300.000, 500.000, 600.000, 1946.736, 0.000, 19798.736], [1100.000, 400.000, 400.000, 300.000, 300.000, 500.000, 600.000, 1946.736, 0.000, 20103.736], [1100.000, 400.000, 400.000, 300.000, 300.000, 500.000, 600.000, 1946.736, 0.000, 20864.736], [1100.000, 710.484, 400.000, 300.000, 300.000, 500.000, 600.000, 0.000, 0.000, 20425.736], [1100.000, 710.484, 400.000, 300.000, 300.000, 500.000, 600.000, 0.000, 0.000, 20137.841], [1100.000, 710.484, 400.000, 300.000, 300.000, 500.000, 600.000, 0.000, 0.000, 20711.357], [1100.000, 710.484, 400.000, 300.000, 
300.000, 500.000, 600.000, 0.000, 0.000, 21470.389], [1100.000, 710.484, 400.000, 300.000, 300.000, 500.000, 600.000, 0.000, 0.000, 21902.954], [1100.000, 710.484, 400.000, 300.000, 300.000, 500.000, 600.000, 0.000, 0.000, 20962.954], [1100.000, 710.484, 400.000, 300.000, 300.000, 500.000, 600.000, 0.000, 0.000, 21833.518], [1100.000, 710.484, 400.000, 300.000, 300.000, 500.000, 600.000, 0.000, 0.000, 21941.817], [1100.000, 710.484, 400.000, 300.000, 300.000, 500.000, 600.000, 0.000, 0.000, 21278.518], [1100.000, 710.484, 400.000, 300.000, 300.000, 500.000, 600.000, 0.000, 0.000, 21224.470], [1100.000, 710.484, 400.000, 300.000, 600.000, 500.000, 600.000, 9160.000, 0.000, 31225.212], [600.000, 710.484, 400.000, 800.000, 600.000, 700.000, 600.000, 7488.000, 0.000, 30894.575], [600.000, 710.484, 400.000, 800.000, 600.000, 700.000, 600.000, 7488.000, 0.000, 30764.381], [1100.000, 710.484, 400.000, 800.000, 600.000, 700.000, 600.000, 4208.000, 0.000, 31815.583], [1100.000, 710.484, 400.000, 800.000, 600.000, 700.000, 600.000, 4208.000, 0.000, 31615.422], [1100.000, 710.484, 400.000, 800.000, 600.000, 700.000, 600.000, 4208.000, 0.000, 32486.139], [1100.000, 710.484, 400.000, 800.000, 600.000, 700.000, 600.000, 4208.000, 0.000, 33591.285], [1100.000, 710.484, 400.000, 800.000, 600.000, 700.000, 600.000, 4208.000, 0.000, 34056.543], [1100.000, 710.484, 400.000, 800.000, 600.000, 700.000, 600.000, 4208.000, 0.000, 34756.486], [1100.000, 710.484, 400.000, 800.000, 600.000, 700.000, 600.000, 4208.000, 0.000, 34445.543], [1100.000, 710.484, 400.000, 800.000, 600.000, 700.000, 600.000, 4208.000, 0.000, 34433.954], [1100.000, 710.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11346.000, 0.000, 33870.470], [1100.000, 710.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11346.000, 0.000, 34014.301], [1100.000, 710.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11346.000, 0.000, 34680.567], [1100.000, 710.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11346.000, 0.000, 33890.995], [1100.000, 710.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11346.000, 0.000, 34004.664], [1100.000, 710.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11346.000, 0.000, 34127.777], [1100.000, 710.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11346.000, 0.000, 33421.164], [1100.000, 710.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11346.000, 0.000, 33120.906], [700.000, 710.484, 400.000, 100.000, 600.000, 100.000, 600.000, 13830.000, 0.000, 32613.317], [700.000, 710.484, 400.000, 100.000, 600.000, 100.000, 600.000, 13830.000, 0.000, 33168.156], [700.000, 1010.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11151.000, 0.000, 33504.624], [700.000, 1010.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11151.000, 0.000, 33652.132], [700.000, 1010.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11151.000, 0.000, 34680.487], [700.000, 1010.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11151.000, 0.000, 35557.519], [700.000, 1010.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11151.000, 0.000, 35669.713], [700.000, 1010.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11151.000, 0.000, 35211.447], [700.000, 1010.484, 400.000, 100.000, 0.000, 100.000, 900.000, 13530.000, 0.000, 35550.608], [700.000, 1010.484, 400.000, 100.000, 0.000, 100.000, 900.000, 13530.000, 0.000, 35711.656], [700.000, 710.484, 400.000, 100.000, 0.000, 100.000, 900.000, 16695.000, 0.000, 35682.608], [700.000, 710.484, 400.000, 100.000, 0.000, 100.000, 900.000, 16695.000, 0.000, 35880.834], [700.000, 710.484, 400.000, 
100.000, 0.000, 100.000, 900.000, 16695.000, 0.000, 36249.874], [700.000, 710.484, 400.000, 100.000, 0.000, 100.000, 900.000, 16695.000, 0.000, 36071.616], [700.000, 710.484, 400.000, 100.000, 0.000, 100.000, 900.000, 16695.000, 0.000, 35846.156], [700.000, 710.484, 400.000, 100.000, 0.000, 100.000, 900.000, 16695.000, 0.000, 35773.358], [700.000, 710.484, 400.000, 100.000, 0.000, 100.000, 900.000, 16695.000, 0.000, 36274.946], [700.000, 710.484, 400.000, 100.000, 0.000, 100.000, 900.000, 16695.000, 0.000, 35739.309], [500.000, 710.484, 1100.000, 100.000, 0.000, 100.000, 900.000, 13167.000, 0.000, 36135.092], [500.000, 710.484, 1100.000, 100.000, 0.000, 100.000, 900.000, 13167.000, 0.000, 35286.584], [500.000, 710.484, 1100.000, 100.000, 0.000, 100.000, 900.000, 13167.000, 0.000, 35081.366]]) # Multi信号处理结果,先卖后买,使用卖出的现金买进,交割期为2天(股票)0天(现金) self.multi_res = np.array( [[0.0000, 357.2545, 0.0000, 6506.9627, 0.0000, 9965.1867], [0.0000, 357.2545, 0.0000, 6506.9627, 0.0000, 10033.0650], [0.0000, 178.6273, 0.0000, 8273.5864, 0.0000, 10034.8513], [0.0000, 178.6273, 0.0000, 8273.5864, 0.0000, 10036.6376], [150.3516, 178.6273, 0.0000, 6771.5740, 0.0000, 10019.3404], [150.3516, 178.6273, 0.0000, 6771.5740, 0.0000, 10027.7062], [150.3516, 178.6273, 0.0000, 6771.5740, 0.0000, 10030.1477], [150.3516, 178.6273, 0.0000, 6771.5740, 0.0000, 10005.1399], [150.3516, 178.6273, 0.0000, 6771.5740, 0.0000, 10002.5054], [150.3516, 489.4532, 0.0000, 3765.8877, 0.0000, 9967.3860], [75.1758, 391.5625, 0.0000, 5490.1377, 0.0000, 10044.4059], [75.1758, 391.5625, 0.0000, 5490.1377, 0.0000, 10078.1430], [75.1758, 391.5625, 846.3525, 392.3025, 0.0000, 10138.2709], [75.1758, 391.5625, 846.3525, 392.3025, 0.0000, 10050.4768], [75.1758, 391.5625, 846.3525, 392.3025, 0.0000, 10300.0711], [75.1758, 391.5625, 846.3525, 392.3025, 0.0000, 10392.6970], [75.1758, 391.5625, 169.2705, 4644.3773, 0.0000, 10400.5282], [75.1758, 391.5625, 169.2705, 4644.3773, 0.0000, 10408.9220], [75.1758, 0.0000, 169.2705, 8653.9776, 0.0000, 10376.5914], [75.1758, 0.0000, 169.2705, 8653.9776, 0.0000, 10346.8794], [75.1758, 0.0000, 169.2705, 8653.9776, 0.0000, 10364.7474], [75.1758, 381.1856, 645.5014, 2459.1665, 0.0000, 10302.4570], [18.7939, 381.1856, 645.5014, 3024.6764, 0.0000, 10747.4929], [18.7939, 381.1856, 96.8252, 6492.3097, 0.0000, 11150.9107], [18.7939, 381.1856, 96.8252, 6492.3097, 0.0000, 11125.2946], [18.7939, 114.3557, 96.8252, 9227.3166, 0.0000, 11191.9956], [18.7939, 114.3557, 96.8252, 9227.3166, 0.0000, 11145.7486], [18.7939, 114.3557, 96.8252, 9227.3166, 0.0000, 11090.0768], [132.5972, 114.3557, 864.3802, 4223.9548, 0.0000, 11113.8733], [132.5972, 114.3557, 864.3802, 4223.9548, 0.0000, 11456.3281], [132.5972, 114.3557, 864.3802, 14223.9548, 0.0000, 21983.7333], [132.5972, 114.3557, 864.3802, 14223.9548, 0.0000, 22120.6165], [132.5972, 114.3557, 864.3802, 14223.9548, 0.0000, 21654.5327], [132.5972, 114.3557, 864.3802, 14223.9548, 0.0000, 21429.6550], [132.5972, 114.3557, 864.3802, 14223.9548, 0.0000, 21912.5643], [132.5972, 114.3557, 864.3802, 14223.9548, 0.0000, 22516.3100], [132.5972, 114.3557, 864.3802, 14223.9548, 0.0000, 23169.0777], [132.5972, 114.3557, 864.3802, 14223.9548, 0.0000, 23390.8080], [132.5972, 114.3557, 864.3802, 14223.9548, 0.0000, 23743.3742], [132.5972, 559.9112, 864.3802, 9367.3999, 0.0000, 23210.7311], [132.5972, 559.9112, 864.3802, 9367.3999, 0.0000, 24290.4375], [132.5972, 559.9112, 864.3802, 9367.3999, 0.0000, 24335.3279], [132.5972, 559.9112, 864.3802, 9367.3999, 0.0000, 18317.3553], [132.5972, 559.9112, 
864.3802, 9367.3999, 0.0000, 18023.4660], [259.4270, 559.9112, 0.0000, 15820.6915, 0.0000, 24390.0527], [259.4270, 559.9112, 0.0000, 15820.6915, 0.0000, 24389.6421], [259.4270, 559.9112, 0.0000, 15820.6915, 0.0000, 24483.5953], [0.0000, 559.9112, 0.0000, 18321.5674, 0.0000, 24486.1895], [0.0000, 0.0000, 0.0000, 24805.3389, 0.0000, 24805.3389], [0.0000, 0.0000, 0.0000, 24805.3389, 0.0000, 24805.3389]])

    def test_loop_step_pt_sb00(self):
        """ test loop step PT-signal, sell first"""
        c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=0,
                                                     own_cash=10000,
                                                     own_amounts=np.zeros(7, dtype='float'),
                                                     available_cash=10000,
                                                     available_amounts=np.zeros(7, dtype='float'),
                                                     op=self.pt_signals[0],
                                                     prices=self.prices[0],
                                                     rate=self.rate,
                                                     pt_buy_threshold=0.1,
                                                     pt_sell_threshold=0.1,
                                                     maximize_cash_usage=True,
                                                     allow_sell_short=False,
                                                     moq_buy=0,
                                                     moq_sell=0,
                                                     print_log=True)
        print(f'day 1 result in complete looping: \n'
              f'cash_change: +{c_g:.2f} / {c_s:.2f}\n'
              f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
              f'----------------------------------\n')
        cash = 10000 + c_g + c_s
        amounts = np.zeros(7, dtype='float') + a_p + a_s
        self.assertAlmostEqual(cash, 7500)
        self.assertTrue(np.allclose(amounts,
np.array([0, 0, 0, 0, 555.5555556, 0, 0])
numpy.array
import numpy as np
import torch

import rl_utils


class Softmax_pd(object):
    def __init__(self, action_dim, actions_per_dim, min_action=-1, max_action=1.):
        self.action_dim = action_dim
        self.actions_per_dim = actions_per_dim
        self.action_converter = rl_utils.Action_converter(1, actions_per_dim,
                                                          min_action=min_action,
                                                          max_action=max_action)

    def kl(self, old_logp, logp, log_vars, masks):
        logp, old_logp = rl_utils.unpad_list([logp, old_logp], masks)
        kl = 0.5 * np.mean((logp - old_logp) ** 2)
        return kl

    def entropy(self, logp, log_vars, masks):
        logp, = rl_utils.unpad_list([logp], masks)
        p = np.exp(logp)
        entropy = np.sum(-p * np.log2(p)) / logp.shape[0]
        return entropy

    def logp(self, actions, logits, log_vars):
        logps = []
        logits = torch.split(logits, self.actions_per_dim, dim=1)
        for i in range(actions.shape[1]):
            logp1 = torch.nn.functional.log_softmax(logits[i], dim=1)
            m = logits[i].shape[0]
            a = actions[:, i]
            logp = logp1[torch.arange(m), actions[:, i]]
            logps.append(logp)
        logps = torch.stack(logps, dim=1)
        logp = torch.sum(logps, dim=1) - np.log(self.action_dim)
        return logp

    def sample(self, logits, log_vars, test_mode):
        U = np.random.uniform(low=0.0, high=1.0, size=(self.action_dim, self.actions_per_dim))
        logits = logits.reshape(self.action_dim, self.actions_per_dim)
        if test_mode:
            action = np.argmax(logits, axis=1)
        else:
            action = np.argmax(logits - np.log(-np.log(U)), axis=1)
        env_action = np.squeeze(self.action_converter.idx2action(action))
        return
np.expand_dims(action,axis=0)
numpy.expand_dims
# ScanNet util_3d: https://github.com/ScanNet/ScanNet/blob/master/BenchmarkScripts/util_3d.py
import json, numpy as np


def load_ids(filename):
    ids = open(filename).read().splitlines()
    ids =
np.array(ids, dtype=np.int64)
numpy.array
import pytest
import numpy as np
import numpy.linalg
import theano
from numpy import inf
from numpy.testing import assert_array_almost_equal
from theano import tensor, function
from theano.tensor.basic import _allclose
from theano import config
from theano.tensor.nlinalg import (
    MatrixInverse, matrix_inverse, pinv, AllocDiag, alloc_diag, ExtractDiag,
    extract_diag, diag, trace, det, Eig, eig, eigh, matrix_dot, qr,
    matrix_power, norm, svd, SVD, TensorInv, tensorinv, tensorsolve,
)
from tests import unittest_tools as utt


def test_pseudoinverse_correctness():
    rng = np.random.RandomState(utt.fetch_seed())
    d1 = rng.randint(4) + 2
    d2 = rng.randint(4) + 2
    r = rng.randn(d1, d2).astype(theano.config.floatX)
    x = tensor.matrix()
    xi = pinv(x)
    ri = function([x], xi)(r)
    assert ri.shape[0] == r.shape[1]
    assert ri.shape[1] == r.shape[0]
    assert ri.dtype == r.dtype
    # Note that pseudoinverse can be quite unprecise so I prefer to compare
    # the result with what np.linalg returns
    assert _allclose(ri, np.linalg.pinv(r))


def test_pseudoinverse_grad():
    rng = np.random.RandomState(utt.fetch_seed())
    d1 = rng.randint(4) + 2
    d2 = rng.randint(4) + 2
    r = rng.randn(d1, d2).astype(theano.config.floatX)
    utt.verify_grad(pinv, [r])


class TestMatrixInverse(utt.InferShapeTester):
    def setup_method(self):
        super().setup_method()
        self.op_class = MatrixInverse
        self.op = matrix_inverse
        self.rng = np.random.RandomState(utt.fetch_seed())

    def test_inverse_correctness(self):
        r = self.rng.randn(4, 4).astype(theano.config.floatX)
        x = tensor.matrix()
        xi = self.op(x)
        ri = function([x], xi)(r)
        assert ri.shape == r.shape
        assert ri.dtype == r.dtype
        rir = np.dot(ri, r)
        rri = np.dot(r, ri)
        assert _allclose(np.identity(4), rir), rir
        assert _allclose(np.identity(4), rri), rri

    def test_infer_shape(self):
        r = self.rng.randn(4, 4).astype(theano.config.floatX)
        x = tensor.matrix()
        xi = self.op(x)
        self._compile_and_check([x], [xi], [r], self.op_class, warn=False)


def test_matrix_dot():
    rng = np.random.RandomState(utt.fetch_seed())
    n = rng.randint(4) + 2
    rs = []
    xs = []
    for k in range(n):
        rs += [rng.randn(4, 4).astype(theano.config.floatX)]
        xs += [tensor.matrix()]
    sol = matrix_dot(*xs)
    theano_sol = function(xs, sol)(*rs)
    numpy_sol = rs[0]
    for r in rs[1:]:
        numpy_sol = np.dot(numpy_sol, r)
    assert _allclose(numpy_sol, theano_sol)


def test_qr_modes():
    rng = np.random.RandomState(utt.fetch_seed())
    A = tensor.matrix("A", dtype=theano.config.floatX)
    a = rng.rand(4, 4).astype(theano.config.floatX)
    f = function([A], qr(A))
    t_qr = f(a)
    n_qr = np.linalg.qr(a)
    assert _allclose(n_qr, t_qr)
    for mode in ["reduced", "r", "raw"]:
        f = function([A], qr(A, mode))
        t_qr = f(a)
        n_qr =
np.linalg.qr(a, mode)
numpy.linalg.qr
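A minimal sketch of numpy.linalg.qr with an explicit mode, as exercised by the test above; the input matrix is random and only the default and "r" modes are shown.

import numpy as np

a = np.random.rand(4, 4)
q, r = np.linalg.qr(a)                       # default 'reduced' mode: Q and R factors
r_only = np.linalg.qr(a, mode="r")           # 'r' mode: only the upper-triangular R
print(np.allclose(q @ r, a))                 # True up to floating-point error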
import numpy as np
import matplotlib.pyplot as plt
from collections import Counter
from sklearn.datasets import make_classification
from imblearn.over_sampling import SMOTE  # doctest: +NORMALIZE_WHITESPACE
from sklearn.neighbors import KNeighborsClassifier
import matplotlib.pyplot as plt


def KNN_aug(bin):
    #bin = np.concatenate((bin,data[0:5,:]),axis=0)
    # create a new empty data
    n, m = bin.shape
    new = np.zeros((1, bin.shape[1]))
    if(bin.shape[0] <= 5):
        # this statement will generate a list of random numbers and add up to 1
        w = np.random.dirichlet(np.ones(bin.shape[0]), size=1)[0]
        for i in range(bin.shape[0]):
            new = new + w[i] * bin[i, :].reshape(1, m)
    else:
        # randomly select a subjects x0
        index = int(np.random.rand() * n)
        x0 = bin[index, 1:8101].reshape(1, m - 1)
        y0 = bin[index, 0]
        # use KNN to find 4 nearest neighbour to x0
        KNN = KNeighborsClassifier(n_neighbors=4)
        X = bin[:, 1:m]
        y = bin[:, 0].astype(int)
        #print(y)
        KNN.fit(X, y)
        # return a list of probabilities of the sub belongs to
        proba = KNN.predict_proba(x0)
        selected = np.append(y0, x0).reshape(1, m)
        while(selected.shape[0] < 5):
            index_max = proba.argmax()
            unique_scores = np.unique(bin[:, 0])
            score = unique_scores[index_max]
            index = np.where(bin[:, 0] == score)[0]
            selected = np.concatenate((selected, bin[index]), axis=0)
            np.delete(proba, index_max)
        w = np.random.dirichlet(np.ones(5), size=1)[0]
        for i in range(5):
            #new = new + w[i] * selected[i].reshape(1, m)
            new = new + w[i] * bin[int(np.random.rand() * n)].reshape(1, m)
    bin = np.concatenate((new, bin), axis=0)
    #bin = np.concatenate((bin, new), axis=0)
    return bin


def augmentation(data):
    index = np.where(data[:, 0] < 70)
    bin1 = data[index[0]]
    index = np.where(data[:, 0] < 80)
    temp = data[index[0]]
    index = np.where(70 <= temp[:, 0])
    bin2 = temp[index[0]]
    index = np.where(data[:, 0] < 90)
    temp = data[index[0]]
    index = np.where(80 <= temp[:, 0])
    bin3 = temp[index[0]]
    index = np.where(data[:, 0] < 100)
    temp = data[index[0]]
    index = np.where(90 <= temp[:, 0])
    bin4 = temp[index[0]]
    index =
np.where(100 <= data[:,0])
numpy.where
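A short sketch of the single-argument numpy.where form used above, which returns the indices where the condition holds; the score values are invented.

import numpy as np

scores = np.array([65., 72., 95., 101., 88.])
index = np.where(100 <= scores)              # tuple of index arrays meeting the condition
print(index[0])                              # [3]
print(scores[index[0]])                      # [101.]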
# -*- coding: utf-8 -*- # Copyright (c) 2019 the HERA Project # Licensed under the MIT License import pytest import os import shutil import hera_qm.xrfi as xrfi import numpy as np import pyuvdata.tests as uvtest from pyuvdata import UVData from pyuvdata import UVCal import hera_qm.utils as utils from hera_qm.data import DATA_PATH from pyuvdata import UVFlag import glob test_d_file = os.path.join(DATA_PATH, 'zen.2457698.40355.xx.HH.uvcAA') test_uvfits_file = os.path.join(DATA_PATH, 'zen.2457698.40355.xx.HH.uvcAA.uvfits') test_uvh5_file = os.path.join(DATA_PATH, 'zen.2457698.40355.xx.HH.uvh5') test_c_file = os.path.join(DATA_PATH, 'zen.2457698.40355.xx.HH.uvcAA.omni.calfits') test_f_file = test_d_file + '.testuvflag.h5' test_f_file_flags = test_d_file + '.testuvflag.flags.h5' # version in 'flag' mode test_outfile = os.path.join(DATA_PATH, 'test_output', 'uvflag_testout.h5') xrfi_path = os.path.join(DATA_PATH, 'test_output') test_flag_integrations= os.path.join(DATA_PATH, 'a_priori_flags_integrations.yaml') test_flag_jds= os.path.join(DATA_PATH, 'a_priori_flags_jds.yaml') test_flag_lsts= os.path.join(DATA_PATH, 'a_priori_flags_lsts.yaml') test_uvh5_files = ['zen.2457698.40355191.xx.HH.uvh5', 'zen.2457698.40367619.xx.HH.uvh5', 'zen.2457698.40380046.xx.HH.uvh5'] test_c_files = ['zen.2457698.40355191.xx.HH.uvcAA.omni.calfits', 'zen.2457698.40367619.xx.HH.uvcAA.omni.calfits', 'zen.2457698.40380046.xx.HH.uvcAA.omni.calfits'] for cnum, cf, uvf in zip(range(3), test_c_files, test_uvh5_files): test_c_files[cnum] = os.path.join(DATA_PATH, cf) test_uvh5_files[cnum] = os.path.join(DATA_PATH, uvf) pytestmark = pytest.mark.filterwarnings( "ignore:The uvw_array does not match the expected values given the antenna positions.", "ignore:telescope_location is not set. Using known values for HERA.", "ignore:antenna_positions is not set. Using known values for HERA." 
) def test_uvdata(): uv = UVData() uv.read_miriad(test_d_file) xant = uv.get_ants()[0] xrfi.flag_xants(uv, xant) assert np.all(uv.flag_array[uv.ant_1_array == xant, :, :, :]) assert np.all(uv.flag_array[uv.ant_2_array == xant, :, :, :]) def test_uvcal(): uvc = UVCal() uvc.read_calfits(test_c_file) xant = uvc.ant_array[0] xrfi.flag_xants(uvc, xant) assert np.all(uvc.flag_array[0, :, :, :, :]) def test_uvflag(): uvf = UVFlag(test_f_file) uvf.to_flag() xant = uvf.ant_1_array[0] xrfi.flag_xants(uvf, xant) assert np.all(uvf.flag_array[uvf.ant_1_array == xant, :, :, :]) assert np.all(uvf.flag_array[uvf.ant_2_array == xant, :, :, :]) def test_input_error(): pytest.raises(ValueError, xrfi.flag_xants, 4, 0) def test_uvflag_waterfall_error(): uvf = UVFlag(test_f_file) uvf.to_waterfall() uvf.to_flag() pytest.raises(ValueError, xrfi.flag_xants, uvf, 0) def test_uvflag_not_flag_error(): uvf = UVFlag(test_f_file) pytest.raises(ValueError, xrfi.flag_xants, uvf, 0) def test_not_inplace_uvflag(): uvf = UVFlag(test_f_file) xant = uvf.ant_1_array[0] uvf2 = xrfi.flag_xants(uvf, xant, inplace=False) assert np.all(uvf2.flag_array[uvf2.ant_1_array == xant, :, :, :]) assert np.all(uvf2.flag_array[uvf2.ant_2_array == xant, :, :, :]) def test_not_inplace_uvdata(): uv = UVData() uv.read_miriad(test_d_file) xant = uv.get_ants()[0] uv2 = xrfi.flag_xants(uv, xant, inplace=False) assert np.all(uv2.flag_array[uv2.ant_1_array == xant, :, :, :]) assert np.all(uv2.flag_array[uv2.ant_2_array == xant, :, :, :]) def test_resolve_xrfi_path_given(): dirname = xrfi.resolve_xrfi_path(xrfi_path, test_d_file) assert xrfi_path == dirname def test_resolve_xrfi_path_empty(): dirname = xrfi.resolve_xrfi_path('', test_d_file) assert os.path.dirname(os.path.abspath(test_d_file)) == dirname def test_resolve_xrfi_path_does_not_exist(): dirname = xrfi.resolve_xrfi_path(os.path.join(xrfi_path, 'foogoo'), test_d_file) assert os.path.dirname(os.path.abspath(test_d_file)) == dirname def test_resolve_xrfi_path_jd_subdir(): dirname = xrfi.resolve_xrfi_path('', test_d_file, jd_subdir=True) expected_dir = os.path.join(os.path.dirname(os.path.abspath(test_d_file)), '.'.join(os.path.basename(test_d_file).split('.')[0:3]) + '.xrfi') assert dirname == expected_dir assert os.path.exists(expected_dir) shutil.rmtree(expected_dir) def test_check_convolve_dims_3D(): # Error if d.ndims != 2 pytest.raises(ValueError, xrfi._check_convolve_dims, np.ones((3, 2, 3)), 1, 2) def test_check_convolve_dims_1D(): size = 10 d = np.ones(size) with uvtest.check_warnings( UserWarning, match=f"K1 value {size + 1} is larger than the data", nwarnings=1 ): K = xrfi._check_convolve_dims(d, size + 1) assert K == size def test_check_convolve_dims_kernel_not_given(): size = 10 d = np.ones((size, size)) with uvtest.check_warnings( UserWarning, match=["No K1 input provided.", "No K2 input provided"], nwarnings=2 ): K1, K2 = xrfi._check_convolve_dims(d) assert K1 == size assert K2 == size def test_check_convolve_dims_Kt_too_big(): size = 10 d = np.ones((size, size)) with uvtest.check_warnings( UserWarning, match=f"K1 value {size + 1} is larger than the data", nwarnings=1, ): Kt, Kf = xrfi._check_convolve_dims(d, size + 1, size) assert Kt == size assert Kf == size def test_check_convolve_dims_Kf_too_big(): size = 10 d = np.ones((size, size)) with uvtest.check_warnings( UserWarning, match=f"K2 value {size + 1} is larger than the data", nwarnings=1, ): Kt, Kf = xrfi._check_convolve_dims(d, size, size + 1) assert Kt == size assert Kf == size def test_check_convolve_dims_K1K2_lt_one(): 
size = 10 data = np.ones((size, size)) pytest.raises(ValueError, xrfi._check_convolve_dims, data, 0, 2) pytest.raises(ValueError, xrfi._check_convolve_dims, data, 2, 0) def test_robus_divide(): a = np.array([1., 1., 1.], dtype=np.float32) b = np.array([2., 0., 1e-9], dtype=np.float32) c = xrfi.robust_divide(a, b) assert np.array_equal(c, np.array([1. / 2., np.inf, np.inf])) @pytest.fixture(scope='function') def fake_data(): size = 100 fake_data = np.zeros((size, size)) # yield returns the data and lets us do post test clean up after yield fake_data # post-test clean up del(fake_data) return def test_medmin(fake_data): # make fake data for i in range(fake_data.shape[1]): fake_data[:, i] = i * np.ones_like(fake_data[:, i]) # medmin should be .size - 1 for these data medmin = xrfi.medmin(fake_data) assert np.allclose(medmin, fake_data.shape[0] - 1) # Test error when wrong dimensions are passed pytest.raises(ValueError, xrfi.medmin, np.ones((5, 4, 3))) def test_medminfilt(fake_data): # make fake data for i in range(fake_data.shape[1]): fake_data[:, i] = i * np.ones_like(fake_data[:, i]) # run medmin filt Kt = 8 Kf = 8 d_filt = xrfi.medminfilt(fake_data, Kt=Kt, Kf=Kf) # build up "answer" array ans = np.zeros_like(fake_data) for i in range(fake_data.shape[1]): if i < fake_data.shape[0] - Kf: ans[:, i] = i + (Kf - 1) else: ans[:, i] = fake_data.shape[0] - 1 assert np.allclose(d_filt, ans) def test_detrend_deriv(fake_data): # make fake data for i in range(fake_data.shape[0]): for j in range(fake_data.shape[1]): fake_data[i, j] = j * i**2 + j**3 # run detrend_deriv in both dimensions dtdf = xrfi.detrend_deriv(fake_data, df=True, dt=True) ans = np.ones_like(dtdf) assert np.allclose(dtdf, ans) # only run along frequency for i in range(fake_data.shape[0]): for j in range(fake_data.shape[1]): fake_data[i, j] = j**3 df = xrfi.detrend_deriv(fake_data, df=True, dt=False) ans = np.ones_like(df) assert np.allclose(df, ans) # only run along time for i in range(fake_data.shape[0]): for j in range(fake_data.shape[1]): fake_data[i, j] = i**3 dt = xrfi.detrend_deriv(fake_data, df=False, dt=True) ans = np.ones_like(dt) assert np.allclose(dt, ans) # catch error of df and dt both being False pytest.raises(ValueError, xrfi.detrend_deriv, fake_data, dt=False, df=False) # Test error when wrong dimensions are passed pytest.raises(ValueError, xrfi.detrend_deriv, np.ones((5, 4, 3))) def test_detrend_medminfilt(fake_data): # make fake data for i in range(fake_data.shape[1]): fake_data[:, i] = i * np.ones_like(fake_data[:, i]) # run detrend_medminfilt Kt = 8 Kf = 8 dm = xrfi.detrend_medminfilt(fake_data, Kt=Kt, Kf=Kf) # read in "answer" array # this is output that corresponds to .size==100, Kt==8, Kf==8 ans_fn = os.path.join(DATA_PATH, 'test_detrend_medminfilt_ans.txt') ans = np.loadtxt(ans_fn) assert np.allclose(ans, dm) def test_detrend_medfilt(): # make fake data x = np.sin(np.linspace(0, 2.1 * np.pi, 100)) y = np.cos(np.linspace(0, 5.3 * np.pi, 100)) fake_data = np.outer(x,y) # run detrend medfilt Kt = 101 Kf = 101 with uvtest.check_warnings( UserWarning, match=[ f"K1 value {Kt} is larger than the data", f"K2 value {Kf} is larger than the data", ], nwarnings=2, ): dm = xrfi.detrend_medfilt(fake_data, None, Kt, Kf) # read in "answer" array # this is output that corresponds to .size==100, Kt==101, Kf==101 ans_fn = os.path.join(DATA_PATH, 'test_detrend_medfilt_ans_v2.txt') ans = np.loadtxt(ans_fn) np.testing.assert_array_almost_equal(ans, dm) def test_detrend_medfilt_complex(): # use complex data x = np.sin(np.linspace(0, 
2.1 * np.pi, 100)) + 1.0j * np.cos(np.linspace(0, 1.3 * np.pi, 100)) y = np.cos(np.linspace(0, 5.3 * np.pi, 100)) + 1.0j * np.sin(np.linspace(0, 2.9 * np.pi, 100)) fake_data = np.outer(x,y) # run detrend_medfilt Kt = 8 Kf = 8 dm = xrfi.detrend_medfilt(fake_data, Kt=Kt, Kf=Kf) # read in "answer" array # this is output that corresponds to .size=100, Kt=8, Kf=8 ans_fn = os.path.join(DATA_PATH, 'test_detrend_medfilt_complex_ans_v2.txt') ans = np.loadtxt(ans_fn).view('complex') np.testing.assert_array_almost_equal(ans, dm) def test_detrend_medfilt_3d_error(): # Test error when wrong dimensions are passed pytest.raises(ValueError, xrfi.detrend_medfilt, np.ones((5, 4, 3))) def test_detrend_meanfilt(fake_data): # make fake data for i in range(fake_data.shape[1]): fake_data[:, i] = i**2 * np.ones_like(fake_data[:, i]) # run detrend medfilt Kt = 8 Kf = 8 dm = xrfi.detrend_meanfilt(fake_data, Kt=Kt, Kf=Kf) # read in "answer" array # this is output that corresponds to .size==100, Kt==8, Kf==8 ans_fn = os.path.join(DATA_PATH, 'test_detrend_meanfilt_ans.txt') ans = np.loadtxt(ans_fn) assert np.allclose(ans, dm) def test_detrend_meanfilt_flags(fake_data): # make fake data for i in range(fake_data.shape[1]): fake_data[:, i] = i * np.ones_like(fake_data[:, i]) ind = int(fake_data.shape[0] / 2) fake_data[ind, :] = 10000. flags = np.zeros(fake_data.shape, dtype=np.bool_) flags[ind, :] = True # run detrend medfilt Kt = 8 Kf = 8 dm1 = xrfi.detrend_meanfilt(fake_data, flags=flags, Kt=Kt, Kf=Kf) # Compare with drastically different flagged values fake_data[ind, :] = 0 dm2 = xrfi.detrend_meanfilt(fake_data, flags=flags, Kt=Kt, Kf=Kf) dm2[ind, :] = dm1[ind, :] # These don't have valid values, so don't compare them. assert np.allclose(dm1, dm2) def test_zscore_full_array(fake_data): # Make some fake data np.random.seed(182) fake_data[...] = np.random.randn(fake_data.shape[0], fake_data.shape[1]) out = xrfi.zscore_full_array(fake_data) fake_mean = np.mean(fake_data) fake_std = np.std(fake_data) assert np.all(out == (fake_data - fake_mean) / fake_std) def test_zscore_full_array_flags(fake_data): # Make some fake data np.random.seed(182) fake_data[...] = np.random.randn(fake_data.shape[0], fake_data.shape[1]) flags = np.zeros(fake_data.shape, dtype=np.bool_) flags[45, 33] = True out = xrfi.zscore_full_array(fake_data, flags=flags) fake_mean = np.mean(np.ma.masked_array(fake_data, flags)) fake_std = np.std(np.ma.masked_array(fake_data, flags)) out_exp = (fake_data - fake_mean) / fake_std out_exp[45, 33] = np.inf assert np.all(out == out_exp) def test_zscore_full_array_modified(fake_data): # Make some fake data np.random.seed(182) fake_data[...] 
= np.random.randn(fake_data.shape[0], fake_data.shape[1]) out = xrfi.zscore_full_array(fake_data, modified=True) fake_med = np.median(fake_data) fake_mad = np.median(np.abs(fake_data - fake_med)) assert np.all(out == (fake_data - fake_med) / (1.486 * fake_mad)) def test_zscore_full_array_modified_complex(fake_data): # Make some fake data np.random.seed(182) rands = np.random.randn(100, 100) fake_data = rands + 1j * rands out = xrfi.zscore_full_array(fake_data, modified=True) fake_med = np.median(rands) fake_mad = np.sqrt(2) * np.median(np.abs(rands - fake_med)) assert np.allclose(out, (fake_data - fake_med - 1j * fake_med) / (1.486 * fake_mad)) def test_modzscore_1d_no_detrend(): npix = 1000 np.random.seed(182) data = np.random.randn(npix) data[50] = 500 out = xrfi.modzscore_1d(data, detrend=False) assert out.shape == (npix,) assert np.isclose(out[50], 500, rtol=.2) assert np.isclose(np.median(np.abs(out)), .67, rtol=.1) def test_modzscore_1d(): npix = 1000 np.random.seed(182) data = np.random.randn(npix) data[50] = 500 data += .1 * np.arange(npix) out = xrfi.modzscore_1d(data) assert out.shape == (npix,) assert np.isclose(out[50], 500, rtol=.2) assert np.isclose(np.median(np.abs(out)), .67, rtol=.1) def test_watershed_flag(): # generate a metrics and flag UVFlag object uv = UVData() uv.read_miriad(test_d_file) uvm = UVFlag(uv, history='I made this') uvf = UVFlag(uv, mode='flag') # set metric and flag arrays to specific values uvm.metric_array = np.zeros_like(uvm.metric_array) uvf.flag_array = np.zeros_like(uvf.flag_array, dtype=np.bool_) uvm.metric_array[0, 0, 1, 0] = 7. uvf.flag_array[0, 0, 0, 0] = True # run watershed flag xrfi.watershed_flag(uvm, uvf, nsig_p=2., inplace=True) # check answer flag_array = np.zeros_like(uvf.flag_array, dtype=np.bool_) flag_array[0, 0, :2, 0] = True assert np.allclose(uvf.flag_array, flag_array) # test flagging channels adjacent to fully flagged ones uvm.metric_array = np.zeros_like(uvm.metric_array) uvf.flag_array = np.zeros_like(uvf.flag_array, dtype=np.bool_) uvm.metric_array[:, :, 1, :] = 1. uvf.flag_array[:, :, 0, :] = True # run watershed flag xrfi.watershed_flag(uvm, uvf, nsig_p=2., nsig_f=0.5, inplace=True) # check answer flag_array = np.zeros_like(uvf.flag_array, dtype=np.bool_) flag_array[:, :, :2, :] = True assert np.allclose(uvf.flag_array, flag_array) # test flagging times adjacent to fully flagged ones uvm.metric_array = np.zeros_like(uvm.metric_array) uvf.flag_array = np.zeros_like(uvf.flag_array, dtype=np.bool_) times = np.unique(uv.time_array) inds1 = np.where(uv.time_array == times[0])[0] inds2 = np.where(uv.time_array == times[1])[0] uvm.metric_array[inds2, 0, :, 0] = 1. uvf.flag_array[inds1, 0, :, 0] = True # run watershed flag xrfi.watershed_flag(uvm, uvf, nsig_p=2., nsig_t=0.5, inplace=True) # check answer flag_array = np.zeros_like(uvf.flag_array, dtype=np.bool_) flag_array[inds1, 0, :, 0] = True flag_array[inds2, 0, :, 0] = True assert np.allclose(uvf.flag_array, flag_array) # test antenna type objects uvc = UVCal() uvc.read_calfits(test_c_file) uvm = UVFlag(uvc, history='I made this') uvf = UVFlag(uvc, mode='flag') # set metric and flag arrays to specific values uvm.metric_array = np.zeros_like(uvm.metric_array) uvf.flag_array = np.zeros_like(uvf.flag_array, dtype=np.bool_) uvm.metric_array[0, 0, 0, 1, 0] = 7. 
uvf.flag_array[0, 0, 0, 0, 0] = True # run watershed flag xrfi.watershed_flag(uvm, uvf, nsig_p=2., inplace=True) # check answer flag_array = np.zeros_like(uvf.flag_array, dtype=np.bool_) flag_array[0, 0, 0, :2, 0] = True assert np.allclose(uvf.flag_array, flag_array) # test flagging channels adjacent to fully flagged ones uvm.metric_array = np.zeros_like(uvm.metric_array) uvf.flag_array = np.zeros_like(uvf.flag_array, dtype=np.bool_) uvm.metric_array[:, :, 1, :, :] = 1. uvf.flag_array[:, :, 0, :, :] = True # run watershed flag uvf2 = xrfi.watershed_flag(uvm, uvf, nsig_p=2., nsig_f=0.5, inplace=False) # check answer flag_array = np.zeros_like(uvf2.flag_array, dtype=np.bool_) flag_array[:, :, :2, :, :] = True assert np.allclose(uvf2.flag_array, flag_array) del(uvf2) # test flagging times adjacent to fully flagged ones uvm.metric_array = np.zeros_like(uvm.metric_array) uvf.flag_array = np.zeros_like(uvf.flag_array, dtype=np.bool_) uvm.metric_array[:, :, :, 1, :] = 1. uvf.flag_array[:, :, :, 0, :] = True # run watershed flag xrfi.watershed_flag(uvm, uvf, nsig_p=2., nsig_t=0.5, inplace=True) # check answer flag_array = np.zeros_like(uvf.flag_array, dtype=np.bool_) flag_array[:, :, :, :2, :] = True assert np.allclose(uvf.flag_array, flag_array) # test waterfall types uv = UVData() uv.read_miriad(test_d_file) uvm = UVFlag(uv, history='I made this', waterfall=True) uvf = UVFlag(uv, mode='flag', waterfall=True) # set metric and flag arrays to specific values uvm.metric_array = np.zeros_like(uvm.metric_array) uvf.flag_array = np.zeros_like(uvf.flag_array, dtype=np.bool_) uvm.metric_array[0, 1, 0] = 7. uvf.flag_array[0, 0, 0] = True # run watershed flag xrfi.watershed_flag(uvm, uvf, nsig_p=2., inplace=True) # check answer flag_array = np.zeros_like(uvf.flag_array, dtype=np.bool_) flag_array[0, :2, 0] = True assert np.allclose(uvf.flag_array, flag_array) # test flagging channels adjacent to fully flagged ones uvm.metric_array = np.zeros_like(uvm.metric_array) uvf.flag_array = np.zeros_like(uvf.flag_array, dtype=np.bool_) uvm.metric_array[:, 1, :] = 1. uvf.flag_array[:, 0, :] = True # run watershed flag xrfi.watershed_flag(uvm, uvf, nsig_p=2., nsig_f=0.5, inplace=True) # check answer flag_array = np.zeros_like(uvf.flag_array, dtype=np.bool_) flag_array[:, :2, :] = True assert np.allclose(uvf.flag_array, flag_array) # test flagging times adjacent to fully flagged ones uvm.metric_array = np.zeros_like(uvm.metric_array) uvf.flag_array = np.zeros_like(uvf.flag_array, dtype=np.bool_) uvm.metric_array[1, :, :] = 1. uvf.flag_array[0, :, :] = True # run watershed flag xrfi.watershed_flag(uvm, uvf, nsig_p=2., nsig_t=0.5, inplace=True) # check answer flag_array = np.zeros_like(uvf.flag_array, dtype=np.bool_) flag_array[:2, :, :] = True assert np.allclose(uvf.flag_array, flag_array) def test_watershed_flag_errors(): # setup uv = UVData() uv.read_miriad(test_d_file) uvm = UVFlag(uv, history='I made this') uvf = UVFlag(uv, mode='flag') uvf2 = UVFlag(uv, mode='flag', waterfall=True) # pass in objects besides UVFlag pytest.raises(ValueError, xrfi.watershed_flag, 1, 2) pytest.raises(ValueError, xrfi.watershed_flag, uvm, 2) pytest.raises(ValueError, xrfi.watershed_flag, uvm, uvf2) # set the UVFlag object to have a bogus type uvm.type = 'blah' pytest.raises(ValueError, xrfi.watershed_flag, uvm, uvf) def test_ws_flag_waterfall(): # test 1d d = np.zeros((10,)) f = np.zeros((10,), dtype=np.bool_) d[1] = 3. f[0] = True f_out = xrfi._ws_flag_waterfall(d, f, nsig=2.) 
ans = np.zeros_like(f_out, dtype=np.bool_) ans[:2] = True assert np.allclose(f_out, ans) # another 1D test metric = np.array([2., 2., 5., 0., 2., 0., 5.]) fin = (metric >= 5.) fout = xrfi._ws_flag_waterfall(metric, fin) np.testing.assert_array_equal(fout, [True, True, True, False, False, False, True]) # test 2d d = np.zeros((10, 10)) f = np.zeros((10, 10), dtype=np.bool_) d[0, 1] = 3. d[1, 0] = 3. f[0, 0] = True f_out = xrfi._ws_flag_waterfall(d, f, nsig=2.) ans = np.zeros_like(f_out, dtype=np.bool_) ans[:2, 0] = True ans[0, :2] = True assert np.allclose(f_out, ans) # catch errors d1 = np.zeros((10,)) f2 = np.zeros((10, 10), dtype=np.bool_) pytest.raises(ValueError, xrfi._ws_flag_waterfall, d1, f2) d3 = np.zeros((5, 4, 3)) f3 = np.zeros((5, 4, 3), dtype=np.bool_) pytest.raises(ValueError, xrfi._ws_flag_waterfall, d3, f3) def test_xrfi_waterfall(): # test basic functions np.random.seed(21) data = 100 * np.ones((10, 10)) data += np.random.randn(10, 10) data[3, 3] += 100 data[3, 4] += 3 flags = xrfi.xrfi_waterfall(data) assert np.sum(flags) == 2 assert flags[3, 3] assert flags[3, 4] flags = xrfi.xrfi_waterfall(data, nsig_adj=6.) assert np.sum(flags) == 1 assert flags[3, 3] def test_xrfi_waterfall_prior_flags(): # test with prior flags np.random.seed(21) data = 100 * np.ones((10, 10)) data += np.random.randn(10, 10) prior_flags = np.zeros((10, 10), dtype=bool) prior_flags[3, 3] = True data[3, 4] += 3 flags = xrfi.xrfi_waterfall(data, flags=prior_flags) assert np.sum(flags) == 2 assert flags[3, 3] assert flags[3, 4] flags = xrfi.xrfi_waterfall(data, flags=prior_flags, nsig_adj=6.) assert np.sum(flags) == 1 assert flags[3, 3] def test_xrfi_waterfall_error(): # test errors data = np.ones((10, 10)) with pytest.raises(KeyError): xrfi.xrfi_waterfall(data, algorithm='not_an_algorithm') def test_flag(): # setup uv = UVData() uv.read_miriad(test_d_file) uvm = UVFlag(uv, history='I made this') # initialize array with specific values uvm.metric_array = np.zeros_like(uvm.metric_array) uvm.metric_array[0, 0, 0, 0] = 7. uvf = xrfi.flag(uvm, nsig_p=6.) assert uvf.mode == 'flag' flag_array = np.zeros_like(uvf.flag_array, dtype=np.bool_) flag_array[0, 0, 0, 0] = True assert np.allclose(uvf.flag_array, flag_array) # test channel flagging in baseline type uvm.metric_array = np.zeros_like(uvm.metric_array) uvm.metric_array[:, :, 0, :] = 7. uvm.metric_array[:, :, 1, :] = 3. uvf = xrfi.flag(uvm, nsig_p=6., nsig_f=2.) flag_array = np.zeros_like(uvf.flag_array, dtype=np.bool_) flag_array[:, :, :2, :] = True assert np.allclose(uvf.flag_array, flag_array) # test time flagging in baseline type uvm.metric_array = np.zeros_like(uvm.metric_array) times = np.unique(uvm.time_array) inds1 = np.where(uvm.time_array == times[0])[0] inds2 = np.where(uvm.time_array == times[1])[0] uvm.metric_array[inds1, :, :, :] = 7. uvm.metric_array[inds2, :, :, :] = 3. uvf = xrfi.flag(uvm, nsig_p=6., nsig_t=2.) flag_array = np.zeros_like(uvf.flag_array, dtype=np.bool_) flag_array[inds1, :, :, :] = True flag_array[inds2, :, :, :] = True assert np.allclose(uvf.flag_array, flag_array) # test channel flagging in antenna type uv = UVCal() uv.read_calfits(test_c_file) uvm = UVFlag(uv, history='I made this') uvm.metric_array = np.zeros_like(uvm.metric_array) uvm.metric_array[:, :, 0, :, :] = 7. uvm.metric_array[:, :, 1, :, :] = 3. uvf = xrfi.flag(uvm, nsig_p=7., nsig_f=2.) 
flag_array = np.zeros_like(uvf.flag_array, dtype=np.bool_) flag_array[:, :, :2, :, :] = True assert np.allclose(uvf.flag_array, flag_array) # test time flagging in antenna type uvm.metric_array = np.zeros_like(uvm.metric_array) uvm.metric_array[:, :, :, 0, :] = 7. uvm.metric_array[:, :, :, 1, :] = 3. uvf = xrfi.flag(uvm, nsig_p=6., nsig_t=2.) flag_array = np.zeros_like(uvf.flag_array, dtype=np.bool_) flag_array[:, :, :, :2, :] = True assert np.allclose(uvf.flag_array, flag_array) # test channel flagging in waterfall type uv = UVData() uv.read_miriad(test_d_file) uvm = UVFlag(uv, history='I made this', waterfall=True) uvm.metric_array = np.zeros_like(uvm.metric_array) uvm.metric_array[:, 0, :] = 7. uvm.metric_array[:, 1, :] = 3. uvf = xrfi.flag(uvm, nsig_p=6., nsig_f=2.) flag_array = np.zeros_like(uvf.flag_array, dtype=np.bool_) flag_array[:, :2, :] = True assert np.allclose(uvf.flag_array, flag_array) # test time flagging in waterfall type uvm.metric_array = np.zeros_like(uvm.metric_array) uvm.metric_array[0, :, :] = 7. uvm.metric_array[1, :, :] = 3. uvf = xrfi.flag(uvm, nsig_p=6., nsig_t=2.) flag_array = np.zeros_like(uvf.flag_array, dtype=np.bool_) flag_array[:2, :, :] = True assert np.allclose(uvf.flag_array, flag_array) # catch errors pytest.raises(ValueError, xrfi.flag, 2) uvm.type = 'blah' pytest.raises(ValueError, xrfi.flag, uvm) def test_unflag(): # Do a test, add more tests as needed assert True def test_flag_apply(): # test applying to UVData uv = UVData() uv.read_miriad(test_d_file) uv.flag_array = np.zeros_like(uv.flag_array, dtype=np.bool_) uvf = UVFlag(uv, mode='flag') uvf.flag_array = np.zeros_like(uvf.flag_array, dtype=np.bool_) uvf.flag_array[:, :, 0, :] = True uvf2 = xrfi.flag_apply(uvf, uv, return_net_flags=True) assert np.allclose(uv.flag_array, uvf2.flag_array) # test applying to UVCal uv = UVCal() uv.read_calfits(test_c_file) uv.flag_array = np.zeros_like(uv.flag_array, dtype=np.bool_) uvf = UVFlag(uv, mode='flag') uvf.flag_array =
np.zeros_like(uvf.flag_array, dtype=np.bool_)
numpy.zeros_like
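A minimal sketch of numpy.zeros_like with a dtype override, mirroring how the test above resets a flag array; the array shape here is arbitrary.

import numpy as np

flags = np.ones((2, 3), dtype=np.float32)
cleared = np.zeros_like(flags, dtype=np.bool_)   # same shape, boolean, all False
print(cleared.shape, cleared.dtype)              # (2, 3) bool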
import numpy as np

from uf3.regression import least_squares
from uf3.regression import regularize


def simple_problem(n_features, n_samples, seed=0):
    np.random.seed(seed)
    x = np.random.rand(n_samples, n_features)
    c = np.random.rand(n_features)
    y = np.dot(x, c)
    return x, y, c


class TestLinearModel:
    def test_init(self):
        regularizer = np.eye(20)
        model = least_squares.BasicLinearModel(regularizer=regularizer)
        assert model.regularizer.shape == (20, 20)

    def test_fit_predict_score(self):
        x, y, c = simple_problem(20, 500, seed=0)
        regularizer = np.eye(20) * 1e-6
        model = least_squares.BasicLinearModel(regularizer=regularizer)
        model.fit(x, y)
        assert np.allclose(model.coefficients, c)
        assert np.allclose(model.predict(x), y)
        assert model.score(x, y) < 1e-6


def test_linear_least_squares():
    x, y, c = simple_problem(10, 30, seed=0)
    solution = least_squares.linear_least_squares(x, y)
    assert np.allclose(solution, c)


def test_weighted_least_squares():
    x1, y1, c1 = simple_problem(5, 10, seed=0)
    x2, y2, c2 = simple_problem(5, 20, seed=1)
    x = np.concatenate([x1, x2])
    y = np.concatenate([y1, y2])
    weights = np.concatenate([np.ones(10), np.zeros(20)])
    solution = least_squares.weighted_least_squares(x, y, weights)
    assert np.allclose(solution, c1)
    weights = np.concatenate([np.zeros(10), np.ones(20)])
    solution = least_squares.weighted_least_squares(x, y, weights)
    assert np.allclose(solution, c2)
    weights = np.concatenate([np.ones(10) * 0.5, np.ones(20) * 0.5])
    solution = least_squares.weighted_least_squares(x, y, weights)
    assert not np.allclose(solution, c1) and not
np.allclose(solution, c2)
numpy.allclose
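A small sketch of numpy.allclose, the comparison the assertions above rely on; the examples use the default rtol/atol tolerances.

import numpy as np

a = np.array([1.0, 2.0, 3.0])
b = a + 1e-9                                 # within the default tolerances
c = a + 1.0                                  # clearly different
print(np.allclose(a, b))                     # True
print(np.allclose(a, c))                     # False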
# coding=utf-8
# date: 2019/1/21, 14:18
# name: smz
import numpy as np
import tensorflow as tf
from sklearn.utils import shuffle

from FNN_skills.modules.model import ModelV1
from FNN_skills.configration.options import opts


def split_data(data, ratios, batch_size):
    ratio1, ratio2, ratio3, ratio4 = ratios.split(":")
    num_data = len(data)
    sum_ = float(ratio1) + float(ratio2) + float(ratio3) + float(ratio4)
    num_ratio1 = int(batch_size * float(ratio1) / sum_)
    num_ratio2 = int(batch_size * float(ratio2) / sum_)
    num_ratio3 = int(batch_size * float(ratio3) / sum_)
    num_ratio4 = int(batch_size * float(ratio4) / sum_)
    pos = 0
    num_ratios = [num_ratio1, num_ratio2, num_ratio3, num_ratio4]
    batch_data = []
    while pos < num_data:
        try:
            for num_ratio in num_ratios:
                batch_data.append(data[pos: pos + num_ratio])
                pos = pos + num_ratio
        except:
            pass
    return batch_data


def train():
    """plan_e: batch_size=100, one cycle is four batches with class ratios
    1:2:3:4 (each share is 10 samples), then
    2:3:4:1
    3:4:1:2
    4:1:2:3
    """
    train_0_x = np.load("../data/class_0_train_X.npy")
    train_0_y = np.load("../data/class_0_train_Y.npy")
    train_1_x = np.load("../data/class_1_train_X.npy")
    train_1_y =
np.load("../data/class_1_train_Y.npy")
numpy.load
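A self-contained sketch of numpy.load round-tripping an .npy file, as in the training script above; the file name is hypothetical and is written here only for the demo.

import numpy as np

x = np.random.rand(5, 3)
np.save("example_X.npy", x)                  # hypothetical file, written only for this demo
loaded = np.load("example_X.npy")            # .npy files preserve shape and dtype
print(np.array_equal(x, loaded))             # True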
import numpy as np
from numpy.testing import (TestCase, assert_equal, assert_almost_equal,
                           assert_array_almost_equal, assert_raises)

import astropy.cosmology
from astropy import units as u

from NFW.nfw import NFW


class TestNFW(TestCase):
    @classmethod
    def setup_class(cls):
        cls._cosmo = astropy.cosmology.FlatLambdaCDM(70, 0.3, Tcmb0=0)
        astropy.cosmology.default_cosmology.set(cls._cosmo)

    def test_faulty_init(self):
        assert_raises(ValueError, NFW, 1e15, 5, 0, **{'size_type': "foo"})
        assert_raises(ValueError, NFW, 1e15, 5, 0, **{'overdensity_type': "bar"})

    def test_overdensity_init(self):
        nfw = NFW(1e15, 4, 0.3, overdensity=500, overdensity_type="mean")
        assert_equal(nfw.overdensity, 500)
        assert (nfw.overdensity_type == "mean")

    def test_mass_init(self):
        m200 = 1e15 * u.solMass
        c = 5.
        z = 0.3
        nfw = NFW(m200, c, z)
        assert_equal(nfw.c, c)
        assert_equal(nfw.z, z)
        assert_almost_equal(nfw.r_s.value, 0.37244989922085564)

    def test_mass_init_bckg(self):
        m200 = 1e15
        c = 5
        z = 0.2
        nfw = NFW(m200, c, z, overdensity_type='mean')
        assert_almost_equal(nfw.radius_Delta(200).value, 3.708462946948883)

    def test_mean_crit_consistency(self):
        m200b = 1e15
        c = 5
        z = 0.3
        nfw = NFW(m200b, c, z, overdensity_type='mean')
        m200c = nfw.mass_Delta(200, overdensity_type='critical').value
        assert_almost_equal(m200c / 1e15, 2.062054316492159)

    def test_radius_Delta(self):
        m200 = 1e15
        c = 5.
        z = 0.3
        nfw = NFW(m200, c, z)
        r200 = nfw.radius_Delta(200)
        assert_almost_equal(r200.value, 1.8622494961043254)
        r500 = nfw.radius_Delta(500)
        assert_almost_equal(r500.value, 1.2310049155128235)
        r2500 = nfw.radius_Delta(2500)
assert_almost_equal(r2500.value, 0.5519730850580377)
numpy.testing.assert_almost_equal
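A brief sketch of numpy.testing.assert_almost_equal, the check used by the NFW tests above; the numeric values are arbitrary and chosen only to show a pass and a failure.

import numpy as np
from numpy.testing import assert_almost_equal

assert_almost_equal(0.5519730, 0.5519731, decimal=6)   # passes: agrees to 6 decimals
try:
    assert_almost_equal(0.55, 0.56, decimal=3)          # raises: differs at the 2nd decimal
except AssertionError:
    print("mismatch detected")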
import numpy as np import scipy.sparse as sp from math import log def MutualInfo(labels_true, labels_pred): """Normalized Mutual Information between two clusterings. This code is modified from sklearn. Parameters ---------- labels_true : int array, shape = [n_samples] A clustering of the data into disjoint subsets. labels_pred : array, shape = [n_samples] A clustering of the data into disjoint subsets. Returns ------- nmi : float Normalized Mutual information, a non-negative value """ contingency = contingency_matrix(labels_true, labels_pred) nzx, nzy, nz_val = sp.find(contingency) contingency_sum = contingency.sum() pi = np.ravel(contingency.sum(axis=1)) pj = np.ravel(contingency.sum(axis=0)) log_contingency_nm = np.log(nz_val) contingency_nm = nz_val / contingency_sum # Don't need to calculate the full outer product, just for non-zeroes outer = pi.take(nzx) * pj.take(nzy) log_outer = -np.log(outer) + log(pi.sum()) + log(pj.sum()) mi = (contingency_nm * (log_contingency_nm - log(contingency_sum)) + contingency_nm * log_outer) mi = mi.sum() h_true, h_pred = entropy(labels_true), entropy(labels_pred) nmi = mi / max(np.sqrt(h_true * h_pred), 1e-10) return nmi def contingency_matrix(labels_true, labels_pred): """Build a contingency matrix describing the relationship between labels. This code is extracted from sklearn. Parameters ---------- labels_true : int array, shape = [n_samples] Ground truth class labels to be used as a reference labels_pred : array, shape = [n_samples] Cluster labels to evaluate Returns ------- contingency : {array-like, sparse}, shape=[n_classes_true, n_classes_pred] Matrix :math:`C` such that :math:`C_{i, j}` is the number of samples in true class :math:`i` and in predicted class :math:`j`. If ``eps is None``, the dtype of this array will be integer. If ``eps`` is given, the dtype will be float. Will be a ``scipy.sparse.csr_matrix`` """ classes, class_idx = np.unique(labels_true, return_inverse=True) clusters, cluster_idx = np.unique(labels_pred, return_inverse=True) n_classes = classes.shape[0] n_clusters = clusters.shape[0] # Using coo_matrix to accelerate simple histogram calculation, # i.e. bins are consecutive integers # Currently, coo_matrix is faster than histogram2d for simple cases contingency = sp.coo_matrix((np.ones(class_idx.shape[0]), (class_idx, cluster_idx)), shape=(n_classes, n_clusters), dtype=np.int) contingency = contingency.tocsr() contingency.sum_duplicates() return contingency def entropy(labels): """Calculates the entropy for a labeling.""" if len(labels) == 0: return 1.0 label_idx =
np.unique(labels, return_inverse=True)
numpy.unique
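A minimal sketch of numpy.unique with return_inverse=True, as used above to map labels onto contiguous class indices; the label values are made up.

import numpy as np

labels = np.array([3, 1, 3, 2, 1])
classes, class_idx = np.unique(labels, return_inverse=True)
print(classes)       # [1 2 3]        sorted unique labels
print(class_idx)     # [2 0 2 1 0]    position of each original label within classes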
import pytest from numpy import allclose, arange, array, asarray, dot, cov, corrcoef, float64 from thunder.series.readers import fromlist, fromarray from thunder.images.readers import fromlist as img_fromlist pytestmark = pytest.mark.usefixtures("eng") def test_map(eng): data = fromlist([array([1, 2]), array([3, 4])], engine=eng) assert allclose(data.map(lambda x: x + 1).toarray(), [[2, 3], [4, 5]]) assert data.map(lambda x: 1.0*x, dtype=float64).dtype == float64 assert data.map(lambda x: 1.0*x).dtype == float64 def test_map_singletons(eng): data = fromlist([array([4, 5, 6, 7]), array([8, 9, 10, 11])], engine=eng) mapped = data.map(lambda x: x.mean()) assert mapped.shape == (2, 1) def test_filter(eng): data = fromlist([array([1, 2]), array([3, 4])], engine=eng) assert allclose(data.filter(lambda x: x.sum() > 3).toarray(), [3, 4]) def test_flatten(eng): arr = arange(2*2*5).reshape(2, 2, 5) data = fromarray(arr, engine=eng) assert data.flatten().shape == (4, 5) assert allclose(data.flatten().toarray(), arr.reshape(2*2, 5)) def test_sample(eng): data = fromlist([array([1, 5]), array([1, 10]), array([1, 15])], engine=eng) assert allclose(data.sample(3).shape, (3, 2)) assert allclose(data.filter(lambda x: x.max() > 10).sample(1).toarray(), [1, 15]) def test_between(eng): data = fromlist([array([4, 5, 6, 7]), array([8, 9, 10, 11])], engine=eng) val = data.between(0, 2) assert allclose(val.index, array([0, 1])) assert allclose(val.toarray(), array([[4, 5], [8, 9]])) def test_first(eng): data = fromlist([array([4, 5, 6, 7]), array([8, 9, 10, 11])], engine=eng) assert allclose(data.first(), [4, 5, 6, 7]) def test_select(eng): index = ['label1', 'label2', 'label3', 'label4'] data = fromlist([array([4, 5, 6, 7]), array([8, 9, 10, 11])], engine=eng, index=index) assert data.select('label1').shape == (2, 1) assert allclose(data.select('label1').toarray(), [4, 8]) assert allclose(data.select(['label1']).toarray(), [4, 8]) assert allclose(data.select(['label1', 'label2']).toarray(), array([[4, 5], [8, 9]])) assert data.select('label1').index == ['label1'] assert data.select(['label1']).index == ['label1'] def test_standardize_axis1(eng): data = fromlist([array([1, 2, 3, 4, 5])], engine=eng) centered = data.center(1) standardized = data.standardize(1) zscored = data.zscore(1) assert allclose(centered.toarray(), array([-2, -1, 0, 1, 2]), atol=1e-3) assert allclose(standardized.toarray(), array([0.70710, 1.41421, 2.12132, 2.82842, 3.53553]), atol=1e-3) assert allclose(zscored.toarray(), array([-1.41421, -0.70710, 0, 0.70710, 1.41421]), atol=1e-3) def test_standardize_axis0(eng): data = fromlist([array([1, 2]), array([3, 4])], engine=eng) centered = data.center(0) standardized = data.standardize(0) zscored = data.zscore(0) assert allclose(centered.toarray(), array([[-1, -1], [1, 1]]), atol=1e-3) assert allclose(standardized.toarray(), array([[1, 2], [3, 4]]), atol=1e-3) assert allclose(zscored.toarray(), array([[-1, -1], [1, 1]]), atol=1e-3) def test_squelch(eng): data = fromlist([array([1, 2]), array([3, 4])], engine=eng) squelched = data.squelch(5) assert allclose(squelched.toarray(), [[0, 0], [0, 0]]) squelched = data.squelch(3) assert allclose(squelched.toarray(), [[0, 0], [3, 4]]) squelched = data.squelch(1) assert allclose(squelched.toarray(), [[1, 2], [3, 4]]) def test_correlate(eng): data = fromlist([array([1, 2, 3, 4, 5])], engine=eng) sig = [4, 5, 6, 7, 8] corr = data.correlate(sig).toarray() assert allclose(corr, 1) sigs = [[4, 5, 6, 7, 8], [8, 7, 6, 5, 4]] corrs = data.correlate(sigs).toarray() 
assert allclose(corrs, [1, -1]) def test_correlate_multiindex(eng): index = [[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]] data = fromlist([array([1, 2, 3, 4, 5])], index=asarray(index).T, engine=eng) sig = [4, 5, 6, 7, 8] corr = data.correlate(sig).toarray() assert allclose(corr, 1) sigs = [[4, 5, 6, 7, 8], [8, 7, 6, 5, 4]] corrs = data.correlate(sigs).toarray() assert allclose(corrs, [1, -1]) def test_clip(eng): data = fromlist([array([1, 2, 3, 4, 5])], engine=eng) assert allclose(data.clip(2).toarray(), [2, 2, 3, 4, 5]) assert allclose(data.clip(2, 3).toarray(), [2, 2, 3, 3, 3]) def test_mean(eng): data = fromlist([arange(8), arange(8)], engine=eng) val = data.mean().toarray() expected = data.toarray().mean(axis=0) assert allclose(val, expected) assert str(val.dtype) == 'float64' def test_sum(eng): data = fromlist([arange(8), arange(8)], engine=eng) val = data.sum().toarray() expected = data.toarray().sum(axis=0) assert allclose(val, expected) assert str(val.dtype) == 'int64' def test_var(eng): data = fromlist([arange(8), arange(8)], engine=eng) val = data.var().toarray() expected = data.toarray().var(axis=0) assert allclose(val, expected) assert str(val.dtype) == 'float64' def test_std(eng): data = fromlist([arange(8), arange(8)], engine=eng) val = data.std().toarray() expected = data.toarray().std(axis=0) assert allclose(val, expected) assert str(val.dtype) == 'float64' def test_max(eng): data = fromlist([arange(8), arange(8)], engine=eng) val = data.max().toarray() expected = data.toarray().max(axis=0) assert allclose(val, expected) def test_min(eng): data = fromlist([arange(8), arange(8)], engine=eng) val = data.min().toarray() expected = data.toarray().min(axis=0) assert allclose(val, expected) def test_labels(eng): x = [array([0, 1]), array([2, 3]), array([4, 5]), array([6, 7])] data = fromlist(x, labels=[0, 1, 2, 3], engine=eng) assert allclose(data.filter(lambda x: x[0]>2).labels, array([2, 3])) assert allclose(data[2:].labels, array([2, 3])) assert allclose(data[1].labels, array([1])) assert allclose(data[1, :].labels, array([1])) assert allclose(data[[0, 2]].labels, array([0, 2])) assert allclose(data.flatten().labels, array([0, 1, 2, 3])) x = [array([[0, 1],[2, 3]]), array([[4, 5], [6, 7]])] data = img_fromlist(x, engine=eng).toseries() data.labels = [[0, 1], [2, 3]] assert allclose(data.filter(lambda x: x[0]>1).labels, array([2, 3])) assert allclose(data[0].labels, array([[0, 1]])) assert allclose(data[:, 0].labels, array([[0], [2]])) assert allclose(data.flatten().labels, array([0, 1, 2, 3])) def test_labels_setting(eng): x = [array([0, 1]), array([2, 3]), array([4, 5]), array([6, 7])] data = fromlist(x, engine=eng) with pytest.raises(ValueError): data.labels = [0, 1, 2] def test_index_setting(eng): data = fromlist([array([1, 2, 3]), array([2, 2, 4]), array([4, 2, 1])], engine=eng) assert allclose(data.index, array([0, 1, 2])) data.index = [3, 2, 1] assert allclose(data.index, [3, 2, 1]) with pytest.raises(ValueError): data.index = 5 with pytest.raises(ValueError): data.index = [1, 2] def test_select_by_index(eng): data = fromlist([arange(12)], index=[0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2], engine=eng) result = data.select_by_index(1) assert allclose(result.toarray(), array([4, 5, 6, 7])) assert allclose(result.index, array([1, 1, 1, 1])) result = data.select_by_index(1, squeeze=True) assert allclose(result.index, array([0, 1, 2, 3])) index = [ [0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1], [0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1], [0, 1, 0, 1, 2, 3, 0, 1, 0, 1, 2, 3] ] data.index = array(index).T 
result, mask = data.select_by_index(0, level=2, return_mask=True) assert allclose(result.toarray(), array([0, 2, 6, 8])) assert allclose(result.index, array([[0, 0, 0], [0, 1, 0], [1, 0, 0], [1, 1, 0]])) assert allclose(mask, array([1, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0])) result = data.select_by_index(0, level=2, squeeze=True) assert allclose(result.toarray(), array([0, 2, 6, 8])) assert allclose(result.index, array([[0, 0], [0, 1], [1, 0], [1, 1]])) result = data.select_by_index([1, 0], level=[0, 1]) assert allclose(result.toarray(), array([6, 7])) assert allclose(result.index, array([[1, 0, 0], [1, 0, 1]])) result = data.select_by_index(val=[0, [2,3]], level=[0, 2]) assert allclose(result.toarray(), array([4, 5])) assert allclose(result.index, array([[0, 1, 2], [0, 1, 3]])) result = data.select_by_index(1, level=1, filter=True) assert allclose(result.toarray(), array([0, 1, 6, 7])) assert allclose(result.index, array([[0, 0, 0], [0, 0, 1], [1, 0, 0], [1, 0, 1]])) def test_aggregate_by_index(eng): data = fromlist([arange(12)], index=[0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2], engine=eng) result = data.aggregate_by_index(sum) assert allclose(result.toarray(), array([6, 22, 38])) assert allclose(result.index, array([0, 1, 2])) index = [ [0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1], [0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1], [0, 1, 0, 1, 2, 3, 0, 1, 0, 1, 2, 3] ] data.index = array(index).T result = data.aggregate_by_index(sum, level=[0, 1]) assert allclose(result.toarray(), array([1, 14, 13, 38])) assert allclose(result.index, array([[0, 0], [0, 1], [1, 0], [1, 1]])) def test_stat_by_index(eng): data = fromlist([arange(12)], index=[0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2], engine=eng) assert allclose(data.stat_by_index('sum').toarray(), array([6, 22, 38])) assert allclose(data.stat_by_index('mean').toarray(), array([1.5, 5.5, 9.5])) assert allclose(data.stat_by_index('min').toarray(), array([0, 4, 8])) assert allclose(data.stat_by_index('max').toarray(), array([3, 7, 11])) assert allclose(data.stat_by_index('count').toarray(), array([4, 4, 4])) assert allclose(data.stat_by_index('median').toarray(), array([1.5, 5.5, 9.5])) assert allclose(data.sum_by_index().toarray(), array([6, 22, 38])) assert allclose(data.mean_by_index().toarray(), array([1.5, 5.5, 9.5])) assert allclose(data.min_by_index().toarray(), array([0, 4, 8])) assert allclose(data.max_by_index().toarray(), array([3, 7, 11])) assert allclose(data.count_by_index().toarray(), array([4, 4, 4])) assert allclose(data.median_by_index().toarray(), array([1.5, 5.5, 9.5])) def test_stat_by_index_multi(eng): index = [ [0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1], [0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1], [0, 1, 0, 1, 2, 3, 0, 1, 0, 1, 2, 3] ] data = fromlist([arange(12)], index=array(index).T, engine=eng) result = data.stat_by_index('sum', level=[0, 1]) assert allclose(result.toarray(), array([1, 14, 13, 38])) assert allclose(result.index, array([[0, 0], [0, 1], [1, 0], [1, 1]])) result = data.sum_by_index(level=[0, 1]) assert allclose(result.toarray(), array([1, 14, 13, 38])) assert allclose(result.index, array([[0, 0], [0, 1], [1, 0], [1, 1]])) def test_mean_by_panel(eng): data = fromlist([arange(8)], engine=eng) test1 = data.mean_by_panel(4) assert allclose(test1.index, array([0, 1, 2, 3])) assert allclose(test1.toarray(), [[2, 3, 4, 5]]) test2 = data.mean_by_panel(2) assert allclose(test2.index, array([0, 1])) assert allclose(test2.toarray(), [[3, 4]]) def test_times_array(eng): mat1raw = asarray([[1, 2, 3], [4, 5, 6]]) mat2 = asarray([[7, 8], [9, 10], [11, 12]]) mat1 = 
fromlist(mat1raw, engine=eng)
    truth =
dot(mat1raw, mat2)
numpy.dot
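A small sketch of the numpy.dot matrix product that the test above compares against; the matrices repeat the literal values from the test but are re-declared so the snippet runs standalone.

import numpy as np

mat1 = np.array([[1, 2, 3], [4, 5, 6]])
mat2 = np.array([[7, 8], [9, 10], [11, 12]])
truth = np.dot(mat1, mat2)                   # (2x3) @ (3x2) -> (2x2)
print(truth)                                 # [[ 58  64] [139 154]]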
import sys sys.path.insert(0,'/Volumes/PILON_HD2/fmueller/Documents/code/ImJoy_dev/rna-loc/') #%% Update system path to find pyfishquant import sys sys.path.insert(0,'/Volumes/PILON_HD2/fmueller/Documents/code/ImJoy_dev/rna-loc/') from rnaloc import LOCtoolbox #%% Test function with entire analysis workflow file_load = '/Volumes/PILON_HD2/fmueller/Documents/Data/ImJoy/rna-loc/CellCortexEnrichment/C1-img1__spots.txt' results_all = LOCtoolbox.process_file(FQ_file=file_load, bin_prop=(0,90,20), channels={'cells':'C3-'}, data_category={'roi':''}, annotation_extension='__RoiSet.zip', img_extension='.tif', show_plots = False, Zrange=(0,0), dZ = 0, plot_callback=None, progress_callback = None) #%% BUILD ENTIRE WORKFLOW import sys import matplotlib.pyplot as plt from scipy import ndimage import numpy as np import re import os # My Stuff import annotationImporter, maskGenerator import FQtoolbox if None: print('a') #%% READ ANNOTATION DATA importlib.reload(annotationImporter) importlib.reload(maskGenerator) # Load data with Folder importer folderImporter = annotationImporter.FolderImporter(channels={'cells':'C3-'}, data_category={'roi':''},annot_ext ='__RoiSet.zip') # Open folder path_open = '/Volumes/PILON_HD2/fmueller/Documents/Data/ImJoy/Segmentation__Dylan/2_ImJoy_membrane/img1/zstack_segmentation' annotDict = folderImporter.load(path_open) print('average roi size:', annotDict['roi_size']) # Generate binary masks for a selected data-set binaryGen = maskGenerator.BinaryMaskGenerator(erose_size=5, obj_size_rem=500, save_indiv=True) # The generate function uses as an input the sub-dictionary for one data-category and one channel annotatFiles = annotDict['roi']['cells'] maskDict = binaryGen.generate(annotatFiles) # Use a loop and the update function to add the mask dictionary to the loaded annotation dictonary\n", for k, v in annotatFiles.items(): v.update(maskDict[k]) ## Print edge mask corresponding to first iterator item data_key = next(iter(annotatFiles)) print(data_key) fig, (ax1 ,ax2) = plt.subplots(1,2) ax1.imshow(annotatFiles[data_key]['image']) ax2.imshow(annotatFiles[data_key]['mask_edge']) #%% What channel to analyze channel = 1 channel = 2 #%% Perform analysis ## Open FQ results file img_size = (960,960) if channel == 1: file_open = '/Volumes/PILON_HD2/fmueller/Documents/Data/ImJoy/Segmentation__Dylan/2_ImJoy_membrane/img1/C1-erm-1_GFP_GFP-525_oEO158-erm-1-610_oEO160-set-3-670_11_R3D_D3D__spots.txt' elif channel == 2: file_open = '/Volumes/PILON_HD2/fmueller/Documents/Data/ImJoy/Segmentation__Dylan/2_ImJoy_membrane/img1/C2-erm-1_GFP_GFP-525_oEO158-erm-1-610_oEO160-set-3-670_11_R3D_D3D__spots.txt' ## Prepare folder to save results drive, path_and_file = os.path.splitdrive(file_open) path, file = os.path.split(path_and_file) file_base, ext = os.path.splitext(file) path_save = os.path.join(path, '#analysis_MembraneDistance') if not os.path.isdir(path_save): os.makedirs(path_save) path_save_ch = os.path.join(path_save,'channel{}'.format(channel)) if not os.path.isdir(path_save_ch): os.makedirs(path_save_ch) fq_dict = FQtoolbox.read_FQ_matlab(file_open) spots_all = FQtoolbox.get_rna(fq_dict) ## Loop over all annotations # bins of histogram binsHist = np.arange(0,90,20) width = 0.8 * (binsHist[1] - binsHist[0]) center = (binsHist[:-1] + binsHist[1:]) / 2 # Other parameters for calculation Zrna = spots_all[:,[18]] dist_membr_RNA = np.array([]) dist_membr_pix = np.array([]) idx = 0 dZ = 2 # Show new dictonar for k, v in annotatFiles.items(): # Get Z coordinate m = 
re.search('.*__Z([0-9]*)\.tif',k) Zmask = int(m.group(1)) print(Zmask) Zloop = np.logical_and(Zrna <= Zmask + dZ,Zrna >= Zmask - dZ).flatten() spots_loop = spots_all[Zloop,:] spots_loop_XY = spots_loop[:,[16, 17]].astype(int) # Distance transform dist_membr = ndimage.distance_transform_edt(~v['mask_edge']) # Negate mask # Indices have to be inversed to access array dist_membr_RNA_loop = dist_membr[spots_loop_XY[:,0],spots_loop_XY[:,1]] # Get distance from membrane for all pixel in the cell mask_all = v['mask_fill'] + v['mask_edge'] dist_membr_pix_loop = dist_membr[mask_all] # Find min and max values for plotting pad = 10 indMaskAx0 = np.argwhere(mask_all.sum(axis=0)) minAx0 = indMaskAx0[0]-pad maxAx0 = indMaskAx0[-1]+pad indMaskAx1 = np.argwhere(mask_all.sum(axis=1)) minAx1 = indMaskAx1[0]-pad maxAx1 = indMaskAx1[-1]+pad # Save values if idx == 0: dist_membr_RNA = np.copy(dist_membr_RNA_loop) dist_membr_pix = np.copy(dist_membr_pix_loop) else: dist_membr_RNA = np.append(dist_membr_RNA,dist_membr_RNA_loop,axis=0) dist_membr_pix = np.append(dist_membr_pix,dist_membr_pix_loop,axis=0) idx+=1 #### Plot results # Set distance outside of cell to 0 for better plotting dist_membr_plot = np.copy(dist_membr) dist_membr_plot[np.logical_not(mask_all)] = 0 # Calculate histograms histRNA, bins = np.histogram(dist_membr_RNA_loop,binsHist ,density=False) histpix, bins = np.histogram(dist_membr_pix_loop,binsHist ,density=False) histRNAnorm = histRNA/histRNA.sum() histpixnorm = histpix/histpix.sum() histRNAnormPix = np.divide(histRNAnorm,histpixnorm) histRNAnormPix = np.nan_to_num(histRNAnormPix) # Generate plot fig1, ax = plt.subplots(2,3,num='C{}-Cell cortex analysis. Z={}'.format(channel,Zmask)) fig1.set_size_inches((13,6)) ax[0][0].imshow(v['image'],cmap="hot") ax[0][0].get_xaxis().set_visible(False) ax[0][0].get_yaxis().set_visible(False) ax[0][0].set_xlim(minAx0, maxAx0) ax[0][0].set_ylim(minAx1, maxAx1) ax[0][1].imshow(mask_all,cmap="hot") ax[0][1].get_xaxis().set_visible(False) ax[0][1].get_yaxis().set_visible(False) ax[0][1].set_xlim(minAx0, maxAx0) ax[0][1].set_ylim(minAx1, maxAx1) imgdum = ax[0][2].imshow(dist_membr_plot,cmap="hot") ax[0][2].set_xlim(minAx0, maxAx0) ax[0][2].set_ylim(minAx1, maxAx1) ax[0][2].get_xaxis().set_visible(False) ax[0][2].get_yaxis().set_visible(False) FQtoolbox.colorbar(imgdum) for kROI, vROI in v['roi'].items(): roi_pos = vROI['pos'] ax[0][2].plot(roi_pos[:,1],roi_pos[:,0],'b-') ax[0][2].scatter(spots_loop_XY[:,1],spots_loop_XY[:,0],color='g',s=4) ax[1][0].bar(center, histRNA, align='center', width=width) ax[1][0].set_xticks(center) ax[1][0].set_xticklabels(center.astype(int)) ax[1][0].set_xlabel('Distance from cell cortex [pixel]') ax[1][0].set_ylabel('# RNAs') ax[1][1].bar(center, histpix, align='center', width=width) ax[1][1].set_xticks(center) ax[1][1].set_xticklabels(center.astype(int)) ax[1][1].set_xlabel('Distance from cell cortex [pixel]') ax[1][1].set_ylabel('# pixels') ax[1][2].bar(center, histRNAnormPix, align='center', width=width) ax[1][2].set_xticks(center) ax[1][2].set_xticklabels(center.astype(int)) ax[1][2].set_xlabel('Distance from cell cortex [pixel]') ax[1][2].set_ylabel('Renormalized frequency') # Set titles ax[0][0].title.set_text('Cell cortex') ax[0][1].title.set_text('Cell mask') ax[0][2].title.set_text('Distance transform') ax[1][0].title.set_text('RNAs') ax[1][1].title.set_text('All pixel') ax[1][2].title.set_text('Renormalized RNA distance') plt.tight_layout() # Save path_save_ch = os.path.join(path_save,'channel{}'.format(channel)) file_save 
='C{}_Z{}.png'.format(channel,Zmask) plt.savefig(os.path.join(path_save_ch, file_save),dpi=200) plt.close() #### Save results if channel == 1: C1_RNAcounts = np.copy(dist_membr_RNA) C1_pixcounts =
np.copy(dist_membr_pix)
numpy.copy
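A minimal sketch of numpy.copy, the call that snapshots the per-channel distance arrays above; the values are illustrative.

import numpy as np

dist = np.array([0.0, 1.5, 3.2])
snapshot = np.copy(dist)                     # independent copy, not a view
dist[0] = 99.0
print(snapshot[0])                           # 0.0 -- unaffected by the later edit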