# -*- coding: utf-8 -*-
"""
Created on Thu May 28 19:31:48 2020
@author: Rushad
"""
import warnings
import numpy as np
from scipy import signal, polyval
import matplotlib.pyplot as plt
warnings.filterwarnings("ignore")
class TransferFunction():
'''
Define the Transfer Functions in standard form only.
'''
def __init__(self, num_coef, den_coef):
'''
Parameters
----------
num_coef : numpy array OR list
DESCRIPTION. Coefficients of the Transfer Function's numerator
den_coef : numpy array OR list
DESCRIPTION. Coefficients of the Transfer Function's denominator
Returns
-------
None.
'''
self.num_coef = np.array(num_coef)
self.den_coef = np.array(den_coef)
self.num_coef = self.num_coef.reshape([len(self.num_coef), 1])
self.den_coef = self.den_coef.reshape([len(self.den_coef), 1])
self.order = max(len(self.num_coef), len(self.den_coef)) - 1
def display(self):
'''
Displays TF block
'''
num_str = ""
for n in range(len(self.num_coef)):
if n < len(self.num_coef)-1: #if not last
if n != len(self.num_coef)-2: #if not second last
if self.num_coef[n] != 1 and self.num_coef[n] != 0: #if coef is not zero and one
num_str = num_str + str(float(self.num_coef[n])) + "*S^" + str(abs(n-len(self.num_coef)+1)) + " + "
elif self.num_coef[n] == 1: #if coef is one
num_str = num_str + "S^" + str(abs(n-len(self.num_coef)+1)) + " + "
elif self.num_coef[n] == 0: #if coef is zero
pass
else: #if second last
if self.num_coef[n] != 1 and self.num_coef[n] != 0: #if coef is not zero and one
num_str = num_str + str(float(self.num_coef[n])) + "*S" + " + "
elif self.num_coef[n] == 1: #if coef is one
num_str = num_str + "S" + " + "
elif self.num_coef[n] == 0: #if coef is zero
pass
else: #if last
if self.num_coef[n] != 0: #if coef is not zero
num_str = num_str + str(float(self.num_coef[n]))
elif self.num_coef[n] == 0: #if coef is zero
num_str = num_str[:-3]
den_str = ""
for d in range(len(self.den_coef)):
if d < len(self.den_coef)-1: #if not last
if d != len(self.den_coef)-2: #if not second last
if self.den_coef[d] != 1 and self.den_coef[d] != 0: #if coef not zero and one
den_str = den_str + str(float(self.den_coef[d])) + "*S^" + str(abs(d-len(self.den_coef)+1)) + " + "
elif self.den_coef[d] == 1: #if coef is one
den_str = den_str + "S^" + str(abs(d-len(self.den_coef)+1)) + " + "
elif self.den_coef[d] == 0: #if coef is zero
pass
else: #if second last
if self.den_coef[d] != 1 and self.den_coef[d] != 0: #if coef is not zero and one
den_str = den_str + str(float(self.den_coef[d])) + "*S" + " + "
elif self.den_coef[d] == 1: #if coef is one
den_str = den_str + "S" + " + "
elif self.den_coef[d] == 0: #if coef is zero
pass
else: #if last
if self.den_coef[d] != 0: #if coef is not zero
den_str = den_str + str(float(self.den_coef[d]))
elif self.den_coef[d] == 0: #if coef is zero
den_str = den_str[:-3]
div_line_len = max(len(num_str), len(den_str))
div_line = div_line_len*"-"
tf_disp = str(num_str + " \n" + div_line + " \n" + den_str)
print(tf_disp)
def parameters(self, settling_time_tolerance=0.02):
'''
Parameters
----------
settling_time_tolerance : float, optional
DESCRIPTION. Tolerance limit for error in settling time. The default is 0.02 (2%)
Returns
-------
parameter : dictionary
DESCRIPTION. Dictionary containing all the parameters/time domain specifications
'''
self.order = max(len(self.num_coef), len(self.den_coef)) - 1
self.settling_time_tolerance = settling_time_tolerance
if self.order == 1:
self.gain = float(self.num_coef[0])
self.time_constant = float(self.den_coef[0])
parameter = {"Order":self.order, "Gain":self.gain, "Time Constant":self.time_constant}
return parameter
elif self.order == 2:
self.gain = float(self.num_coef[0]/self.den_coef[2])
self.natural_frequency = float(np.sqrt(self.den_coef[2]))
self.damping_ratio = float(self.den_coef[1]/(2*self.natural_frequency))
self.damped_freq = self.natural_frequency*np.sqrt(abs(1 - self.damping_ratio**2))
self.phase_angle = float(np.arctan(np.sqrt(np.abs(1 - self.damping_ratio**2))/self.damping_ratio))
self.rise_time = float((np.pi - self.phase_angle)/(self.natural_frequency*np.sqrt(abs(1 - self.damping_ratio**2))))
self.peak_time = float(np.pi/(self.natural_frequency*np.sqrt((abs(1 - self.damping_ratio**2)))))
self.max_overshoot = float(np.exp((-self.damping_ratio*np.pi)/np.sqrt(abs(1 - self.damping_ratio**2)))*100)  # percent overshoot: 100*exp(-zeta*pi/sqrt(1 - zeta^2))
self.settling_time = float(-np.log(self.settling_time_tolerance*np.sqrt(abs(1 - self.damping_ratio**2)))/(self.damping_ratio*self.natural_frequency))
parameter = {"Order":self.order, "Gain":self.gain,"Natural Frequency":self.natural_frequency, "Damping Frequency":self.damped_freq, "Damping Ratio":self.damping_ratio, "Phase Angle":self.phase_angle, "Rise Time":self.rise_time, "Peak Time":self.peak_time, "Max Overshoot":self.max_overshoot, "Settling Time":self.settling_time}
return parameter
elif self.order > 2:
print("[WARNING] You have inputed a system of Order:" + str(max(len(self.num_coef), len(self.den_coef))-1) + ". Currently supports first and second order systems")
def response(self, input_type, time_period=10, sample_time=0.05, ret=False, show=True):
'''
Parameters
----------
input_type : string
DESCRIPTION. input signal type: impulse, step or ramp
time_period : integer, optional
DESCRIPTION. The time duration the signal is processed for. The default is 10.
sample_time : float, optional
DESCRIPTION. Sample time of the signal. The default is 0.05.
ret : bool, optional
DESCRIPTION. Set to True if the system's response is to be returned. The default is False.
show : bool, optional
DESCRIPTION. Set to True if the system's response is to be displayed. The default is True.
Returns
-------
resp : numpy array
DESCRIPTION. numpy array of the system's response. Only returned if ret is set to True.
'''
controller_time = np.array([i for i in np.arange(0, time_period, sample_time)])
input_resp = {"impulse":"impulse(self)", "step":"step(self)", "ramp":"ramp(self)"}
def impulse(self):
sys = signal.lti(self.num_coef.reshape(len(self.num_coef)), self.den_coef.reshape(len(self.den_coef)))
_,resp = signal.impulse(sys, T=controller_time)
return resp
def step(self):
sys = signal.lti(self.num_coef.reshape(len(self.num_coef)), self.den_coef.reshape(len(self.den_coef)))
_,resp = signal.step(sys, T=controller_time)
return resp
def ramp(self):
def ramp_order1(self):
resp = float(self.num_coef[0])*(float(-self.den_coef[0]) + controller_time + np.exp(-controller_time/float(self.den_coef[0])))
return resp
def ramp_order2(self):
natural_frequency = float(np.sqrt(self.den_coef[2]))
damping_ratio = float(self.den_coef[1]/(2*natural_frequency))
if 0 <= float(damping_ratio) < 1:
resp = (1/natural_frequency**2)*((controller_time + (np.exp(-damping_ratio*natural_frequency*controller_time)/natural_frequency)*((2*damping_ratio*np.cos(natural_frequency*np.sqrt(1 - damping_ratio**2)*controller_time)) + (((2*damping_ratio**2 -1)/np.sqrt(1 - damping_ratio**2))*np.sin(natural_frequency*np.sqrt(1 - damping_ratio**2)*controller_time))) - (2*damping_ratio/natural_frequency)))
elif float(damping_ratio) == 1:
resp = (1/natural_frequency**2)*(controller_time + ((2*np.exp(-natural_frequency*controller_time))/natural_frequency) + (controller_time*np.exp(-natural_frequency*controller_time)) - (2/natural_frequency))
elif float(damping_ratio) > 1:
resp = (1/damping_ratio**2)*(controller_time + (natural_frequency/(2*np.sqrt(np.abs(1 - damping_ratio**2))))*((((1/((damping_ratio*natural_frequency) - np.sqrt(np.abs(1 - damping_ratio**2))*natural_frequency))**2)*np.exp(-controller_time/(1/((damping_ratio*natural_frequency) - np.sqrt(np.abs(1 - damping_ratio**2))*natural_frequency)))) - (((1/((damping_ratio*natural_frequency) + np.sqrt(np.abs(1 - damping_ratio**2))*natural_frequency))**2)*(np.exp(-controller_time/(1/((damping_ratio*natural_frequency) + np.sqrt(np.abs(1 - damping_ratio**2))*natural_frequency)))))) - (2*damping_ratio/natural_frequency))
return resp
if self.order == 1:
resp = ramp_order1(self)
return resp
elif self.order == 2:
resp = float(self.num_coef[0]/self.den_coef[2])*ramp_order2(self)
return resp
elif self.order > 2:
print("[WARNING] You have inputed a system of Order:" + str(max(len(self.num_coef), len(self.den_coef))-1) + ". Ramp response currently supports first and second order systems")
resp = eval(input_resp[input_type])
if show == True:
plt.plot(controller_time, resp)
plt.show()
if ret == True:
return resp
def pzplot(self, ret=True):
'''
Plots Pole-Zero plot of the system
'''
if len(self.num_coef) >= 1:
self.zeros = np.roots(self.num_coef.reshape(len(self.num_coef)))
plt.plot(self.zeros.real, self.zeros.imag, "o", label="Zeros")
if len(self.den_coef) >= 1:
self.poles = np.roots(self.den_coef.reshape(len(self.den_coef)))
plt.plot(self.poles.real, self.poles.imag, "x", label="Poles")
plt.xlabel('Re')
plt.ylabel('Im')
plt.grid(True, which="both")
plt.legend()
plt.show()
if ret == True:
return self.poles, self.zeros
def stability(self):
'''
Returns
-------
state : String
Prints stability of the system
'''
if len(self.den_coef) >= 1:
poles = np.roots(self.den_coef.reshape(len(self.den_coef)))
poles_round = np.array(poles.imag, dtype="int")
if (poles.real < 0).all():
state = "System is Stable"
elif np.count_nonzero(poles_round) != len(poles_round) and (poles.real <= 0).all():
if np.sum(poles) == np.sum(np.unique(poles)):
state = "System is Marginally Stable"
else:
state = "System in Unstable"
else:
state = "System is Unstable"
return state
def convert2SS(self):
'''
Returns
-------
StateSpace object
DESCRIPTION. Converts the TransferFunction object to a StateSpace object
'''
A,B,C,D = signal.tf2ss(self.num_coef.reshape(-1), self.den_coef.reshape(-1))
self.state_space_rep = StateSpace(A,B,C,D)
return self.state_space_rep
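# Example usage (a minimal sketch, not part of the original library; the numbers are
# illustrative): build a second-order TF, inspect its time-domain parameters, plot a
# step response and the pole-zero map, then convert it to state space.
#
#   tf = TransferFunction([5], [1, 2, 5])     # 5 / (s^2 + 2*s + 5)
#   tf.display()
#   print(tf.parameters())                    # order, gain, damping ratio, rise time, ...
#   tf.response("step", time_period=10, sample_time=0.05)
#   poles, zeros = tf.pzplot()
#   print(tf.stability())                     # "System is Stable"
#   ss = tf.convert2SS()                      # equivalent StateSpace object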
class feedback(TransferFunction):
'''
Add feedback TF to open loop TF. Define in standard form only.
'''
def __init__(self, G, H=1.0, feedback_type="negative"):
'''
Parameters
----------
G : TransferFunction object
DESCRIPTION. TF the feedback is to be implemented on
H : TransferFunction object / integer / float, optional
DESCRIPTION. Feedback block. The default is 1 (unity feedback)
feedback_type : string, optional
DESCRIPTION. "negative" or "positive" feedback. The default is "negative".
Returns
-------
None.
'''
if type(H) == TransferFunction:
G_num = G.num_coef
G_num = G.num_coef.reshape(len(G_num))
G_den = G.den_coef
G_den = G.den_coef.reshape(len(G_den))
H_num = H.num_coef
H_num = H.num_coef.reshape(len(H_num))
H_den = H.den_coef
H_den = H.den_coef.reshape(len(H_den))
if feedback_type == "negative":
feedback_num = np.polymul(G_num, H_den)
feedback_den = np.polyadd(np.polymul(G_den, H_den), np.polymul(G_den, H_num))
elif feedback_type == "positive":
feedback_num = np.polymul(G_num, H_den)
feedback_den = np.polysub(np.polymul(G_den, H_den), np.polymul(G_num, H_num))  # G/(1-GH)
elif type(H) == float or type(H) == int:
num = G.num_coef
den = G.den_coef
if feedback_type == "negative":
feedback_den0 = float(den[0])
feedback_den1 = float(den[1])
feedback_den2 = float(den[2] + (num[-1]*H))
elif feedback_type == "positive":
feedback_den0 = float(den[0])
feedback_den1 = float(den[1])
feedback_den2 = float(den[2] - (num[-1]*H))
feedback_num = num
feedback_den = np.array([feedback_den0, feedback_den1, feedback_den2])
feedback_num = feedback_num.reshape([len(feedback_num), 1])
feedback_den = feedback_den.reshape([len(feedback_den), 1])
self.num_coef = feedback_num
self.den_coef = feedback_den
self.feedback_tf = TransferFunction(self.num_coef, self.den_coef)
self.order = self.feedback_tf.order
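# Example usage (a minimal sketch, not part of the original library): close a unity
# negative-feedback loop around G(s) = 5 / (s^2 + 2*s + 5), giving 5 / (s^2 + 2*s + 10).
#
#   G = TransferFunction([5], [1, 2, 5])
#   closed_loop = feedback(G)                 # H defaults to 1.0, negative feedback
#   closed_loop.feedback_tf.display()
#   closed_loop.feedback_tf.response("step")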
class PID():
'''
PID control on a TF
'''
def __init__(self, K_p, K_i, K_d, tf):
'''
Parameters
----------
K_p : float
DESCRIPTION. Proportional Gain
K_i : float
DESCRIPTION. Integral Gain
K_d : float
DESCRIPTION. Derivative Gain
tf : TransferFunction object
DESCRIPTION. TF on which the PID is to be implemented
Returns
-------
None.
'''
self.K_p = K_p
self.K_i = K_i
self.K_d = K_d
self.tf = tf
pid_num = [self.K_d, self.K_p, self.K_i]
pid_den = [1, 0]
num = tf.num_coef
den = tf.den_coef
tf_num = list(tf.num_coef.reshape(len(num),))
tf_den = list(tf.den_coef.reshape(len(den),))
num_diff = len(pid_num) - len(tf_num)
den_diff = len(pid_den) - len(tf_den)
try:
if len(tf_num) < len(pid_num):
temp_num = np.zeros(num_diff)
tf_num = np.concatenate((temp_num, tf_num))
elif len(tf_num) > len(pid_num):
temp_num = np.zeros(abs(num_diff))
pid_num = np.concatenate((temp_num, pid_num))
if len(tf_den) < len(pid_den):
temp_den = np.zeros(den_diff)
tf_den = np.concatenate((temp_den, tf_den))
elif len(tf_den) > len(pid_den):
temp_den = np.zeros(abs(den_diff))
pid_den = np.concatenate((temp_den, pid_den))
except ValueError:
pass
reduced_tf_num = np.polymul(np.array(tf_num), np.array(pid_num))
reduced_tf_den = np.polymul(np.array(tf_den), np.array(pid_den))
self.reduced_tf = TransferFunction(reduced_tf_num, reduced_tf_den)
def display(self):
'''
Displays the PID TF block
'''
num_str = str(self.K_d) + "*S^2 + " + str(self.K_p) + "*S + " + str(self.K_i)
den_str = round(len(num_str)/2)*" " + "S" + " "*round(len(num_str)/2)
div_line_len = max(len(num_str), len(den_str))
div_line = div_line_len*"-"
pid_tf_disp = str(num_str + " \n" + div_line + " \n" + den_str)
print(pid_tf_disp)
def response(self, input_type, time_period=10, sample_time=0.05, ret=False, show=True):
'''
Parameters
----------
input_type : string
DESCRIPTION. input signal type: impulse, step or ramp
time_period : integer, optional
DESCRIPTION. The time duration the signal is processed for. The default is 10.
sample_time : float, optional
DESCRIPTION. Sample time of the signal. The default is 0.05.
ret : bool, optional
DESCRIPTION. Set to True if the system's response is to be returned. The default is False.
show : bool, optional
DESCRIPTION. Set to True if the system's response is to be displayed. The default is True.
Returns
-------
resp : numpy array
DESCRIPTION. numpy array of the system's response. Only returned if ret is set to True.
'''
try:
resp = self.reduced_tf.response(input_type, time_period, sample_time, ret, show)
if ret == True:
return resp
except ValueError:
print("Improper transfer function. `num` is longer than `den`.")
def tune(self, input_type="step", set_point=1, num_itr=70, rate=0.00000000001, lambd=0.7):
'''
Parameters
----------
input_type : input signal type, optional
DESCRIPTION. The default is "step" input.
set_point : Optimal steady state value, optional
DESCRIPTION. The default is 1.
num_itr : number of iterations, optional
DESCRIPTION. The default is 70. Might have to adjust this to prevent the cost from increasing after decreasing.
rate : learning rate, optional
DESCRIPTION. The default is 0.00000000001.
lambd : regularization coefficient, optional
DESCRIPTION. The default is 0.7
Returns
-------
k : numpy array
DESCRIPTION. numpy array of Kp, Ki, Kd values
'''
np.random.seed(1)
k = np.random.random(3).reshape(3,1)
def red_tf():
pid_num = [k[2][0], k[0][0], k[1][0]]
pid_den = [1, 0]
num = self.tf.num_coef
den = self.tf.den_coef
tf_num = list(self.tf.num_coef.reshape(len(num),))
tf_den = list(self.tf.den_coef.reshape(len(den),))
num_diff = len(pid_num) - len(tf_num)
den_diff = len(pid_den) - len(tf_den)
try:
if len(tf_num) < len(pid_num):
temp_num = np.zeros(num_diff)
tf_num = np.concatenate((temp_num, tf_num))
elif len(tf_num) > len(pid_num):
temp_num = np.zeros(abs(num_diff))
pid_num = np.concatenate((temp_num, pid_num))
if len(tf_den) < len(pid_den):
temp_den = np.zeros(den_diff)
tf_den = np.concatenate((temp_den, tf_den))
elif len(tf_den) > len(pid_den):
temp_den = np.zeros(abs(den_diff))
pid_den = np.concatenate((temp_den, pid_den))
except ValueError:
pass
reduced_tf_num = np.polymul(np.array(tf_num), np.array(pid_num))
reduced_tf_den = np.polymul(np.array(tf_den), np.array(pid_den))
reduced_tf = TransferFunction(reduced_tf_num, reduced_tf_den)
resp = reduced_tf.response(input_type, ret=True, show=False)
return resp
costs = []
y_hat = red_tf()
m = len(y_hat)
for n in range(num_itr):
for s in range(1,m):
y_hat = red_tf()
y = np.zeros(m) + set_point
loss = (1/2)*((y_hat - y)**2)
cost = abs(-float((1/m)*np.sum(loss)) + (lambd/(2*m))*np.sum(k**2))
grad_kp = (y_hat + y)/(s*(polyval(self.tf.num_coef, s)/polyval(self.tf.den_coef, s))) + (lambd/(2*m))*2*k[0]
grad_ki = (y_hat + y)/(polyval(self.tf.num_coef, s)/polyval(self.tf.den_coef, s)) + (lambd/(2*m))*2*k[1]
grad_kd = (y_hat + y)/((s**2)*(polyval(self.tf.num_coef, s)/polyval(self.tf.den_coef, s))) + (lambd/(2*m))*2*k[2]
grads = np.array([grad_kp, grad_ki, grad_kd])
for i in range(m):
k = k - rate*grads
if n%10 == 0:
print(f"cost {n}: {np.squeeze(cost)}")
if n%20 == 0:
costs.append(cost)
k = k[:,-1]
self.K_p = k[0]
self.K_i = k[1]
self.K_d = k[2]
plt.plot(costs)
plt.show()
return k
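# Example usage (a minimal sketch, not part of the original library; the gains are
# illustrative, not tuned values): apply a PID block to a plant and look at the
# open-loop response of the combined transfer function.
#
#   plant = TransferFunction([1], [1, 10, 20])
#   controller = PID(K_p=350, K_i=300, K_d=50, tf=plant)
#   controller.display()                      # (50*S^2 + 350*S + 300) / S
#   controller.response("step")
#   # gains = controller.tune()               # rough gradient-descent tuning; may need
#   #                                         # num_itr/rate adjustments (see docstring)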
class reduce():
'''
Block Reduction
'''
def series(*tfs):
'''
Parameters
----------
*tfs : TransferFunction objects
DESCRIPTION. TF objects which need to be reduced
Returns
-------
tfs : TransferFunction object
DESCRIPTION. Reduced TransferFunction object
'''
tf_len = len(tfs)
def series_mul(prev_tf, next_tf):
tf_num = np.polymul(prev_tf.num_coef.reshape(len(prev_tf.num_coef)), next_tf.num_coef.reshape(len(next_tf.num_coef)))
tf_den = np.polymul(prev_tf.den_coef.reshape(len(prev_tf.den_coef)), next_tf.den_coef.reshape(len(next_tf.den_coef)))
return tf_num, tf_den
for i in range(tf_len-1):
num, den = series_mul(tfs[0], tfs[1])
tf = TransferFunction(num, den)
tfs = (tf,) + tfs[2:]
tfs = tfs[0]
return tfs
def parallel(*tfs):
tf_len = len(tfs)
def para_mul(prev_tf, next_tf):
tf_num = np.add(np.polymul(prev_tf.num_coef.reshape(len(prev_tf.num_coef)), next_tf.den_coef.reshape(len(next_tf.den_coef))), np.polymul(prev_tf.den_coef.reshape(len(prev_tf.den_coef)), next_tf.num_coef.reshape(len(next_tf.num_coef))))
tf_den = np.polymul(prev_tf.den_coef.reshape(len(prev_tf.den_coef)), next_tf.den_coef.reshape(len(next_tf.den_coef)))
return tf_num, tf_den
for i in range(tf_len-1):
num, den = para_mul(tfs[0], tfs[1])
tf = TransferFunction(num, den)
tfs = (tf,) + tfs[2:]
tfs = tfs[0]
return tfs
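# Example usage (a minimal sketch, not part of the original library): reduce two
# first-order blocks in series and in parallel.
#
#   g1 = TransferFunction([1], [1, 2])
#   g2 = TransferFunction([1], [1, 3])
#   g_series = reduce.series(g1, g2)          # 1 / (s^2 + 5*s + 6)
#   g_parallel = reduce.parallel(g1, g2)      # (2*s + 5) / (s^2 + 5*s + 6)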
class StateSpace():
'''
State Space Representation
'''
def __init__(self, A, B, C, D):
'''
Parameters
----------
A : numpy matrix
DESCRIPTION. A matrix of the State Space system
B : numpy matrix
DESCRIPTION. B matrix of the State Space system
C : numpy matrix
DESCRIPTION. C matrix of the State Space system
D : numpy matrix
DESCRIPTION. D matrix of the State Space system
Returns
-------
None.
'''
self.A = np.array(A)
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# Copyright 2019 by ShabaniPy Authors, see AUTHORS for more details.
#
# Distributed under the terms of the MIT license.
#
# The full license is in the file LICENCE, distributed with this software.
# -----------------------------------------------------------------------------
"""Weak antilocalisation analysis routines.
The routines in this module assume that no linear Dresselhaus term is present.
"""
import numpy as np
from numba import njit
from scipy.special import digamma
def compute_wal_conductance_difference(field, dephasing_field, linear_soi,
cubic_soi, low_field_reference,
series_truncation=5000):
"""Variation in the conductance induced by SOI.
The calculation is extracted from:
<NAME> et al., Weak Antilocalization and Spin Precession in Quantum Wells.
Physical Review B 53, 3912–3924 (1996).
We use formulas 37 and 38, in which we consider no linear Dresselhaus term.
Parameters
----------
field : float | np.array
Magnetic field at which to compute the conductance.
dephasing_field : float
Dephasing field used in the model. The unit should be the same as the one
used to express the magnetic field.
linear_soi : float
Field describing the contribution of the linear term in the spin-orbit
coupling. The unit should be the same as the one used to express the
magnetic field.
cubic_soi : float
Field describing the contribution of the cubic term arising from
Dresselhaus SOI. The unit should be the same as the one used to express
the magnetic field.
low_field_reference : float
Reference field at which to compute the conductance difference.
series_truncation : int, optional
Last term of the series involved in the conductance expression to
compute (the default is 5000)
Returns
-------
delta_sigma : float | np.ndarray
Difference between the conductance at the reference field and the
conductance at the specified field. The conductance is expressed in
term of the quantum of conductance.
"""
# Field-dependent fitting parameters
dephasing_r = np.abs(dephasing_field / field)
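# Example usage (a minimal sketch, not part of the original module; the field values
# are illustrative assumptions, all expressed in the same unit as `field`):
#
#   fields = np.linspace(1e-4, 0.1, 200)
#   delta_sigma = compute_wal_conductance_difference(
#       fields, dephasing_field=0.01, linear_soi=0.02,
#       cubic_soi=0.001, low_field_reference=1e-4)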
import numpy as np
import pandas as pd
from sklearn.metrics import (
accuracy_score,
f1_score,
precision_score,
recall_score,
roc_auc_score,
precision_recall_curve,
auc,
mean_squared_error,
mean_absolute_error,
r2_score,
)
from math import sqrt
def root_mean_squared_error(y_true, preds):
return sqrt(mean_squared_error(y_true, preds))
def mean_absolute_percentage_error(y_true, y_pred):
df = pd.DataFrame({"y_true": y_true, "y_pred": y_pred})
df = df[df["y_true"] != 0].copy(deep=True)
y_true = df["y_true"]
y_pred = df["y_pred"]
return np.mean(np.abs((y_true - y_pred) / y_true)) * 100
def precision_recall_auc(y_true=None, pred_probabs=None):
clf_precision, clf_recall, _ = precision_recall_curve(y_true, pred_probabs)
score = auc(clf_recall, clf_precision)
return score
def init_model_metrics(metrics=[]):
"""
Function to init dictionary that stores metric functions and metric scores
:param metrics: list of strings for metrics to store in dictionary
:return: dictionary with paired <metric>_func and <metric>_scores entries
"""
metric_dictionary = {}
# Classification Metrics
if "accuracy" in metrics:
metric_dictionary["accuracy_func"] = accuracy_score
metric_dictionary["accuracy_scores"] = np.array([])
if "f1" in metrics:
metric_dictionary["f1_func"] = f1_score
metric_dictionary["f1_scores"] = np.array([])
if "precision" in metrics:
metric_dictionary["precision_func"] = precision_score
metric_dictionary["precision_scores"] = np.array([])
if "recall" in metrics:
metric_dictionary["recall_func"] = recall_score
metric_dictionary["recall_scores"] = np.array([])
if "roc_auc" in metrics:
metric_dictionary["roc_auc_func"] = roc_auc_score
metric_dictionary["roc_auc_scores"] = np.array([])
if "precision_recall_auc" in metrics:
metric_dictionary["precision_recall_auc_func"] = precision_recall_auc
metric_dictionary["precision_recall_auc_scores"] = | np.array([]) | numpy.array |
import unittest
import qteasy as qt
import pandas as pd
from pandas import Timestamp
import numpy as np
from numpy import int64
import itertools
import datetime
from qteasy.utilfuncs import list_to_str_format, regulate_date_format, time_str_format, str_to_list
from qteasy.utilfuncs import maybe_trade_day, is_market_trade_day, prev_trade_day, next_trade_day, prev_market_trade_day
from qteasy.utilfuncs import next_market_trade_day
from qteasy.space import Space, Axis, space_around_centre, ResultPool
from qteasy.core import apply_loop
from qteasy.built_in import SelectingFinanceIndicator
from qteasy.history import stack_dataframes
from qteasy.tsfuncs import income, indicators, name_change, get_bar
from qteasy.tsfuncs import stock_basic, trade_calendar, new_share, get_index
from qteasy.tsfuncs import balance, cashflow, top_list, index_indicators, composite
from qteasy.tsfuncs import future_basic, future_daily, options_basic, options_daily
from qteasy.tsfuncs import fund_basic, fund_net_value, index_basic
from qteasy.evaluate import eval_alpha, eval_benchmark, eval_beta, eval_fv
from qteasy.evaluate import eval_info_ratio, eval_max_drawdown, eval_sharp
from qteasy.evaluate import eval_volatility
from qteasy.tafuncs import bbands, dema, ema, ht, kama, ma, mama, mavp, mid_point
from qteasy.tafuncs import mid_price, sar, sarext, sma, t3, tema, trima, wma, adx, adxr
from qteasy.tafuncs import apo, bop, cci, cmo, dx, macd, macdext, aroon, aroonosc
from qteasy.tafuncs import macdfix, mfi, minus_di, minus_dm, mom, plus_di, plus_dm
from qteasy.tafuncs import ppo, roc, rocp, rocr, rocr100, rsi, stoch, stochf, stochrsi
from qteasy.tafuncs import trix, ultosc, willr, ad, adosc, obv, atr, natr, trange
from qteasy.tafuncs import avgprice, medprice, typprice, wclprice, ht_dcperiod
from qteasy.tafuncs import ht_dcphase, ht_phasor, ht_sine, ht_trendmode, cdl2crows
from qteasy.tafuncs import cdl3blackcrows, cdl3inside, cdl3linestrike, cdl3outside
from qteasy.tafuncs import cdl3starsinsouth, cdl3whitesoldiers, cdlabandonedbaby
from qteasy.tafuncs import cdladvanceblock, cdlbelthold, cdlbreakaway, cdlclosingmarubozu
from qteasy.tafuncs import cdlconcealbabyswall, cdlcounterattack, cdldarkcloudcover
from qteasy.tafuncs import cdldoji, cdldojistar, cdldragonflydoji, cdlengulfing
from qteasy.tafuncs import cdleveningdojistar, cdleveningstar, cdlgapsidesidewhite
from qteasy.tafuncs import cdlgravestonedoji, cdlhammer, cdlhangingman, cdlharami
from qteasy.tafuncs import cdlharamicross, cdlhighwave, cdlhikkake, cdlhikkakemod
from qteasy.tafuncs import cdlhomingpigeon, cdlidentical3crows, cdlinneck
from qteasy.tafuncs import cdlinvertedhammer, cdlkicking, cdlkickingbylength
from qteasy.tafuncs import cdlladderbottom, cdllongleggeddoji, cdllongline, cdlmarubozu
from qteasy.tafuncs import cdlmatchinglow, cdlmathold, cdlmorningdojistar, cdlmorningstar
from qteasy.tafuncs import cdlonneck, cdlpiercing, cdlrickshawman, cdlrisefall3methods
from qteasy.tafuncs import cdlseparatinglines, cdlshootingstar, cdlshortline, cdlspinningtop
from qteasy.tafuncs import cdlstalledpattern, cdlsticksandwich, cdltakuri, cdltasukigap
from qteasy.tafuncs import cdlthrusting, cdltristar, cdlunique3river, cdlupsidegap2crows
from qteasy.tafuncs import cdlxsidegap3methods, beta, correl, linearreg, linearreg_angle
from qteasy.tafuncs import linearreg_intercept, linearreg_slope, stddev, tsf, var, acos
from qteasy.tafuncs import asin, atan, ceil, cos, cosh, exp, floor, ln, log10, sin, sinh
from qteasy.tafuncs import sqrt, tan, tanh, add, div, max, maxindex, min, minindex, minmax
from qteasy.tafuncs import minmaxindex, mult, sub, sum
from qteasy.history import get_financial_report_type_raw_data, get_price_type_raw_data
from qteasy.database import DataSource
from qteasy._arg_validators import _parse_string_kwargs, _valid_qt_kwargs
class TestCost(unittest.TestCase):
def setUp(self):
self.amounts = np.array([10000, 20000, 10000])
self.op = np.array([0, 1, -0.33333333])
self.prices = np.array([10, 20, 10])
self.r = qt.Cost()
def test_rate_creation(self):
print('testing rates objects\n')
self.assertIsInstance(self.r, qt.Cost, 'Type should be Rate')
def test_rate_operations(self):
self.assertEqual(self.r['buy_fix'], 0.0, 'Item got is incorrect')
self.assertEqual(self.r['sell_fix'], 0.0, 'Item got is wrong')
self.assertEqual(self.r['buy_rate'], 0.003, 'Item got is incorrect')
self.assertEqual(self.r['sell_rate'], 0.001, 'Item got is incorrect')
self.assertEqual(self.r['buy_min'], 5., 'Item got is incorrect')
self.assertEqual(self.r['sell_min'], 0.0, 'Item got is incorrect')
self.assertEqual(self.r['slipage'], 0.0, 'Item got is incorrect')
self.assertEqual(np.allclose(self.r(self.amounts), [0.003, 0.003, 0.003]), True, 'fee calculation wrong')
def test_rate_fee(self):
self.r.buy_rate = 0.003
self.r.sell_rate = 0.001
self.r.buy_fix = 0
self.r.sell_fix = 0
self.r.buy_min = 0
self.r.sell_min = 0
self.r.slipage = 0
print('\nSell result with fixed rate = 0.001 and moq = 0:')
print(self.r.get_selling_result(self.prices, self.op, self.amounts))
test_rate_fee_result = self.r.get_selling_result(self.prices, self.op, self.amounts)
self.assertIs(np.allclose(test_rate_fee_result[0], [0., 0., -3333.3333]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_fee_result[1], 33299.999667, msg='result incorrect')
self.assertAlmostEqual(test_rate_fee_result[2], 33.333332999999996, msg='result incorrect')
print('\nSell result with fixed rate = 0.001 and moq = 1:')
print(self.r.get_selling_result(self.prices, self.op, self.amounts, 1))
test_rate_fee_result = self.r.get_selling_result(self.prices, self.op, self.amounts, 1)
self.assertIs(np.allclose(test_rate_fee_result[0], [0., 0., -3333]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_fee_result[1], 33296.67, msg='result incorrect')
self.assertAlmostEqual(test_rate_fee_result[2], 33.33, msg='result incorrect')
print('\nSell result with fixed rate = 0.001 and moq = 100:')
print(self.r.get_selling_result(self.prices, self.op, self.amounts, 100))
test_rate_fee_result = self.r.get_selling_result(self.prices, self.op, self.amounts, 100)
self.assertIs(np.allclose(test_rate_fee_result[0], [0., 0., -3300]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_fee_result[1], 32967.0, msg='result incorrect')
self.assertAlmostEqual(test_rate_fee_result[2], 33, msg='result incorrect')
print('\nPurchase result with fixed rate = 0.003 and moq = 0:')
print(self.r.get_purchase_result(self.prices, self.op, self.amounts, 0))
test_rate_fee_result = self.r.get_purchase_result(self.prices, self.op, self.amounts, 0)
self.assertIs(np.allclose(test_rate_fee_result[0], [0., 997.00897308, 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_fee_result[1], -20000.0, msg='result incorrect')
self.assertAlmostEqual(test_rate_fee_result[2], 59.82053838484547, msg='result incorrect')
print('\nPurchase result with fixed rate = 0.003 and moq = 1:')
print(self.r.get_purchase_result(self.prices, self.op, self.amounts, 1))
test_rate_fee_result = self.r.get_purchase_result(self.prices, self.op, self.amounts, 1)
self.assertIs(np.allclose(test_rate_fee_result[0], [0., 997., 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_fee_result[1], -19999.82, msg='result incorrect')
self.assertAlmostEqual(test_rate_fee_result[2], 59.82, msg='result incorrect')
print('\nPurchase result with fixed rate = 0.003 and moq = 100:')
print(self.r.get_purchase_result(self.prices, self.op, self.amounts, 100))
test_rate_fee_result = self.r.get_purchase_result(self.prices, self.op, self.amounts, 100)
self.assertIs(np.allclose(test_rate_fee_result[0], [0., 900., 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_fee_result[1], -18054., msg='result incorrect')
self.assertAlmostEqual(test_rate_fee_result[2], 54.0, msg='result incorrect')
def test_min_fee(self):
self.r.buy_rate = 0.
self.r.sell_rate = 0.
self.r.buy_fix = 0.
self.r.sell_fix = 0.
self.r.buy_min = 300
self.r.sell_min = 300
self.r.slipage = 0.
print('\npurchase result with fixed cost rate with min fee = 300 and moq = 0:')
print(self.r.get_purchase_result(self.prices, self.op, self.amounts, 0))
test_min_fee_result = self.r.get_purchase_result(self.prices, self.op, self.amounts, 0)
self.assertIs(np.allclose(test_min_fee_result[0], [0., 985, 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_min_fee_result[1], -20000.0, msg='result incorrect')
self.assertAlmostEqual(test_min_fee_result[2], 300.0, msg='result incorrect')
print('\npurchase result with fixed cost rate with min fee = 300 and moq = 10:')
print(self.r.get_purchase_result(self.prices, self.op, self.amounts, 10))
test_min_fee_result = self.r.get_purchase_result(self.prices, self.op, self.amounts, 10)
self.assertIs(np.allclose(test_min_fee_result[0], [0., 980, 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_min_fee_result[1], -19900.0, msg='result incorrect')
self.assertAlmostEqual(test_min_fee_result[2], 300.0, msg='result incorrect')
print('\npurchase result with fixed cost rate with min fee = 300 and moq = 100:')
print(self.r.get_purchase_result(self.prices, self.op, self.amounts, 100))
test_min_fee_result = self.r.get_purchase_result(self.prices, self.op, self.amounts, 100)
self.assertIs(np.allclose(test_min_fee_result[0], [0., 900, 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_min_fee_result[1], -18300.0, msg='result incorrect')
self.assertAlmostEqual(test_min_fee_result[2], 300.0, msg='result incorrect')
print('\nselling result with fixed cost rate with min fee = 300 and moq = 0:')
print(self.r.get_selling_result(self.prices, self.op, self.amounts))
test_min_fee_result = self.r.get_selling_result(self.prices, self.op, self.amounts)
self.assertIs(np.allclose(test_min_fee_result[0], [0, 0, -3333.3333]), True, 'result incorrect')
self.assertAlmostEqual(test_min_fee_result[1], 33033.333)
self.assertAlmostEqual(test_min_fee_result[2], 300.0)
print('\nselling result with fixed cost rate with min fee = 300 and moq = 1:')
print(self.r.get_selling_result(self.prices, self.op, self.amounts, 1))
test_min_fee_result = self.r.get_selling_result(self.prices, self.op, self.amounts, 1)
self.assertIs(np.allclose(test_min_fee_result[0], [0, 0, -3333]), True, 'result incorrect')
self.assertAlmostEqual(test_min_fee_result[1], 33030)
self.assertAlmostEqual(test_min_fee_result[2], 300.0)
print('\nselling result with fixed cost rate with min fee = 300 and moq = 100:')
print(self.r.get_selling_result(self.prices, self.op, self.amounts, 100))
test_min_fee_result = self.r.get_selling_result(self.prices, self.op, self.amounts, 100)
self.assertIs(np.allclose(test_min_fee_result[0], [0, 0, -3300]), True, 'result incorrect')
self.assertAlmostEqual(test_min_fee_result[1], 32700)
self.assertAlmostEqual(test_min_fee_result[2], 300.0)
def test_rate_with_min(self):
"""Test transaction cost calculated by rate with min_fee"""
self.r.buy_rate = 0.0153
self.r.sell_rate = 0.01
self.r.buy_fix = 0.
self.r.sell_fix = 0.
self.r.buy_min = 300
self.r.sell_min = 333
self.r.slipage = 0.
print('\npurchase result with fixed cost rate with buy_rate = 0.0153, min fee = 300 and moq = 0:')
print(self.r.get_purchase_result(self.prices, self.op, self.amounts, 0))
test_rate_with_min_result = self.r.get_purchase_result(self.prices, self.op, self.amounts, 0)
self.assertIs(np.allclose(test_rate_with_min_result[0], [0., 984.9305624, 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_with_min_result[1], -20000.0, msg='result incorrect')
self.assertAlmostEqual(test_rate_with_min_result[2], 301.3887520929774, msg='result incorrect')
print('\npurchase result with fixed cost rate with buy_rate = 0.0153, min fee = 300 and moq = 10:')
print(self.r.get_purchase_result(self.prices, self.op, self.amounts, 10))
test_rate_with_min_result = self.r.get_purchase_result(self.prices, self.op, self.amounts, 10)
self.assertIs(np.allclose(test_rate_with_min_result[0], [0., 980, 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_with_min_result[1], -19900.0, msg='result incorrect')
self.assertAlmostEqual(test_rate_with_min_result[2], 300.0, msg='result incorrect')
print('\npurchase result with fixed cost rate with buy_rate = 0.0153, min fee = 300 and moq = 100:')
print(self.r.get_purchase_result(self.prices, self.op, self.amounts, 100))
test_rate_with_min_result = self.r.get_purchase_result(self.prices, self.op, self.amounts, 100)
self.assertIs(np.allclose(test_rate_with_min_result[0], [0., 900, 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_with_min_result[1], -18300.0, msg='result incorrect')
self.assertAlmostEqual(test_rate_with_min_result[2], 300.0, msg='result incorrect')
print('\nselling result with fixed cost rate with sell_rate = 0.01, min fee = 333 and moq = 0:')
print(self.r.get_selling_result(self.prices, self.op, self.amounts))
test_rate_with_min_result = self.r.get_selling_result(self.prices, self.op, self.amounts)
self.assertIs(np.allclose(test_rate_with_min_result[0], [0, 0, -3333.3333]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_with_min_result[1], 32999.99967)
self.assertAlmostEqual(test_rate_with_min_result[2], 333.33333)
print('\nselling result with fixed cost rate with sell_rate = 0.01, min fee = 333 and moq = 1:')
print(self.r.get_selling_result(self.prices, self.op, self.amounts, 1))
test_rate_with_min_result = self.r.get_selling_result(self.prices, self.op, self.amounts, 1)
self.assertIs(np.allclose(test_rate_with_min_result[0], [0, 0, -3333]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_with_min_result[1], 32996.7)
self.assertAlmostEqual(test_rate_with_min_result[2], 333.3)
print('\nselling result with fixed cost rate with sell_rate = 0.01, min fee = 333 and moq = 100:')
print(self.r.get_selling_result(self.prices, self.op, self.amounts, 100))
test_rate_with_min_result = self.r.get_selling_result(self.prices, self.op, self.amounts, 100)
self.assertIs(np.allclose(test_rate_with_min_result[0], [0, 0, -3300]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_with_min_result[1], 32667.0)
self.assertAlmostEqual(test_rate_with_min_result[2], 333.0)
def test_fixed_fee(self):
self.r.buy_rate = 0.
self.r.sell_rate = 0.
self.r.buy_fix = 200
self.r.sell_fix = 150
self.r.buy_min = 0
self.r.sell_min = 0
self.r.slipage = 0
print('\nselling result of fixed cost with fixed fee = 150 and moq=0:')
print(self.r.get_selling_result(self.prices, self.op, self.amounts, 0))
test_fixed_fee_result = self.r.get_selling_result(self.prices, self.op, self.amounts)
self.assertIs(np.allclose(test_fixed_fee_result[0], [0, 0, -3333.3333]), True, 'result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[1], 33183.333, msg='result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[2], 150.0, msg='result incorrect')
print('\nselling result of fixed cost with fixed fee = 150 and moq=100:')
print(self.r.get_selling_result(self.prices, self.op, self.amounts, 100))
test_fixed_fee_result = self.r.get_selling_result(self.prices, self.op, self.amounts, 100)
self.assertIs(np.allclose(test_fixed_fee_result[0], [0, 0, -3300.]), True,
f'result incorrect, {test_fixed_fee_result[0]} does not equal to [0,0,-3400]')
self.assertAlmostEqual(test_fixed_fee_result[1], 32850., msg='result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[2], 150., msg='result incorrect')
print('\npurchase result of fixed cost with fixed fee = 200:')
print(self.r.get_purchase_result(self.prices, self.op, self.amounts, 0))
test_fixed_fee_result = self.r.get_purchase_result(self.prices, self.op, self.amounts, 0)
self.assertIs(np.allclose(test_fixed_fee_result[0], [0., 990., 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[1], -20000.0, msg='result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[2], 200.0, msg='result incorrect')
print('\npurchase result of fixed cost with fixed fee = 200:')
print(self.r.get_purchase_result(self.prices, self.op, self.amounts, 100))
test_fixed_fee_result = self.r.get_purchase_result(self.prices, self.op, self.amounts, 100)
self.assertIs(np.allclose(test_fixed_fee_result[0], [0., 900., 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[1], -18200.0, msg='result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[2], 200.0, msg='result incorrect')
def test_slipage(self):
self.r.buy_fix = 0
self.r.sell_fix = 0
self.r.buy_min = 0
self.r.sell_min = 0
self.r.buy_rate = 0.003
self.r.sell_rate = 0.001
self.r.slipage = 1E-9
print('\npurchase result of fixed rate = 0.003 and slipage = 1E-10 and moq = 0:')
print(self.r.get_purchase_result(self.prices, self.op, self.amounts, 0))
print('\npurchase result of fixed rate = 0.003 and slipage = 1E-10 and moq = 100:')
print(self.r.get_purchase_result(self.prices, self.op, self.amounts, 100))
print('\nselling result with fixed rate = 0.001 and slipage = 1E-10:')
print(self.r.get_selling_result(self.prices, self.op, self.amounts))
test_fixed_fee_result = self.r.get_selling_result(self.prices, self.op, self.amounts)
self.assertIs(np.allclose(test_fixed_fee_result[0], [0, 0, -3333.3333]), True,
f'{test_fixed_fee_result[0]} does not equal to [0, 0, -10000]')
self.assertAlmostEqual(test_fixed_fee_result[1], 33298.88855591,
msg=f'{test_fixed_fee_result[1]} does not equal to 99890.')
self.assertAlmostEqual(test_fixed_fee_result[2], 34.44444409,
msg=f'{test_fixed_fee_result[2]} does not equal to -36.666663.')
test_fixed_fee_result = self.r.get_purchase_result(self.prices, self.op, self.amounts, 0)
self.assertIs(np.allclose(test_fixed_fee_result[0], [0., 996.98909294, 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[1], -20000.0, msg='result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[2], 60.21814121353513, msg='result incorrect')
test_fixed_fee_result = self.r.get_purchase_result(self.prices, self.op, self.amounts, 100)
self.assertIs(np.allclose(test_fixed_fee_result[0], [0., 900., 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[1], -18054.36, msg='result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[2], 54.36, msg='result incorrect')
class TestSpace(unittest.TestCase):
def test_creation(self):
"""
test if creation of space object is fine
"""
# first group of inputs, output Space with two discr axis from [0,10]
print('testing space objects\n')
# pars_list = [[(0, 10), (0, 10)],
# [[0, 10], [0, 10]]]
#
# types_list = ['discr',
# ['discr', 'discr']]
#
# input_pars = itertools.product(pars_list, types_list)
# for p in input_pars:
# # print(p)
# s = qt.Space(*p)
# b = s.boes
# t = s.types
# # print(s, t)
# self.assertIsInstance(s, qt.Space)
# self.assertEqual(b, [(0, 10), (0, 10)], 'boes incorrect!')
# self.assertEqual(t, ['discr', 'discr'], 'types incorrect')
#
pars_list = [[(0, 10), (0, 10)],
[[0, 10], [0, 10]]]
types_list = ['foo, bar',
['foo', 'bar']]
input_pars = itertools.product(pars_list, types_list)
for p in input_pars:
# print(p)
s = Space(*p)
b = s.boes
t = s.types
# print(s, t)
self.assertEqual(b, [(0, 10), (0, 10)], 'boes incorrect!')
self.assertEqual(t, ['enum', 'enum'], 'types incorrect')
pars_list = [[(0, 10), (0, 10)],
[[0, 10], [0, 10]]]
types_list = [['discr', 'foobar']]
input_pars = itertools.product(pars_list, types_list)
for p in input_pars:
# print(p)
s = Space(*p)
b = s.boes
t = s.types
# print(s, t)
self.assertEqual(b, [(0, 10), (0, 10)], 'boes incorrect!')
self.assertEqual(t, ['discr', 'enum'], 'types incorrect')
pars_list = [(0., 10), (0, 10)]
s = Space(pars=pars_list, par_types=None)
self.assertEqual(s.types, ['conti', 'discr'])
self.assertEqual(s.dim, 2)
self.assertEqual(s.size, (10.0, 11))
self.assertEqual(s.shape, (np.inf, 11))
self.assertEqual(s.count, np.inf)
self.assertEqual(s.boes, [(0., 10), (0, 10)])
pars_list = [(0., 10), (0, 10)]
s = Space(pars=pars_list, par_types='conti, enum')
self.assertEqual(s.types, ['conti', 'enum'])
self.assertEqual(s.dim, 2)
self.assertEqual(s.size, (10.0, 2))
self.assertEqual(s.shape, (np.inf, 2))
self.assertEqual(s.count, np.inf)
self.assertEqual(s.boes, [(0., 10), (0, 10)])
pars_list = [(1, 2), (2, 3), (3, 4)]
s = Space(pars=pars_list)
self.assertEqual(s.types, ['discr', 'discr', 'discr'])
self.assertEqual(s.dim, 3)
self.assertEqual(s.size, (2, 2, 2))
self.assertEqual(s.shape, (2, 2, 2))
self.assertEqual(s.count, 8)
self.assertEqual(s.boes, [(1, 2), (2, 3), (3, 4)])
pars_list = [(1, 2, 3), (2, 3, 4), (3, 4, 5)]
s = Space(pars=pars_list)
self.assertEqual(s.types, ['enum', 'enum', 'enum'])
self.assertEqual(s.dim, 3)
self.assertEqual(s.size, (3, 3, 3))
self.assertEqual(s.shape, (3, 3, 3))
self.assertEqual(s.count, 27)
self.assertEqual(s.boes, [(1, 2, 3), (2, 3, 4), (3, 4, 5)])
pars_list = [((1, 2, 3), (2, 3, 4), (3, 4, 5))]
s = Space(pars=pars_list)
self.assertEqual(s.types, ['enum'])
self.assertEqual(s.dim, 1)
self.assertEqual(s.size, (3,))
self.assertEqual(s.shape, (3,))
self.assertEqual(s.count, 3)
pars_list = ((1, 2, 3), (2, 3, 4), (3, 4, 5))
s = Space(pars=pars_list)
self.assertEqual(s.types, ['enum', 'enum', 'enum'])
self.assertEqual(s.dim, 3)
self.assertEqual(s.size, (3, 3, 3))
self.assertEqual(s.shape, (3, 3, 3))
self.assertEqual(s.count, 27)
self.assertEqual(s.boes, [(1, 2, 3), (2, 3, 4), (3, 4, 5)])
def test_extract(self):
"""
:return:
"""
pars_list = [(0, 10), (0, 10)]
types_list = ['discr', 'discr']
s = Space(pars=pars_list, par_types=types_list)
extracted_int, count = s.extract(3, 'interval')
extracted_int_list = list(extracted_int)
print('extracted int\n', extracted_int_list)
self.assertEqual(count, 16, 'extraction count wrong!')
self.assertEqual(extracted_int_list, [(0, 0), (0, 3), (0, 6), (0, 9), (3, 0), (3, 3),
(3, 6), (3, 9), (6, 0), (6, 3), (6, 6), (6, 9),
(9, 0), (9, 3), (9, 6), (9, 9)],
'space extraction wrong!')
extracted_rand, count = s.extract(10, 'rand')
extracted_rand_list = list(extracted_rand)
self.assertEqual(count, 10, 'extraction count wrong!')
print('extracted rand\n', extracted_rand_list)
for point in list(extracted_rand_list):
self.assertEqual(len(point), 2)
self.assertLessEqual(point[0], 10)
self.assertGreaterEqual(point[0], 0)
self.assertLessEqual(point[1], 10)
self.assertGreaterEqual(point[1], 0)
pars_list = [(0., 10), (0, 10)]
s = Space(pars=pars_list, par_types=None)
extracted_int2, count = s.extract(3, 'interval')
self.assertEqual(count, 16, 'extraction count wrong!')
extracted_int_list2 = list(extracted_int2)
self.assertEqual(extracted_int_list2, [(0, 0), (0, 3), (0, 6), (0, 9), (3, 0), (3, 3),
(3, 6), (3, 9), (6, 0), (6, 3), (6, 6), (6, 9),
(9, 0), (9, 3), (9, 6), (9, 9)],
'space extraction wrong!')
print('extracted int list 2\n', extracted_int_list2)
self.assertIsInstance(extracted_int_list2[0][0], float)
self.assertIsInstance(extracted_int_list2[0][1], (int, int64))
extracted_rand2, count = s.extract(10, 'rand')
self.assertEqual(count, 10, 'extraction count wrong!')
extracted_rand_list2 = list(extracted_rand2)
print('extracted rand list 2:\n', extracted_rand_list2)
for point in extracted_rand_list2:
self.assertEqual(len(point), 2)
self.assertIsInstance(point[0], float)
self.assertLessEqual(point[0], 10)
self.assertGreaterEqual(point[0], 0)
self.assertIsInstance(point[1], (int, int64))
self.assertLessEqual(point[1], 10)
self.assertGreaterEqual(point[1], 0)
pars_list = [(0., 10), ('a', 'b')]
s = Space(pars=pars_list, par_types='enum, enum')
extracted_int3, count = s.extract(1, 'interval')
self.assertEqual(count, 4, 'extraction count wrong!')
extracted_int_list3 = list(extracted_int3)
self.assertEqual(extracted_int_list3, [(0., 'a'), (0., 'b'), (10, 'a'), (10, 'b')],
'space extraction wrong!')
print('extracted int list 3\n', extracted_int_list3)
self.assertIsInstance(extracted_int_list3[0][0], float)
self.assertIsInstance(extracted_int_list3[0][1], str)
extracted_rand3, count = s.extract(3, 'rand')
self.assertEqual(count, 3, 'extraction count wrong!')
extracted_rand_list3 = list(extracted_rand3)
print('extracted rand list 3:\n', extracted_rand_list3)
for point in extracted_rand_list3:
self.assertEqual(len(point), 2)
self.assertIsInstance(point[0], (float, int))
self.assertLessEqual(point[0], 10)
self.assertGreaterEqual(point[0], 0)
self.assertIsInstance(point[1], str)
self.assertIn(point[1], ['a', 'b'])
pars_list = [((0, 10), (1, 'c'), ('a', 'b'), (1, 14))]
s = Space(pars=pars_list, par_types='enum')
extracted_int4, count = s.extract(1, 'interval')
self.assertEqual(count, 4, 'extraction count wrong!')
extracted_int_list4 = list(extracted_int4)
it = zip(extracted_int_list4, [(0, 10), (1, 'c'), (0, 'b'), (1, 14)])
for item, item2 in it:
print(item, item2)
self.assertTrue(all([tuple(ext_item) == item for ext_item, item in it]))
print('extracted int list 4\n', extracted_int_list4)
self.assertIsInstance(extracted_int_list4[0], tuple)
extracted_rand4, count = s.extract(3, 'rand')
self.assertEqual(count, 3, 'extraction count wrong!')
extracted_rand_list4 = list(extracted_rand4)
print('extracted rand list 4:\n', extracted_rand_list4)
for point in extracted_rand_list4:
self.assertEqual(len(point), 2)
self.assertIsInstance(point[0], (int, str))
self.assertIn(point[0], [0, 1, 'a'])
self.assertIsInstance(point[1], (int, str))
self.assertIn(point[1], [10, 14, 'b', 'c'])
self.assertIn(point, [(0., 10), (1, 'c'), ('a', 'b'), (1, 14)])
pars_list = [((0, 10), (1, 'c'), ('a', 'b'), (1, 14)), (1, 4)]
s = Space(pars=pars_list, par_types='enum, discr')
extracted_int5, count = s.extract(1, 'interval')
self.assertEqual(count, 16, 'extraction count wrong!')
extracted_int_list5 = list(extracted_int5)
for item, item2 in extracted_int_list5:
print(item, item2)
self.assertTrue(all([tuple(ext_item) == item for ext_item, item in it]))
print('extracted int list 5\n', extracted_int_list5)
self.assertIsInstance(extracted_int_list5[0], tuple)
extracted_rand5, count = s.extract(5, 'rand')
self.assertEqual(count, 5, 'extraction count wrong!')
extracted_rand_list5 = list(extracted_rand5)
print('extracted rand list 5:\n', extracted_rand_list5)
for point in extracted_rand_list5:
self.assertEqual(len(point), 2)
self.assertIsInstance(point[0], tuple)
print(f'type of point[1] is {type(point[1])}')
self.assertIsInstance(point[1], (int, np.int64))
self.assertIn(point[0], [(0., 10), (1, 'c'), ('a', 'b'), (1, 14)])
print(f'test incremental extraction')
pars_list = [(10., 250), (10., 250), (10., 250), (10., 250), (10., 250), (10., 250)]
s = Space(pars_list)
ext, count = s.extract(64, 'interval')
self.assertEqual(count, 4096)
points = list(ext)
# all points have been extracted; build ten subspaces around 10 of them
# check that each subspace is a Space and lies within s, extract a point set using 32, and verify the generated count is correct
for point in points[1000:1010]:
subspace = s.from_point(point, 64)
self.assertIsInstance(subspace, Space)
self.assertTrue(subspace in s)
self.assertEqual(subspace.dim, 6)
self.assertEqual(subspace.types, ['conti', 'conti', 'conti', 'conti', 'conti', 'conti'])
ext, count = subspace.extract(32)
points = list(ext)
self.assertGreaterEqual(count, 512)
self.assertLessEqual(count, 4096)
print(f'\n---------------------------------'
f'\nthe space created around point <{point}> is'
f'\n{subspace.boes}'
f'\nand extracted {count} points, the first 5 are:'
f'\n{points[:5]}')
def test_axis_extract(self):
# test axis object with conti type
axis = Axis((0., 5))
self.assertIsInstance(axis, Axis)
self.assertEqual(axis.axis_type, 'conti')
self.assertEqual(axis.axis_boe, (0., 5.))
self.assertEqual(axis.count, np.inf)
self.assertEqual(axis.size, 5.0)
self.assertTrue(np.allclose(axis.extract(1, 'int'), [0., 1., 2., 3., 4.]))
self.assertTrue(np.allclose(axis.extract(0.5, 'int'), [0., 0.5, 1., 1.5, 2., 2.5, 3., 3.5, 4., 4.5]))
extracted = axis.extract(8, 'rand')
self.assertEqual(len(extracted), 8)
self.assertTrue(all([(0 <= item <= 5) for item in extracted]))
# test axis object with discrete type
axis = Axis((1, 5))
self.assertIsInstance(axis, Axis)
self.assertEqual(axis.axis_type, 'discr')
self.assertEqual(axis.axis_boe, (1, 5))
self.assertEqual(axis.count, 5)
self.assertEqual(axis.size, 5)
self.assertTrue(np.allclose(axis.extract(1, 'int'), [1, 2, 3, 4, 5]))
self.assertRaises(ValueError, axis.extract, 0.5, 'int')
extracted = axis.extract(8, 'rand')
self.assertEqual(len(extracted), 8)
self.assertTrue(all([(item in [1, 2, 3, 4, 5]) for item in extracted]))
# test axis object with enumerate type
axis = Axis((1, 5, 7, 10, 'A', 'F'))
self.assertIsInstance(axis, Axis)
self.assertEqual(axis.axis_type, 'enum')
self.assertEqual(axis.axis_boe, (1, 5, 7, 10, 'A', 'F'))
self.assertEqual(axis.count, 6)
self.assertEqual(axis.size, 6)
self.assertEqual(axis.extract(1, 'int'), [1, 5, 7, 10, 'A', 'F'])
self.assertRaises(ValueError, axis.extract, 0.5, 'int')
extracted = axis.extract(8, 'rand')
self.assertEqual(len(extracted), 8)
self.assertTrue(all([(item in [1, 5, 7, 10, 'A', 'F']) for item in extracted]))
def test_from_point(self):
"""测试从一个点生成一个space"""
# 生成一个space,指定space中的一个点以及distance,生成一个sub-space
pars_list = [(0., 10), (0, 10)]
s = Space(pars=pars_list, par_types=None)
self.assertEqual(s.types, ['conti', 'discr'])
self.assertEqual(s.dim, 2)
self.assertEqual(s.size, (10., 11))
self.assertEqual(s.shape, (np.inf, 11))
self.assertEqual(s.count, np.inf)
self.assertEqual(s.boes, [(0., 10), (0, 10)])
print('create subspace from a point in space')
p = (3, 3)
distance = 2
subspace = s.from_point(p, distance)
self.assertIsInstance(subspace, Space)
self.assertEqual(subspace.types, ['conti', 'discr'])
self.assertEqual(subspace.dim, 2)
self.assertEqual(subspace.size, (4.0, 5))
self.assertEqual(subspace.shape, (np.inf, 5))
self.assertEqual(subspace.count, np.inf)
self.assertEqual(subspace.boes, [(1, 5), (1, 5)])
print('create subspace from a 6 dimensional discrete space')
s = Space(pars=[(10, 250), (10, 250), (10, 250), (10, 250), (10, 250), (10, 250)])
p = (15, 200, 150, 150, 150, 150)
d = 10
subspace = s.from_point(p, d)
self.assertIsInstance(subspace, Space)
self.assertEqual(subspace.types, ['discr', 'discr', 'discr', 'discr', 'discr', 'discr'])
self.assertEqual(subspace.dim, 6)
self.assertEqual(subspace.volume, 65345616)
self.assertEqual(subspace.size, (16, 21, 21, 21, 21, 21))
self.assertEqual(subspace.shape, (16, 21, 21, 21, 21, 21))
self.assertEqual(subspace.count, 65345616)
self.assertEqual(subspace.boes, [(10, 25), (190, 210), (140, 160), (140, 160), (140, 160), (140, 160)])
print('create subspace from a 6 dimensional continuous space')
s = Space(pars=[(10., 250), (10., 250), (10., 250), (10., 250), (10., 250), (10., 250)])
p = (15, 200, 150, 150, 150, 150)
d = 10
subspace = s.from_point(p, d)
self.assertIsInstance(subspace, Space)
self.assertEqual(subspace.types, ['conti', 'conti', 'conti', 'conti', 'conti', 'conti'])
self.assertEqual(subspace.dim, 6)
self.assertEqual(subspace.volume, 48000000)
self.assertEqual(subspace.size, (15.0, 20.0, 20.0, 20.0, 20.0, 20.0))
self.assertEqual(subspace.shape, (np.inf, np.inf, np.inf, np.inf, np.inf, np.inf))
self.assertEqual(subspace.count, np.inf)
self.assertEqual(subspace.boes, [(10, 25), (190, 210), (140, 160), (140, 160), (140, 160), (140, 160)])
print('create subspace with different distances on each dimension')
s = Space(pars=[(10., 250), (10., 250), (10., 250), (10., 250), (10., 250), (10., 250)])
p = (15, 200, 150, 150, 150, 150)
d = [10, 5, 5, 10, 10, 5]
subspace = s.from_point(p, d)
self.assertIsInstance(subspace, Space)
self.assertEqual(subspace.types, ['conti', 'conti', 'conti', 'conti', 'conti', 'conti'])
self.assertEqual(subspace.dim, 6)
self.assertEqual(subspace.volume, 6000000)
self.assertEqual(subspace.size, (15.0, 10.0, 10.0, 20.0, 20.0, 10.0))
self.assertEqual(subspace.shape, (np.inf, np.inf, np.inf, np.inf, np.inf, np.inf))
self.assertEqual(subspace.count, np.inf)
self.assertEqual(subspace.boes, [(10, 25), (195, 205), (145, 155), (140, 160), (140, 160), (145, 155)])
class TestCashPlan(unittest.TestCase):
def setUp(self):
self.cp1 = qt.CashPlan(['2012-01-01', '2010-01-01'], [10000, 20000], 0.1)
self.cp1.info()
self.cp2 = qt.CashPlan(['20100501'], 10000)
self.cp2.info()
self.cp3 = qt.CashPlan(pd.date_range(start='2019-01-01',
freq='Y',
periods=12),
[i * 1000 + 10000 for i in range(12)],
0.035)
self.cp3.info()
def test_creation(self):
self.assertIsInstance(self.cp1, qt.CashPlan, 'CashPlan object creation wrong')
self.assertIsInstance(self.cp2, qt.CashPlan, 'CashPlan object creation wrong')
self.assertIsInstance(self.cp3, qt.CashPlan, 'CashPlan object creation wrong')
# test __repr__()
print(self.cp1)
print(self.cp2)
print(self.cp3)
# test __str__()
self.cp1.info()
self.cp2.info()
self.cp3.info()
# test assertion errors
self.assertRaises(AssertionError, qt.CashPlan, '2016-01-01', [10000, 10000])
self.assertRaises(KeyError, qt.CashPlan, '2020-20-20', 10000)
def test_properties(self):
self.assertEqual(self.cp1.amounts, [20000, 10000], 'property wrong')
self.assertEqual(self.cp1.first_day, Timestamp('2010-01-01'))
self.assertEqual(self.cp1.last_day, Timestamp('2012-01-01'))
self.assertEqual(self.cp1.investment_count, 2)
self.assertEqual(self.cp1.period, 730)
self.assertEqual(self.cp1.dates, [Timestamp('2010-01-01'), Timestamp('2012-01-01')])
self.assertEqual(self.cp1.ir, 0.1)
self.assertAlmostEqual(self.cp1.closing_value, 34200)
self.assertAlmostEqual(self.cp2.closing_value, 10000)
self.assertAlmostEqual(self.cp3.closing_value, 220385.3483685)
self.assertIsInstance(self.cp1.plan, pd.DataFrame)
self.assertIsInstance(self.cp2.plan, pd.DataFrame)
self.assertIsInstance(self.cp3.plan, pd.DataFrame)
def test_operation(self):
cp_self_add = self.cp1 + self.cp1
cp_add = self.cp1 + self.cp2
cp_add_int = self.cp1 + 10000
cp_mul_int = self.cp1 * 2
cp_mul_float = self.cp2 * 1.5
cp_mul_time = 3 * self.cp2
cp_mul_time2 = 2 * self.cp1
cp_mul_time3 = 2 * self.cp3
cp_mul_float2 = 2. * self.cp3
self.assertIsInstance(cp_self_add, qt.CashPlan)
self.assertEqual(cp_self_add.amounts, [40000, 20000])
self.assertEqual(cp_add.amounts, [20000, 10000, 10000])
self.assertEqual(cp_add_int.amounts, [30000, 20000])
self.assertEqual(cp_mul_int.amounts, [40000, 20000])
self.assertEqual(cp_mul_float.amounts, [15000])
self.assertEqual(cp_mul_float.dates, [Timestamp('2010-05-01')])
self.assertEqual(cp_mul_time.amounts, [10000, 10000, 10000])
self.assertEqual(cp_mul_time.dates, [Timestamp('2010-05-01'),
Timestamp('2011-05-01'),
Timestamp('2012-04-30')])
self.assertEqual(cp_mul_time2.amounts, [20000, 10000, 20000, 10000])
self.assertEqual(cp_mul_time2.dates, [Timestamp('2010-01-01'),
Timestamp('2012-01-01'),
Timestamp('2014-01-01'),
Timestamp('2016-01-01')])
self.assertEqual(cp_mul_time3.dates, [Timestamp('2019-12-31'),
Timestamp('2020-12-31'),
Timestamp('2021-12-31'),
Timestamp('2022-12-31'),
Timestamp('2023-12-31'),
Timestamp('2024-12-31'),
Timestamp('2025-12-31'),
Timestamp('2026-12-31'),
Timestamp('2027-12-31'),
Timestamp('2028-12-31'),
Timestamp('2029-12-31'),
Timestamp('2030-12-31'),
Timestamp('2031-12-29'),
Timestamp('2032-12-29'),
Timestamp('2033-12-29'),
Timestamp('2034-12-29'),
Timestamp('2035-12-29'),
Timestamp('2036-12-29'),
Timestamp('2037-12-29'),
Timestamp('2038-12-29'),
Timestamp('2039-12-29'),
Timestamp('2040-12-29'),
Timestamp('2041-12-29'),
Timestamp('2042-12-29')])
self.assertEqual(cp_mul_float2.dates, [Timestamp('2019-12-31'),
Timestamp('2020-12-31'),
Timestamp('2021-12-31'),
Timestamp('2022-12-31'),
Timestamp('2023-12-31'),
Timestamp('2024-12-31'),
Timestamp('2025-12-31'),
Timestamp('2026-12-31'),
Timestamp('2027-12-31'),
Timestamp('2028-12-31'),
Timestamp('2029-12-31'),
Timestamp('2030-12-31')])
self.assertEqual(cp_mul_float2.amounts, [20000.0,
22000.0,
24000.0,
26000.0,
28000.0,
30000.0,
32000.0,
34000.0,
36000.0,
38000.0,
40000.0,
42000.0])
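# A minimal reference sketch of how cp1.closing_value == 34200 can be reproduced by hand
# (an assumption about the metric, not qt.CashPlan's implementation): compound each investment
# annually at rate `ir` from its own date up to the last investment date, then sum.
def _reference_closing_value(dates, amounts, ir):
    """Hypothetical helper, used only to illustrate the expected figures above."""
    dates = [pd.Timestamp(d) for d in dates]  # relies on the module-level pandas import
    last_day = max(dates)
    return sum(amount * (1 + ir) ** ((last_day - day).days // 365)
               for amount, day in zip(amounts, dates))
# e.g. _reference_closing_value(['2010-01-01', '2012-01-01'], [20000, 10000], 0.1) gives 34200.0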
class TestPool(unittest.TestCase):
def setUp(self):
self.p = ResultPool(5)
self.items = ['first', 'second', (1, 2, 3), 'this', 24]
self.perfs = [1, 2, 3, 4, 5]
self.additional_result1 = ('abc', 12)
self.additional_result2 = ([1, 2], -1)
self.additional_result3 = (12, 5)
def test_create(self):
self.assertIsInstance(self.p, ResultPool)
def test_operation(self):
self.p.in_pool(self.additional_result1[0], self.additional_result1[1])
self.p.cut()
self.assertEqual(self.p.item_count, 1)
self.assertEqual(self.p.items, ['abc'])
for item, perf in zip(self.items, self.perfs):
self.p.in_pool(item, perf)
self.assertEqual(self.p.item_count, 6)
self.assertEqual(self.p.items, ['abc', 'first', 'second', (1, 2, 3), 'this', 24])
self.p.cut()
self.assertEqual(self.p.items, ['second', (1, 2, 3), 'this', 24, 'abc'])
self.assertEqual(self.p.perfs, [2, 3, 4, 5, 12])
self.p.in_pool(self.additional_result2[0], self.additional_result2[1])
self.p.in_pool(self.additional_result3[0], self.additional_result3[1])
self.assertEqual(self.p.item_count, 7)
self.p.cut(keep_largest=False)
self.assertEqual(self.p.items, [[1, 2], 'second', (1, 2, 3), 'this', 24])
self.assertEqual(self.p.perfs, [-1, 2, 3, 4, 5])
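# A minimal sketch (an assumption, not the ResultPool implementation) of the cut() behaviour
# exercised above: keep the `capacity` best performers (or the worst when keep_largest=False),
# returned in ascending order of performance.
def _reference_cut(items, perfs, capacity, keep_largest=True):
    order = np.argsort(perfs, kind='stable')  # ascending by performance
    keep = order[-capacity:] if keep_largest else order[:capacity]
    return [items[i] for i in keep], [perfs[i] for i in keep]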
class TestCoreSubFuncs(unittest.TestCase):
"""Test all functions in core.py"""
def setUp(self):
pass
def test_input_to_list(self):
print('Testing input_to_list() function')
input_str = 'first'
self.assertEqual(qt.utilfuncs.input_to_list(input_str, 3), ['first', 'first', 'first'])
self.assertEqual(qt.utilfuncs.input_to_list(input_str, 4), ['first', 'first', 'first', 'first'])
self.assertEqual(qt.utilfuncs.input_to_list(input_str, 2, None), ['first', 'first'])
input_list = ['first', 'second']
self.assertEqual(qt.utilfuncs.input_to_list(input_list, 3), ['first', 'second', None])
self.assertEqual(qt.utilfuncs.input_to_list(input_list, 4, 'padder'), ['first', 'second', 'padder', 'padder'])
self.assertEqual(qt.utilfuncs.input_to_list(input_list, 1), ['first', 'second'])
self.assertEqual(qt.utilfuncs.input_to_list(input_list, -5), ['first', 'second'])
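# Behaviour exercised above: a non-list argument is repeated n times; a list shorter than n is
# padded with the padder value (None by default); a list is returned unchanged when n does not
# exceed its length.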
def test_point_in_space(self):
sp = Space([(0., 10.), (0., 10.), (0., 10.)])
p1 = (5.5, 3.2, 7)
p2 = (-1, 3, 10)
self.assertTrue(p1 in sp)
print(f'point {p1} is in space {sp}')
self.assertFalse(p2 in sp)
print(f'point {p2} is not in space {sp}')
sp = Space([(0., 10.), (0., 10.), range(40, 3, -2)], 'conti, conti, enum')
p1 = (5.5, 3.2, 8)
self.assertTrue(p1 in sp)
print(f'point {p1} is in space {sp}')
def test_space_in_space(self):
print('test if a space is in another space')
sp = Space([(0., 10.), (0., 10.), (0., 10.)])
sp2 = Space([(0., 10.), (0., 10.), (0., 10.)])
self.assertTrue(sp2 in sp)
self.assertTrue(sp in sp2)
print(f'space {sp2} is in space {sp}\n'
f'and space {sp} is in space {sp2}\n'
f'they are equal to each other\n')
sp2 = Space([(0, 5.), (2, 7.), (3., 9.)])
self.assertTrue(sp2 in sp)
self.assertFalse(sp in sp2)
print(f'space {sp2} is in space {sp}\n'
f'and space {sp} is not in space {sp2}\n'
f'{sp2} is a sub space of {sp}\n')
sp2 = Space([(0, 5), (2, 7), (3., 9)])
self.assertFalse(sp2 in sp)
self.assertFalse(sp in sp2)
print(f'space {sp2} is not in space {sp}\n'
f'and space {sp} is not in space {sp2}\n'
f'they have different types of axes\n')
sp = Space([(0., 10.), (0., 10.), range(40, 3, -2)])
self.assertFalse(sp in sp2)
self.assertFalse(sp2 in sp)
print(f'space {sp2} is not in space {sp}\n'
f'and space {sp} is not in space {sp2}\n'
f'they have different types of axes\n')
def test_space_around_centre(self):
sp = Space([(0., 10.), (0., 10.), (0., 10.)])
p1 = (5.5, 3.2, 7)
ssp = space_around_centre(space=sp, centre=p1, radius=1.2)
print(ssp.boes)
print('\ntest multiple radii:')
self.assertEqual(ssp.boes, [(4.3, 6.7), (2.0, 4.4), (5.8, 8.2)])
ssp = space_around_centre(space=sp, centre=p1, radius=[1, 2, 1])
print(ssp.boes)
self.assertEqual(ssp.boes, [(4.5, 6.5), (1.2000000000000002, 5.2), (6.0, 8.0)])
print('\ntest points on edge:')
p2 = (5.5, 3.2, 10)
ssp = space_around_centre(space=sp, centre=p1, radius=3.9)
print(ssp.boes)
self.assertEqual(ssp.boes, [(1.6, 9.4), (0.0, 7.1), (3.1, 10.0)])
print('\ntest enum spaces')
sp = Space([(0, 100), range(40, 3, -2)], 'discr, enum')
p1 = [34, 12]
ssp = space_around_centre(space=sp, centre=p1, radius=5, ignore_enums=False)
self.assertEqual(ssp.boes, [(29, 39), (22, 20, 18, 16, 14, 12, 10, 8, 6, 4)])
print(ssp.boes)
print('\ntest enum space and ignore enum axis')
ssp = space_around_centre(space=sp, centre=p1, radius=5)
self.assertEqual(ssp.boes, [(29, 39),
(40, 38, 36, 34, 32, 30, 28, 26, 24, 22, 20, 18, 16, 14, 12, 10, 8, 6, 4)])
print(sp.boes)
def test_time_string_format(self):
print('Testing time_str_format() function:')
t = 3.14
self.assertEqual(time_str_format(t), '3s 140.0ms')
self.assertEqual(time_str_format(t, estimation=True), '3s ')
self.assertEqual(time_str_format(t, short_form=True), '3"140')
self.assertEqual(time_str_format(t, estimation=True, short_form=True), '3"')
t = 300.14
self.assertEqual(time_str_format(t), '5min 140.0ms')
self.assertEqual(time_str_format(t, estimation=True), '5min ')
self.assertEqual(time_str_format(t, short_form=True), "5'140")
self.assertEqual(time_str_format(t, estimation=True, short_form=True), "5'")
t = 7435.0014
self.assertEqual(time_str_format(t), '2hrs 3min 55s 1.4ms')
self.assertEqual(time_str_format(t, estimation=True), '2hrs ')
self.assertEqual(time_str_format(t, short_form=True), "2H3'55\"001")
self.assertEqual(time_str_format(t, estimation=True, short_form=True), "2H")
t = 88425.0509
self.assertEqual(time_str_format(t), '1days 33min 45s 50.9ms')
self.assertEqual(time_str_format(t, estimation=True), '1days ')
self.assertEqual(time_str_format(t, short_form=True), "1D33'45\"051")
self.assertEqual(time_str_format(t, estimation=True, short_form=True), "1D")
def test_get_stock_pool(self):
print(f'start testing the stock pool building function\n')
share_basics = stock_basic(fields='ts_code,symbol,name,area,industry,market,list_date,exchange')
print(f'\nselect all stocks by area')
stock_pool = qt.get_stock_pool(area='上海')
print(f'{len(stock_pool)} shares selected, first 5 are: {stock_pool[0:5]}\n'
f'check if all stock areas are "上海"\n'
f'{share_basics[np.isin(share_basics.ts_code, stock_pool)].head()}')
self.assertTrue(share_basics[np.isin(share_basics.ts_code, stock_pool)]['area'].eq('上海').all())
print(f'\nselect all stocks by multiple areas')
stock_pool = qt.get_stock_pool(area='贵州,北京,天津')
print(f'\n{len(stock_pool)} shares selected, first 5 are: {stock_pool[0:5]}\n'
f'check if all stock areas are in list of ["贵州", "北京", "天津"]\n'
f'{share_basics[np.isin(share_basics.ts_code, stock_pool)].head()}')
self.assertTrue(share_basics[np.isin(share_basics.ts_code, stock_pool)]['area'].isin(['贵州',
'北京',
'天津']).all())
print(f'\nselect all stocks by area and industry')
stock_pool = qt.get_stock_pool(area='四川', industry='银行, 金融')
print(f'\n{len(stock_pool)} shares selected, first 5 are: {stock_pool[0:5]}\n'
f'check if all stock areas are "四川", and industry in ["银行", "金融"]\n'
f'{share_basics[np.isin(share_basics.ts_code, stock_pool)].head()}')
self.assertTrue(share_basics[np.isin(share_basics.ts_code, stock_pool)]['industry'].isin(['银行', '金融']).all())
self.assertTrue(share_basics[np.isin(share_basics.ts_code, stock_pool)]['area'].isin(['四川']).all())
print(f'\nselect all stocks by industry')
stock_pool = qt.get_stock_pool(industry='银行, 金融')
print(f'\n{len(stock_pool)} shares selected, first 5 are: {stock_pool[0:5]}\n'
f'check if all stocks industry in ["银行", "金融"]\n'
f'{share_basics[np.isin(share_basics.ts_code, stock_pool)].head()}')
self.assertTrue(share_basics[np.isin(share_basics.ts_code, stock_pool)]['industry'].isin(['银行', '金融']).all())
print(f'\nselect all stocks by market')
stock_pool = qt.get_stock_pool(market='主板')
print(f'\n{len(stock_pool)} shares selected, first 5 are: {stock_pool[0:5]}\n'
f'check if all stock market is "主板"\n'
f'{share_basics[np.isin(share_basics.ts_code, stock_pool)].head()}')
self.assertTrue(share_basics[np.isin(share_basics.ts_code, stock_pool)]['market'].isin(['主板']).all())
print(f'\nselect all stocks by market and list date')
stock_pool = qt.get_stock_pool(date='2000-01-01', market='主板')
print(f'\n{len(stock_pool)} shares selected, first 5 are: {stock_pool[0:5]}\n'
f'check if all stock markets are "主板", and list dates are on or before "2000-01-01"\n'
f'{share_basics[np.isin(share_basics.ts_code, stock_pool)].head()}')
self.assertTrue(share_basics[np.isin(share_basics.ts_code, stock_pool)]['market'].isin(['主板']).all())
self.assertTrue(share_basics[np.isin(share_basics.ts_code, stock_pool)]['list_date'].le('2000-01-01').all())
print(f'\nselect all stocks by list date')
stock_pool = qt.get_stock_pool(date='1997-01-01')
print(f'\n{len(stock_pool)} shares selected, first 5 are: {stock_pool[0:5]}\n'
f'check if all list dates are on or before "1997-01-01"\n'
f'{share_basics[np.isin(share_basics.ts_code, stock_pool)].head()}')
self.assertTrue(share_basics[np.isin(share_basics.ts_code, stock_pool)]['list_date'].le('1997-01-01').all())
print(f'\nselect all stocks by exchange')
stock_pool = qt.get_stock_pool(exchange='SSE')
print(f'\n{len(stock_pool)} shares selected, first 5 are: {stock_pool[0:5]}\n'
f'check if all exchanges are "SSE"\n'
f'{share_basics[np.isin(share_basics.ts_code, stock_pool)].head()}')
self.assertTrue(share_basics[np.isin(share_basics.ts_code, stock_pool)]['exchange'].eq('SSE').all())
print(f'\nselect all stocks by industry, area and list date')
industry_list = ['银行', '全国地产', '互联网', '环境保护', '区域地产',
'酒店餐饮', '运输设备', '综合类', '建筑工程', '玻璃',
'家用电器', '文教休闲', '其他商业', '元器件', 'IT设备',
'其他建材', '汽车服务', '火力发电', '医药商业', '汽车配件',
'广告包装', '轻工机械', '新型电力', '多元金融', '饲料']
area_list = ['深圳', '北京', '吉林', '江苏', '辽宁', '广东',
'安徽', '四川', '浙江', '湖南', '河北', '新疆',
'山东', '河南', '山西', '江西', '青海', '湖北',
'内蒙', '海南', '重庆', '陕西', '福建', '广西',
'上海']
stock_pool = qt.get_stock_pool(date='19980101',
industry=industry_list,
area=area_list)
print(f'\n{len(stock_pool)} shares selected, first 5 are: {stock_pool[0:5]}\n'
f'check if list dates are on or before "1998-01-01", and industries and areas are in the given lists\n'
f'{share_basics[np.isin(share_basics.ts_code, stock_pool)].head()}')
self.assertTrue(share_basics[np.isin(share_basics.ts_code, stock_pool)]['list_date'].le('1998-01-01').all())
self.assertTrue(share_basics[np.isin(share_basics.ts_code, stock_pool)]['industry'].isin(industry_list).all())
self.assertTrue(share_basics[np.isin(share_basics.ts_code, stock_pool)]['area'].isin(area_list).all())
self.assertRaises(KeyError, qt.get_stock_pool, industry=25)
self.assertRaises(KeyError, qt.get_stock_pool, share_name='000300.SH')
self.assertRaises(KeyError, qt.get_stock_pool, markets='SSE')
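# A minimal sketch (an assumption, not the library implementation) of the axis clipping shown
# in test_space_around_centre above: each axis of the sub-space spans centre +/- radius,
# clipped to the bounds of the original axis, e.g. (0., 10.) around centre 7 with radius 3.9
# gives the (3.1, 10.0) axis asserted above.
def _reference_axis_around_centre(bound, centre, radius):
    lower, upper = bound
    return max(lower, centre - radius), min(upper, centre + radius)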
class TestEvaluations(unittest.TestCase):
"""Test all evaluation functions in core.py"""
# the manually calculated reference results below were prepared in an Excel file
def setUp(self):
"""用np.random生成测试用数据,使用cumsum()模拟股票走势"""
self.test_data1 = pd.DataFrame([5.34892759, 5.65768696, 5.79227076, 5.56266871, 5.88189632,
6.24795001, 5.92755558, 6.38748165, 6.31331899, 5.86001665,
5.61048472, 5.30696736, 5.40406792, 5.03180571, 5.37886353,
5.78608307, 6.26540339, 6.59348026, 6.90943801, 6.70911677,
6.33015954, 6.06697417, 5.9752499, 6.45786408, 6.95273763,
6.7691991, 6.70355481, 6.28048969, 6.61344541, 6.24620003,
6.47409983, 6.4522311, 6.8773094, 6.99727832, 6.59262674,
6.59014938, 6.63758237, 6.38331869, 6.09902105, 6.35390109,
6.51993567, 6.87244592, 6.83963485, 7.08797815, 6.88003144,
6.83657323, 6.97819483, 7.01600276, 7.12554256, 7.58941523,
7.61014457, 7.21224091, 7.48174399, 7.66490854, 7.51371968,
7.11586198, 6.97147399, 6.67453301, 6.2042138, 6.33967015,
6.22187938, 5.98426993, 6.37096079, 6.55897161, 6.26422645,
6.69363762, 7.12668015, 6.83232926, 7.30524081, 7.4262041,
7.54031383, 7.17545919, 7.20659257, 7.44886016, 7.37094393,
6.88011022, 7.08142491, 6.74992833, 6.5967097, 6.21336693,
6.35565105, 6.82347596, 6.44773408, 6.84538053, 6.47966466,
6.09699528, 5.63927014, 6.01081024, 6.20585303, 6.60528206,
7.01594726, 7.03684251, 6.76574977, 7.08740846, 6.65336462,
7.07126686, 6.80058956, 6.79241977, 6.47843472, 6.39245474],
columns=['value'])
self.test_data2 = pd.DataFrame([5.09276527, 4.83828592, 4.6000911, 4.63170487, 4.63566451,
4.50546921, 4.96390044, 4.64557907, 4.25787855, 3.76585551,
3.38826334, 3.76243422, 4.06365426, 3.87084726, 3.91400935,
4.13438822, 4.27064542, 4.56776104, 5.03800296, 5.31070529,
5.39902276, 5.21186286, 5.05683114, 4.68842046, 5.11895168,
5.27151571, 5.72294993, 6.09961056, 6.26569635, 6.48806151,
6.16058885, 6.2582459, 6.38934791, 6.57831057, 6.19508831,
5.70155153, 5.20435735, 5.36538825, 5.40450056, 5.2227697,
5.37828693, 5.53058991, 6.02996797, 5.76802181, 5.66166713,
6.07988994, 5.61794367, 5.63218151, 6.10728013, 6.0324168,
6.27164431, 6.27551239, 6.52329665, 7.00470007, 7.34163113,
7.33699083, 7.67661334, 8.09395749, 7.68086668, 7.58341161,
7.46219819, 7.58671899, 7.19348298, 7.40088323, 7.47562005,
7.93342043, 8.2286081, 8.3521632, 8.43590025, 8.34977395,
8.57563095, 8.81586328, 9.08738649, 9.01542031, 8.8653815,
9.21763111, 9.04233017, 8.59533999, 8.47590075, 8.70857222,
8.78890756, 8.92697606, 9.35743773, 9.68280866, 10.15622021,
10.55908549, 10.6337894, 10.55197128, 10.65435176, 10.54611045,
10.19432562, 10.48320884, 10.36176768, 10.03186854, 10.23656092,
10.0062843, 10.13669686, 10.30758958, 9.87904176, 10.05126375],
columns=['value'])
self.test_data3 = pd.DataFrame([5.02851874, 5.20700348, 5.02410709, 5.49836387, 5.06834371,
5.10956737, 5.15314979, 5.02256472, 5.09746382, 5.23909247,
4.93410336, 4.96316186, 5.40026682, 5.7353255, 5.53438319,
5.79092139, 5.67528173, 5.89840855, 5.75379463, 6.10855386,
5.77322365, 5.84538021, 5.6103973, 5.7518655, 5.49729695,
5.13610628, 5.30524121, 5.68093462, 5.73251319, 6.04420783,
6.26929843, 6.59610234, 6.09872345, 6.25475121, 6.72927396,
6.91395783, 7.00693283, 7.36217783, 7.71516676, 7.67580263,
7.62477511, 7.73600568, 7.53457914, 7.46170277, 7.83658014,
8.11481319, 8.03705544, 7.64948845, 7.52043731, 7.67247943,
7.46511982, 7.43541798, 7.58856517, 7.9392717, 8.25406287,
7.77031632, 8.03223447, 7.86799055, 7.57630999, 7.33230519,
7.22378732, 6.85972264, 7.17548456, 7.5387846, 7.2392632,
6.8455644, 6.59557185, 6.6496796, 6.73685623, 7.18598015,
7.13619128, 6.88060157, 7.1399681, 7.30308077, 6.94942434,
7.0247815, 7.37567798, 7.50080197, 7.59719284, 7.14520561,
7.29913484, 7.79551341, 8.15497781, 8.40456095, 8.86516528,
8.53042688, 8.94268762, 8.52048006, 8.80036284, 8.91602364,
9.19953385, 8.70828953, 8.24613093, 8.18770453, 7.79548389,
7.68627967, 7.23205036, 6.98302636, 7.06515819, 6.95068113],
columns=['value'])
self.test_data4 = pd.DataFrame([4.97926539, 5.44016005, 5.45122915, 5.74485615, 5.45600553,
5.44858945, 5.2435413, 5.47315161, 5.58464303, 5.36179749,
5.38236326, 5.29614981, 5.76523508, 5.75102892, 6.15316618,
6.03852528, 6.01442228, 5.70510182, 5.22748133, 5.46762379,
5.78926267, 5.8221362, 5.61236849, 5.30615725, 5.24200611,
5.41042642, 5.59940342, 5.28306781, 4.99451932, 5.08799266,
5.38865647, 5.58229139, 5.33492845, 5.48206276, 5.09721379,
5.39190493, 5.29965087, 5.0374415, 5.50798022, 5.43107577,
5.22759507, 4.991809, 5.43153084, 5.39966868, 5.59916352,
5.66412137, 6.00611838, 5.63564902, 5.66723484, 5.29863863,
4.91115153, 5.3749929, 5.75082334, 6.08308148, 6.58091182,
6.77848803, 7.19588758, 7.64862286, 7.99818347, 7.91824794,
8.30341071, 8.45984973, 7.98700002, 8.18924931, 8.60755649,
8.66233396, 8.91018407, 9.0782739, 9.33515448, 8.95870245,
8.98426422, 8.50340317, 8.64916085, 8.93592407, 8.63145745,
8.65322862, 8.39543204, 8.37969997, 8.23394504, 8.04062872,
7.91259763, 7.57252171, 7.72670114, 7.74486117, 8.06908188,
7.99166889, 7.92155906, 8.39956136, 8.80181323, 8.47464091,
8.06557064, 7.87145573, 8.0237959, 8.39481998, 8.68525692,
8.81185461, 8.98632237, 9.0989835, 8.89787405, 8.86508591],
columns=['value'])
self.test_data5 = pd.DataFrame([4.50258923, 4.35142568, 4.07459514, 3.87791297, 3.73715985,
3.98455684, 4.07587908, 4.00042472, 4.28276612, 4.01362051,
4.13713565, 4.49312372, 4.48633159, 4.4641207, 4.13444605,
3.79107217, 4.22941629, 4.56548511, 4.92472163, 5.27723158,
5.67409193, 6.00176917, 5.88889928, 5.55256103, 5.39308314,
5.2610492, 5.30738908, 5.22222408, 4.90332238, 4.57499908,
4.96097146, 4.81531011, 4.39115442, 4.63200662, 5.04588813,
4.67866025, 5.01705123, 4.83562258, 4.60381702, 4.66187576,
4.41292828, 4.86604507, 4.42280124, 4.07517294, 4.16317319,
4.10316596, 4.42913598, 4.06609666, 3.96725913, 4.15965746,
4.12379564, 4.04054068, 3.84342851, 3.45902867, 3.17649855,
3.09773586, 3.5502119, 3.66396995, 3.66306483, 3.29131401,
2.79558533, 2.88319542, 3.03671098, 3.44645857, 3.88167161,
3.57961874, 3.60180276, 3.96702102, 4.05429995, 4.40056979,
4.05653231, 3.59600456, 3.60792477, 4.09989922, 3.73503663,
4.01892626, 3.94597242, 3.81466605, 3.71417992, 3.93767156,
4.42806557, 4.06988106, 4.03713636, 4.34408673, 4.79810156,
5.18115011, 4.89798406, 5.3960077, 5.72504875, 5.61894017,
5.1958197, 4.85275896, 5.17550207, 4.71548987, 4.62408567,
4.55488535, 4.36532649, 4.26031979, 4.25225607, 4.58627048],
columns=['value'])
self.test_data6 = pd.DataFrame([5.08639513, 5.05761083, 4.76160923, 4.62166504, 4.62923183,
4.25070173, 4.13447513, 3.90890013, 3.76687608, 3.43342482,
3.67648224, 3.6274775, 3.9385404, 4.39771627, 4.03199346,
3.93265288, 3.50059789, 3.3851961, 3.29743973, 3.2544872,
2.93692949, 2.70893003, 2.55461976, 2.20922332, 2.29054475,
2.2144714, 2.03726827, 2.39007617, 2.29866155, 2.40607111,
2.40440444, 2.79374649, 2.66541922, 2.27018079, 2.08505127,
2.55478864, 2.22415625, 2.58517923, 2.58802256, 2.94870959,
2.69301739, 2.19991535, 2.69473146, 2.64704637, 2.62753542,
2.14240825, 2.38565154, 1.94592117, 2.32243877, 2.69337246,
2.51283854, 2.62484451, 2.15559054, 2.35410875, 2.31219177,
1.96018265, 2.34711266, 2.58083322, 2.40290041, 2.20439791,
2.31472425, 2.16228248, 2.16439749, 2.20080737, 1.73293206,
1.9264407, 2.25089861, 2.69269101, 2.59296687, 2.1420998,
1.67819153, 1.98419023, 2.14479494, 1.89055376, 1.96720648,
1.9916694, 2.37227761, 2.14446036, 2.34573903, 1.86162546,
2.1410721, 2.39204939, 2.52529064, 2.47079939, 2.9299031,
3.09452923, 2.93276708, 3.21731309, 3.06248964, 2.90413406,
2.67844632, 2.45621213, 2.41463398, 2.7373913, 3.14917045,
3.4033949, 3.82283446, 4.02285451, 3.7619638, 4.10346795],
columns=['value'])
self.test_data7 = pd.DataFrame([4.75233583, 4.47668283, 4.55894263, 4.61765848, 4.622892,
4.58941116, 4.32535872, 3.88112797, 3.47237806, 3.50898953,
3.82530406, 3.6718017, 3.78918195, 4.1800752, 4.01818557,
4.40822582, 4.65474654, 4.89287256, 4.40879274, 4.65505126,
4.36876403, 4.58418934, 4.75687172, 4.3689799, 4.16126498,
4.0203982, 3.77148242, 3.38198096, 3.07261764, 2.9014741,
2.5049543, 2.756105, 2.28779058, 2.16986991, 1.8415962,
1.83319008, 2.20898291, 2.00128981, 1.75747025, 1.26676663,
1.40316876, 1.11126484, 1.60376367, 1.22523829, 1.58816681,
1.49705679, 1.80244138, 1.55128293, 1.35339409, 1.50985759,
1.0808451, 1.05892796, 1.43414812, 1.43039101, 1.73631655,
1.43940867, 1.82864425, 1.71088265, 2.12015154, 2.45417128,
2.84777618, 2.7925612, 2.90975121, 3.25920745, 3.13801182,
3.52733677, 3.65468491, 3.69395211, 3.49862035, 3.24786017,
3.64463138, 4.00331929, 3.62509565, 3.78013949, 3.4174012,
3.76312271, 3.62054004, 3.67206716, 3.60596058, 3.38636199,
3.42580676, 3.32921095, 3.02976759, 3.28258676, 3.45760838,
3.24917528, 2.94618304, 2.86980011, 2.63191259, 2.39566759,
2.53159917, 2.96273967, 3.25626185, 2.97425402, 3.16412191,
3.58280763, 3.23257727, 3.62353556, 3.12806399, 2.92532313],
columns=['value'])
# build a test series of 500 data points to test the evaluation process when there are more than 250 data points
self.long_data = pd.DataFrame([ 9.879, 9.916, 10.109, 10.214, 10.361, 10.768, 10.594, 10.288,
10.082, 9.994, 10.125, 10.126, 10.384, 10.734, 10.4 , 10.87 ,
11.338, 11.061, 11.415, 11.724, 12.077, 12.196, 12.064, 12.423,
12.19 , 11.729, 11.677, 11.448, 11.485, 10.989, 11.242, 11.239,
11.113, 11.075, 11.471, 11.745, 11.754, 11.782, 12.079, 11.97 ,
12.178, 11.95 , 12.438, 12.612, 12.804, 12.952, 12.612, 12.867,
12.832, 12.832, 13.015, 13.315, 13.249, 12.904, 12.776, 12.64 ,
12.543, 12.287, 12.225, 11.844, 11.985, 11.945, 11.542, 11.871,
12.245, 12.228, 12.362, 11.899, 11.962, 12.374, 12.816, 12.649,
12.252, 12.579, 12.3 , 11.988, 12.177, 12.312, 12.744, 12.599,
12.524, 12.82 , 12.67 , 12.876, 12.986, 13.271, 13.606, 13.82 ,
14.161, 13.833, 13.831, 14.137, 13.705, 13.414, 13.037, 12.759,
12.642, 12.948, 13.297, 13.483, 13.836, 14.179, 13.709, 13.655,
13.198, 13.508, 13.953, 14.387, 14.043, 13.987, 13.561, 13.391,
12.923, 12.555, 12.503, 12.292, 11.877, 12.34 , 12.141, 11.687,
11.992, 12.458, 12.131, 11.75 , 11.739, 11.263, 11.762, 11.976,
11.578, 11.854, 12.136, 12.422, 12.311, 12.56 , 12.879, 12.861,
12.973, 13.235, 13.53 , 13.531, 13.137, 13.166, 13.31 , 13.103,
13.007, 12.643, 12.69 , 12.216, 12.385, 12.046, 12.321, 11.9 ,
11.772, 11.816, 11.871, 11.59 , 11.518, 11.94 , 11.803, 11.924,
12.183, 12.136, 12.361, 12.406, 11.932, 11.684, 11.292, 11.388,
11.874, 12.184, 12.002, 12.16 , 11.741, 11.26 , 11.123, 11.534,
11.777, 11.407, 11.275, 11.679, 11.62 , 11.218, 11.235, 11.352,
11.366, 11.061, 10.661, 10.582, 10.899, 11.352, 11.792, 11.475,
11.263, 11.538, 11.183, 10.936, 11.399, 11.171, 11.214, 10.89 ,
10.728, 11.191, 11.646, 11.62 , 11.195, 11.178, 11.18 , 10.956,
11.205, 10.87 , 11.098, 10.639, 10.487, 10.507, 10.92 , 10.558,
10.119, 9.882, 9.573, 9.515, 9.845, 9.852, 9.495, 9.726,
10.116, 10.452, 10.77 , 11.225, 10.92 , 10.824, 11.096, 11.542,
11.06 , 10.568, 10.585, 10.884, 10.401, 10.068, 9.964, 10.285,
10.239, 10.036, 10.417, 10.132, 9.839, 9.556, 9.084, 9.239,
9.304, 9.067, 8.587, 8.471, 8.007, 8.321, 8.55 , 9.008,
9.138, 9.088, 9.434, 9.156, 9.65 , 9.431, 9.654, 10.079,
10.411, 10.865, 10.51 , 10.205, 10.519, 10.367, 10.855, 10.642,
10.298, 10.622, 10.173, 9.792, 9.995, 9.904, 9.771, 9.597,
9.506, 9.212, 9.688, 10.032, 9.723, 9.839, 9.918, 10.332,
10.236, 9.989, 10.192, 10.685, 10.908, 11.275, 11.72 , 12.158,
12.045, 12.244, 12.333, 12.246, 12.552, 12.958, 13.11 , 13.53 ,
13.123, 13.138, 13.57 , 13.389, 13.511, 13.759, 13.698, 13.744,
13.467, 13.795, 13.665, 13.377, 13.423, 13.772, 13.295, 13.073,
12.718, 12.388, 12.399, 12.185, 11.941, 11.818, 11.465, 11.811,
12.163, 11.86 , 11.935, 11.809, 12.145, 12.624, 12.768, 12.321,
12.277, 11.889, 12.11 , 12.606, 12.943, 12.945, 13.112, 13.199,
13.664, 14.051, 14.189, 14.339, 14.611, 14.656, 15.112, 15.086,
15.263, 15.021, 15.346, 15.572, 15.607, 15.983, 16.151, 16.215,
16.096, 16.089, 16.32 , 16.59 , 16.657, 16.752, 16.583, 16.743,
16.373, 16.662, 16.243, 16.163, 16.491, 16.958, 16.977, 17.225,
17.637, 17.344, 17.684, 17.892, 18.036, 18.182, 17.803, 17.588,
17.101, 17.538, 17.124, 16.787, 17.167, 17.138, 16.955, 17.148,
17.135, 17.635, 17.718, 17.675, 17.622, 17.358, 17.754, 17.729,
17.576, 17.772, 18.239, 18.441, 18.729, 18.319, 18.608, 18.493,
18.069, 18.122, 18.314, 18.423, 18.709, 18.548, 18.384, 18.391,
17.988, 17.986, 17.653, 17.249, 17.298, 17.06 , 17.36 , 17.108,
17.348, 17.596, 17.46 , 17.635, 17.275, 17.291, 16.933, 17.337,
17.231, 17.146, 17.148, 16.751, 16.891, 17.038, 16.735, 16.64 ,
16.231, 15.957, 15.977, 16.077, 16.054, 15.797, 15.67 , 15.911,
16.077, 16.17 , 15.722, 15.258, 14.877, 15.138, 15. , 14.811,
14.698, 14.407, 14.583, 14.704, 15.153, 15.436, 15.634, 15.453,
15.877, 15.696, 15.563, 15.927, 16.255, 16.696, 16.266, 16.698,
16.365, 16.493, 16.973, 16.71 , 16.327, 16.605, 16.486, 16.846,
16.935, 17.21 , 17.389, 17.546, 17.773, 17.641, 17.485, 17.794,
17.354, 16.904, 16.675, 16.43 , 16.898, 16.819, 16.921, 17.201,
17.617, 17.368, 17.864, 17.484],
columns=['value'])
self.long_bench = pd.DataFrame([ 9.7 , 10.179, 10.321, 9.855, 9.936, 10.096, 10.331, 10.662,
10.59 , 11.031, 11.154, 10.945, 10.625, 10.233, 10.284, 10.252,
10.221, 10.352, 10.444, 10.773, 10.904, 11.104, 10.797, 10.55 ,
10.943, 11.352, 11.641, 11.983, 11.696, 12.138, 12.365, 12.379,
11.969, 12.454, 12.947, 13.119, 13.013, 12.763, 12.632, 13.034,
12.681, 12.561, 12.938, 12.867, 13.202, 13.132, 13.539, 13.91 ,
13.456, 13.692, 13.771, 13.904, 14.069, 13.728, 13.97 , 14.228,
13.84 , 14.041, 13.963, 13.689, 13.543, 13.858, 14.118, 13.987,
13.611, 14.028, 14.229, 14.41 , 14.74 , 15.03 , 14.915, 15.207,
15.354, 15.665, 15.877, 15.682, 15.625, 15.175, 15.105, 14.893,
14.86 , 15.097, 15.178, 15.293, 15.238, 15. , 15.283, 14.994,
14.907, 14.664, 14.888, 15.297, 15.313, 15.368, 14.956, 14.802,
14.506, 14.257, 14.619, 15.019, 15.049, 14.625, 14.894, 14.978,
15.434, 15.578, 16.038, 16.107, 16.277, 16.365, 16.204, 16.465,
16.401, 16.895, 17.057, 16.621, 16.225, 16.075, 15.863, 16.292,
16.551, 16.724, 16.817, 16.81 , 17.192, 16.86 , 16.745, 16.707,
16.552, 16.133, 16.301, 16.08 , 15.81 , 15.75 , 15.909, 16.127,
16.457, 16.204, 16.329, 16.748, 16.624, 17.011, 16.548, 16.831,
16.653, 16.791, 16.57 , 16.778, 16.928, 16.932, 17.22 , 16.876,
17.301, 17.422, 17.689, 17.316, 17.547, 17.534, 17.409, 17.669,
17.416, 17.859, 17.477, 17.307, 17.245, 17.352, 17.851, 17.412,
17.144, 17.138, 17.085, 16.926, 16.674, 16.854, 17.064, 16.95 ,
16.609, 16.957, 16.498, 16.552, 16.175, 15.858, 15.697, 15.781,
15.583, 15.36 , 15.558, 16.046, 15.968, 15.905, 16.358, 16.783,
17.048, 16.762, 17.224, 17.363, 17.246, 16.79 , 16.608, 16.423,
15.991, 15.527, 15.147, 14.759, 14.792, 15.206, 15.148, 15.046,
15.429, 14.999, 15.407, 15.124, 14.72 , 14.713, 15.022, 15.092,
14.982, 15.001, 14.734, 14.713, 14.841, 14.562, 15.005, 15.483,
15.472, 15.277, 15.503, 15.116, 15.12 , 15.442, 15.476, 15.789,
15.36 , 15.764, 16.218, 16.493, 16.642, 17.088, 16.816, 16.645,
16.336, 16.511, 16.2 , 15.994, 15.86 , 15.929, 16.316, 16.416,
16.746, 17.173, 17.531, 17.627, 17.407, 17.49 , 17.768, 17.509,
17.795, 18.147, 18.63 , 18.945, 19.021, 19.518, 19.6 , 19.744,
19.63 , 19.32 , 18.933, 19.297, 19.598, 19.446, 19.236, 19.198,
19.144, 19.159, 19.065, 19.032, 18.586, 18.272, 18.119, 18.3 ,
17.894, 17.744, 17.5 , 17.083, 17.092, 16.864, 16.453, 16.31 ,
16.681, 16.342, 16.447, 16.715, 17.068, 17.067, 16.822, 16.673,
16.675, 16.592, 16.686, 16.397, 15.902, 15.597, 15.357, 15.162,
15.348, 15.603, 15.283, 15.257, 15.082, 14.621, 14.366, 14.039,
13.957, 14.141, 13.854, 14.243, 14.414, 14.033, 13.93 , 14.104,
14.461, 14.249, 14.053, 14.165, 14.035, 14.408, 14.501, 14.019,
14.265, 14.67 , 14.797, 14.42 , 14.681, 15.16 , 14.715, 14.292,
14.411, 14.656, 15.094, 15.366, 15.055, 15.198, 14.762, 14.294,
13.854, 13.811, 13.549, 13.927, 13.897, 13.421, 13.037, 13.32 ,
13.721, 13.511, 13.999, 13.529, 13.418, 13.881, 14.326, 14.362,
13.987, 14.015, 13.599, 13.343, 13.307, 13.689, 13.851, 13.404,
13.577, 13.395, 13.619, 13.195, 12.904, 12.553, 12.294, 12.649,
12.425, 11.967, 12.062, 11.71 , 11.645, 12.058, 12.136, 11.749,
11.953, 12.401, 12.044, 11.901, 11.631, 11.396, 11.036, 11.244,
10.864, 11.207, 11.135, 11.39 , 11.723, 12.084, 11.8 , 11.471,
11.33 , 11.504, 11.295, 11.3 , 10.901, 10.494, 10.825, 11.054,
10.866, 10.713, 10.875, 10.846, 10.947, 11.422, 11.158, 10.94 ,
10.521, 10.36 , 10.411, 10.792, 10.472, 10.305, 10.525, 10.853,
10.556, 10.72 , 10.54 , 10.583, 10.299, 10.061, 10.004, 9.903,
9.796, 9.472, 9.246, 9.54 , 9.456, 9.177, 9.484, 9.557,
9.493, 9.968, 9.536, 9.39 , 8.922, 8.423, 8.518, 8.686,
8.771, 9.098, 9.281, 8.858, 9.027, 8.553, 8.784, 8.996,
9.379, 9.846, 9.855, 9.502, 9.608, 9.761, 9.409, 9.4 ,
9.332, 9.34 , 9.284, 8.844, 8.722, 8.376, 8.775, 8.293,
8.144, 8.63 , 8.831, 8.957, 9.18 , 9.601, 9.695, 10.018,
9.841, 9.743, 9.292, 8.85 , 9.316, 9.288, 9.519, 9.738,
9.289, 9.785, 9.804, 10.06 , 10.188, 10.095, 9.739, 9.881,
9.7 , 9.991, 10.391, 10.002],
columns=['value'])
def test_performance_stats(self):
"""test the function performance_statistics()
"""
pass
def test_fv(self):
print(f'test with test data and empty DataFrame')
self.assertAlmostEqual(eval_fv(self.test_data1), 6.39245474)
self.assertAlmostEqual(eval_fv(self.test_data2), 10.05126375)
self.assertAlmostEqual(eval_fv(self.test_data3), 6.95068113)
self.assertAlmostEqual(eval_fv(self.test_data4), 8.86508591)
self.assertAlmostEqual(eval_fv(self.test_data5), 4.58627048)
self.assertAlmostEqual(eval_fv(self.test_data6), 4.10346795)
self.assertAlmostEqual(eval_fv(self.test_data7), 2.92532313)
self.assertAlmostEqual(eval_fv(pd.DataFrame()), -np.inf)
print(f'Error testing')
self.assertRaises(AssertionError, eval_fv, 15)
self.assertRaises(KeyError,
eval_fv,
pd.DataFrame([1, 2, 3], columns=['non_value']))
def test_max_drawdown(self):
print(f'test with test data and empty DataFrame')
self.assertAlmostEqual(eval_max_drawdown(self.test_data1)[0], 0.264274308)
self.assertEqual(eval_max_drawdown(self.test_data1)[1], 53)
self.assertEqual(eval_max_drawdown(self.test_data1)[2], 86)
self.assertTrue(np.isnan(eval_max_drawdown(self.test_data1)[3]))
self.assertAlmostEqual(eval_max_drawdown(self.test_data2)[0], 0.334690849)
self.assertEqual(eval_max_drawdown(self.test_data2)[1], 0)
self.assertEqual(eval_max_drawdown(self.test_data2)[2], 10)
self.assertEqual(eval_max_drawdown(self.test_data2)[3], 19)
self.assertAlmostEqual(eval_max_drawdown(self.test_data3)[0], 0.244452899)
self.assertEqual(eval_max_drawdown(self.test_data3)[1], 90)
self.assertEqual(eval_max_drawdown(self.test_data3)[2], 99)
self.assertTrue(np.isnan(eval_max_drawdown(self.test_data3)[3]))
self.assertAlmostEqual(eval_max_drawdown(self.test_data4)[0], 0.201849684)
self.assertEqual(eval_max_drawdown(self.test_data4)[1], 14)
self.assertEqual(eval_max_drawdown(self.test_data4)[2], 50)
self.assertEqual(eval_max_drawdown(self.test_data4)[3], 54)
self.assertAlmostEqual(eval_max_drawdown(self.test_data5)[0], 0.534206456)
self.assertEqual(eval_max_drawdown(self.test_data5)[1], 21)
self.assertEqual(eval_max_drawdown(self.test_data5)[2], 60)
self.assertTrue(np.isnan(eval_max_drawdown(self.test_data5)[3]))
self.assertAlmostEqual(eval_max_drawdown(self.test_data6)[0], 0.670062689)
self.assertEqual(eval_max_drawdown(self.test_data6)[1], 0)
self.assertEqual(eval_max_drawdown(self.test_data6)[2], 70)
self.assertTrue(np.isnan(eval_max_drawdown(self.test_data6)[3]))
self.assertAlmostEqual(eval_max_drawdown(self.test_data7)[0], 0.783577449)
self.assertEqual(eval_max_drawdown(self.test_data7)[1], 17)
self.assertEqual(eval_max_drawdown(self.test_data7)[2], 51)
self.assertTrue(np.isnan(eval_max_drawdown(self.test_data7)[3]))
self.assertEqual(eval_max_drawdown(pd.DataFrame()), -np.inf)
print(f'Error testing')
self.assertRaises(AssertionError, eval_fv, 15)
self.assertRaises(KeyError,
eval_fv,
pd.DataFrame([1, 2, 3], columns=['non_value']))
# test max drawdown == 0:
# TODO: investigate how division by zero changes the result
self.assertAlmostEqual(eval_max_drawdown(self.test_data4 - 5)[0], 1.0770474121951792)
self.assertEqual(eval_max_drawdown(self.test_data4 - 5)[1], 14)
self.assertEqual(eval_max_drawdown(self.test_data4 - 5)[2], 50)
def test_info_ratio(self):
reference = self.test_data1
self.assertAlmostEqual(eval_info_ratio(self.test_data2, reference, 'value'), 0.075553316)
self.assertAlmostEqual(eval_info_ratio(self.test_data3, reference, 'value'), 0.018949457)
self.assertAlmostEqual(eval_info_ratio(self.test_data4, reference, 'value'), 0.056328143)
self.assertAlmostEqual(eval_info_ratio(self.test_data5, reference, 'value'), -0.004270068)
self.assertAlmostEqual(eval_info_ratio(self.test_data6, reference, 'value'), 0.009198027)
self.assertAlmostEqual(eval_info_ratio(self.test_data7, reference, 'value'), -0.000890283)
def test_volatility(self):
self.assertAlmostEqual(eval_volatility(self.test_data1), 0.748646166)
self.assertAlmostEqual(eval_volatility(self.test_data2), 0.75527442)
self.assertAlmostEqual(eval_volatility(self.test_data3), 0.654188853)
self.assertAlmostEqual(eval_volatility(self.test_data4), 0.688375814)
self.assertAlmostEqual(eval_volatility(self.test_data5), 1.089989522)
self.assertAlmostEqual(eval_volatility(self.test_data6), 1.775419308)
self.assertAlmostEqual(eval_volatility(self.test_data7), 1.962758406)
self.assertAlmostEqual(eval_volatility(self.test_data1, logarithm=False), 0.750993311)
self.assertAlmostEqual(eval_volatility(self.test_data2, logarithm=False), 0.75571473)
self.assertAlmostEqual(eval_volatility(self.test_data3, logarithm=False), 0.655331424)
self.assertAlmostEqual(eval_volatility(self.test_data4, logarithm=False), 0.692683021)
self.assertAlmostEqual(eval_volatility(self.test_data5, logarithm=False), 1.09602969)
self.assertAlmostEqual(eval_volatility(self.test_data6, logarithm=False), 1.774789504)
self.assertAlmostEqual(eval_volatility(self.test_data7, logarithm=False), 2.003329156)
self.assertEqual(eval_volatility(pd.DataFrame()), -np.inf)
self.assertRaises(AssertionError, eval_volatility, [1, 2, 3])
# test volatility calculation on the long data series
expected_volatility = np.array([ np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
0.39955371, 0.39974258, 0.40309866, 0.40486593, 0.4055514 ,
0.40710639, 0.40708157, 0.40609006, 0.4073625 , 0.40835305,
0.41155304, 0.41218193, 0.41207489, 0.41300276, 0.41308415,
0.41292392, 0.41207645, 0.41238397, 0.41229291, 0.41164056,
0.41316317, 0.41348842, 0.41462249, 0.41474574, 0.41652625,
0.41649176, 0.41701556, 0.4166593 , 0.41684221, 0.41491689,
0.41435209, 0.41549087, 0.41849338, 0.41998049, 0.41959106,
0.41907311, 0.41916103, 0.42120773, 0.42052391, 0.42111225,
0.42124589, 0.42356445, 0.42214672, 0.42324022, 0.42476639,
0.42621689, 0.42549439, 0.42533678, 0.42539414, 0.42545038,
0.42593637, 0.42652095, 0.42665489, 0.42699563, 0.42798159,
0.42784512, 0.42898006, 0.42868781, 0.42874188, 0.42789631,
0.4277768 , 0.42776827, 0.42685216, 0.42660989, 0.42563155,
0.42618281, 0.42606281, 0.42505222, 0.42653242, 0.42555378,
0.42500842, 0.42561939, 0.42442059, 0.42395414, 0.42384356,
0.42319135, 0.42397497, 0.42488579, 0.42449729, 0.42508766,
0.42509878, 0.42456616, 0.42535577, 0.42681884, 0.42688552,
0.42779918, 0.42706058, 0.42792887, 0.42762114, 0.42894045,
0.42977398, 0.42919859, 0.42829041, 0.42780946, 0.42825318,
0.42858952, 0.42858315, 0.42805601, 0.42764751, 0.42744107,
0.42775518, 0.42707283, 0.4258592 , 0.42615335, 0.42526286,
0.4248906 , 0.42368986, 0.4232565 , 0.42265079, 0.42263954,
0.42153046, 0.42132051, 0.41995353, 0.41916605, 0.41914271,
0.41876945, 0.41740175, 0.41583884, 0.41614026, 0.41457908,
0.41472411, 0.41310876, 0.41261041, 0.41212369, 0.41211677,
0.4100645 , 0.40852504, 0.40860297, 0.40745338, 0.40698661,
0.40644546, 0.40591375, 0.40640744, 0.40620663, 0.40656649,
0.40727154, 0.40797605, 0.40807137, 0.40808913, 0.40809676,
0.40711767, 0.40724628, 0.40713077, 0.40772698, 0.40765157,
0.40658297, 0.4065991 , 0.405011 , 0.40537645, 0.40432626,
0.40390177, 0.40237701, 0.40291623, 0.40301797, 0.40324145,
0.40312864, 0.40328316, 0.40190955, 0.40246506, 0.40237663,
0.40198407, 0.401969 , 0.40185623, 0.40198313, 0.40005643,
0.39940743, 0.39850438, 0.39845398, 0.39695093, 0.39697295,
0.39663201, 0.39675444, 0.39538699, 0.39331959, 0.39326074,
0.39193287, 0.39157266, 0.39021327, 0.39062591, 0.38917591,
0.38976991, 0.38864187, 0.38872158, 0.38868096, 0.38868377,
0.38842057, 0.38654784, 0.38649517, 0.38600464, 0.38408115,
0.38323049, 0.38260215, 0.38207663, 0.38142669, 0.38003262,
0.37969367, 0.37768092, 0.37732108, 0.37741991, 0.37617779,
0.37698504, 0.37606784, 0.37499276, 0.37533731, 0.37350437,
0.37375172, 0.37385382, 0.37384003, 0.37338938, 0.37212288,
0.37273075, 0.370559 , 0.37038506, 0.37062153, 0.36964661,
0.36818564, 0.3656634 , 0.36539259, 0.36428672, 0.36502487,
0.3647148 , 0.36551435, 0.36409919, 0.36348181, 0.36254383,
0.36166601, 0.36142665, 0.35954942, 0.35846915, 0.35886759,
0.35813867, 0.35642888, 0.35375231, 0.35061783, 0.35078463,
0.34995508, 0.34688918, 0.34548257, 0.34633158, 0.34622833,
0.34652111, 0.34622774, 0.34540951, 0.34418809, 0.34276593,
0.34160916, 0.33811193, 0.33822709, 0.3391685 , 0.33883381])
test_volatility = eval_volatility(self.long_data)
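# The rolling series is read back from the DataFrame below, so eval_volatility is expected to
# attach it as a 'volatility' column on its input (the long-data sharp, beta and alpha tests
# below rely on the same side effect).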
test_volatility_roll = self.long_data['volatility'].values
self.assertAlmostEqual(test_volatility, np.nanmean(expected_volatility))
self.assertTrue(np.allclose(expected_volatility, test_volatility_roll, equal_nan=True))
def test_sharp(self):
self.assertAlmostEqual(eval_sharp(self.test_data1, 5, 0), 0.06135557)
self.assertAlmostEqual(eval_sharp(self.test_data2, 5, 0), 0.167858667)
self.assertAlmostEqual(eval_sharp(self.test_data3, 5, 0), 0.09950547)
self.assertAlmostEqual(eval_sharp(self.test_data4, 5, 0), 0.154928241)
self.assertAlmostEqual(eval_sharp(self.test_data5, 5, 0.002), 0.007868673)
self.assertAlmostEqual(eval_sharp(self.test_data6, 5, 0.002), 0.018306537)
self.assertAlmostEqual(eval_sharp(self.test_data7, 5, 0.002), 0.006259971)
# test Sharpe ratio calculation on the long data series
expected_sharp = np.array([ np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
-0.02346815, -0.02618783, -0.03763912, -0.03296276, -0.03085698,
-0.02851101, -0.02375842, -0.02016746, -0.01107885, -0.01426613,
-0.00787204, -0.01135784, -0.01164232, -0.01003481, -0.00022512,
-0.00046792, -0.01209378, -0.01278892, -0.01298135, -0.01938214,
-0.01671044, -0.02120509, -0.0244281 , -0.02416067, -0.02763238,
-0.027579 , -0.02372774, -0.02215294, -0.02467094, -0.02091266,
-0.02590194, -0.03049876, -0.02077131, -0.01483653, -0.02488144,
-0.02671638, -0.02561547, -0.01957986, -0.02479803, -0.02703162,
-0.02658087, -0.01641755, -0.01946472, -0.01647757, -0.01280889,
-0.00893643, -0.00643275, -0.00698457, -0.00549962, -0.00654677,
-0.00494757, -0.0035633 , -0.00109037, 0.00750654, 0.00451208,
0.00625502, 0.01221367, 0.01326454, 0.01535037, 0.02269538,
0.02028715, 0.02127712, 0.02333264, 0.02273159, 0.01670643,
0.01376513, 0.01265342, 0.02211647, 0.01612449, 0.00856706,
-0.00077147, -0.00268848, 0.00210993, -0.00443934, -0.00411912,
-0.0018756 , -0.00867461, -0.00581601, -0.00660835, -0.00861137,
-0.00678614, -0.01188408, -0.00589617, -0.00244323, -0.00201891,
-0.01042846, -0.01471016, -0.02167034, -0.02258554, -0.01306809,
-0.00909086, -0.01233746, -0.00595166, -0.00184208, 0.00750497,
0.01481886, 0.01761972, 0.01562886, 0.01446414, 0.01285826,
0.01357719, 0.00967613, 0.01636272, 0.01458437, 0.02280183,
0.02151903, 0.01700276, 0.01597368, 0.02114336, 0.02233297,
0.02585631, 0.02768459, 0.03519235, 0.04204535, 0.04328161,
0.04672855, 0.05046191, 0.04619848, 0.04525853, 0.05381529,
0.04598861, 0.03947394, 0.04665006, 0.05586077, 0.05617728,
0.06495018, 0.06205172, 0.05665466, 0.06500615, 0.0632062 ,
0.06084328, 0.05851466, 0.05659229, 0.05159347, 0.0432977 ,
0.0474047 , 0.04231723, 0.03613176, 0.03618391, 0.03591012,
0.03885674, 0.0402686 , 0.03846423, 0.04534014, 0.04721458,
0.05130912, 0.05026281, 0.05394312, 0.05529349, 0.05949243,
0.05463304, 0.06195165, 0.06767606, 0.06880985, 0.07048996,
0.07078815, 0.07420767, 0.06773439, 0.0658441 , 0.06470875,
0.06302349, 0.06456876, 0.06411282, 0.06216669, 0.067094 ,
0.07055075, 0.07254976, 0.07119253, 0.06173308, 0.05393352,
0.05681246, 0.05250643, 0.06099845, 0.0655544 , 0.06977334,
0.06636514, 0.06177949, 0.06869908, 0.06719767, 0.06178738,
0.05915714, 0.06882277, 0.06756821, 0.06507994, 0.06489791,
0.06553941, 0.073123 , 0.07576757, 0.06805446, 0.06063571,
0.05033801, 0.05206971, 0.05540306, 0.05249118, 0.05755587,
0.0586174 , 0.05051288, 0.0564852 , 0.05757284, 0.06358355,
0.06130082, 0.04925482, 0.03834472, 0.04163981, 0.04648316,
0.04457858, 0.04324626, 0.04328791, 0.04156207, 0.04818652,
0.04972634, 0.06024123, 0.06489556, 0.06255485, 0.06069815,
0.06466389, 0.07081163, 0.07895358, 0.0881782 , 0.09374151,
0.08336506, 0.08764795, 0.09080174, 0.08808926, 0.08641158,
0.07811943, 0.06885318, 0.06479503, 0.06851185, 0.07382819,
0.07047903, 0.06658251, 0.07638379, 0.08667974, 0.08867918,
0.08245323, 0.08961866, 0.09905298, 0.0961908 , 0.08562706,
0.0839014 , 0.0849072 , 0.08338395, 0.08783487, 0.09463609,
0.10332336, 0.11806497, 0.11220297, 0.11589097, 0.11678405])
test_sharp = eval_sharp(self.long_data, 5, 0.00035)
self.assertAlmostEqual(np.nanmean(expected_sharp), test_sharp)
self.assertTrue(np.allclose(self.long_data['sharp'].values, expected_sharp, equal_nan=True))
def test_beta(self):
reference = self.test_data1
self.assertAlmostEqual(eval_beta(self.test_data2, reference, 'value'), -0.017148939)
self.assertAlmostEqual(eval_beta(self.test_data3, reference, 'value'), -0.042204233)
self.assertAlmostEqual(eval_beta(self.test_data4, reference, 'value'), -0.15652986)
self.assertAlmostEqual(eval_beta(self.test_data5, reference, 'value'), -0.049195532)
self.assertAlmostEqual(eval_beta(self.test_data6, reference, 'value'), -0.026995082)
self.assertAlmostEqual(eval_beta(self.test_data7, reference, 'value'), -0.01147809)
self.assertRaises(TypeError, eval_beta, [1, 2, 3], reference, 'value')
self.assertRaises(TypeError, eval_beta, self.test_data3, [1, 2, 3], 'value')
self.assertRaises(KeyError, eval_beta, self.test_data3, reference, 'not_found_value')
# test beta calculation on the long data series
expected_beta = np.array([ np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
-0.04988841, -0.05127618, -0.04692104, -0.04272652, -0.04080598,
-0.0493347 , -0.0460858 , -0.0416761 , -0.03691527, -0.03724924,
-0.03678865, -0.03987324, -0.03488321, -0.02567672, -0.02690303,
-0.03010128, -0.02437967, -0.02571932, -0.02455681, -0.02839811,
-0.03358653, -0.03396697, -0.03466321, -0.03050966, -0.0247583 ,
-0.01629325, -0.01880895, -0.01480403, -0.01348783, -0.00544294,
-0.00648176, -0.00467036, -0.01135331, -0.0156841 , -0.02340763,
-0.02615705, -0.02730771, -0.02906174, -0.02860664, -0.02412914,
-0.02066416, -0.01744816, -0.02185133, -0.02145285, -0.02681765,
-0.02827694, -0.02394581, -0.02744096, -0.02778825, -0.02703065,
-0.03160023, -0.03615371, -0.03681072, -0.04265126, -0.04344738,
-0.04232421, -0.04705272, -0.04533344, -0.04605934, -0.05272737,
-0.05156463, -0.05134196, -0.04730733, -0.04425352, -0.03869831,
-0.04159571, -0.04223998, -0.04346747, -0.04229844, -0.04740093,
-0.04992507, -0.04621232, -0.04477644, -0.0486915 , -0.04598224,
-0.04943463, -0.05006391, -0.05362256, -0.04994067, -0.05464769,
-0.05443275, -0.05513493, -0.05173594, -0.04500994, -0.04662891,
-0.03903505, -0.0419592 , -0.04307773, -0.03925718, -0.03711574,
-0.03992631, -0.0433058 , -0.04533641, -0.0461183 , -0.05600344,
-0.05758377, -0.05959874, -0.05605942, -0.06002859, -0.06253002,
-0.06747014, -0.06427915, -0.05931947, -0.05769974, -0.04791515,
-0.05175088, -0.05748039, -0.05385232, -0.05072975, -0.05052637,
-0.05125567, -0.05005785, -0.05325104, -0.04977727, -0.04947867,
-0.05148544, -0.05739156, -0.05742069, -0.06047279, -0.0558414 ,
-0.06086126, -0.06265151, -0.06411129, -0.06828052, -0.06781762,
-0.07083409, -0.07211207, -0.06799162, -0.06913295, -0.06775162,
-0.0696265 , -0.06678248, -0.06867502, -0.06581961, -0.07055823,
-0.06448184, -0.06097973, -0.05795587, -0.0618383 , -0.06130145,
-0.06050652, -0.05936661, -0.05749424, -0.0499 , -0.05050495,
-0.04962687, -0.05033439, -0.05070116, -0.05422009, -0.05369759,
-0.05548943, -0.05907353, -0.05933035, -0.05927918, -0.06227663,
-0.06011455, -0.05650432, -0.05828134, -0.05620949, -0.05715323,
-0.05482478, -0.05387113, -0.05095559, -0.05377999, -0.05334267,
-0.05220438, -0.04001521, -0.03892434, -0.03660782, -0.04282708,
-0.04324623, -0.04127048, -0.04227559, -0.04275226, -0.04347049,
-0.04125853, -0.03806295, -0.0330632 , -0.03155531, -0.03277152,
-0.03304518, -0.03878731, -0.03830672, -0.03727434, -0.0370571 ,
-0.04509224, -0.04207632, -0.04116198, -0.04545179, -0.04584584,
-0.05287341, -0.05417433, -0.05175836, -0.05005509, -0.04268674,
-0.03442321, -0.03457309, -0.03613426, -0.03524391, -0.03629479,
-0.04361312, -0.02626705, -0.02406115, -0.03046384, -0.03181044,
-0.03375164, -0.03661673, -0.04520779, -0.04926951, -0.05726738,
-0.0584486 , -0.06220608, -0.06800563, -0.06797431, -0.07562211,
-0.07481996, -0.07731229, -0.08413381, -0.09031826, -0.09691925,
-0.11018071, -0.11952675, -0.10826026, -0.11173895, -0.10756359,
-0.10775916, -0.11664559, -0.10505051, -0.10606547, -0.09855355,
-0.10004159, -0.10857084, -0.12209301, -0.11605758, -0.11105113,
-0.1155195 , -0.11569505, -0.10513348, -0.09611072, -0.10719791,
-0.10843965, -0.11025856, -0.10247839, -0.10554044, -0.10927647,
-0.10645088, -0.09982498, -0.10542734, -0.09631372, -0.08229695])
test_beta_mean = eval_beta(self.long_data, self.long_bench, 'value')
test_beta_roll = self.long_data['beta'].values
self.assertAlmostEqual(test_beta_mean, np.nanmean(expected_beta))
self.assertTrue(np.allclose(test_beta_roll, expected_beta, equal_nan=True))
def test_alpha(self):
reference = self.test_data1
self.assertAlmostEqual(eval_alpha(self.test_data2, 5, reference, 'value', 0.5), 11.63072977)
self.assertAlmostEqual(eval_alpha(self.test_data3, 5, reference, 'value', 0.5), 1.886590071)
self.assertAlmostEqual(eval_alpha(self.test_data4, 5, reference, 'value', 0.5), 6.827021872)
self.assertAlmostEqual(eval_alpha(self.test_data5, 5, reference, 'value', 0.92), -1.192265168)
self.assertAlmostEqual(eval_alpha(self.test_data6, 5, reference, 'value', 0.92), -1.437142359)
self.assertAlmostEqual(eval_alpha(self.test_data7, 5, reference, 'value', 0.92), -1.781311545)
# test alpha calculation on the long data series
expected_alpha = np.array([ np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
-0.09418119, -0.11188463, -0.17938358, -0.15588172, -0.1462678 ,
-0.13089586, -0.10780125, -0.09102891, -0.03987585, -0.06075686,
-0.02459503, -0.04104284, -0.0444565 , -0.04074585, 0.02191275,
0.02255955, -0.05583375, -0.05875539, -0.06055551, -0.09648245,
-0.07913737, -0.10627829, -0.12320965, -0.12368335, -0.1506743 ,
-0.15768033, -0.13638829, -0.13065298, -0.14537834, -0.127428 ,
-0.15504529, -0.18184636, -0.12652146, -0.09190138, -0.14847221,
-0.15840648, -0.1525789 , -0.11859418, -0.14700954, -0.16295761,
-0.16051645, -0.10364859, -0.11961134, -0.10258267, -0.08090148,
-0.05727746, -0.0429945 , -0.04672356, -0.03581408, -0.0439215 ,
-0.03429495, -0.0260362 , -0.01075022, 0.04931808, 0.02779388,
0.03984083, 0.08311951, 0.08995566, 0.10522428, 0.16159058,
0.14238174, 0.14759783, 0.16257712, 0.158908 , 0.11302115,
0.0909566 , 0.08272888, 0.15261884, 0.10546376, 0.04990313,
-0.01284111, -0.02720704, 0.00454725, -0.03965491, -0.03818265,
-0.02186992, -0.06574751, -0.04846454, -0.05204211, -0.06316498,
-0.05095099, -0.08502656, -0.04681162, -0.02362027, -0.02205091,
-0.07706374, -0.10371841, -0.14434688, -0.14797935, -0.09055402,
-0.06739549, -0.08824959, -0.04855888, -0.02291244, 0.04027138,
0.09370505, 0.11472939, 0.10243593, 0.0921445 , 0.07662648,
0.07946651, 0.05450718, 0.10497677, 0.09068334, 0.15462924,
0.14231034, 0.10544952, 0.09980256, 0.14035223, 0.14942974,
0.17624102, 0.19035477, 0.2500807 , 0.30724652, 0.31768915,
0.35007521, 0.38412975, 0.34356521, 0.33614463, 0.41206165,
0.33999177, 0.28045963, 0.34076789, 0.42220356, 0.42314636,
0.50790423, 0.47713348, 0.42520169, 0.50488411, 0.48705211,
0.46252601, 0.44325578, 0.42640573, 0.37986783, 0.30652822,
0.34503393, 0.2999069 , 0.24928617, 0.24730218, 0.24326897,
0.26657905, 0.27861168, 0.26392824, 0.32552649, 0.34177792,
0.37837011, 0.37025267, 0.4030612 , 0.41339361, 0.45076809,
0.40383354, 0.47093422, 0.52505036, 0.53614256, 0.5500943 ,
0.55319293, 0.59021451, 0.52358459, 0.50605947, 0.49359168,
0.47895956, 0.49320243, 0.4908336 , 0.47310767, 0.51821564,
0.55105932, 0.57291504, 0.5599809 , 0.46868842, 0.39620087,
0.42086934, 0.38317217, 0.45934108, 0.50048866, 0.53941991,
0.50676751, 0.46500915, 0.52993663, 0.51668366, 0.46405428,
0.44100603, 0.52726147, 0.51565458, 0.49186248, 0.49001081,
0.49367648, 0.56422294, 0.58882785, 0.51334664, 0.44386256,
0.35056709, 0.36490029, 0.39205071, 0.3677061 , 0.41134736,
0.42315067, 0.35356394, 0.40324562, 0.41340007, 0.46503322,
0.44355762, 0.34854314, 0.26412842, 0.28633753, 0.32335224,
0.30761141, 0.29709569, 0.29570487, 0.28000063, 0.32802547,
0.33967726, 0.42511212, 0.46252357, 0.44244974, 0.42152907,
0.45436727, 0.50482359, 0.57339198, 0.6573356 , 0.70912003,
0.60328917, 0.6395092 , 0.67015805, 0.64241557, 0.62779142,
0.55028063, 0.46448736, 0.43709245, 0.46777983, 0.51789439,
0.48594916, 0.4456216 , 0.52008189, 0.60548684, 0.62792473,
0.56645031, 0.62766439, 0.71829315, 0.69481356, 0.59550329,
0.58133754, 0.59014148, 0.58026655, 0.61719273, 0.67373203,
0.75573056, 0.89501633, 0.8347253 , 0.87964685, 0.89015835])
test_alpha_mean = eval_alpha(self.long_data, 100, self.long_bench, 'value')
test_alpha_roll = self.long_data['alpha'].values
self.assertAlmostEqual(test_alpha_mean, np.nanmean(expected_alpha))
self.assertTrue(np.allclose(test_alpha_roll, expected_alpha, equal_nan=True))
def test_calmar(self):
"""test evaluate function eval_calmar()"""
pass
def test_benchmark(self):
reference = self.test_data1
tr, yr = eval_benchmark(self.test_data2, reference, 'value')
self.assertAlmostEqual(tr, 0.19509091)
self.assertAlmostEqual(yr, 0.929154957)
tr, yr = eval_benchmark(self.test_data3, reference, 'value')
self.assertAlmostEqual(tr, 0.19509091)
self.assertAlmostEqual(yr, 0.929154957)
tr, yr = eval_benchmark(self.test_data4, reference, 'value')
self.assertAlmostEqual(tr, 0.19509091)
self.assertAlmostEqual(yr, 0.929154957)
tr, yr = eval_benchmark(self.test_data5, reference, 'value')
self.assertAlmostEqual(tr, 0.19509091)
self.assertAlmostEqual(yr, 0.929154957)
tr, yr = eval_benchmark(self.test_data6, reference, 'value')
self.assertAlmostEqual(tr, 0.19509091)
self.assertAlmostEqual(yr, 0.929154957)
tr, yr = eval_benchmark(self.test_data7, reference, 'value')
self.assertAlmostEqual(tr, 0.19509091)
self.assertAlmostEqual(yr, 0.929154957)
def test_evaluate(self):
pass
class TestLoop(unittest.TestCase):
"""通过一个假设但精心设计的例子来测试loop_step以及loop方法的正确性"""
def setUp(self):
self.shares = ['share1', 'share2', 'share3', 'share4', 'share5', 'share6', 'share7']
self.dates = ['2016/07/01', '2016/07/04', '2016/07/05', '2016/07/06', '2016/07/07',
'2016/07/08', '2016/07/11', '2016/07/12', '2016/07/13', '2016/07/14',
'2016/07/15', '2016/07/18', '2016/07/19', '2016/07/20', '2016/07/21',
'2016/07/22', '2016/07/25', '2016/07/26', '2016/07/27', '2016/07/28',
'2016/07/29', '2016/08/01', '2016/08/02', '2016/08/03', '2016/08/04',
'2016/08/05', '2016/08/08', '2016/08/09', '2016/08/10', '2016/08/11',
'2016/08/12', '2016/08/15', '2016/08/16', '2016/08/17', '2016/08/18',
'2016/08/19', '2016/08/22', '2016/08/23', '2016/08/24', '2016/08/25',
'2016/08/26', '2016/08/29', '2016/08/30', '2016/08/31', '2016/09/01',
'2016/09/02', '2016/09/05', '2016/09/06', '2016/09/07', '2016/09/08',
'2016/09/09', '2016/09/12', '2016/09/13', '2016/09/14', '2016/09/15',
'2016/09/16', '2016/09/19', '2016/09/20', '2016/09/21', '2016/09/22',
'2016/09/23', '2016/09/26', '2016/09/27', '2016/09/28', '2016/09/29',
'2016/09/30', '2016/10/10', '2016/10/11', '2016/10/12', '2016/10/13',
'2016/10/14', '2016/10/17', '2016/10/18', '2016/10/19', '2016/10/20',
'2016/10/21', '2016/10/23', '2016/10/24', '2016/10/25', '2016/10/26',
'2016/10/27', '2016/10/29', '2016/10/30', '2016/10/31', '2016/11/01',
'2016/11/02', '2016/11/05', '2016/11/06', '2016/11/07', '2016/11/08',
'2016/11/09', '2016/11/12', '2016/11/13', '2016/11/14', '2016/11/15',
'2016/11/16', '2016/11/19', '2016/11/20', '2016/11/21', '2016/11/22']
self.dates = [pd.Timestamp(date_text) for date_text in self.dates]
self.prices = np.array([[5.35, 5.09, 5.03, 4.98, 4.50, 5.09, 4.75],
[5.66, 4.84, 5.21, 5.44, 4.35, 5.06, 4.48],
[5.79, 4.60, 5.02, 5.45, 4.07, 4.76, 4.56],
[5.56, 4.63, 5.50, 5.74, 3.88, 4.62, 4.62],
[5.88, 4.64, 5.07, 5.46, 3.74, 4.63, 4.62],
[6.25, 4.51, 5.11, 5.45, 3.98, 4.25, 4.59],
[5.93, 4.96, 5.15, 5.24, 4.08, 4.13, 4.33],
[6.39, 4.65, 5.02, 5.47, 4.00, 3.91, 3.88],
[6.31, 4.26, 5.10, 5.58, 4.28, 3.77, 3.47],
[5.86, 3.77, 5.24, 5.36, 4.01, 3.43, 3.51],
[5.61, 3.39, 4.93, 5.38, 4.14, 3.68, 3.83],
[5.31, 3.76, 4.96, 5.30, 4.49, 3.63, 3.67],
[5.40, 4.06, 5.40, 5.77, 4.49, 3.94, 3.79],
[5.03, 3.87, 5.74, 5.75, 4.46, 4.40, 4.18],
[5.38, 3.91, 5.53, 6.15, 4.13, 4.03, 4.02],
[5.79, 4.13, 5.79, 6.04, 3.79, 3.93, 4.41],
[6.27, 4.27, 5.68, 6.01, 4.23, 3.50, 4.65],
[6.59, 4.57, 5.90, 5.71, 4.57, 3.39, 4.89],
[6.91, 5.04, 5.75, 5.23, 4.92, 3.30, 4.41],
[6.71, 5.31, 6.11, 5.47, 5.28, 3.25, 4.66],
[6.33, 5.40, 5.77, 5.79, 5.67, 2.94, 4.37],
[6.07, 5.21, 5.85, 5.82, 6.00, 2.71, 4.58],
[5.98, 5.06, 5.61, 5.61, 5.89, 2.55, 4.76],
[6.46, 4.69, 5.75, 5.31, 5.55, 2.21, 4.37],
[6.95, 5.12, 5.50, 5.24, 5.39, 2.29, 4.16],
[6.77, 5.27, 5.14, 5.41, 5.26, 2.21, 4.02],
[6.70, 5.72, 5.31, 5.60, 5.31, 2.04, 3.77],
[6.28, 6.10, 5.68, 5.28, 5.22, 2.39, 3.38],
[6.61, 6.27, 5.73, 4.99, 4.90, 2.30, 3.07],
[6.25, 6.49, 6.04, 5.09, 4.57, 2.41, 2.90],
[6.47, 6.16, 6.27, 5.39, 4.96, 2.40, 2.50],
[6.45, 6.26, 6.60, 5.58, 4.82, 2.79, 2.76],
[6.88, 6.39, 6.10, 5.33, 4.39, 2.67, 2.29],
[7.00, 6.58, 6.25, 5.48, 4.63, 2.27, 2.17],
[6.59, 6.20, 6.73, 5.10, 5.05, 2.09, 1.84],
[6.59, 5.70, 6.91, 5.39, 4.68, 2.55, 1.83],
[6.64, 5.20, 7.01, 5.30, 5.02, 2.22, 2.21],
[6.38, 5.37, 7.36, 5.04, 4.84, 2.59, 2.00],
[6.10, 5.40, 7.72, 5.51, 4.60, 2.59, 1.76],
[6.35, 5.22, 7.68, 5.43, 4.66, 2.95, 1.27],
[6.52, 5.38, 7.62, 5.23, 4.41, 2.69, 1.40],
[6.87, 5.53, 7.74, 4.99, 4.87, 2.20, 1.11],
[6.84, 6.03, 7.53, 5.43, 4.42, 2.69, 1.60],
[7.09, 5.77, 7.46, 5.40, 4.08, 2.65, 1.23],
[6.88, 5.66, 7.84, 5.60, 4.16, 2.63, 1.59],
[6.84, 6.08, 8.11, 5.66, 4.10, 2.14, 1.50],
[6.98, 5.62, 8.04, 6.01, 4.43, 2.39, 1.80],
[7.02, 5.63, 7.65, 5.64, 4.07, 1.95, 1.55],
[7.13, 6.11, 7.52, 5.67, 3.97, 2.32, 1.35],
[7.59, 6.03, 7.67, 5.30, 4.16, 2.69, 1.51],
[7.61, 6.27, 7.47, 4.91, 4.12, 2.51, 1.08],
[7.21, 6.28, 7.44, 5.37, 4.04, 2.62, 1.06],
[7.48, 6.52, 7.59, 5.75, 3.84, 2.16, 1.43],
[7.66, 7.00, 7.94, 6.08, 3.46, 2.35, 1.43],
[7.51, 7.34, 8.25, 6.58, 3.18, 2.31, 1.74],
[7.12, 7.34, 7.77, 6.78, 3.10, 1.96, 1.44],
[6.97, 7.68, 8.03, 7.20, 3.55, 2.35, 1.83],
[6.67, 8.09, 7.87, 7.65, 3.66, 2.58, 1.71],
[6.20, 7.68, 7.58, 8.00, 3.66, 2.40, 2.12],
[6.34, 7.58, 7.33, 7.92, 3.29, 2.20, 2.45],
[6.22, 7.46, 7.22, 8.30, 2.80, 2.31, 2.85],
[5.98, 7.59, 6.86, 8.46, 2.88, 2.16, 2.79],
[6.37, 7.19, 7.18, 7.99, 3.04, 2.16, 2.91],
[6.56, 7.40, 7.54, 8.19, 3.45, 2.20, 3.26],
[6.26, 7.48, 7.24, 8.61, 3.88, 1.73, 3.14],
[6.69, 7.93, 6.85, 8.66, 3.58, 1.93, 3.53],
[7.13, 8.23, 6.60, 8.91, 3.60, 2.25, 3.65],
[6.83, 8.35, 6.65, 9.08, 3.97, 2.69, 3.69],
[7.31, 8.44, 6.74, 9.34, 4.05, 2.59, 3.50],
[7.43, 8.35, 7.19, 8.96, 4.40, 2.14, 3.25],
[7.54, 8.58, 7.14, 8.98, 4.06, 1.68, 3.64],
[7.18, 8.82, 6.88, 8.50, 3.60, 1.98, 4.00],
[7.21, 9.09, 7.14, 8.65, 3.61, 2.14, 3.63],
[7.45, 9.02, 7.30, 8.94, 4.10, 1.89, 3.78],
[7.37, 8.87, 6.95, 8.63, 3.74, 1.97, 3.42],
[6.88, 9.22, 7.02, 8.65, 4.02, 1.99, 3.76],
[7.08, 9.04, 7.38, 8.40, 3.95, 2.37, 3.62],
[6.75, 8.60, 7.50, 8.38, 3.81, 2.14, 3.67],
[6.60, 8.48, 7.60, 8.23, 3.71, 2.35, 3.61],
[6.21, 8.71, 7.15, 8.04, 3.94, 1.86, 3.39],
[6.36, 8.79, 7.30, 7.91, 4.43, 2.14, 3.43],
[6.82, 8.93, 7.80, 7.57, 4.07, 2.39, 3.33],
[6.45, 9.36, 8.15, 7.73, 4.04, 2.53, 3.03],
[6.85, 9.68, 8.40, 7.74, 4.34, 2.47, 3.28],
[6.48, 10.16, 8.87, 8.07, 4.80, 2.93, 3.46],
[6.10, 10.56, 8.53, 7.99, 5.18, 3.09, 3.25],
[5.64, 10.63, 8.94, 7.92, 4.90, 2.93, 2.95],
[6.01, 10.55, 8.52, 8.40, 5.40, 3.22, 2.87],
[6.21, 10.65, 8.80, 8.80, 5.73, 3.06, 2.63],
[6.61, 10.55, 8.92, 8.47, 5.62, 2.90, 2.40],
[7.02, 10.19, 9.20, 8.07, 5.20, 2.68, 2.53],
[7.04, 10.48, 8.71, 7.87, 4.85, 2.46, 2.96],
[6.77, 10.36, 8.25, 8.02, 5.18, 2.41, 3.26],
[7.09, 10.03, 8.19, 8.39, 4.72, 2.74, 2.97],
[6.65, 10.24, 7.80, 8.69, 4.62, 3.15, 3.16],
[7.07, 10.01, 7.69, 8.81, 4.55, 3.40, 3.58],
[6.80, 10.14, 7.23, 8.99, 4.37, 3.82, 3.23],
[6.79, 10.31, 6.98, 9.10, 4.26, 4.02, 3.62],
[6.48, 9.88, 7.07, 8.90, 4.25, 3.76, 3.13],
[6.39, 10.05, 6.95, 8.87, 4.59, 4.10, 2.93]])
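# Operation signals for the seven shares. Judging from the expected results further down, a positive
# signal buys with that fraction of the current total portfolio value (capped by the available cash),
# while a negative signal sells that fraction of the current holding (-1 clears the position entirely).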
self.op_signals = np.array([[0, 0, 0, 0, 0.25, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0.1, 0.15],
[0.2, 0.2, 0, 0, 0, 0, 0],
[0, 0, 0.1, 0, 0, 0, 0],
[0, 0, 0, 0, -0.75, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[-0.333, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, -0.5, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, -1],
[0, 0, 0, 0, 0.2, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[-0.5, 0, 0, 0.15, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0.2, 0, -1, 0.2, 0],
[0.5, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0.2, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, -0.5, 0.2],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0.2, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0.15, 0, 0],
[-1, 0, 0.25, 0.25, 0, 0.25, 0],
[0, 0, 0, 0, 0, 0, 0],
[0.25, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0.2, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, -1, 0, 0.2],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, -1, 0, 0, 0, 0, 0],
[-1, 0, 0.15, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0]])
self.cash = qt.CashPlan(['2016/07/01', '2016/08/12', '2016/09/23'], [10000, 10000, 10000])
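# Two cost schemes: `rate` is friction-free, while `rate2` adds minimum fees per trade (buy_min=10,
# sell_min=5) and is only used in the moq test below.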
self.rate = qt.Cost(buy_fix=0,
sell_fix=0,
buy_rate=0,
sell_rate=0,
buy_min=0,
sell_min=0,
slipage=0)
self.rate2 = qt.Cost(buy_fix=0,
sell_fix=0,
buy_rate=0,
sell_rate=0,
buy_min=10,
sell_min=5,
slipage=0)
self.op_signal_df = pd.DataFrame(self.op_signals, index=self.dates, columns=self.shares)
self.history_list = pd.DataFrame(self.prices, index=self.dates, columns=self.shares)
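# Expected looping results, one row per trading day. Columns 0-6 hold the amounts of the seven shares,
# column 7 the remaining cash, column 8 (zero throughout, presumably the accumulated fee, since `rate`
# charges nothing) and column 9 the total portfolio value.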
self.res = np.array([[0.000, 0.000, 0.000, 0.000, 555.556, 0.000, 0.000, 7500.000, 0.000, 10000.000],
[0.000, 0.000, 0.000, 0.000, 555.556, 0.000, 0.000, 7500.000, 0.000, 9916.667],
[0.000, 0.000, 0.000, 0.000, 555.556, 205.065, 321.089, 5059.722, 0.000, 9761.111],
[346.982, 416.679, 0.000, 0.000, 555.556, 205.065, 321.089, 1201.278, 0.000, 9646.112],
[346.982, 416.679, 191.037, 0.000, 555.556, 205.065, 321.089, 232.719, 0.000, 9685.586],
[346.982, 416.679, 191.037, 0.000, 138.889, 205.065, 321.089, 1891.052, 0.000, 9813.218],
[346.982, 416.679, 191.037, 0.000, 138.889, 205.065, 321.089, 1891.052, 0.000, 9803.129],
[346.982, 416.679, 191.037, 0.000, 138.889, 205.065, 321.089, 1891.052, 0.000, 9608.020],
[346.982, 416.679, 191.037, 0.000, 138.889, 205.065, 321.089, 1891.052, 0.000, 9311.573],
[346.982, 416.679, 191.037, 0.000, 138.889, 205.065, 321.089, 1891.052, 0.000, 8883.625],
[346.982, 416.679, 191.037, 0.000, 138.889, 205.065, 321.089, 1891.052, 0.000, 8751.390],
[346.982, 416.679, 191.037, 0.000, 138.889, 205.065, 321.089, 1891.052, 0.000, 8794.181],
[346.982, 416.679, 191.037, 0.000, 138.889, 205.065, 321.089, 1891.052, 0.000, 9136.570],
[231.437, 416.679, 191.037, 0.000, 138.889, 205.065, 321.089, 2472.244, 0.000, 9209.359],
[231.437, 416.679, 191.037, 0.000, 138.889, 205.065, 321.089, 2472.244, 0.000, 9093.829],
[231.437, 416.679, 191.037, 0.000, 138.889, 205.065, 321.089, 2472.244, 0.000, 9387.554],
[231.437, 416.679, 191.037, 0.000, 138.889, 205.065, 321.089, 2472.244, 0.000, 9585.959],
[231.437, 416.679, 95.519, 0.000, 138.889, 205.065, 321.089, 3035.804, 0.000, 9928.777],
[231.437, 416.679, 95.519, 0.000, 138.889, 205.065, 321.089, 3035.804, 0.000, 10060.381],
[231.437, 416.679, 95.519, 0.000, 138.889, 205.065, 321.089, 3035.804, 0.000, 10281.002],
[231.437, 416.679, 95.519, 0.000, 138.889, 205.065, 321.089, 3035.804, 0.000, 10095.561],
[231.437, 416.679, 95.519, 0.000, 138.889, 205.065, 0.000, 4506.393, 0.000, 10029.957],
[231.437, 416.679, 95.519, 0.000, 474.224, 205.065, 0.000, 2531.270, 0.000, 9875.613],
[231.437, 416.679, 95.519, 0.000, 474.224, 205.065, 0.000, 2531.270, 0.000, 9614.946],
[231.437, 416.679, 95.519, 0.000, 474.224, 205.065, 0.000, 2531.270, 0.000, 9824.172],
[115.719, 416.679, 95.519, 269.850, 474.224, 205.065, 0.000, 1854.799, 0.000, 9732.574],
[115.719, 416.679, 95.519, 269.850, 474.224, 205.065, 0.000, 1854.799, 0.000, 9968.339],
[115.719, 416.679, 95.519, 269.850, 474.224, 205.065, 0.000, 1854.799, 0.000, 10056.158],
[115.719, 416.679, 95.519, 269.850, 474.224, 205.065, 0.000, 1854.799, 0.000, 9921.492],
[115.719, 416.679, 95.519, 269.850, 474.224, 205.065, 0.000, 1854.799, 0.000, 9894.162],
[115.719, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 6179.774, 0.000, 20067.937],
[1073.823, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 0.000, 0.000, 21133.508],
[1073.823, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 0.000, 0.000, 20988.848],
[1073.823, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 0.000, 0.000, 20596.743],
[1073.823, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 0.000, 0.000, 19910.773],
[1073.823, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 0.000, 0.000, 20776.707],
[1073.823, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 0.000, 0.000, 20051.797],
[1073.823, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 0.000, 0.000, 20725.388],
[1073.823, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 0.000, 0.000, 20828.880],
[1073.823, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 0.000, 0.000, 21647.181],
[1073.823, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 0.000, 0.000, 21310.169],
[1073.823, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 0.000, 0.000, 20852.099],
[1073.823, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 0.000, 0.000, 21912.395],
[1073.823, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 0.000, 0.000, 21937.828],
[1073.823, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 0.000, 0.000, 21962.458],
[1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 21389.402],
[1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 22027.453],
[1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 20939.999],
[1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 21250.064],
[1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 22282.781],
[1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 21407.066],
[1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 21160.237],
[1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 21826.768],
[1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 22744.940],
[1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 23466.118],
[1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 22017.882],
[1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 23191.466],
[1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 23099.082],
[1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 22684.767],
[1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 22842.135],
[1073.823, 416.679, 735.644, 269.850, 1785.205, 938.697, 1339.207, 5001.425, 0, 33323.836],
[0.000, 416.679, 1290.692, 719.924, 1785.205, 2701.488, 1339.207, 0.000, 0.000, 32820.290],
[0.000, 416.679, 1290.692, 719.924, 1785.205, 2701.488, 1339.207, 0.000, 0.000, 33174.614],
[0.000, 416.679, 1290.692, 719.924, 1785.205, 2701.488, 1339.207, 0.000, 0.000, 35179.466],
[0.000, 416.679, 1290.692, 719.924, 1785.205, 2701.488, 1339.207, 0.000, 0.000, 34465.195],
[0.000, 416.679, 1290.692, 719.924, 1785.205, 2701.488, 1339.207, 0.000, 0.000, 34712.354],
[0.000, 416.679, 1290.692, 719.924, 1785.205, 2701.488, 1339.207, 0.000, 0.000, 35755.550],
[0.000, 416.679, 1290.692, 719.924, 1785.205, 2701.488, 1339.207, 0.000, 0.000, 37895.223],
[0.000, 416.679, 1290.692, 719.924, 1785.205, 2701.488, 1339.207, 0.000, 0.000, 37854.284],
[0.000, 416.679, 1290.692, 719.924, 1785.205, 2701.488, 1339.207, 0.000, 0.000, 37198.374],
[0.000, 416.679, 1290.692, 719.924, 1785.205, 2701.488, 1339.207, 0.000, 0.000, 35916.711],
[0.000, 416.679, 1290.692, 719.924, 1785.205, 2701.488, 1339.207, 0.000, 0.000, 35806.937],
[0.000, 416.679, 1290.692, 719.924, 1785.205, 2701.488, 1339.207, 0.000, 0.000, 36317.592],
[0.000, 416.679, 1290.692, 719.924, 1785.205, 2701.488, 1339.207, 0.000, 0.000, 37103.973],
[0.000, 416.679, 1290.692, 719.924, 1785.205, 2701.488, 1339.207, 0.000, 0.000, 35457.883],
[0.000, 416.679, 1290.692, 719.924, 1785.205, 2701.488, 1339.207, 0.000, 0.000, 36717.685],
[0.000, 416.679, 1290.692, 719.924, 1785.205, 2701.488, 1339.207, 0.000, 0.000, 37641.463],
[0.000, 416.679, 1290.692, 719.924, 1785.205, 2701.488, 1339.207, 0.000, 0.000, 36794.298],
[0.000, 416.679, 1290.692, 719.924, 1785.205, 2701.488, 1339.207, 0.000, 0.000, 37073.817],
[0.000, 416.679, 1290.692, 719.924, 1785.205, 2701.488, 1339.207, 0.000, 0.000, 35244.299],
[0.000, 416.679, 1290.692, 719.924, 1785.205, 2701.488, 1339.207, 0.000, 0.000, 37062.382],
[0.000, 416.679, 1290.692, 719.924, 1785.205, 2701.488, 1339.207, 0.000, 0.000, 37420.067],
[0.000, 416.679, 1290.692, 719.924, 1785.205, 2701.488, 1339.207, 0.000, 0.000, 38089.058],
[0.000, 416.679, 1290.692, 719.924, 1785.205, 2701.488, 1339.207, 0.000, 0.000, 39260.542],
[0.000, 416.679, 1290.692, 719.924, 1785.205, 2701.488, 1339.207, 0.000, 0.000, 42609.684],
[0.000, 416.679, 1290.692, 719.924, 1785.205, 2701.488, 1339.207, 0.000, 0.000, 43109.309],
[0.000, 416.679, 1290.692, 719.924, 1785.205, 2701.488, 1339.207, 0.000, 0.000, 42283.408],
[0.000, 416.679, 1290.692, 719.924, 0.000, 2701.488, 4379.099, 915.621, 0.000, 43622.444],
[0.000, 416.679, 1290.692, 719.924, 0.000, 2701.488, 4379.099, 915.621, 0.000, 42830.254],
[0.000, 416.679, 1290.692, 719.924, 0.000, 2701.488, 4379.099, 915.621, 0.000, 41266.463],
[0.000, 416.679, 1290.692, 719.924, 0.000, 2701.488, 4379.099, 915.621, 0.000, 41164.839],
[0.000, 416.679, 1290.692, 719.924, 0.000, 2701.488, 4379.099, 915.621, 0.000, 41797.937],
[0.000, 416.679, 1290.692, 719.924, 0.000, 2701.488, 4379.099, 915.621, 0.000, 42440.861],
[0.000, 416.679, 1290.692, 719.924, 0.000, 2701.488, 4379.099, 915.621, 0.000, 42113.839],
[0.000, 416.679, 1290.692, 719.924, 0.000, 2701.488, 4379.099, 915.621, 0.000, 43853.588],
[0.000, 416.679, 1290.692, 719.924, 0.000, 2701.488, 4379.099, 915.621, 0.000, 46216.760],
[0.000, 0.000, 1290.692, 719.924, 0.000, 2701.488, 4379.099, 5140.743, 0.000, 45408.737],
[0.000, 0.000, 2027.188, 719.924, 0.000, 2701.488, 4379.099, 0.000, 0.000, 47413.401],
[0.000, 0.000, 2027.188, 719.924, 0.000, 2701.488, 4379.099, 0.000, 0.000, 44603.718],
[0.000, 0.000, 2027.188, 719.924, 0.000, 2701.488, 4379.099, 0.000, 0.000, 44381.544]])
def test_loop_step(self):
cash, amounts, fee, value = qt.core._loop_step(pre_cash=10000,
pre_amounts=np.zeros(7, dtype='float'),
op=self.op_signals[0],
prices=self.prices[0],
rate=self.rate,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 1 result in complete looping: \n'
f'cash: {cash}\n'
f'amounts: {np.round(amounts, 2)}\n'
f'value: {value}')
self.assertAlmostEqual(cash, 7500)
self.assertTrue(np.allclose(amounts, np.array([0, 0, 0, 0, 555.5555556, 0, 0])))
self.assertAlmostEqual(value, 10000.00)
cash, amounts, fee, value = qt.core._loop_step(pre_cash=5059.722222,
pre_amounts=np.array([0, 0, 0, 0, 555.5555556,
205.0653595, 321.0891813]),
op=self.op_signals[3],
prices=self.prices[3],
rate=self.rate,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 4 result in complete looping: \n'
f'cash: {cash}\n'
f'amounts: {np.round(amounts, 2)}\n'
f'value: {value}')
self.assertAlmostEqual(cash, 1201.2775195, 5)
self.assertTrue(np.allclose(amounts, np.array([346.9824373, 416.6786936, 0, 0,
555.5555556, 205.0653595, 321.0891813])))
self.assertAlmostEqual(value, 9646.111756, 5)
cash, amounts, fee, value = qt.core._loop_step(pre_cash=6179.77423,
pre_amounts=np.array([115.7186428, 416.6786936, 735.6441811,
269.8495646, 0, 1877.393446, 0]),
op=self.op_signals[31],
prices=self.prices[31],
rate=self.rate,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 32 result in complete looping: \n'
f'cash: {cash}\n'
f'amounts: {np.round(amounts, 2)}\n'
f'value: {value}')
self.assertAlmostEqual(cash, 0, 5)
self.assertTrue(np.allclose(amounts, np.array([1073.823175, 416.6786936, 735.6441811,
269.8495646, 0, 1877.393446, 0])))
self.assertAlmostEqual(value, 21133.50798, 5)
cash, amounts, fee, value = qt.core._loop_step(pre_cash=10000,
pre_amounts=np.array([1073.823175, 416.6786936, 735.6441811,
269.8495646, 0, 938.6967231, 1339.207325]),
op=self.op_signals[60],
prices=self.prices[60],
rate=self.rate,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 61 result in complete looping: \n'
f'cash: {cash}\n'
f'amounts: {np.round(amounts, 2)}\n'
f'value: {value}')
self.assertAlmostEqual(cash, 5001.424618, 5)
self.assertTrue(np.allclose(amounts, np.array([1073.823175, 416.6786936, 735.6441811, 269.8495646,
1785.205494, 938.6967231, 1339.207325])))
self.assertAlmostEqual(value, 33323.83588, 5)
cash, amounts, fee, value = qt.core._loop_step(pre_cash=cash,
pre_amounts=amounts,
op=self.op_signals[61],
prices=self.prices[61],
rate=self.rate,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 62 result in complete looping: \n'
f'cash: {cash}\n'
f'amounts: {np.round(amounts, 2)}\n'
f'value: {value}')
self.assertAlmostEqual(cash, 0, 5)
self.assertTrue(np.allclose(amounts, np.array([0, 416.6786936, 1290.69215, 719.9239224,
1785.205494, 2701.487958, 1339.207325])))
self.assertAlmostEqual(value, 32820.29007, 5)
cash, amounts, fee, value = qt.core._loop_step(pre_cash=915.6208259,
pre_amounts=np.array([0, 416.6786936, 1290.69215, 719.9239224,
0, 2701.487958, 4379.098907]),
op=self.op_signals[96],
prices=self.prices[96],
rate=self.rate,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 97 result in complete looping: \n'
f'cash: {cash}\n'
f'amounts: {np.round(amounts, 2)}\n'
f'value: {value}')
self.assertAlmostEqual(cash, 5140.742779, 5)
self.assertTrue(np.allclose(amounts, np.array([0, 0, 1290.69215, 719.9239224, 0, 2701.487958, 4379.098907])))
self.assertAlmostEqual(value, 45408.73655, 4)
cash, amounts, fee, value = qt.core._loop_step(pre_cash=cash,
pre_amounts=amounts,
op=self.op_signals[97],
prices=self.prices[97],
rate=self.rate,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 98 result in complete looping: \n'
f'cash: {cash}\n'
f'amounts: {np.round(amounts, 2)}\n'
f'value: {value}')
self.assertAlmostEqual(cash, 0, 5)
self.assertTrue(np.allclose(amounts, np.array([0, 0, 2027.18825, 719.9239224, 0, 2701.487958, 4379.098907])))
self.assertAlmostEqual(value, 47413.40131, 4)
def test_loop(self):
res = apply_loop(op_list=self.op_signal_df,
history_list=self.history_list,
cash_plan=self.cash,
cost_rate=self.rate,
moq_buy=0,
moq_sell=0,
inflation_rate=0,
print_log=True)
self.assertIsInstance(res, pd.DataFrame)
print(f'in test_loop:\nresult of loop test is \n{res}')
self.assertTrue(np.allclose(res.values, self.res, 5))
print(f'test assertion errors in apply_loop: detect moqs that are not compatible')
self.assertRaises(AssertionError,
apply_loop,
self.op_signal_df,
self.history_list,
self.cash,
self.rate,
0, 1,
0,
False)
self.assertRaises(AssertionError,
apply_loop,
self.op_signal_df,
self.history_list,
self.cash,
self.rate,
1, 5,
0,
False)
print(f'test loop results with moq equal to 100')
res = apply_loop(op_list=self.op_signal_df,
history_list=self.history_list,
cash_plan=self.cash,
cost_rate=self.rate2,
moq_buy=100,
moq_sell=1,
inflation_rate=0,
print_log=True)
self.assertIsInstance(res, pd.DataFrame)
print(f'in test_loop:\nresult of loop test is \n{res}')
class TestOperatorSubFuncs(unittest.TestCase):
def setUp(self):
mask_list = [[0.0, 0.0, 0.0, 0.0],
[0.5, 0.0, 0.0, 1.0],
[0.5, 0.0, 0.3, 1.0],
[0.5, 0.0, 0.3, 0.5],
[0.5, 0.5, 0.3, 0.5],
[0.5, 0.5, 0.3, 1.0],
[0.3, 0.5, 0.0, 1.0],
[0.3, 1.0, 0.0, 1.0]]
signal_list = [[0.0, 0.0, 0.0, 0.0],
[0.5, 0.0, 0.0, 1.0],
[0.0, 0.0, 0.3, 0.0],
[0.0, 0.0, 0.0, -0.5],
[0.0, 0.5, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.5],
[-0.4, 0.0, -1.0, 0.0],
[0.0, 0.5, 0.0, 0.0]]
mask_multi = [[[0, 0, 1, 1, 0],
[0, 1, 1, 1, 0],
[1, 1, 1, 1, 0],
[1, 1, 1, 1, 1],
[1, 1, 1, 1, 1],
[1, 1, 0, 1, 1],
[0, 1, 0, 0, 1],
[1, 1, 0, 0, 1],
[1, 1, 0, 0, 1],
[1, 0, 0, 0, 1]],
[[0, 0, 1, 0, 1],
[0, 1, 1, 1, 1],
[1, 1, 0, 1, 1],
[1, 1, 1, 0, 0],
[1, 1, 0, 0, 0],
[1, 0, 0, 0, 0],
[1, 0, 0, 0, 0],
[1, 1, 0, 0, 0],
[1, 1, 0, 1, 0],
[0, 1, 0, 1, 0]],
[[0, 0, 0., 0, 1],
[0, 0, 1., 0, 1],
[0, 0, 1., 0, 1],
[1, 0, 1., 0, 1],
[1, 1, .5, 1, 1],
[1, 0, .5, 1, 0],
[1, 1, .5, 1, 0],
[0, 1, 0., 0, 0],
[1, 0, 0., 0, 0],
[0, 1, 0., 0, 0]]]
signal_multi = [[[0., 0., 1., 1., 0.],
[0., 1., 0., 0., 0.],
[1., 0., 0., 0., 0.],
[0., 0., 0., 0., 1.],
[0., 0., 0., 0., 0.],
[0., 0., -1., 0., 0.],
[-1., 0., 0., -1., 0.],
[1., 0., 0., 0., 0.],
[0., 0., 0., 0., 0.],
[0., -1., 0., 0., 0.]],
[[0., 0., 1., 0., 1.],
[0., 1., 0., 1., 0.],
[1., 0., -1., 0., 0.],
[0., 0., 1., -1., -1.],
[0., 0., -1., 0., 0.],
[0., -1., 0., 0., 0.],
[0., 0., 0., 0., 0.],
[0., 1., 0., 0., 0.],
[0., 0., 0., 1., 0.],
[-1., 0., 0., 0., 0.]],
[[0., 0., 0., 0., 1.],
[0., 0., 1., 0., 0.],
[0., 0., 0., 0., 0.],
[1., 0., 0., 0., 0.],
[0., 1., -0.5, 1., 0.],
[0., -1., 0., 0., -1.],
[0., 1., 0., 0., 0.],
[-1., 0., -1., -1., 0.],
[1., -1., 0., 0., 0.],
[-1., 1., 0., 0., 0.]]]
self.mask = np.array(mask_list)
self.multi_mask = np.array(mask_multi)
self.correct_signal = np.array(signal_list)
self.correct_multi_signal = np.array(signal_multi)
self.op = qt.Operator()
def test_ls_blend(self):
"""测试多空蒙板的混合器,三种混合方式均需要测试"""
ls_mask1 = [[0.0, 0.0, 0.0, -0.0],
[1.0, 0.0, 0.0, -1.0],
[1.0, 0.0, 1.0, -1.0],
[1.0, 0.0, 1.0, -1.0],
[1.0, 1.0, 1.0, -1.0],
[1.0, 1.0, 1.0, -1.0],
[0.0, 1.0, 0.0, -1.0],
[0.0, 1.0, 0.0, -1.0]]
ls_mask2 = [[0.0, 0.0, 0.5, -0.5],
[0.0, 0.0, 0.5, -0.3],
[0.0, 0.5, 0.5, -0.0],
[0.5, 0.5, 0.3, -0.0],
[0.5, 0.5, 0.3, -0.3],
[0.5, 0.5, 0.0, -0.5],
[0.3, 0.5, 0.0, -1.0],
[0.3, 1.0, 0.0, -1.0]]
ls_mask3 = [[0.5, 0.0, 1.0, -0.4],
[0.4, 0.0, 1.0, -0.3],
[0.3, 0.0, 0.8, -0.2],
[0.2, 0.0, 0.6, -0.1],
[0.1, 0.2, 0.4, -0.2],
[0.1, 0.3, 0.2, -0.5],
[0.1, 0.4, 0.0, -0.5],
[0.1, 0.5, 0.0, -1.0]]
# result with blender 'avg'
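# 'avg' appears to take the element-wise mean of the three masks, e.g. first row, first column:
# (0.0 + 0.0 + 0.5) / 3 = 0.1667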
ls_blnd_avg = [[0.16666667, 0.00000000, 0.50000000, -0.3],
[0.46666667, 0.00000000, 0.50000000, -0.53333333],
[0.43333333, 0.16666667, 0.76666667, -0.4],
[0.56666667, 0.16666667, 0.63333333, -0.36666667],
[0.53333333, 0.56666667, 0.56666667, -0.5],
[0.53333333, 0.60000000, 0.40000000, -0.66666667],
[0.13333333, 0.63333333, 0.00000000, -0.83333333],
[0.13333333, 0.83333333, 0.00000000, -1.]]
# result with blender 'str-1.5'
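# 'str-T' appears to sum the masks and output 1 once the sum reaches T, -1 once it reaches -T, 0 otherwise;
# with T=1.5 the first-row sums (0.5, 0.0, 1.5, -0.9) map to (0, 0, 1, 0)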
ls_blnd_str_15 = [[0, 0, 1, 0],
[0, 0, 1, -1],
[0, 0, 1, 0],
[1, 0, 1, 0],
[1, 1, 1, -1],
[1, 1, 0, -1],
[0, 1, 0, -1],
[0, 1, 0, -1]]
# result with blender 'pos-2' == 'pos-2-0'
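# 'pos-N-T' appears to take a position once at least N of the masks hold a position larger than the
# threshold T in magnitude (T defaults to 0), with the sign giving the direction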
ls_blnd_pos_2 = [[0, 0, 1, -1],
[1, 0, 1, -1],
[1, 0, 1, -1],
[1, 0, 1, -1],
[1, 1, 1, -1],
[1, 1, 1, -1],
[1, 1, 0, -1],
[1, 1, 0, -1]]
# result with blender 'pos-2-0.25'
ls_blnd_pos_2_25 = [[0, 0, 1, -1],
[1, 0, 1, -1],
[1, 0, 1, 0],
[1, 0, 1, 0],
[1, 1, 1, -1],
[1, 1, 0, -1],
[0, 1, 0, -1],
[0, 1, 0, -1]]
# result with blender 'avg_pos-2' == 'avg_pos-2-0'
ls_blnd_avg_pos_2 = [[0.00000000, 0.00000000, 0.50000000, -0.3],
[0.46666667, 0.00000000, 0.50000000, -0.53333333],
[0.43333333, 0.00000000, 0.76666667, -0.4],
[0.56666667, 0.00000000, 0.63333333, -0.36666667],
[0.53333333, 0.56666667, 0.56666667, -0.5],
[0.53333333, 0.60000000, 0.40000000, -0.66666667],
[0.13333333, 0.63333333, 0.00000000, -0.83333333],
[0.13333333, 0.83333333, 0.00000000, -1.]]
# result with blender 'avg_pos-2-0.25'
ls_blnd_avg_pos_2_25 = [[0.00000000, 0.00000000, 0.50000000, -0.3],
[0.46666667, 0.00000000, 0.50000000, -0.53333333],
[0.43333333, 0.00000000, 0.76666667, 0.00000000],
[0.56666667, 0.00000000, 0.63333333, 0.00000000],
[0.53333333, 0.56666667, 0.56666667, -0.5],
[0.53333333, 0.60000000, 0.00000000, -0.66666667],
[0.00000000, 0.63333333, 0.00000000, -0.83333333],
[0.00000000, 0.83333333, 0.00000000, -1.]]
# result with blender 'combo'
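# 'combo' appears to be the plain element-wise sum of the masks, e.g. first row:
# ls_mask1[0] + ls_mask2[0] + ls_mask3[0] = (0.5, 0.0, 1.5, -0.9)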
ls_blnd_combo = [[0.5, 0., 1.5, -0.9],
[1.4, 0., 1.5, -1.6],
[1.3, 0.5, 2.3, -1.2],
[1.7, 0.5, 1.9, -1.1],
[1.6, 1.7, 1.7, -1.5],
[1.6, 1.8, 1.2, -2.],
[0.4, 1.9, 0., -2.5],
[0.4, 2.5, 0., -3.]]
ls_masks = np.array([np.array(ls_mask1), np.array(ls_mask2), np.array(ls_mask3)])
# test A: the ls_blender 'str-T'
self.op.set_blender('ls', 'str-1.5')
res = qt.Operator._ls_blend(self.op, ls_masks)
print(f'test A: result of ls_blender: str-1.5: \n{res}')
self.assertTrue(np.allclose(res, ls_blnd_str_15))
# test B: the ls_blender 'pos-N-T'
self.op.set_blender('ls', 'pos-2')
res = qt.Operator._ls_blend(self.op, ls_masks)
print(f'Test B-1: result of ls_blender: pos-2: \n{res}')
self.assertTrue(np.allclose(res, ls_blnd_pos_2))
self.op.set_blender('ls', 'pos-2-0.25')
res = qt.Operator._ls_blend(self.op, ls_masks)
print(f'Test B-2: result of ls_blender: pos-2-0.25: \n{res}')
self.assertTrue(np.allclose(res, ls_blnd_pos_2_25))
# test C: the ls_blender 'avg_pos-N-T'
self.op.set_blender('ls', 'avg_pos-2')
res = qt.Operator._ls_blend(self.op, ls_masks)
print(f'Test C-1: result of ls_blender: avg_pos-2: \n{res}')
self.assertTrue(np.allclose(res, ls_blnd_pos_2, 5))
self.op.set_blender('ls', 'avg_pos-2-0.25')
res = qt.Operator._ls_blend(self.op, ls_masks)
print(f'Test C-2: result of ls_blender: avg_pos-2-0.25: \n{res}')
self.assertTrue(np.allclose(res, ls_blnd_pos_2_25, 5))
# test D: the ls_blender 'avg'
self.op.set_blender('ls', 'avg')
res = qt.Operator._ls_blend(self.op, ls_masks)
print(f'Test D: result of ls_blender: avg: \n{res}')
self.assertTrue(np.allclose(res, ls_blnd_avg))
# test E: the ls_blender 'combo'
self.op.set_blender('ls', 'combo')
res = qt.Operator._ls_blend(self.op, ls_masks)
print(f'Test E: result of ls_blender: combo: \n{res}')
self.assertTrue(np.allclose(res, ls_blnd_combo))
def test_sel_blend(self):
"""测试选股蒙板的混合器,包括所有的混合模式"""
# step2, test blending of sel masks
pass
def test_bs_blend(self):
"""测试买卖信号混合模式"""
# step3, test blending of op signals
pass
def test_unify(self):
print('Testing Unify functions\n')
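# qt.unify appears to normalize each row of the input so that it sums to 1, as the targets below show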
l1 = np.array([[3, 2, 5], [5, 3, 2]])
res = qt.unify(l1)
target = np.array([[0.3, 0.2, 0.5], [0.5, 0.3, 0.2]])
self.assertIs(np.allclose(res, target), True, 'each row should sum to 1')
l1 = np.array([[1, 1, 1, 1, 1], [2, 2, 2, 2, 2]])
res = qt.unify(l1)
target = np.array([[0.2, 0.2, 0.2, 0.2, 0.2], [0.2, 0.2, 0.2, 0.2, 0.2]])
self.assertIs(np.allclose(res, target), True, 'each row should sum to 1')
def test_mask_to_signal(self):
signal = qt.mask_to_signal(self.mask)
print(f'Test A: single mask to signal, result: \n{signal}')
self.assertTrue(np.allclose(signal, self.correct_signal))
signal = qt.mask_to_signal(self.multi_mask)
print(f'Test B: multi mask to signal, result: \n{signal}')
self.assertTrue(np.allclose(signal, self.correct_multi_signal))
class TestLSStrategy(qt.RollingTiming):
"""用于test测试的简单多空蒙板生成策略。基于RollingTiming滚动择时方法生成
该策略有两个参数,N与Price
N用于计算OHLC价格平均值的N日简单移动平均,判断,当移动平均值大于等于Price时,状态为看多,否则为看空
"""
def __init__(self):
super().__init__(stg_name='test_LS',
stg_text='test long/short strategy',
par_count=2,
par_types='discr, conti',
par_bounds_or_enums=([1, 5], [2, 10]),
data_types='close, open, high, low',
data_freq='d',
window_length=5)
pass
def _realize(self, hist_data: np.ndarray, params: tuple):
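# hist_data columns follow the data_types declared above ('close, open, high, low'), so after the
# transpose h[0]..h[3] are the close/open/high/low series; go long (1) when the n-day SMA of the
# OHLC mean is at or above `price`, otherwise stay short/flat (0)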
n, price = params
h = hist_data.T
avg = (h[0] + h[1] + h[2] + h[3]) / 4
ma = sma(avg, n)
if ma[-1] < price:
return 0
else:
return 1
class TestSelStrategy(qt.SimpleSelecting):
"""用于Test测试的简单选股策略,基于Selecting策略生成
策略没有参数,选股周期为5D
在每个选股周期内,从股票池的三只股票中选出今日变化率 = (今收-昨收)/平均股价(OHLC平均股价)最高的两支,放入中选池,否则落选。
选股比例为平均分配
"""
def __init__(self):
super().__init__(stg_name='test_SEL',
stg_text='test portfolio selection strategy',
par_count=0,
par_types='',
par_bounds_or_enums=(),
data_types='high, low, close',
data_freq='d',
sample_freq='10d',
window_length=5)
pass
def _realize(self, hist_data: np.ndarray, params: tuple):
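# hist_data axes are (share, day, data_type) with data_types 'high, low, close', so index 2 is close;
# pick the two of the three shares with the largest last-available close-to-close change relative to
# their mean price and give each a 0.5 weight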
avg = np.nanmean(hist_data, axis=(1, 2))
dif = (hist_data[:, :, 2] - np.roll(hist_data[:, :, 2], 1, 1))
dif_no_nan = np.array([arr[~np.isnan(arr)][-1] for arr in dif])
difper = dif_no_nan / avg
large2 = difper.argsort()[1:]
chosen = np.zeros_like(avg)
chosen[large2] = 0.5
return chosen
class TestSelStrategyDiffTime(qt.SimpleSelecting):
"""用于Test测试的简单选股策略,基于Selecting策略生成
策略没有参数,选股周期为5D
在每个选股周期内,从股票池的三只股票中选出今日变化率 = (今收-昨收)/平均股价(OHLC平均股价)最高的两支,放入中选池,否则落选。
选股比例为平均分配
"""
def __init__(self):
super().__init__(stg_name='test_SEL',
stg_text='test portfolio selection strategy',
par_count=0,
par_types='',
par_bounds_or_enums=(),
data_types='close, low, open',
data_freq='d',
sample_freq='w',
window_length=2)
pass
def _realize(self, hist_data: np.ndarray, params: tuple):
avg = hist_data.mean(axis=1).squeeze()
difper = (hist_data[:, :, 0] - np.roll(hist_data[:, :, 0], 1))[:, -1] / avg
large2 = difper.argsort()[0:2]
chosen = np.zeros_like(avg)
chosen[large2] = 0.5
return chosen
class TestSigStrategy(qt.SimpleTiming):
"""用于Test测试的简单信号生成策略,基于SimpleTiming策略生成
策略有三个参数,第一个参数为ratio,另外两个参数为price1以及price2
ratio是k线形状比例的阈值,定义为abs((C-O)/(H-L))。当这个比值小于ratio阈值时,判断该K线为十字交叉(其实还有丁字等多种情形,但这里做了
简化处理。
信号生成的规则如下:
1,当某个K线出现十字交叉,且昨收与今收之差大于price1时,买入信号
2,当某个K线出现十字交叉,且昨收与今收之差小于price2时,卖出信号
"""
def __init__(self):
super().__init__(stg_name='test_SIG',
stg_text='test signal creation strategy',
par_count=3,
par_types='conti, conti, conti',
par_bounds_or_enums=([2, 10], [0, 3], [0, 3]),
data_types='close, open, high, low',
window_length=2)
pass
def _realize(self, hist_data: np.ndarray, params: tuple):
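# h rows follow data_types 'close, open, high, low'; ratio = |close - open| / |high - low| flags a
# doji-like candle when it is below r, and the close-to-close change then decides the signal:
# buy (1) if the change exceeds price1, sell (-1) if it falls below price2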
r, price1, price2 = params
h = hist_data.T
ratio = np.abs((h[0] - h[1]) / (h[3] - h[2]))
diff = h[0] - np.roll(h[0], 1)
sig = np.where((ratio < r) & (diff > price1),
1,
np.where((ratio < r) & (diff < price2), -1, 0))
return sig
class TestOperator(unittest.TestCase):
"""全面测试Operator对象的所有功能。包括:
1, Strategy 参数的设置
2, 历史数据的获取与分配提取
3, 策略优化参数的批量设置和优化空间的获取
4, 策略输出值的正确性验证
5, 策略结果的混合结果确认
"""
def setUp(self):
"""prepare data for Operator test"""
print('start testing HistoryPanel object\n')
# build up test data: a 4-type, 3-share, 50-day matrix of prices that contains nan values on some days
# for some shares in the pool
# for share1:
data_rows = 50
share1_close = [10.04, 10, 10, 9.99, 9.97, 9.99, 10.03, 10.03, 10.06, 10.06, 10.11,
10.09, 10.07, 10.06, 10.09, 10.03, 10.03, 10.06, 10.08, 10, 9.99,
10.03, 10.03, 10.06, 10.03, 9.97, 9.94, 9.83, 9.77, 9.84, 9.91, 9.93,
9.96, 9.91, 9.91, 9.88, 9.91, 9.64, 9.56, 9.57, 9.55, 9.57, 9.61, 9.61,
9.55, 9.57, 9.63, 9.64, 9.65, 9.62]
share1_open = [10.02, 10, 9.98, 9.97, 9.99, 10.01, 10.04, 10.06, 10.06, 10.11,
10.11, 10.07, 10.06, 10.09, 10.03, 10.02, 10.06, 10.08, 9.99, 10,
10.03, 10.02, 10.06, 10.03, 9.97, 9.94, 9.83, 9.78, 9.77, 9.91, 9.92,
9.97, 9.91, 9.9, 9.88, 9.91, 9.63, 9.64, 9.57, 9.55, 9.58, 9.61, 9.62,
9.55, 9.57, 9.61, 9.63, 9.64, 9.61, 9.56]
share1_high = [10.07, 10, 10, 10, 10.03, 10.03, 10.04, 10.09, 10.1, 10.14, 10.11, 10.1,
10.09, 10.09, 10.1, 10.05, 10.07, 10.09, 10.1, 10, 10.04, 10.04, 10.06,
10.09, 10.05, 9.97, 9.96, 9.86, 9.77, 9.92, 9.94, 9.97, 9.97, 9.92, 9.92,
9.92, 9.93, 9.64, 9.58, 9.6, 9.58, 9.62, 9.62, 9.64, 9.59, 9.62, 9.63,
9.7, 9.66, 9.64]
share1_low = [9.99, 10, 9.97, 9.97, 9.97, 9.98, 9.99, 10.03, 10.03, 10.04, 10.11, 10.07,
10.05, 10.03, 10.03, 10.01, 9.99, 10.03, 9.95, 10, 9.95, 10, 10.01, 9.99,
9.96, 9.89, 9.83, 9.77, 9.77, 9.8, 9.9, 9.91, 9.89, 9.89, 9.87, 9.85, 9.6,
9.64, 9.53, 9.55, 9.54, 9.55, 9.58, 9.54, 9.53, 9.53, 9.63, 9.64, 9.59, 9.56]
# for share2:
share2_close = [9.68, 9.87, 9.86, 9.87, 9.79, 9.82, 9.8, 9.66, 9.62, 9.58, 9.69, 9.78, 9.75,
9.96, 9.9, 10.04, 10.06, 10.08, 10.24, 10.24, 10.24, 9.86, 10.13, 10.12,
10.1, 10.25, 10.24, 10.22, 10.75, 10.64, 10.56, 10.6, 10.42, 10.25, 10.24,
10.49, 10.57, 10.63, 10.48, 10.37, 10.96, 11.02, np.nan, np.nan, 10.88, 10.87, 11.01,
11.01, 11.58, 11.8]
share2_open = [9.88, 9.88, 9.89, 9.75, 9.74, 9.8, 9.62, 9.65, 9.58, 9.67, 9.81, 9.8, 10,
9.95, 10.1, 10.06, 10.14, 9.9, 10.2, 10.29, 9.86, 9.48, 10.01, 10.24, 10.26,
10.24, 10.12, 10.65, 10.64, 10.56, 10.42, 10.43, 10.29, 10.3, 10.44, 10.6,
10.67, 10.46, 10.39, 10.9, 11.01, 11.01, np.nan, np.nan, 10.82, 11.02, 10.96,
11.55, 11.74, 11.8]
share2_high = [9.91, 10.04, 9.93, 10.04, 9.84, 9.88, 9.99, 9.7, 9.67, 9.71, 9.85, 9.9, 10,
10.2, 10.11, 10.18, 10.21, 10.26, 10.38, 10.47, 10.42, 10.07, 10.24, 10.27,
10.38, 10.43, 10.39, 10.65, 10.84, 10.65, 10.73, 10.63, 10.51, 10.35, 10.46,
10.63, 10.74, 10.76, 10.54, 11.02, 11.12, 11.17, np.nan, np.nan, 10.92, 11.15,
11.11, 11.55, 11.95, 11.93]
share2_low = [9.63, 9.84, 9.81, 9.74, 9.67, 9.72, 9.57, 9.54, 9.51, 9.47, 9.68, 9.63, 9.75,
9.65, 9.9, 9.93, 10.03, 9.8, 10.14, 10.09, 9.78, 9.21, 9.11, 9.68, 10.05,
10.12, 9.89, 9.89, 10.59, 10.43, 10.34, 10.32, 10.21, 10.2, 10.18, 10.36,
10.51, 10.41, 10.32, 10.37, 10.87, 10.95, np.nan, np.nan, 10.65, 10.71, 10.75,
10.91, 11.31, 11.58]
# for share3:
share3_close = [6.64, 7.26, 7.03, 6.87, np.nan, 6.64, 6.85, 6.7, 6.39, 6.22, 5.92, 5.91, 6.11,
5.91, 6.23, 6.28, 6.28, 6.27, np.nan, 5.56, 5.67, 5.16, 5.69, 6.32, 6.14, 6.25,
5.79, 5.26, 5.05, 5.45, 6.06, 6.21, 5.69, 5.46, 6.02, 6.69, 7.43, 7.72, 8.16,
7.83, 8.7, 8.71, 8.88, 8.54, 8.87, 8.87, 8.18, 7.8, 7.97, 8.25]
share3_open = [7.26, 7, 6.88, 6.91, np.nan, 6.81, 6.63, 6.45, 6.16, 6.24, 5.96, 5.97, 5.96,
6.2, 6.35, 6.11, 6.37, 5.58, np.nan, 5.65, 5.19, 5.42, 6.3, 6.15, 6.05, 5.89,
5.22, 5.2, 5.07, 6.04, 6.12, 5.85, 5.67, 6.02, 6.04, 7.07, 7.64, 7.99, 7.59,
8.73, 8.72, 8.97, 8.58, 8.71, 8.77, 8.4, 7.95, 7.76, 8.25, 7.51]
share3_high = [7.41, 7.31, 7.14, 7, np.nan, 6.82, 6.96, 6.85, 6.5, 6.34, 6.04, 6.02, 6.12, 6.38,
6.43, 6.46, 6.43, 6.27, np.nan, 6.01, 5.67, 5.67, 6.35, 6.32, 6.43, 6.36, 5.79,
5.47, 5.65, 6.04, 6.14, 6.23, 5.83, 6.25, 6.27, 7.12, 7.82, 8.14, 8.27, 8.92,
8.76, 9.15, 8.9, 9.01, 9.16, 9, 8.27, 7.99, 8.33, 8.25]
share3_low = [6.53, 6.87, 6.83, 6.7, np.nan, 6.63, 6.57, 6.41, 6.15, 6.07, 5.89, 5.82, 5.73, 5.81,
6.1, 6.06, 6.16, 5.57, np.nan, 5.51, 5.19, 5.12, 5.69, 6.01, 5.97, 5.86, 5.18, 5.19,
4.96, 5.45, 5.84, 5.85, 5.28, 5.42, 6.02, 6.69, 7.28, 7.64, 7.25, 7.83, 8.41, 8.66,
8.53, 8.54, 8.73, 8.27, 7.95, 7.67, 7.8, 7.51]
# for sel_finance test
shares_eps = np.array([[np.nan, np.nan, np.nan],
[0.1, np.nan, np.nan],
[np.nan, 0.2, np.nan],
[np.nan, np.nan, 0.3],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, 0.2],
[0.1, np.nan, np.nan],
[np.nan, 0.3, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[0.3, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, 0.3, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, 0.3],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, 0, 0.2],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[0.1, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, 0.2],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[0.15, np.nan, np.nan],
[np.nan, 0.1, np.nan],
[np.nan, np.nan, np.nan],
[0.1, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, 0.3],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[0.2, np.nan, np.nan],
[np.nan, 0.5, np.nan],
[0.4, np.nan, 0.3],
[np.nan, np.nan, np.nan],
[np.nan, 0.3, np.nan],
[0.9, np.nan, np.nan],
[np.nan, np.nan, 0.1]])
self.date_indices = ['2016-07-01', '2016-07-04', '2016-07-05', '2016-07-06',
'2016-07-07', '2016-07-08', '2016-07-11', '2016-07-12',
'2016-07-13', '2016-07-14', '2016-07-15', '2016-07-18',
'2016-07-19', '2016-07-20', '2016-07-21', '2016-07-22',
'2016-07-25', '2016-07-26', '2016-07-27', '2016-07-28',
'2016-07-29', '2016-08-01', '2016-08-02', '2016-08-03',
'2016-08-04', '2016-08-05', '2016-08-08', '2016-08-09',
'2016-08-10', '2016-08-11', '2016-08-12', '2016-08-15',
'2016-08-16', '2016-08-17', '2016-08-18', '2016-08-19',
'2016-08-22', '2016-08-23', '2016-08-24', '2016-08-25',
'2016-08-26', '2016-08-29', '2016-08-30', '2016-08-31',
'2016-09-01', '2016-09-02', '2016-09-05', '2016-09-06',
'2016-09-07', '2016-09-08']
self.shares = ['000010', '000030', '000039']
self.types = ['close', 'open', 'high', 'low']
self.sel_finance_tyeps = ['eps']
self.test_data_3D = np.zeros((3, data_rows, 4))
self.test_data_2D = np.zeros((data_rows, 3))
self.test_data_2D2 = np.zeros((data_rows, 4))
self.test_data_sel_finance = np.empty((3, data_rows, 1))
# Build up 3D data
self.test_data_3D[0, :, 0] = share1_close
self.test_data_3D[0, :, 1] = share1_open
self.test_data_3D[0, :, 2] = share1_high
self.test_data_3D[0, :, 3] = share1_low
self.test_data_3D[1, :, 0] = share2_close
self.test_data_3D[1, :, 1] = share2_open
self.test_data_3D[1, :, 2] = share2_high
self.test_data_3D[1, :, 3] = share2_low
self.test_data_3D[2, :, 0] = share3_close
self.test_data_3D[2, :, 1] = share3_open
self.test_data_3D[2, :, 2] = share3_high
self.test_data_3D[2, :, 3] = share3_low
self.test_data_sel_finance[:, :, 0] = shares_eps.T
self.hp1 = qt.HistoryPanel(values=self.test_data_3D,
levels=self.shares,
columns=self.types,
rows=self.date_indices)
print(f'in test Operator, history panel is created for timing test')
self.hp1.info()
self.hp2 = qt.HistoryPanel(values=self.test_data_sel_finance,
levels=self.shares,
columns=self.sel_finance_tyeps,
rows=self.date_indices)
print(f'in test_Operator, history panel is created for selection finance test:')
self.hp2.info()
self.op = qt.Operator(selecting_types=['all'], timing_types='dma', ricon_types='urgent')
def test_info(self):
"""Test information output of Operator"""
print(f'test printing information of operator object')
# self.op.info()
def test_operator_ready(self):
"""test the method ready of Operator"""
pass
# print(f'operator is ready? "{self.op.ready}"')
def test_operator_add_strategy(self):
"""test adding strategies to Operator"""
pass
# self.assertIsInstance(self.op, qt.Operator)
# self.assertIsInstance(self.op.timing[0], qt.TimingDMA)
# self.assertIsInstance(self.op.selecting[0], qt.SelectingAll)
# self.assertIsInstance(self.op.ricon[0], qt.RiconUrgent)
# self.assertEqual(self.op.selecting_count, 1)
# self.assertEqual(self.op.strategy_count, 3)
# self.assertEqual(self.op.ricon_count, 1)
# self.assertEqual(self.op.timing_count, 1)
# self.assertEqual(self.op.ls_blender, 'pos-1')
# print(f'test adding strategies into existing op')
# print('test adding strategy by string')
# self.op.add_strategy('macd', 'timing')
# self.assertIsInstance(self.op.timing[0], qt.TimingDMA)
# self.assertIsInstance(self.op.timing[1], qt.TimingMACD)
# self.assertEqual(self.op.selecting_count, 1)
# self.assertEqual(self.op.strategy_count, 4)
# self.assertEqual(self.op.ricon_count, 1)
# self.assertEqual(self.op.timing_count, 2)
# self.assertEqual(self.op.ls_blender, 'pos-1')
# self.op.add_strategy('random', 'selecting')
# self.assertIsInstance(self.op.selecting[0], qt.TimingDMA)
# self.assertIsInstance(self.op.selecting[1], qt.TimingMACD)
# self.assertEqual(self.op.selecting_count, 2)
# self.assertEqual(self.op.strategy_count, 5)
# self.assertEqual(self.op.ricon_count, 1)
# self.assertEqual(self.op.timing_count, 2)
# self.assertEqual(self.op.selecting_blender, '0 or 1')
# self.op.add_strategy('none', 'ricon')
# self.assertIsInstance(self.op.ricon[0], qt.TimingDMA)
# self.assertIsInstance(self.op.ricon[1], qt.TimingMACD)
# self.assertEqual(self.op.selecting_count, 2)
# self.assertEqual(self.op.strategy_count, 6)
# self.assertEqual(self.op.ricon_count, 2)
# self.assertEqual(self.op.timing_count, 2)
# print('test adding strategy by list')
# self.op.add_strategy(['dma', 'macd'], 'timing')
# print('test adding strategy by object')
# test_ls = TestLSStrategy()
# self.op.add_strategy(test_ls, 'timing')
def test_operator_remove_strategy(self):
"""test removing strategies from Operator"""
pass
# self.op.remove_strategy(stg='macd')
def test_property_get(self):
self.assertIsInstance(self.op, qt.Operator)
self.assertIsInstance(self.op.timing[0], qt.TimingDMA)
self.assertIsInstance(self.op.selecting[0], qt.SelectingAll)
self.assertIsInstance(self.op.ricon[0], qt.RiconUrgent)
self.assertEqual(self.op.selecting_count, 1)
self.assertEqual(self.op.strategy_count, 3)
self.assertEqual(self.op.ricon_count, 1)
self.assertEqual(self.op.timing_count, 1)
print(self.op.strategies, '\n', [qt.TimingDMA, qt.SelectingAll, qt.RiconUrgent])
print(f'info of Timing strategy: \n{self.op.strategies[0].info()}')
self.assertEqual(len(self.op.strategies), 3)
self.assertIsInstance(self.op.strategies[0], qt.TimingDMA)
self.assertIsInstance(self.op.strategies[1], qt.SelectingAll)
self.assertIsInstance(self.op.strategies[2], qt.RiconUrgent)
self.assertEqual(self.op.strategy_count, 3)
self.assertEqual(self.op.op_data_freq, 'd')
self.assertEqual(self.op.op_data_types, ['close'])
self.assertEqual(self.op.opt_space_par, ([], []))
self.assertEqual(self.op.max_window_length, 270)
self.assertEqual(self.op.ls_blender, 'pos-1')
self.assertEqual(self.op.selecting_blender, '0')
self.assertEqual(self.op.ricon_blender, 'add')
self.assertEqual(self.op.opt_types, [0, 0, 0])
def test_prepare_data(self):
test_ls = TestLSStrategy()
test_sel = TestSelStrategy()
test_sig = TestSigStrategy()
self.op = qt.Operator(timing_types=[test_ls],
selecting_types=[test_sel],
ricon_types=[test_sig])
too_early_cash = qt.CashPlan(dates='2016-01-01', amounts=10000)
early_cash = qt.CashPlan(dates='2016-07-01', amounts=10000)
on_spot_cash = qt.CashPlan(dates='2016-07-08', amounts=10000)
no_trade_cash = qt.CashPlan(dates='2016-07-08, 2016-07-30, 2016-08-11, 2016-09-03',
amounts=[10000, 10000, 10000, 10000])
late_cash = qt.CashPlan(dates='2016-12-31', amounts=10000)
multi_cash = qt.CashPlan(dates='2016-07-08, 2016-08-08', amounts=[10000, 10000])
self.op.set_parameter(stg_id='t-0',
pars={'000300': (5, 10.),
'000400': (5, 10.),
'000500': (5, 6.)})
self.op.set_parameter(stg_id='s-0',
pars=())
self.op.set_parameter(stg_id='r-0',
pars=(0.2, 0.02, -0.02))
self.op.prepare_data(hist_data=self.hp1,
cash_plan=on_spot_cash)
self.assertIsInstance(self.op._selecting_history_data, list)
self.assertIsInstance(self.op._timing_history_data, list)
self.assertIsInstance(self.op._ricon_history_data, list)
self.assertEqual(len(self.op._selecting_history_data), 1)
self.assertEqual(len(self.op._timing_history_data), 1)
self.assertEqual(len(self.op._ricon_history_data), 1)
sel_hist_data = self.op._selecting_history_data[0]
tim_hist_data = self.op._timing_history_data[0]
ric_hist_data = self.op._ricon_history_data[0]
print(f'in test_prepare_data in TestOperator:')
print('selecting history data:\n', sel_hist_data)
print('originally passed data in correct sequence:\n', self.test_data_3D[:, 3:, [2, 3, 0]])
print('difference is \n', sel_hist_data - self.test_data_3D[:, :, [2, 3, 0]])
self.assertTrue(np.allclose(sel_hist_data, self.test_data_3D[:, :, [2, 3, 0]], equal_nan=True))
self.assertTrue(np.allclose(tim_hist_data, self.test_data_3D, equal_nan=True))
self.assertTrue(np.allclose(ric_hist_data, self.test_data_3D[:, 3:, :], equal_nan=True))
# raises Value Error if empty history panel is given
empty_hp = qt.HistoryPanel()
correct_hp = qt.HistoryPanel(values=np.random.randint(10, size=(3, 50, 4)),
columns=self.types,
levels=self.shares,
rows=self.date_indices)
too_many_shares = qt.HistoryPanel(values=np.random.randint(10, size=(5, 50, 4)))
too_many_types = qt.HistoryPanel(values=np.random.randint(10, size=(3, 50, 5)))
# raises Error when history panel is empty
self.assertRaises(ValueError,
self.op.prepare_data,
empty_hp,
on_spot_cash)
# raises Error when first investment date is too early
self.assertRaises(AssertionError,
self.op.prepare_data,
correct_hp,
early_cash)
# raises Error when last investment date is too late
self.assertRaises(AssertionError,
self.op.prepare_data,
correct_hp,
late_cash)
# raises Error when some of the investment dates are on no-trade-days
self.assertRaises(ValueError,
self.op.prepare_data,
correct_hp,
no_trade_cash)
# raises Error when number of shares in history data does not fit
self.assertRaises(AssertionError,
self.op.prepare_data,
too_many_shares,
on_spot_cash)
# raises Error when too early cash investment date
self.assertRaises(AssertionError,
self.op.prepare_data,
correct_hp,
too_early_cash)
# raises Error when number of d_types in history data does not fit
self.assertRaises(AssertionError,
self.op.prepare_data,
too_many_types,
on_spot_cash)
# test the effect of data type sequence in strategy definition
def test_operator_generate(self):
"""
:return:
"""
test_ls = TestLSStrategy()
test_sel = TestSelStrategy()
test_sig = TestSigStrategy()
self.op = qt.Operator(timing_types=[test_ls],
selecting_types=[test_sel],
ricon_types=[test_sig])
self.assertIsInstance(self.op, qt.Operator, 'Operator Creation Error')
self.op.set_parameter(stg_id='t-0',
pars={'000300': (5, 10.),
'000400': (5, 10.),
'000500': (5, 6.)})
self.op.set_parameter(stg_id='s-0',
pars=())
# calling prepare_data before all strategies' parameters are set raises an AssertionError
self.assertRaises(AssertionError,
self.op.prepare_data,
hist_data=self.hp1,
cash_plan=qt.CashPlan(dates='2016-07-08', amounts=10000))
self.op.set_parameter(stg_id='r-0',
pars=(0.2, 0.02, -0.02))
self.op.prepare_data(hist_data=self.hp1,
cash_plan=qt.CashPlan(dates='2016-07-08', amounts=10000))
self.op.info()
op_list = self.op.create_signal(hist_data=self.hp1)
print(f'operation list is created: as following:\n {op_list}')
self.assertTrue(isinstance(op_list, pd.DataFrame))
self.assertEqual(op_list.shape, (26, 3))
# After removing the signal de-duplication code, the number of signals grows from 23 to 26 and includes three
# duplicate signals; de-duplication, however, may drop signals that should be kept - see the comment around line 836 of create_signal() in operator.py
target_op_dates = ['2016/07/08', '2016/07/12', '2016/07/13', '2016/07/14',
'2016/07/18', '2016/07/20', '2016/07/22', '2016/07/26',
'2016/07/27', '2016/07/28', '2016/08/02', '2016/08/03',
'2016/08/04', '2016/08/05', '2016/08/08', '2016/08/10',
'2016/08/16', '2016/08/18', '2016/08/24', '2016/08/26',
'2016/08/29', '2016/08/30', '2016/08/31', '2016/09/05',
'2016/09/06', '2016/09/08']
target_op_values = np.array([[0.0, 1.0, 0.0],
[0.5, -1.0, 0.0],
[1.0, 0.0, 0.0],
[0.0, 0.0, -1.0],
[0.0, 1.0, 0.0],
[0.0, 1.0, 0.0],
[0.0, 1.0, 0.0],
[0.0, 0.5, 0.0],
[0.0, 1.0, 0.0],
[0.0, 0.0, -1.0],
[0.0, 1.0, 0.0],
[0.0, -1.0, 0.0],
[0.0, 0.0, -1.0],
[0.0, 1.0, 0.0],
[-1.0, 0.0, 0.0],
[0.0, 0.0, -1.0],
[0.0, 0.0, -1.0],
[0.0, 0.0, 1.0],
[-1.0, 0.0, 0.0],
[0.0, 1.0, 1.0],
[0.0, 1.0, 0.5],
[0.0, 1.0, 0.0],
[0.0, 1.0, 0.0],
[0.0, 1.0, 0.0],
[0.0, 0.0, -1.0],
[0.0, 1.0, 0.0]])
target_op = pd.DataFrame(data=target_op_values, index=target_op_dates, columns=['000010', '000030', '000039'])
target_op = target_op.rename(index=pd.Timestamp)
print(f'target operation list is as following:\n {target_op}')
dates_pairs = [[date1, date2, date1 == date2]
for date1, date2
in zip(target_op.index.strftime('%m-%d'), op_list.index.strftime('%m-%d'))]
signal_pairs = [[list(sig1), list(sig2), all(sig1 == sig2)]
for sig1, sig2
in zip(list(target_op.values), list(op_list.values))]
print(f'dates side by side:\n '
f'{dates_pairs}')
print(f'signals side by side:\n'
f'{signal_pairs}')
print([item[2] for item in dates_pairs])
print([item[2] for item in signal_pairs])
self.assertTrue(np.allclose(target_op.values, op_list.values, equal_nan=True))
self.assertTrue(all([date1 == date2
for date1, date2
in zip(target_op.index.strftime('%m-%d'), op_list.index.strftime('%m-%d'))]))
def test_operator_parameter_setting(self):
"""
:return:
"""
new_op = qt.Operator(selecting_types=['all'], timing_types='dma', ricon_types='urgent')
print(new_op.strategies, '\n', [qt.TimingDMA, qt.SelectingAll, qt.RiconUrgent])
print(f'info of Timing strategy in new op: \n{new_op.strategies[0].info()}')
self.op.set_parameter('t-0',
pars=(5, 10, 5),
opt_tag=1,
par_boes=((5, 10), (5, 15), (10, 15)),
window_length=10,
data_types=['close', 'open', 'high'])
self.op.set_parameter(stg_id='s-0',
pars=None,
opt_tag=1,
sample_freq='10d',
window_length=10,
data_types='close, open')
self.op.set_parameter(stg_id='r-0',
pars=None,
opt_tag=0,
sample_freq='d',
window_length=20,
data_types='close, open')
self.assertEqual(self.op.timing[0].pars, (5, 10, 5))
self.assertEqual(self.op.timing[0].par_boes, ((5, 10), (5, 15), (10, 15)))
self.assertEqual(self.op.op_data_freq, 'd')
self.assertEqual(self.op.op_data_types, ['close', 'high', 'open'])
self.assertEqual(self.op.opt_space_par,
([(5, 10), (5, 15), (10, 15), (0, 1)], ['discr', 'discr', 'discr', 'conti']))
self.assertEqual(self.op.max_window_length, 20)
self.assertRaises(AssertionError, self.op.set_parameter, stg_id='t-1', pars=(1, 2))
self.assertRaises(AssertionError, self.op.set_parameter, stg_id='t1', pars=(1, 2))
self.assertRaises(AssertionError, self.op.set_parameter, stg_id=32, pars=(1, 2))
self.op.set_blender('selecting', '0 and 1 or 2')
self.op.set_blender('ls', 'str-1.2')
self.assertEqual(self.op.ls_blender, 'str-1.2')
self.assertEqual(self.op.selecting_blender, '0 and 1 or 2')
self.assertEqual(self.op.selecting_blender_expr, ['or', 'and', '0', '1', '2'])
self.assertEqual(self.op.ricon_blender, 'add')
self.assertRaises(ValueError, self.op.set_blender, 'select', '0and1')
self.assertRaises(TypeError, self.op.set_blender, 35, '0 and 1')
self.assertEqual(self.op.opt_space_par,
([(5, 10), (5, 15), (10, 15), (0, 1)], ['discr', 'discr', 'discr', 'conti']))
self.assertEqual(self.op.opt_types, [1, 1, 0])
def test_exp_to_blender(self):
self.op.set_blender('selecting', '0 and 1 or 2')
self.assertEqual(self.op.selecting_blender_expr, ['or', 'and', '0', '1', '2'])
self.op.set_blender('selecting', '0 and ( 1 or 2 )')
self.assertEqual(self.op.selecting_blender_expr, ['and', '0', 'or', '1', '2'])
self.assertRaises(ValueError, self.op.set_blender, 'selecting', '0 and (1 or 2)')
def test_set_opt_par(self):
self.op.set_parameter('t-0',
pars=(5, 10, 5),
opt_tag=1,
par_boes=((5, 10), (5, 15), (10, 15)),
window_length=10,
data_types=['close', 'open', 'high'])
self.op.set_parameter(stg_id='s-0',
pars=(0.5,),
opt_tag=0,
sample_freq='10d',
window_length=10,
data_types='close, open')
self.op.set_parameter(stg_id='r-0',
pars=(9, -0.23),
opt_tag=1,
sample_freq='d',
window_length=20,
data_types='close, open')
self.assertEqual(self.op.timing[0].pars, (5, 10, 5))
self.assertEqual(self.op.selecting[0].pars, (0.5,))
self.assertEqual(self.op.ricon[0].pars, (9, -0.23))
self.assertEqual(self.op.opt_types, [1, 0, 1])
self.op.set_opt_par((5, 12, 9, 8, -0.1))
self.assertEqual(self.op.timing[0].pars, (5, 12, 9))
self.assertEqual(self.op.selecting[0].pars, (0.5,))
self.assertEqual(self.op.ricon[0].pars, (8, -0.1))
# test set_opt_par when opt_tag is set to be 2 (enumerate type of parameters)
self.assertRaises(ValueError, self.op.set_opt_par, (5, 12, 9, 8))
def test_stg_attribute_get_and_set(self):
self.stg = qt.TimingCrossline()
self.stg_type = 'TIMING'
self.stg_name = "CROSSLINE STRATEGY"
self.stg_text = 'Moving average crossline strategy, determine long/short position according to the cross ' \
'point' \
' of long and short term moving average prices '
self.pars = (35, 120, 10, 'buy')
self.par_boes = [(10, 250), (10, 250), (1, 100), ('buy', 'sell', 'none')]
self.par_count = 4
self.par_types = ['discr', 'discr', 'conti', 'enum']
self.opt_tag = 0
self.data_types = ['close']
self.data_freq = 'd'
self.sample_freq = 'd'
self.window_length = 270
self.assertEqual(self.stg.stg_type, self.stg_type)
self.assertEqual(self.stg.stg_name, self.stg_name)
self.assertEqual(self.stg.stg_text, self.stg_text)
self.assertEqual(self.stg.pars, self.pars)
self.assertEqual(self.stg.par_types, self.par_types)
self.assertEqual(self.stg.par_boes, self.par_boes)
self.assertEqual(self.stg.par_count, self.par_count)
self.assertEqual(self.stg.opt_tag, self.opt_tag)
self.assertEqual(self.stg.data_freq, self.data_freq)
self.assertEqual(self.stg.sample_freq, self.sample_freq)
self.assertEqual(self.stg.data_types, self.data_types)
self.assertEqual(self.stg.window_length, self.window_length)
self.stg.stg_name = 'NEW NAME'
self.stg.stg_text = 'NEW TEXT'
self.assertEqual(self.stg.stg_name, 'NEW NAME')
self.assertEqual(self.stg.stg_text, 'NEW TEXT')
self.stg.pars = (1, 2, 3, 4)
self.assertEqual(self.stg.pars, (1, 2, 3, 4))
self.stg.par_count = 3
self.assertEqual(self.stg.par_count, 3)
self.stg.par_boes = [(1, 10), (1, 10), (1, 10), (1, 10)]
self.assertEqual(self.stg.par_boes, [(1, 10), (1, 10), (1, 10), (1, 10)])
self.stg.par_types = ['conti', 'conti', 'discr', 'enum']
self.assertEqual(self.stg.par_types, ['conti', 'conti', 'discr', 'enum'])
self.stg.par_types = 'conti, conti, discr, conti'
self.assertEqual(self.stg.par_types, ['conti', 'conti', 'discr', 'conti'])
self.stg.data_types = 'close, open'
self.assertEqual(self.stg.data_types, ['close', 'open'])
self.stg.data_types = ['close', 'high', 'low']
self.assertEqual(self.stg.data_types, ['close', 'high', 'low'])
self.stg.data_freq = 'w'
self.assertEqual(self.stg.data_freq, 'w')
self.stg.window_length = 300
self.assertEqual(self.stg.window_length, 300)
def test_rolling_timing(self):
stg = TestLSStrategy()
stg_pars = {'000100': (5, 10),
'000200': (5, 10),
'000300': (5, 6)}
stg.set_pars(stg_pars)
history_data = self.hp1.values
output = stg.generate(hist_data=history_data)
self.assertIsInstance(output, np.ndarray)
self.assertEqual(output.shape, (45, 3))
lsmask = np.array([[0., 0., 1.],
[0., 0., 1.],
[1., 0., 1.],
[1., 0., 1.],
[1., 0., 1.],
[1., 0., 1.],
[1., 0., 1.],
[1., 0., 1.],
[1., 0., 1.],
[1., 0., 1.],
[1., 0., 1.],
[1., 0., 1.],
[1., 1., 1.],
[1., 1., 1.],
[1., 1., 1.],
[1., 1., 0.],
[1., 1., 0.],
[1., 1., 0.],
[1., 0., 0.],
[1., 0., 0.],
[1., 1., 0.],
[0., 1., 0.],
[0., 1., 0.],
[0., 1., 0.],
[0., 1., 0.],
[0., 1., 0.],
[0., 1., 0.],
[0., 1., 0.],
[0., 1., 0.],
[0., 1., 0.],
[0., 1., 1.],
[0., 1., 1.],
[0., 1., 1.],
[0., 1., 1.],
[0., 1., 1.],
[0., 1., 1.],
[0., 1., 1.],
[0., 1., 1.],
[0., 1., 1.],
[0., 1., 1.],
[0., 1., 1.],
[0., 1., 1.],
[0., 1., 1.],
[0., 1., 1.],
[0., 1., 1.]])
# TODO: Issue to be solved: the np.nan values are converted to 0 in the lsmask, which may have unintended consequences
# TODO: the handling of NaN values needs to be resolved
self.assertEqual(output.shape, lsmask.shape)
self.assertTrue(np.allclose(output, lsmask, equal_nan=True))
def test_sel_timing(self):
stg = TestSelStrategy()
stg_pars = ()
stg.set_pars(stg_pars)
history_data = self.hp1['high, low, close', :, :]
seg_pos, seg_length, seg_count = stg._seg_periods(dates=self.hp1.hdates, freq=stg.sample_freq)
self.assertEqual(list(seg_pos), [0, 5, 11, 19, 26, 33, 41, 47, 49])
self.assertEqual(list(seg_length), [5, 6, 8, 7, 7, 8, 6, 2])
self.assertEqual(seg_count, 8)
output = stg.generate(hist_data=history_data, shares=self.hp1.shares, dates=self.hp1.hdates)
self.assertIsInstance(output, np.ndarray)
self.assertEqual(output.shape, (45, 3))
selmask = np.array([[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0]])
self.assertEqual(output.shape, selmask.shape)
self.assertTrue(np.allclose(output, selmask))
def test_simple_timing(self):
stg = TestSigStrategy()
stg_pars = (0.2, 0.02, -0.02)
stg.set_pars(stg_pars)
history_data = self.hp1['close, open, high, low', :, 3:50]
output = stg.generate(hist_data=history_data, shares=self.shares, dates=self.date_indices)
self.assertIsInstance(output, np.ndarray)
self.assertEqual(output.shape, (45, 3))
sigmatrix = np.array([[0.0, 1.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, -1.0, 0.0],
[1.0, 0.0, 0.0],
[0.0, 0.0, -1.0],
[0.0, 0.0, 0.0],
[0.0, 1.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 1.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 1.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 1.0, 0.0],
[0.0, 0.0, -1.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 1.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, -1.0],
[0.0, 1.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, -1.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, -1.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
[-1.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 1.0, 1.0],
[0.0, 1.0, 0.0],
[0.0, 1.0, 0.0],
[0.0, 1.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 1.0, 0.0],
[0.0, 0.0, -1.0],
[0.0, 0.0, 0.0],
[0.0, 1.0, 0.0]])
side_by_side_array = np.array([[i, out_line, sig_line]
for
i, out_line, sig_line
in zip(range(len(output)), output, sigmatrix)])
print(f'output and signal matrix lined up side by side is \n'
f'{side_by_side_array}')
self.assertEqual(sigmatrix.shape, output.shape)
self.assertTrue(np.allclose(output, sigmatrix))
def test_sel_finance(self):
"""Test selecting_finance strategy, test all built-in strategy parameters"""
stg = SelectingFinanceIndicator()
stg_pars = (False, 'even', 'greater', 0, 0, 0.67)
stg.set_pars(stg_pars)
stg.window_length = 5
stg.data_freq = 'd'
stg.sample_freq = '10d'
stg.sort_ascending = False
stg.condition = 'greater'
stg.lbound = 0
stg.ubound = 0
stg._poq = 0.67
history_data = self.hp2.values
print(f'Start to test financial selection parameter {stg_pars}')
seg_pos, seg_length, seg_count = stg._seg_periods(dates=self.hp1.hdates, freq=stg.sample_freq)
self.assertEqual(list(seg_pos), [0, 5, 11, 19, 26, 33, 41, 47, 49])
self.assertEqual(list(seg_length), [5, 6, 8, 7, 7, 8, 6, 2])
self.assertEqual(seg_count, 8)
output = stg.generate(hist_data=history_data, shares=self.hp1.shares, dates=self.hp1.hdates)
self.assertIsInstance(output, np.ndarray)
self.assertEqual(output.shape, (45, 3))
selmask = np.array([[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0]])
self.assertEqual(output.shape, selmask.shape)
self.assertTrue(np.allclose(output, selmask))
# test single factor, get minimum factor
stg_pars = (True, 'even', 'less', 1, 1, 0.67)
stg.sort_ascending = True
stg.condition = 'less'
stg.lbound = 1
stg.ubound = 1
stg.set_pars(stg_pars)
print(f'Start to test financial selection parameter {stg_pars}')
output = stg.generate(hist_data=history_data, shares=self.hp1.shares, dates=self.hp1.hdates)
selmask = np.array([[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5]])
self.assertEqual(output.shape, selmask.shape)
self.assertTrue(np.allclose(output, selmask))
# test single factor, get max factor in linear weight
stg_pars = (False, 'linear', 'greater', 0, 0, 0.67)
stg.sort_ascending = False
stg.weighting = 'linear'
stg.condition = 'greater'
stg.lbound = 0
stg.ubound = 0
stg.set_pars(stg_pars)
print(f'Start to test financial selection parameter {stg_pars}')
output = stg.generate(hist_data=history_data, shares=self.hp1.shares, dates=self.hp1.hdates)
selmask = np.array([[0.00000, 0.33333, 0.66667],
[0.00000, 0.33333, 0.66667],
[0.00000, 0.33333, 0.66667],
[0.00000, 0.33333, 0.66667],
[0.00000, 0.33333, 0.66667],
[0.00000, 0.33333, 0.66667],
[0.00000, 0.66667, 0.33333],
[0.00000, 0.66667, 0.33333],
[0.00000, 0.66667, 0.33333],
[0.00000, 0.66667, 0.33333],
[0.00000, 0.66667, 0.33333],
[0.00000, 0.66667, 0.33333],
[0.00000, 0.66667, 0.33333],
[0.00000, 0.66667, 0.33333],
[0.00000, 0.33333, 0.66667],
[0.00000, 0.33333, 0.66667],
[0.00000, 0.33333, 0.66667],
[0.00000, 0.33333, 0.66667],
[0.00000, 0.33333, 0.66667],
[0.00000, 0.33333, 0.66667],
[0.00000, 0.33333, 0.66667],
[0.33333, 0.00000, 0.66667],
[0.33333, 0.00000, 0.66667],
[0.33333, 0.00000, 0.66667],
[0.33333, 0.00000, 0.66667],
[0.33333, 0.00000, 0.66667],
[0.33333, 0.00000, 0.66667],
[0.33333, 0.00000, 0.66667],
[0.00000, 0.00000, 1.00000],
[0.00000, 0.00000, 1.00000],
[0.00000, 0.00000, 1.00000],
[0.00000, 0.00000, 1.00000],
[0.00000, 0.00000, 1.00000],
[0.00000, 0.00000, 1.00000],
[0.00000, 0.00000, 1.00000],
[0.00000, 0.00000, 1.00000],
[0.33333, 0.00000, 0.66667],
[0.33333, 0.00000, 0.66667],
[0.33333, 0.00000, 0.66667],
[0.33333, 0.00000, 0.66667],
[0.33333, 0.00000, 0.66667],
[0.33333, 0.00000, 0.66667],
[0.33333, 0.66667, 0.00000],
[0.33333, 0.66667, 0.00000],
[0.33333, 0.66667, 0.00000]])
self.assertEqual(output.shape, selmask.shape)
self.assertTrue(np.allclose(output, selmask))
# test single factor, get max factor in proportion weight
stg_pars = (False, 'proportion', 'greater', 0, 0, 0.67)
stg.sort_ascending = False
stg.weighting = 'proportion'
stg.condition = 'greater'
stg.lbound = 0
stg.ubound = 0
stg.set_pars(stg_pars)
print(f'Start to test financial selection parameter {stg_pars}')
output = stg.generate(hist_data=history_data, shares=self.hp1.shares, dates=self.hp1.hdates)
selmask = np.array([[0.00000, 0.08333, 0.91667],
[0.00000, 0.08333, 0.91667],
[0.00000, 0.08333, 0.91667],
[0.00000, 0.08333, 0.91667],
[0.00000, 0.08333, 0.91667],
[0.00000, 0.08333, 0.91667],
[0.00000, 0.91667, 0.08333],
[0.00000, 0.91667, 0.08333],
[0.00000, 0.91667, 0.08333],
[0.00000, 0.91667, 0.08333],
[0.00000, 0.91667, 0.08333],
[0.00000, 0.91667, 0.08333],
[0.00000, 0.91667, 0.08333],
[0.00000, 0.91667, 0.08333],
[0.00000, 0.50000, 0.50000],
[0.00000, 0.50000, 0.50000],
[0.00000, 0.50000, 0.50000],
[0.00000, 0.50000, 0.50000],
[0.00000, 0.50000, 0.50000],
[0.00000, 0.50000, 0.50000],
[0.00000, 0.50000, 0.50000],
[0.08333, 0.00000, 0.91667],
[0.08333, 0.00000, 0.91667],
[0.08333, 0.00000, 0.91667],
[0.08333, 0.00000, 0.91667],
[0.08333, 0.00000, 0.91667],
[0.08333, 0.00000, 0.91667],
[0.08333, 0.00000, 0.91667],
[0.00000, 0.00000, 1.00000],
[0.00000, 0.00000, 1.00000],
[0.00000, 0.00000, 1.00000],
[0.00000, 0.00000, 1.00000],
[0.00000, 0.00000, 1.00000],
[0.00000, 0.00000, 1.00000],
[0.00000, 0.00000, 1.00000],
[0.00000, 0.00000, 1.00000],
[0.08333, 0.00000, 0.91667],
[0.08333, 0.00000, 0.91667],
[0.08333, 0.00000, 0.91667],
[0.08333, 0.00000, 0.91667],
[0.08333, 0.00000, 0.91667],
[0.08333, 0.00000, 0.91667],
[0.08333, 0.91667, 0.00000],
[0.08333, 0.91667, 0.00000],
[0.08333, 0.91667, 0.00000]])
self.assertEqual(output.shape, selmask.shape)
self.assertTrue(np.allclose(output, selmask, 0.001))
# test single factor, get max factor in even weight, threshold 0.2
stg_pars = (False, 'even', 'greater', 0.2, 0.2, 0.67)
stg.sort_ascending = False
stg.weighting = 'even'
stg.condition = 'greater'
stg.lbound = 0.2
stg.ubound = 0.2
stg.set_pars(stg_pars)
print(f'Start to test financial selection parameter {stg_pars}')
output = stg.generate(hist_data=history_data, shares=self.hp1.shares, dates=self.hp1.hdates)
selmask = np.array([[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0]])
self.assertEqual(output.shape, selmask.shape)
self.assertTrue(np.allclose(output, selmask, 0.001))
class TestLog(unittest.TestCase):
def test_init(self):
pass
class TestConfig(unittest.TestCase):
"""测试Config对象以及QT_CONFIG变量的设置和获取值"""
def test_init(self):
pass
def test_invest(self):
pass
def test_pars_string_to_type(self):
_parse_string_kwargs('000300', 'asset_pool', _valid_qt_kwargs())
class TestHistoryPanel(unittest.TestCase):
def setUp(self):
print('start testing HistoryPanel object\n')
self.data = np.random.randint(10, size=(5, 10, 4))
self.index = pd.date_range(start='20200101', freq='d', periods=10)
self.index2 = ['2016-07-01', '2016-07-04', '2016-07-05', '2016-07-06',
'2016-07-07', '2016-07-08', '2016-07-11', '2016-07-12',
'2016-07-13', '2016-07-14']
self.index3 = '2016-07-01, 2016-07-04, 2016-07-05, 2016-07-06, 2016-07-07, ' \
'2016-07-08, 2016-07-11, 2016-07-12, 2016-07-13, 2016-07-14'
self.shares = '000100,000101,000102,000103,000104'
self.htypes = 'close,open,high,low'
self.data2 = np.random.randint(10, size=(10, 5))
self.data3 = np.random.randint(10, size=(10, 4))
self.data4 = np.random.randint(10, size=(10))
self.hp = qt.HistoryPanel(values=self.data, levels=self.shares, columns=self.htypes, rows=self.index)
self.hp2 = qt.HistoryPanel(values=self.data2, levels=self.shares, columns='close', rows=self.index)
self.hp3 = qt.HistoryPanel(values=self.data3, levels='000100', columns=self.htypes, rows=self.index2)
self.hp4 = qt.HistoryPanel(values=self.data4, levels='000100', columns='close', rows=self.index3)
self.hp5 = qt.HistoryPanel(values=self.data)
self.hp6 = qt.HistoryPanel(values=self.data, levels=self.shares, rows=self.index3)
def test_properties(self):
""" test all properties of HistoryPanel
"""
self.assertFalse(self.hp.is_empty)
self.assertEqual(self.hp.row_count, 10)
self.assertEqual(self.hp.column_count, 4)
self.assertEqual(self.hp.level_count, 5)
self.assertEqual(self.hp.shape, (5, 10, 4))
self.assertSequenceEqual(self.hp.htypes, ['close', 'open', 'high', 'low'])
self.assertSequenceEqual(self.hp.shares, ['000100', '000101', '000102', '000103', '000104'])
self.assertSequenceEqual(list(self.hp.hdates), list(self.index))
self.assertDictEqual(self.hp.columns, {'close': 0, 'open': 1, 'high': 2, 'low': 3})
self.assertDictEqual(self.hp.levels, {'000100': 0, '000101': 1, '000102': 2, '000103': 3, '000104': 4})
row_dict = {Timestamp('2020-01-01 00:00:00', freq='D'): 0,
Timestamp('2020-01-02 00:00:00', freq='D'): 1,
Timestamp('2020-01-03 00:00:00', freq='D'): 2,
Timestamp('2020-01-04 00:00:00', freq='D'): 3,
Timestamp('2020-01-05 00:00:00', freq='D'): 4,
Timestamp('2020-01-06 00:00:00', freq='D'): 5,
Timestamp('2020-01-07 00:00:00', freq='D'): 6,
Timestamp('2020-01-08 00:00:00', freq='D'): 7,
Timestamp('2020-01-09 00:00:00', freq='D'): 8,
Timestamp('2020-01-10 00:00:00', freq='D'): 9}
self.assertDictEqual(self.hp.rows, row_dict)
def test_len(self):
""" test the function len(HistoryPanel)
:return:
"""
self.assertEqual(len(self.hp), 10)
def test_empty_history_panel(self):
"""测试空HP或者特殊HP如维度标签为纯数字的HP"""
test_hp = qt.HistoryPanel(self.data)
self.assertFalse(test_hp.is_empty)
self.assertIsInstance(test_hp, qt.HistoryPanel)
self.assertEqual(test_hp.shape[0], 5)
self.assertEqual(test_hp.shape[1], 10)
self.assertEqual(test_hp.shape[2], 4)
self.assertEqual(test_hp.level_count, 5)
self.assertEqual(test_hp.row_count, 10)
self.assertEqual(test_hp.column_count, 4)
self.assertEqual(test_hp.shares, list(range(5)))
self.assertEqual(test_hp.hdates, list(pd.date_range(start='20200730', periods=10, freq='d')))
self.assertEqual(test_hp.htypes, list(range(4)))
self.assertTrue(np.allclose(test_hp.values, self.data))
print(f'shares: {test_hp.shares}\nhtypes: {test_hp.htypes}')
print(test_hp)
empty_hp = qt.HistoryPanel()
self.assertTrue(empty_hp.is_empty)
self.assertIsInstance(empty_hp, qt.HistoryPanel)
self.assertEqual(empty_hp.shape[0], 0)
self.assertEqual(empty_hp.shape[1], 0)
self.assertEqual(empty_hp.shape[2], 0)
self.assertEqual(empty_hp.level_count, 0)
self.assertEqual(empty_hp.row_count, 0)
self.assertEqual(empty_hp.column_count, 0)
def test_create_history_panel(self):
""" test the creation of a HistoryPanel object by passing all data explicitly
"""
self.assertIsInstance(self.hp, qt.HistoryPanel)
self.assertEqual(self.hp.shape[0], 5)
self.assertEqual(self.hp.shape[1], 10)
self.assertEqual(self.hp.shape[2], 4)
self.assertEqual(self.hp.level_count, 5)
self.assertEqual(self.hp.row_count, 10)
self.assertEqual(self.hp.column_count, 4)
self.assertEqual(list(self.hp.levels.keys()), self.shares.split(','))
self.assertEqual(list(self.hp.columns.keys()), self.htypes.split(','))
self.assertEqual(list(self.hp.rows.keys())[0], pd.Timestamp('20200101'))
self.assertIsInstance(self.hp2, qt.HistoryPanel)
self.assertEqual(self.hp2.shape[0], 5)
self.assertEqual(self.hp2.shape[1], 10)
self.assertEqual(self.hp2.shape[2], 1)
self.assertEqual(self.hp2.level_count, 5)
self.assertEqual(self.hp2.row_count, 10)
self.assertEqual(self.hp2.column_count, 1)
self.assertEqual(list(self.hp2.levels.keys()), self.shares.split(','))
self.assertEqual(list(self.hp2.columns.keys()), ['close'])
self.assertEqual(list(self.hp2.rows.keys())[0], pd.Timestamp('20200101'))
self.assertIsInstance(self.hp3, qt.HistoryPanel)
self.assertEqual(self.hp3.shape[0], 1)
self.assertEqual(self.hp3.shape[1], 10)
self.assertEqual(self.hp3.shape[2], 4)
self.assertEqual(self.hp3.level_count, 1)
self.assertEqual(self.hp3.row_count, 10)
self.assertEqual(self.hp3.column_count, 4)
self.assertEqual(list(self.hp3.levels.keys()), ['000100'])
self.assertEqual(list(self.hp3.columns.keys()), self.htypes.split(','))
self.assertEqual(list(self.hp3.rows.keys())[0], pd.Timestamp('2016-07-01'))
self.assertIsInstance(self.hp4, qt.HistoryPanel)
self.assertEqual(self.hp4.shape[0], 1)
self.assertEqual(self.hp4.shape[1], 10)
self.assertEqual(self.hp4.shape[2], 1)
self.assertEqual(self.hp4.level_count, 1)
self.assertEqual(self.hp4.row_count, 10)
self.assertEqual(self.hp4.column_count, 1)
self.assertEqual(list(self.hp4.levels.keys()), ['000100'])
self.assertEqual(list(self.hp4.columns.keys()), ['close'])
self.assertEqual(list(self.hp4.rows.keys())[0], pd.Timestamp('2016-07-01'))
self.hp5.info()
self.assertIsInstance(self.hp5, qt.HistoryPanel)
self.assertTrue(np.allclose(self.hp5.values, self.data))
self.assertEqual(self.hp5.shape[0], 5)
self.assertEqual(self.hp5.shape[1], 10)
self.assertEqual(self.hp5.shape[2], 4)
self.assertEqual(self.hp5.level_count, 5)
self.assertEqual(self.hp5.row_count, 10)
self.assertEqual(self.hp5.column_count, 4)
self.assertEqual(list(self.hp5.levels.keys()), [0, 1, 2, 3, 4])
self.assertEqual(list(self.hp5.columns.keys()), [0, 1, 2, 3])
self.assertEqual(list(self.hp5.rows.keys())[0], pd.Timestamp('2020-07-30'))
self.hp6.info()
self.assertIsInstance(self.hp6, qt.HistoryPanel)
self.assertTrue(np.allclose(self.hp6.values, self.data))
self.assertEqual(self.hp6.shape[0], 5)
self.assertEqual(self.hp6.shape[1], 10)
self.assertEqual(self.hp6.shape[2], 4)
self.assertEqual(self.hp6.level_count, 5)
self.assertEqual(self.hp6.row_count, 10)
self.assertEqual(self.hp6.column_count, 4)
self.assertEqual(list(self.hp6.levels.keys()), ['000100', '000101', '000102', '000103', '000104'])
self.assertEqual(list(self.hp6.columns.keys()), [0, 1, 2, 3])
self.assertEqual(list(self.hp6.rows.keys())[0], pd.Timestamp('2016-07-01'))
# Error testing during HistoryPanel creation
# shape does not match
self.assertRaises(AssertionError,
qt.HistoryPanel,
self.data,
levels=self.shares, columns='close', rows=self.index)
# values is not np.ndarray
self.assertRaises(AssertionError,
qt.HistoryPanel,
list(self.data))
# dimension/shape does not match
self.assertRaises(AssertionError,
qt.HistoryPanel,
self.data2,
levels='000100', columns=self.htypes, rows=self.index)
# value dimension over 3
self.assertRaises(AssertionError,
qt.HistoryPanel,
np.random.randint(10, size=(5, 10, 4, 2)))
# label value not valid
self.assertRaises(ValueError,
qt.HistoryPanel,
self.data2,
levels=self.shares, columns='close',
rows='a,b,c,d,e,f,g,h,i,j')
def test_history_panel_slicing(self):
"""测试HistoryPanel的各种切片方法
包括通过标签名称切片,通过数字切片,通过逗号分隔的标签名称切片,通过冒号分隔的标签名称切片等切片方式"""
self.assertTrue(np.allclose(self.hp['close'], self.data[:, :, 0:1]))
self.assertTrue(np.allclose(self.hp['close,open'], self.data[:, :, 0:2]))
self.assertTrue(np.allclose(self.hp[['close', 'open']], self.data[:, :, 0:2]))
self.assertTrue(np.allclose(self.hp['close:high'], self.data[:, :, 0:3]))
self.assertTrue(np.allclose(self.hp['close,high'], self.data[:, :, [0, 2]]))
self.assertTrue(np.allclose(self.hp[:, '000100'], self.data[0:1, :, ]))
self.assertTrue(np.allclose(self.hp[:, '000100,000101'], self.data[0:2, :]))
self.assertTrue(np.allclose(self.hp[:, ['000100', '000101']], self.data[0:2, :]))
self.assertTrue(np.allclose(self.hp[:, '000100:000102'], self.data[0:3, :]))
self.assertTrue(np.allclose(self.hp[:, '000100,000102'], self.data[[0, 2], :]))
self.assertTrue(np.allclose(self.hp['close,open', '000100,000102'], self.data[[0, 2], :, 0:2]))
from torch.utils import data
from os.path import join
from PIL import Image
import numpy as np
import cv2
def prepare_image_cv2(im):
im = cv2.resize(im, dsize=(400, 400), interpolation=cv2.INTER_LINEAR)
im = np.transpose(im, (2, 0, 1)) # (H x W x C) to (C x H x W)
return im
class BSDS_Dataset(data.Dataset):
"""
BSDS500
"""
def __init__(self, root='../DATA/data', split='train', transform=False, scale=None):
self.root = root
self.split = split
self.transform = transform
self.scale = scale
self.bsds_root = join(root, 'HED-BSDS')
if self.split == 'train':
self.filelist = join(self.root, 'bsds_pascal_train_pair.lst')
elif self.split == 'test':
self.filelist = join(self.bsds_root, 'test_pair.lst')
else:
raise ValueError("Invalid split type!")
with open(self.filelist, 'r') as f:
self.filelist = f.readlines()
def __len__(self):
return len(self.filelist)
def __getitem__(self, index):
r = np.random.randint(0, 100000)
if self.split == "train":
img_file, lb_file = self.filelist[index].split()
lb = np.array(Image.open(join(self.root, lb_file)), dtype=np.float32)
if lb.ndim == 3:
lb = np.squeeze(lb[:, :, 0])
assert lb.ndim == 2
lb = cv2.resize(lb, (400, 400), interpolation=cv2.INTER_LINEAR)
lb = lb[np.newaxis, :, :]
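# Quantize the edge annotation: 0 stays background, weak responses in (0, 64) are set
# to 2 (presumably treated as ambiguous/ignored by the loss), and values >= 64 become
# positive edges.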
lb[lb == 0] = 0
lb[np.logical_and(lb > 0, lb < 64)] = 2
lb[lb >= 64] = 1
# lb[lb >= 128] = 1
img = np.array(cv2.imread(join(self.root, img_file)), dtype=np.float32)
img = prepare_image_cv2(img)
return img, lb
else:
img_file, lb_file = self.filelist[index].split()
data = []
data_name = []
original_img = np.array(cv2.imread(join(self.bsds_root, img_file)), dtype=np.float32)
img = cv2.resize(original_img, dsize=(400, 400), interpolation=cv2.INTER_LINEAR)
if self.scale is not None:
for scl in self.scale:
img_scale = cv2.resize(img, None, fx=scl, fy=scl, interpolation=cv2.INTER_LINEAR)
data.append(img_scale.transpose(2, 0, 1))
data_name.append(img_file)
return data, img, data_name
img = prepare_image_cv2(img)
lb = np.array(Image.open(join(self.bsds_root, lb_file)), dtype=np.float32)
if lb.ndim == 3:
lb = np.squeeze(lb[:, :, 0])
assert lb.ndim == 2
lb = cv2.resize(lb, (400, 400), interpolation=cv2.INTER_LINEAR)
lb = lb[np.newaxis, :, :]
lb[lb == 0] = 0
lb[np.logical_and(lb > 0, lb < 64)] = 2
import numpy as np
import pandas as pd
import scipy
import matplotlib.pyplot as plt
import math
from .lookup_function import LookupFunction, VoxelLookupFunction
import collections
import warnings
from functools import reduce
class SparseHistogram(object):
"""
Base class for sparse-based histograms.
Parameters
----------
bin_widths : array-like
bin (voxel) size
left_bin_edges : array-like
lesser side of the bin (for each direction)
"""
def __init__(self, bin_widths, left_bin_edges):
self.bin_widths = np.array(bin_widths)
if left_bin_edges is None:
self.left_bin_edges = None
else:
self.left_bin_edges = np.array(left_bin_edges)
self.count = 0
self.name = None
self._histogram = None
def empty_copy(self):
"""Returns a new histogram with the same bin shape, but empty"""
return type(self)(self.bin_widths, self.left_bin_edges)
def histogram(self, data=None, weights=None):
"""Build the histogram.
Parameters
----------
data : list of list of floats
input data
weights : list of floats
weight for each input data point
Returns
-------
collection.Counter :
copy of the current counter
"""
if data is None and self._histogram is None:
raise RuntimeError("histogram() called without data!")
elif data is not None:
self._histogram = collections.Counter({})
return self.add_data_to_histogram(data, weights)
else:
return self._histogram.copy()
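# Usage sketch (hypothetical values; add_data_to_histogram is defined further down in
# the original module):
#     hist = SparseHistogram(bin_widths=[0.1, 0.1], left_bin_edges=[0.0, 0.0])
#     counts = hist.histogram([[0.05, 0.12], [0.31, 0.07]])  # Counter keyed by bin index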
@staticmethod
def sum_histograms(hists):
# (w, r) = (hists[0].bin_width, hists[0].bin_range)
# newhist = Histogram(bin_width=w, bin_range=r)
newhist = hists[0].empty_copy()
newhist._histogram = collections.Counter({})
for hist in hists:
if not newhist.compare_parameters(hist):
raise RuntimeError
newhist.count += hist.count
newhist._histogram += hist._histogram
return newhist
def map_to_float_bins(self, trajectory):
# map each frame to fractional bin coordinates (assumed completion of the truncated line)
return (np.asarray(trajectory) - self.left_bin_edges) / self.bin_widths
from __future__ import print_function
import os
import cv2
import string
import random
import numpy as np
from copy import deepcopy
class dataLoader(object):
def __init__(self, directory, dataset_dir, dataset_name, max_steps,
image_width, image_height, grd_attn=False, mode='Train'):
self.mode = mode
self.grd_attn = grd_attn
self.max_steps = max_steps
self.image_width = image_width
self.image_height = image_height
self.directory = directory
self.dataset_dir = dataset_dir
self.dataset_name = dataset_name
self.load_data()
def load_data(self):
all_data = []
# Full images file path
file_path = os.path.join(self.directory, self.dataset_name)
# Get characters
az = string.ascii_lowercase
AZ = string.ascii_uppercase
nm = string.digits
# Append all characters
all_selections = []
for i in range(len(az)):
all_selections.append(az[i])
for i in range(len(AZ)):
all_selections.append(AZ[i])
for i in range(len(nm)):
all_selections.append(nm[i])
with open(file_path, 'r') as f:
frames = f.readlines()
for i in range(0, len(frames), self.max_steps):
interm_data = []
for u in range(self.max_steps):
frame = frames[i+u]
path, label, _, _, _, _ = frame.split(', ')
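# Test/validation frames are stored after the training frames, so the file index is
# offset by 55000 (test) or 40000 (validation) to point at the corresponding image
# (assumed layout of the dataset).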
if self.mode == 'Test':
path = int(path[0:-4]) + 55000
path = str(path) + '.png'
if self.mode == 'Valid':
path = int(path[0:-4]) + 40000
path = str(path) + '.png'
label = all_selections.index(label) # Convert to label category
interm_data.append([path, label])
# Collect
all_data.append(interm_data)
self.all_data = all_data
self.max_length = len(self.all_data)
self.possible_pred = len(all_selections)
print('All data Loaded!')
def gen_random_data(self):
while True:
indices = list(range(len(self.all_data)))
random.shuffle(indices)
for i in indices:
data = self.all_data[i]
yield data
def gen_val_data(self):
while True:
indices = range(len(self.all_data))
for i in indices:
data = self.all_data[i]
yield data
def gen_data_batch(self, batch_size):
# Generate data based on training/validation
if self.mode == 'Train':
# Randomize data
data_gen = self.gen_random_data()
else:
# Validation Data generation
data_gen = self.gen_val_data()
# Create max steps
mxstep_label = np.zeros(self.max_steps)
'''
Code for output results for analysis.
Please cite:
Development and External Validation of a Mixed-Effects Deep Learning Model to Diagnose COVID-19 from CT Imaging
<NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>,
<NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>
medRxiv 2022.01.28.22270005; doi: https://doi.org/10.1101/2022.01.28.22270005
<EMAIL>
github.com/JTBridge/ME-COVID19
Apache License 2.0
'''
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
from data_loader import data_gen
from tensorflow.keras import losses, optimizers, models, metrics, layers, applications, regularizers, backend
from tensorflow.keras.callbacks import ModelCheckpoint, EarlyStopping, ReduceLROnPlateau
import os, sklearn
import numpy as np
import matplotlib.pyplot as plt
from spatial import ME, normal, mse_wgt
from ME import ME_model
from comparison_models import bai, covinet, covnet
from sklearn.metrics import roc_curve, auc
import tensorflow as tf
tf.keras.mixed_precision.set_global_policy('mixed_float16')
mtype = 'ME'
px = 256
slices = 20
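# Inverse-frequency class weights, assuming 169 negative and 856 positive scans (the
# counts used below), normalised so both classes contribute equally; t0/t1 are the
# corresponding smoothed soft targets for the weighted MSE loss.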
wgt0 = (1/169) * (1025/2.0)
wgt1 = (1/856) * (1025/2.0)
t0 = 1/(169+2)
t1 = (856+1)/(856+2)
if mtype == 'ME':
model=ME_model()
model.compile(
loss={'beta':normal, 'prob':mse_wgt(class_weights=[wgt0, wgt1], targets=[t0, t1])},
optimizer=optimizers.Adam(1e-4),
metrics={'prob':[metrics.AUC(name='AUC')]}
)
if mtype == 'Bai':
model=bai()
model.compile(
loss=mse_wgt(class_weights=[wgt0, wgt1], targets=[t0, t1]),
optimizer=optimizers.Adam(1e-4),
metrics=metrics.AUC(name='AUC'))
if mtype == 'covinet':
model=covinet()
model.compile(
loss=mse_wgt(class_weights=[wgt0, wgt1], targets=[t0, t1]),
optimizer=optimizers.Adam(1e-4),
metrics=metrics.AUC(name='AUC'))
if mtype == 'covnet':
model=covnet()
model.compile(
loss=mse_wgt(class_weights=[wgt0, wgt1], targets=[t0, t1]),
optimizer=optimizers.Adam(1e-4),
metrics=metrics.AUC(name='AUC'))
print(model.summary())
if mtype == "ME":
model.load_weights('../models/ME.h5')
elif mtype == 'Bai':
model.load_weights('../models/Bai.h5')
elif mtype == 'covinet':
model.load_weights('../models/covinet.h5')
elif mtype == 'covnet':
model.load_weights('../models/covnet.h5')
#############################################################################
print('Internal Validation: Mosmed')
img_dir = '../data/mosmed/val/'
test_generator = data_gen(
img_dir, slices, 1, mtype=mtype, px=px, augment=False, shuffle=False)
test_steps = len(os.listdir(img_dir))
if mtype == 'ME':
predict = model.predict(test_generator, steps=test_steps, verbose=1)[1]
else:
predict = model.predict(test_generator, steps=test_steps, verbose=1)
class_nor = np.squeeze(np.repeat(0, 85))
class_c19 = np.squeeze(np.repeat(1, 285))
true_class = np.concatenate((class_nor, class_c19), axis=0)
fpr, tpr, thresholds = roc_curve(true_class, predict, pos_label=1)
print("AUC:",auc(fpr, tpr))
np.savetxt('../results/internal/true.csv', true_class)
if mtype == "ME":
np.savetxt('../results/internal/ME.csv', predict)
elif mtype == 'Bai':
np.savetxt('../results/internal/Bai.csv', predict)
elif mtype == 'covinet':
np.savetxt('../results/internal/covinet.csv', predict)
elif mtype == 'covnet':
np.savetxt('../results/internal/covnet.csv', predict)
#############################################################################
print('External Validation: Zhang et al.')
img_dir = '../data/zhang/'
test_generator = data_gen(
img_dir, slices, 1, mtype=mtype, px=px, augment=False, shuffle=False)
test_steps = len(os.listdir(img_dir))
if mtype == 'ME':
predict = model.predict(test_generator, steps=test_steps, verbose=1)[1]
else:
predict = model.predict(test_generator, steps=test_steps, verbose=1)
class_nor = np.squeeze(np.repeat(0, 243))
import multiprocessing as mp
from copy import copy
import numpy as np
import tkinter
import pickle
import os
from itertools import accumulate
from matplotlib import pyplot as plt, lines
from casadi import Callback, nlpsol_out, nlpsol_n_out, Sparsity
from ..misc.data import Data
from ..misc.enums import PlotType, ControlType, InterpolationType
from ..misc.mapping import Mapping
from ..misc.utils import check_version
class CustomPlot:
def __init__(
self, update_function, plot_type=PlotType.PLOT, axes_idx=None, legend=(), combine_to=None, color=None, ylim=None, bounds=None,
):
"""
Initializes the plot.
:param update_function: Function to plot.
:param plot_type: Type of plot. (PLOT = 0, INTEGRATED = 1 or STEP = 2)
:param axes_idx: Index of the axis to be mapped. (integer)
:param legend: Legend labels of the graphs, one per plotted variable/axis.
:param combine_to: Name of an already-declared plot whose axes this plot's curves are drawn onto instead of opening a new figure.
:param color: matplotlib color used for this plot's curves.
:param ylim: Optional (min, max) limits applied to the y-axis.
:param bounds: Optional bounds to draw on the graph along with the data.
"""
self.function = update_function
self.type = plot_type
if axes_idx is None:
self.phase_mappings = None # Will be set later
elif isinstance(axes_idx, (tuple, list)):
self.phase_mappings = Mapping(axes_idx)
elif isinstance(axes_idx, Mapping):
self.phase_mappings = axes_idx
else:
raise RuntimeError("phase_mapping must be a list or a Mapping")
self.legend = legend
self.combine_to = combine_to
self.color = color
self.ylim = ylim
self.bounds = bounds
class PlotOcp:
def __init__(self, ocp, automatically_organize=True, adapt_graph_size_to_bounds=False):
"""Prepares the figure"""
for i in range(1, ocp.nb_phases):
if ocp.nlp[0]["nbQ"] != ocp.nlp[i]["nbQ"]:
raise RuntimeError("Graphs with nbQ different at each phase is not implemented yet")
self.ocp = ocp
self.plot_options = {
"general_options": {"use_tight_layout": False},
"non_integrated_plots": {"linestyle": "-.", "markersize": 3},
"integrated_plots": {"linestyle": "-", "markersize": 3, "linewidth": 1.1},
"bounds": {"color": "k", "linewidth": 0.4, "linestyle": "-"},
"grid": {"color": "k", "linestyle": "-", "linewidth": 0.15},
"vertical_lines": {"color": "k", "linestyle": "--", "linewidth": 1.2},
}
self.ydata = []
self.ns = 0
self.t = []
self.t_integrated = []
if isinstance(self.ocp.initial_phase_time, (int, float)):
self.tf = [self.ocp.initial_phase_time]
else:
self.tf = list(self.ocp.initial_phase_time)
self.t_idx_to_optimize = []
for i, nlp in enumerate(self.ocp.nlp):
if isinstance(nlp["tf"], self.ocp.CX):
self.t_idx_to_optimize.append(i)
self.__update_time_vector()
self.axes = {}
self.plots = []
self.plots_vertical_lines = []
self.plots_bounds = []
self.all_figures = []
self.automatically_organize = automatically_organize
self._organize_windows(len(self.ocp.nlp[0]["var_states"]) + len(self.ocp.nlp[0]["var_controls"]))
self.plot_func = {}
self.variable_sizes = []
self.adapt_graph_size_to_bounds = adapt_graph_size_to_bounds
self.__create_plots()
horz = 0
vert = 1 if len(self.all_figures) < self.nb_vertical_windows * self.nb_horizontal_windows else 0
for i, fig in enumerate(self.all_figures):
if self.automatically_organize:
try:
fig.canvas.manager.window.move(
int(vert * self.width_step), int(self.top_margin + horz * self.height_step)
)
vert += 1
if vert >= self.nb_vertical_windows:
horz += 1
vert = 0
except AttributeError:
pass
fig.canvas.draw()
if self.plot_options["general_options"]["use_tight_layout"]:
fig.tight_layout()
def __update_time_vector(self):
"""Sets x-axis array"""
self.t = []
self.t_integrated = []
last_t = 0
for phase_idx, nlp in enumerate(self.ocp.nlp):
nb_int_steps = nlp["nb_integration_steps"]
dt_ns = self.tf[phase_idx] / nlp["ns"]
time_phase_integrated = []
last_t_int = copy(last_t)
for _ in range(nlp["ns"]):
time_phase_integrated.append(np.linspace(last_t_int, last_t_int + dt_ns, nb_int_steps + 1))
last_t_int += dt_ns
self.t_integrated.append(time_phase_integrated)
self.ns += nlp["ns"] + 1
time_phase = np.linspace(last_t, last_t + self.tf[phase_idx], nlp["ns"] + 1)
last_t += self.tf[phase_idx]
self.t.append(time_phase)
def __create_plots(self):
"""Actually plots"""
variable_sizes = []
for i, nlp in enumerate(self.ocp.nlp):
variable_sizes.append({})
if "plot" in nlp:
for key in nlp["plot"]:
if isinstance(nlp["plot"][key], tuple):
nlp["plot"][key] = nlp["plot"][key][0]
if nlp["plot"][key].phase_mappings is None:
size = (
nlp["plot"][key]
.function(np.zeros((nlp["nx"], 1)), np.zeros((nlp["nu"], 1)), np.zeros((nlp["np"], 1)))
.shape[0]
)
nlp["plot"][key].phase_mappings = Mapping(range(size))
else:
size = len(nlp["plot"][key].phase_mappings.map_idx)
if key not in variable_sizes[i]:
variable_sizes[i][key] = size
else:
variable_sizes[i][key] = max(variable_sizes[i][key], size)
self.variable_sizes = variable_sizes
if not variable_sizes:
# No graph was setup in problem_type
return
self.plot_func = {}
for i, nlp in enumerate(self.ocp.nlp):
for variable in self.variable_sizes[i]:
nb = max(nlp["plot"][variable].phase_mappings.map_idx) + 1
nb_cols, nb_rows = PlotOcp._generate_windows_size(nb)
if nlp["plot"][variable].combine_to:
self.axes[variable] = self.axes[nlp["plot"][variable].combine_to]
axes = self.axes[variable][1]
elif i > 0 and variable in self.axes:
axes = self.axes[variable][1]
else:
axes = self.__add_new_axis(variable, nb, nb_rows, nb_cols)
self.axes[variable] = [nlp["plot"][variable], axes]
t = self.t[i]
if variable not in self.plot_func:
self.plot_func[variable] = [None] * self.ocp.nb_phases
self.plot_func[variable][i] = nlp["plot"][variable]
mapping = self.plot_func[variable][i].phase_mappings.map_idx
for ctr, k in enumerate(mapping):
ax = axes[k]
if k < len(self.plot_func[variable][i].legend):
axes[k].set_title(self.plot_func[variable][i].legend[k])
ax.grid(**self.plot_options["grid"])
ax.set_xlim(0, self.t[-1][-1])
if nlp["plot"][variable].ylim:
ax.set_ylim(nlp["plot"][variable].ylim)
elif self.adapt_graph_size_to_bounds and nlp["plot"][variable].bounds:
if nlp["plot"][variable].bounds.type != InterpolationType.CUSTOM:
y_min = nlp["plot"][variable].bounds.min[ctr].min()
y_max = nlp["plot"][variable].bounds.max[ctr].max()
else:
nlp["plot"][variable].bounds.check_and_adjust_dimensions(len(mapping), nlp["ns"])
y_min = min([nlp["plot"][variable].bounds.min.evaluate_at(j)[k] for j in range(nlp["ns"])])
y_max = max([nlp["plot"][variable].bounds.max.evaluate_at(j)[k] for j in range(nlp["ns"])])
y_range, _ = self.__compute_ylim(y_min, y_max, 1.25)
ax.set_ylim(y_range)
zero = np.zeros((t.shape[0], 1))
plot_type = self.plot_func[variable][i].type
if plot_type == PlotType.PLOT:
color = self.plot_func[variable][i].color if self.plot_func[variable][i].color else "tab:green"
self.plots.append(
[plot_type, i, ax.plot(t, zero, color=color, zorder=0, **self.plot_options["non_integrated_plots"])[0]]
)
elif plot_type == PlotType.INTEGRATED:
color = self.plot_func[variable][i].color if self.plot_func[variable][i].color else "tab:brown"
plots_integrated = []
nb_int_steps = nlp["nb_integration_steps"]
for cmp in range(nlp["ns"]):
plots_integrated.append(
ax.plot(
self.t_integrated[i][cmp],
np.zeros(nb_int_steps + 1),
color=color,
**self.plot_options["integrated_plots"],
)[0]
)
self.plots.append([plot_type, i, plots_integrated])
elif plot_type == PlotType.STEP:
color = self.plot_func[variable][i].color if self.plot_func[variable][i].color else "tab:orange"
self.plots.append([plot_type, i, ax.step(t, zero, where="post", color=color, zorder=0)[0]])
else:
raise RuntimeError(f"{plot_type} is not implemented yet")
for j, ax in enumerate(axes):
intersections_time = self.find_phases_intersections()
for time in intersections_time:
self.plots_vertical_lines.append(ax.axvline(time, **self.plot_options["vertical_lines"]))
if self.axes[variable][0].bounds:
if self.axes[variable][0].bounds.type == InterpolationType.EACH_FRAME:
ns = self.axes[variable][0].bounds.min.shape[1] - 1
else:
ns = nlp["ns"]
self.axes[variable][0].bounds.check_and_adjust_dimensions(
nb_elements=len(mapping), nb_shooting=ns
)
bounds_min = np.array(
[self.axes[variable][0].bounds.min.evaluate_at(k)[j] for k in range(ns + 1)]
)
bounds_max = np.array(
[self.axes[variable][0].bounds.max.evaluate_at(k)[j] for k in range(ns + 1)]
)
if bounds_min.shape[0] == nlp["ns"]:
bounds_min = np.concatenate((bounds_min, [bounds_min[-1]]))
bounds_max = np.concatenate((bounds_max, [bounds_max[-1]]))
self.plots_bounds.append(
[ax.step(self.t[i], bounds_min, where='post', **self.plot_options["bounds"]), i]
)
self.plots_bounds.append(
[ax.step(self.t[i], bounds_max, where='post', **self.plot_options["bounds"]), i]
)
def __add_new_axis(self, variable, nb, nb_rows, nb_cols):
"""
Sets the axis of the plots.
:param variable: Variable to plot (integer)
:param nb: Number of the figure. ?? (integer)
:param nb_rows: Number of rows of plots in subplots. (integer)
:param nb_cols: Number of columns of plots in subplots. (integer)
:return: axes: Axes of the plots. (instance of subplot class)
"""
if self.automatically_organize:
self.all_figures.append(plt.figure(variable, figsize=(self.width_step / 100, self.height_step / 131)))
else:
self.all_figures.append(plt.figure(variable))
axes = self.all_figures[-1].subplots(nb_rows, nb_cols)
if isinstance(axes, np.ndarray):
axes = axes.flatten()
else:
axes = [axes]
for i in range(nb, len(axes)):
axes[i].remove()
axes = axes[:nb]
idx_center = nb_rows * nb_cols - int(nb_cols / 2) - 1
if idx_center >= len(axes):
idx_center = len(axes) - 1
axes[idx_center].set_xlabel("time (s)")
self.all_figures[-1].tight_layout()
return axes
def _organize_windows(self, nb_windows):
"""
Organizes esthetically the figure.
:param nb_windows: Number of variables to plot. (integer)
"""
self.nb_vertical_windows, self.nb_horizontal_windows = PlotOcp._generate_windows_size(nb_windows)
if self.automatically_organize:
height = tkinter.Tk().winfo_screenheight()
width = tkinter.Tk().winfo_screenwidth()
self.top_margin = height / 15
self.height_step = (height - self.top_margin) / self.nb_horizontal_windows
self.width_step = width / self.nb_vertical_windows
else:
self.top_margin = None
self.height_step = None
self.width_step = None
def find_phases_intersections(self):
"""Finds the intersection between phases"""
return list(accumulate(self.tf))[:-1]
@staticmethod
def show():
plt.show()
def update_data(self, V):
"""Update of the variable V to plot (dependent axis)"""
self.ydata = []
data_states, data_controls, data_param = Data.get_data(
self.ocp, V, get_parameters=True, integrate=True, concatenate=False
)
data_param_in_dyn = np.array([data_param[key] for key in data_param if key != "time"]).squeeze()
for _ in self.ocp.nlp:
if self.t_idx_to_optimize:
for i_in_time, i_in_tf in enumerate(self.t_idx_to_optimize):
self.tf[i_in_tf] = data_param["time"][i_in_time]
self.__update_xdata()
data_states_per_phase, data_controls_per_phase = Data.get_data(self.ocp, V, integrate=True, concatenate=False)
for i, nlp in enumerate(self.ocp.nlp):
step_size = nlp["nb_integration_steps"] + 1
nb_elements = nlp["ns"] * step_size + 1
state = np.ndarray((0, nb_elements))
for s in nlp["var_states"]:
if isinstance(data_states_per_phase[s], (list, tuple)):
state = np.concatenate((state, data_states_per_phase[s][i]))
else:
state = np.concatenate((state, data_states_per_phase[s]))
control = np.ndarray((0, nlp["ns"] + 1))
for s in nlp["var_controls"]:
if isinstance(data_controls_per_phase[s], (list, tuple)):
control = np.concatenate((control, data_controls_per_phase[s][i]))
else:
control = np.concatenate((control, data_controls_per_phase[s]))
if nlp["control_type"] == ControlType.CONSTANT:
u_mod = 1
elif nlp["control_type"] == ControlType.LINEAR_CONTINUOUS:
u_mod = 2
else:
raise NotImplementedError(f"Plotting {nlp['control_type']} is not implemented yet")
for key in self.variable_sizes[i]:
if self.plot_func[key][i].type == PlotType.INTEGRATED:
all_y = []
for idx, t in enumerate(self.t_integrated[i]):
y_tp = np.empty((self.variable_sizes[i][key], len(t)))
y_tp.fill(np.nan)
y_tp[:, :] = self.plot_func[key][i].function(
state[:, step_size * idx : step_size * (idx + 1)],
control[:, idx : idx + u_mod],
data_param_in_dyn,
)
all_y.append(y_tp)
for idx in range(len(self.plot_func[key][i].phase_mappings.map_idx)):
y_tp = []
for y in all_y:
y_tp.append(y[idx, :])
self.__append_to_ydata([y_tp])
else:
y = np.empty((self.variable_sizes[i][key], len(self.t[i])))
y.fill(np.nan)
y[:, :] = self.plot_func[key][i].function(state[:, ::step_size], control, data_param_in_dyn)
self.__append_to_ydata(y)
self.__update_axes()
def __update_xdata(self):
"""Update of the time in plots (independent axis)"""
self.__update_time_vector()
for plot in self.plots:
phase_idx = plot[1]
if plot[0] == PlotType.INTEGRATED:
for cmp, p in enumerate(plot[2]):
p.set_xdata(self.t_integrated[phase_idx][cmp])
ax = plot[2][-1].axes
else:
plot[2].set_xdata(self.t[phase_idx])
ax = plot[2].axes
ax.set_xlim(0, self.t[-1][-1])
if self.plots_bounds:
for plot_bounds in self.plots_bounds:
plot_bounds[0][0].set_xdata(self.t[plot_bounds[1]])
ax = plot_bounds[0][0].axes
ax.set_xlim(0, self.t[-1][-1])
intersections_time = self.find_phases_intersections()
n = len(intersections_time)
if n > 0:
for p in range(int(len(self.plots_vertical_lines) / n)):
for i, time in enumerate(intersections_time):
self.plots_vertical_lines[p * n + i].set_xdata([time, time])
def __append_to_ydata(self, data):
for y in data:
self.ydata.append(y)
def __update_axes(self):
"""Updates axes ranges"""
assert len(self.plots) == len(self.ydata)
for i, plot in enumerate(self.plots):
y = self.ydata[i]
if plot[0] == PlotType.INTEGRATED:
for cmp, p in enumerate(plot[2]):
p.set_ydata(y[cmp])
else:
plot[2].set_ydata(y)
for p in self.plots_vertical_lines:
p.set_ydata((np.nan, np.nan))
for key in self.axes:
if not self.adapt_graph_size_to_bounds:
for i, ax in enumerate(self.axes[key][1]):
if not self.axes[key][0].ylim:
y_max = -np.inf
y_min = np.inf
children_list = [p for p in ax.get_children() if isinstance(p, lines.Line2D)]
for p in children_list[:-2]:
y_min = min(y_min, np.min(p.get_ydata()))
y_max = max(y_max, np.max(p.get_ydata()))
y_range, data_range = self.__compute_ylim(y_min, y_max, 1.25)
ax.set_ylim(y_range)
ax.set_yticks(np.arange(y_range[0], y_range[1], step=data_range / 4,))
for p in self.plots_vertical_lines:
p.set_ydata((0, 1))
@staticmethod
def __compute_ylim(min_val, max_val, threshold):
if np.isnan(min_val) or np.isinf(min_val):
min_val = 0
if np.isnan(max_val) or np.isinf(max_val):
max_val = 1
data_mean = np.mean((min_val, max_val))
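# NOTE: the source is truncated here; the lines below are an assumed completion that
# centers the y-limits on the data mean with the requested margin factor.
data_range = max(max_val - min_val, 1e-8)
half_span = threshold * data_range / 2
return (data_mean - half_span, data_mean + half_span), data_range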
# coding: utf-8
#/*##########################################################################
#
# Copyright (c) 2004-2020 European Synchrotron Radiation Facility
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
########################################################################### */
"""This modules defines a set of background model functions and associated
estimation functions in a format that can be imported into a
:class:`silx.math.fit.FitManager` object.
A background function is a function that you want to add to a regular fit
function prior to fitting the sum of both functions. This is useful, for
instance, if you need to fit multiple gaussian peaks in an array of
measured data points when the measurement is polluted by a background signal.
The models include common background models such as a constant value or a
linear background.
It also includes background computation filters - *strip* and *snip* - that
can extract a more complex low-curvature background signal from a signal with
peaks having higher curvatures.
The source code of this module can serve as a template for defining your
own fit background theories. The minimal skeleton of such a theory definition
file is::
from silx.math.fit.fittheory import FitTheory
def bgfunction1(x, y0, …):
bg_signal = …
return bg_signal
def estimation_function1(x, y):
…
estimated_params = …
constraints = …
return estimated_params, constraints
THEORY = {
'bg_theory_name1': FitTheory(
description='Description of theory 1',
function=bgfunction1,
parameters=('param name 1', 'param name 2', …),
estimate=estimation_function1,
configure=configuration_function1,
derivative=derivative_function1,
is_background=True),
'theory_name_2': …,
}
"""
__authors__ = ["<NAME>"]
__license__ = "MIT"
__date__ = "16/01/2017"
from collections import OrderedDict
import numpy
from silx.math.fit.filters import strip, snip1d,\
savitsky_golay
from silx.math.fit.fittheory import FitTheory
CONFIG = {
"SmoothingFlag": False,
"SmoothingWidth": 5,
"AnchorsFlag": False,
"AnchorsList": [],
"StripWidth": 2,
"StripIterations": 5000,
"StripThresholdFactor": 1.0,
"SnipWidth": 16,
"EstimatePolyOnStrip": True
}
# to avoid costly computations when parameters stay the same
_BG_STRIP_OLDY = numpy.array([])
_BG_STRIP_OLDPARS = [0, 0]
_BG_STRIP_OLDBG = numpy.array([])
# Author: <NAME>
# ENF estimation from video recordings using Rolling Shutter Mechanism
# Import required packages
import numpy as np
import cv2
import pickle
import pyenf
#from scipy import signal, io
import scipy.io.wavfile
import math
from scipy.fftpack import fftshift
import matplotlib.pyplot as plt
import librosa
from skimage.util import img_as_float
from skimage.segmentation import slic
from scipy.stats.stats import pearsonr
from scipy.signal import butter, lfilter, freqz
video_folder = "/home/deeraj/Documents/Projects/pyENF_extraction_rolling_shutter/Recordings/"
power_rec_name = "power_20min.wav"
power_filepath = video_folder + power_rec_name
dup_power_rec_name = "power_20min2.wav"
dup_power_filepath = video_folder + dup_power_rec_name
def correlation_vector(ENF_signal1, ENF_signal2, window_size, shift_size):
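# Slide a window of `window_size` samples across both ENF signals in steps of
# `shift_size`, computing the Pearson correlation of each pair of aligned windows.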
correlation_ENF = []
length_of_signal = min(len(ENF_signal1), len(ENF_signal2))
total_windows = math.ceil(( length_of_signal - window_size + 1) / shift_size)
rho = np.zeros((1, total_windows))
import numpy as np
from ukfm import SO3
import matplotlib.pyplot as plt
class ATTITUDE:
"""3D attitude estimation from an IMU equipped with gyro, accelerometer and
magnetometer. See text description in :cite:`kokUsing2017`, Section IV.
:arg T: sequence time (s).
:arg imu_freq: IMU frequency (Hz).
"""
g = np.array([0, 0, -9.82])
"gravity vector (m/s^2) :math:`\\mathbf{g}`."
b = np.array([0.33, 0, -0.95])
"normed magnetic field in Sweden :math:`\\mathbf{b}`."
class STATE:
"""State of the system.
It represents the orientation of the platform.
.. math::
\\boldsymbol{\\chi} \in \\mathcal{M} = \\left\\{
\\mathbf{C} \in SO(3) \\right\\}
:ivar Rot: rotation matrix :math:`\mathbf{C}`.
"""
def __init__(self, Rot):
self.Rot = Rot
class INPUT:
"""Input of the propagation model.
The input is the gyro measurement from an Inertial Measurement Unit
(IMU).
.. math::
\\boldsymbol{\\omega} \in \\mathcal{U} = \\left\\{
\\mathbf{u} \in \\mathbb R^3 \\right\\}
:ivar gyro: 3D gyro :math:`\mathbf{u}`.
"""
def __init__(self, gyro):
self.gyro = gyro
def __init__(self, T, imu_freq):
# sequence time (s)
self.T = T
# IMU frequency (Hz)
self.imu_freq = imu_freq
# total number of timestamps
self.N = T*imu_freq
# integration step (s)
self.dt = 1/imu_freq
@classmethod
def f(cls, state, omega, w, dt):
""" Propagation function.
.. math::
\\mathbf{C}_{n+1} = \\mathbf{C}_{n}
\\exp\\left(\\left(\\mathbf{u} + \\mathbf{w} \\right)
dt \\right)
:var state: state :math:`\\boldsymbol{\\chi}`.
:var omega: input :math:`\\boldsymbol{\\omega}`.
:var w: noise :math:`\\mathbf{w}`.
:var dt: integration step :math:`dt` (s).
"""
new_state = cls.STATE(
Rot=state.Rot.dot(SO3.exp((omega.gyro + w)*dt)),
)
return new_state
@classmethod
def h(cls, state):
""" Observation function.
.. math::
h\\left(\\boldsymbol{\\chi}\\right) = \\begin{bmatrix}
\\mathbf{C}^T \\mathbf{g} \\\\
\\mathbf{C}^T \\mathbf{b}
\\end{bmatrix}
:var state: state :math:`\\boldsymbol{\\chi}`.
"""
y = np.hstack([state.Rot.T.dot(cls.g),
state.Rot.T.dot(cls.b)])
return y
@classmethod
def phi(cls, state, xi):
"""Retraction.
.. math::
\\varphi\\left(\\boldsymbol{\\chi}, \\boldsymbol{\\xi}\\right) =
\\mathbf{C} \\exp\\left(\\boldsymbol{\\xi}\\right)
The state is viewed as a element :math:`\\boldsymbol{\chi} \\in SO(3)`
with left multiplication.
Its corresponding inverse operation is :meth:`~ukfm.ATTITUDE.phi_inv`.
:var state: state :math:`\\boldsymbol{\\chi}`.
:var xi: state uncertainty :math:`\\boldsymbol{\\xi}`.
"""
new_state = cls.STATE(
Rot=state.Rot.dot(SO3.exp(xi))
)
return new_state
@classmethod
def phi_inv(cls, state, hat_state):
"""Inverse retraction.
.. math::
\\varphi^{-1}_{\\boldsymbol{\\hat{\\chi}}}\\left(\\boldsymbol{\\chi}
\\right) = \\log\\left(
\\boldsymbol{\chi}^{-1} \\boldsymbol{\\hat{\\chi}} \\right)
The state is viewed as a element :math:`\\boldsymbol{\chi} \\in SO(3)`
with left multiplication.
Its corresponding retraction is :meth:`~ukfm.ATTITUDE.phi`.
:var state: state :math:`\\boldsymbol{\\chi}`.
:var hat_state: noise-free state :math:`\\boldsymbol{\hat{\\chi}}`.
"""
xi = SO3.log(state.Rot.T.dot(hat_state.Rot))
return xi
@classmethod
def right_phi(cls, state, xi):
"""Retraction.
.. math::
\\varphi\\left(\\boldsymbol{\\chi}, \\boldsymbol{\\xi}\\right) =
\\exp\\left(\\boldsymbol{\\xi}\\right) \\mathbf{C}
The state is viewed as a element :math:`\\boldsymbol{\chi} \\in SO(3)`
with right multiplication.
Its corresponding inverse operation is
:meth:`~ukfm.ATTITUDE.right_phi_inv`.
:var state: state :math:`\\boldsymbol{\\chi}`.
:var xi: state uncertainty :math:`\\boldsymbol{\\xi}`.
"""
new_state = cls.STATE(
Rot=SO3.exp(xi).dot(state.Rot)
)
return new_state
@classmethod
def right_phi_inv(cls, state, hat_state):
"""Inverse retraction.
.. math::
\\varphi^{-1}_{\\boldsymbol{\\hat{\\chi}}}\\left(\\boldsymbol{\\chi}
\\right) = \\log\\left(
\\boldsymbol{\\hat{\\chi}}\\boldsymbol{\chi}^{-1} \\right)
The state is viewed as a element :math:`\\boldsymbol{\chi} \\in SO(3)`
with right multiplication.
Its corresponding retraction is :meth:`~ukfm.ATTITUDE.right_phi`.
:var state: state :math:`\\boldsymbol{\\chi}`.
:var hat_state: noise-free state :math:`\\boldsymbol{\hat{\\chi}}`.
"""
xi = SO3.log(hat_state.Rot.dot(state.Rot.T))
return xi
@classmethod
def ekf_FG_ana(cls, state, omega, dt):
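# Analytical Jacobians of the propagation model used by the EKF baseline: F is the
# state-error transition matrix, G maps the (body-frame) gyro noise into the state
# error, rotated by the current attitude and scaled by dt.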
F = np.eye(3)
G = dt*state.Rot
return F, G
@classmethod
def ekf_H_ana(cls, state):
H = np.vstack([state.Rot.T.dot(SO3.wedge(cls.g)),
state.Rot.T.dot(SO3.wedge(cls.b))])
return H
def simu_f(self, imu_std):
# The robot is 2 s stationary and then have constant angular velocity
# around gravity
n_T = 0 # increment for angular velocity
omega_T = np.zeros(3) # first velocity (robot is first stationary)
omega_move = np.array([0, 0, 10/180*np.pi])
# set noise to zero to compute true trajectory
w = np.zeros(3)
# init variables at zero and do for loop
omegas = []
states = [self.STATE(Rot=np.eye(3))]
for n in range(1, self.N):
# change true input
if n_T > 2:
omega_T = omega_move
n_T = n_T + self.dt
# true input
omegas.append(self.INPUT(omega_T))
# propagate state
states.append(self.f(states[n-1], omegas[n-1], w, self.dt))
# noisy input
omegas[n-1].gyro = omegas[n-1].gyro + imu_std[0]*np.random.randn(3)
return states, omegas
def simu_h(self, state, imu_std):
# total number of timestamps
y = np.zeros((self.N, 6))
for n in range(self.N):
y[n, :3] = state[n].Rot.T.dot(
self.g + imu_std[1]*np.random.randn(3))
y[n, 3:] = state[n].Rot.T.dot(
self.b + imu_std[2]*np.random.randn(3))
return y
def plot_results(self, hat_states, hat_Ps, states, omegas):
Rots, rpys = self.get_states(states, self.N)
hat_Rots, hat_rpys = self.get_states(hat_states, self.N)
errors = self.errors(Rots, hat_Rots)
t = np.linspace(0, self.T, self.N)
ukf3sigma = 3*np.vstack([np.sqrt(hat_Ps[:, 0, 0]),
np.sqrt(hat_Ps[:, 1, 1]),
np.sqrt(hat_Ps[:, 2, 2])])
fig, ax = plt.subplots(figsize=(10, 6))
ax.set(xlabel='$t$ (s)', ylabel='orientation (deg)',
title='Orientation')
plt.plot(t, 180/np.pi*rpys[:, 0], c='red')
plt.plot(t, 180/np.pi*rpys[:, 1], c='yellow')
plt.plot(t, 180/np.pi*rpys[:, 2], linewidth=2, c='black')
ax.legend([r'roll', 'pitch', 'yaw'])
ax.set_xlim(0, t[-1])
fig, ax = plt.subplots(figsize=(10, 6))
ax.set(xlabel='$t$ (s)', ylabel='Roll error (deg)',
title='Roll error (deg)')
plt.plot(t, 180/np.pi*errors[:, 0], c='blue')
plt.plot(t, 180/np.pi*ukf3sigma[0, :], c='blue', linestyle='dashed')
plt.plot(t, -180/np.pi*ukf3sigma[0, :], c='blue', linestyle='dashed')
ax.legend([r'UKF', r'$3\sigma$ UKF'])
ax.set_xlim(0, t[-1])
fig, ax = plt.subplots(figsize=(9, 6))
ax.set(xlabel='$t$ (s)', ylabel='Pitch error (deg)',
title='Pitch error (deg)')
plt.plot(t, 180/np.pi*errors[:, 1], c='blue')
plt.plot(t, 180/np.pi*ukf3sigma[1, :], c='blue', linestyle='dashed')
plt.plot(t, -180/np.pi*ukf3sigma[1, :], c='blue', linestyle='dashed')
ax.legend([r'UKF', r'$3\sigma$ UKF'])
ax.set_xlim(0, t[-1])
fig, ax = plt.subplots(figsize=(10, 6))
ax.set(xlabel='$t$ (s)', ylabel='Yaw error (deg)',
title='Yaw error (deg)')
plt.plot(t, 180/np.pi*errors[:, 2], c='blue')
plt.plot(t, 180/np.pi*ukf3sigma[2, :], c='blue', linestyle='dashed')
plt.plot(t, -180/np.pi*ukf3sigma[2, :], c='blue', linestyle='dashed')
ax.legend([r'UKF', r'$3\sigma$ UKF'])
ax.set_xlim(0, t[-1])
@classmethod
def get_states(cls, states, N):
Rots = np.zeros((N, 3, 3))
rpys = np.zeros((N, 3))
for n in range(N):
Rots[n] = states[n].Rot
rpys[n] = SO3.to_rpy(states[n].Rot)
return Rots, rpys
@classmethod
def errors(cls, Rots, hat_Rots):
N = Rots.shape[0]
errors = np.zeros((N, 3))
# get true states and estimates, and orientation error
for n in range(N):
errors[n] = SO3.log(Rots[n].dot(hat_Rots[n].T))
return errors
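# The attitude error above is log(R * R_hat^T), a rotation vector whose
# components are plotted later as roll/pitch/yaw errors (a small-angle
# interpretation).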
def benchmark_print(self, left_ukf_err, right_ukf_err, ekf_err):
def rmse(errs):
return np.sqrt(np.mean(errs ** 2))
def f(x):
# average over Monte-Carlo and angles
return np.sqrt(np.mean(np.sum(x ** 2, axis=2), axis=0))
t = np.linspace(0, self.T, self.N)
fig, ax = plt.subplots(figsize=(10, 6))
ax.set(xlabel='$t$ (s)', ylabel='error (deg)',
title="Orientation error(deg)")
plt.plot(t, 180/np.pi*f(left_ukf_err), c='green')
plt.plot(t, 180/np.pi*f(right_ukf_err), c='cyan')
plt.plot(t, 180/np.pi*f(ekf_err), c='red')
ax.legend([r'\textbf{left UKF}', r'\textbf{right UKF}', r'EKF'])
ax.set_ylim(bottom=0)
ax.set_xlim(0, t[-1])
left_ukf_err_rot = '{:.2f}'.format(180/np.pi*rmse(left_ukf_err))
right_ukf_err_rot = '{:.2f}'.format(180/np.pi*rmse(right_ukf_err))
ekf_err_rot = '{:.2f}'.format(180/np.pi*rmse(ekf_err))
print(' ')
print('Root Mean Square Error w.r.t. orientation (deg)')
print(" -left UKF : " + left_ukf_err_rot)
print(" -right UKF : " + right_ukf_err_rot)
print(" -EKF : " + ekf_err_rot)
def nees(self, err, Ps, Rots, name):
neess = np.zeros(self.N)
def err2nees(err, P):
# normalized estimation error squared (NEES): err^T P^{-1} err
return err.dot(np.linalg.inv(P)).dot(err)
# -*- coding: utf-8 -*-
"""
Created on Mon Jun 27 13:53:50 2016
@author: au194693
"""
import numpy as np
import scipy.io as sio
import pandas as pd
from my_settings import *
data = sio.loadmat(data_path + "behavoural_results.mat")["data_all"]
b_df = pd.DataFrame()
for j in range(len(data)):
baseline = data[j, 0].mean()
invol_trials = data[j, 3].squeeze()
if len(invol_trials) == 90:
invol_trials = invol_trials[1:]
error = (np.std(data[j, 3]) * 2 + invol_trials.mean(),
-np.std(data[j, 3]) * 2 + invol_trials.mean())
# Script to crop the images centered on a computed point
import os, glob
import cv2 as cv
import numpy as np
from PIL import Image
from PIL.TiffTags import TAGS
from pprint import pprint
def main():
target_folder = "D:/Research/ModeTransformation/Data/2019_02_19/I1060/"
file_mask = "I1060P*A*" #"I0000A*P040*"
file_extension = ".tif"
results_folder = os.path.join(target_folder, "cropped")
height, width = (512, 640)
crop_height, crop_width = (250, 250) # 160x160
if not os.path.exists(results_folder):
os.makedirs(results_folder)
images_list = glob.glob(os.path.join(target_folder, file_mask+file_extension))
print("Processing the following list of images: ")
print([[x] for x in images_list])
reference_8bit = cv.imread(os.path.join(target_folder, "REF.tif"), cv.IMREAD_GRAYSCALE)
reference = cv.imread(os.path.join(target_folder, "REF.tif"), cv.IMREAD_UNCHANGED)
for itr, item in enumerate(images_list):
print("Processing image: ", item)
image = cv.subtract(cv.imread(item, cv.IMREAD_UNCHANGED), reference)
array = np.array(image)
image_8bit = cv.imread(item, cv.IMREAD_GRAYSCALE)
filtered = cv.subtract(image_8bit, reference_8bit)
# Otsu binary threshold on the background-subtracted image
ret2, binary = cv.threshold(filtered, 4, 255, cv.THRESH_BINARY + cv.THRESH_OTSU)
binary_array = np.array(binary)
# Pad arrays to avoid border inaccuracies
array = np.pad(array, ((128, 128), (160, 160)), 'constant', constant_values=0)
binary_array = np.pad(binary_array, ((128, 128), (160, 160)), 'constant', constant_values=0)
# cv.imshow("filtered", binary_array)
# cv.waitKey(0)
yc, xc = compute_centroid(binary_array)
array_cropped = array[int(xc - crop_height * 0.5):int(xc + crop_height * 0.5),
int(yc - crop_height * 0.5):int(yc + crop_height * 0.5)]
current_file = os.path.splitext(os.path.basename(item))[0]
current_filename = os.path.join(results_folder, current_file+"_crop"+file_extension)
print("Writting image to file: ", current_filename)
cv.imwrite(current_filename, array_cropped)
def compute_centroid(array):
""""Compute centroid of the array"""
X, Y = np.meshgrid(np.linspace(0,array.shape[1]-1, array.shape[1]), np.linspace(0,array.shape[0]-1, array.shape[0]))
X += 0.5
Y += 0.5
centroid_x = np.sum(array * X) / np.sum(array)
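# A possible completion of compute_centroid (assumption, for illustration):
# the y-centroid computed the same way, with both coordinates returned.
#   centroid_y = np.sum(array * Y) / np.sum(array)
#   return centroid_x, centroid_y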
import numpy as np
from scipy.special import gammaln
from math import exp
from collections import Counter
from .crp import CRP
def Z(corpus_s, topic, alpha, beta):
n_vocab = sum([len(x) for x in corpus_s])
t_zm = np.zeros(n_vocab).astype('int')
z_topic = [[] for _ in topic]
z_doc = [[] for _ in topic]
z_tmp = np.zeros((n_vocab, len(topic)))
assigned = np.zeros((len(corpus_s), len(topic)))
n = 0
for i in range(len(corpus_s)):
for d in range(len(corpus_s[i])):
wi = corpus_s[i][d]
for j in range(len(topic)):
lik = (z_topic[j].count(wi) + beta) / (assigned[i, j] + n_vocab * beta)
pri = (len(z_topic[j]) + alpha) / ((len(corpus_s[i]) - 1) + len(topic) * alpha)
z_tmp[n, j] = lik * pri
t_zm[n] = np.random.multinomial(1, (z_tmp[n,:]/sum(z_tmp[n,:]))).argmax()
z_topic[t_zm[n]].append(wi)
z_doc[t_zm[n]].append(i)
assigned[i, t_zm[n]] += 1
n += 1
z_topic = [x for x in z_topic if x != []]
z_doc = [x for x in z_doc if x != []]
return z_topic, z_doc
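# Z() performs one Gibbs-style sweep: each word is assigned to a topic with
# probability proportional to (topic-word likelihood) * (topic prior within
# the document), and empty topics are dropped before returning.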
def most_common(x):
return Counter(x).most_common(1)[0][0]
def CRP_prior(corpus_s, doc, phi):
cp = np.empty((len(corpus_s), len(doc)))
for i, corpus in enumerate(corpus_s):
p_topic = [[x for x in doc[j] if x != i] for j in range(len(doc))]
tmp = CRP(p_topic, phi)
cp[i,:] = tmp[1:]
return cp
def likelihood(corpus_s, topic, eta):
w_m = np.empty((len(corpus_s), len(topic)))
allword_topic = [word for t in topic for word in t]
n_vocab = sum([len(x) for x in corpus_s])
for i, corpus in enumerate(corpus_s):
prob_result = []
for j in range(len(topic)):
current_topic = topic[j]
n_word_topic = len(current_topic)
prev_dominator = 1
later_numerator = 1
prob_word = 1
overlap = [val for val in set(corpus) if val in current_topic]
prev_numerator = gammaln(len(current_topic) - len(overlap) + n_vocab * eta)
later_dominator = gammaln(len(current_topic) + n_vocab * eta)
for word in corpus:
corpus_list = corpus
if current_topic.count(word) - corpus_list.count(word) < 0 :
a = 0
else:
a = current_topic.count(word) - corpus_list.count(word)
prev_dominator += gammaln(a + eta)
later_numerator += gammaln(current_topic.count(word) + eta)
prev = prev_numerator - prev_dominator
later = later_numerator - later_dominator
like = prev + later
w_m[i, j] = like
w_m[i, :] = np.add(w_m[i, :], abs(min(w_m[i, :]))+0.1)
w_m = w_m/w_m.sum(axis = 1)[:, np.newaxis]
return w_m
def post(w_m, c_p):
c_m = (w_m * c_p) / (w_m * c_p).sum(axis = 1)[:, np.newaxis]
return np.array(c_m)
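# post() combines the per-document topic likelihood with the CRP prior and
# renormalizes each row, yielding the posterior topic distribution used to
# sample word assignments in wn() below.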
def wn(c_m, corpus_s):
wn_ass = []
for i, corpus in enumerate(corpus_s):
for word in corpus:
if c_m[i].sum() != 1:
c_m[i] = c_m[i]/c_m[i].sum()
theta = np.random.multinomial(1, c_m[i])
# -*- coding: utf-8 -*-
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from future.builtins import * # NOQA
import math
import os
import pickle
import unittest
from copy import deepcopy
import warnings
import numpy as np
import numpy.ma as ma
from obspy import Stream, Trace, UTCDateTime, __version__, read, read_inventory
from obspy.core import Stats
from obspy.core.compatibility import mock
from obspy.core.util.testing import ImageComparison
from obspy.io.xseed import Parser
class TraceTestCase(unittest.TestCase):
"""
Test suite for obspy.core.trace.Trace.
"""
@staticmethod
def __remove_processing(tr):
"""
Removes all processing information in the trace object.
Useful for testing.
"""
if "processing" not in tr.stats:
return
del tr.stats.processing
def test_init(self):
"""
Tests the __init__ method of the Trace class.
"""
# NumPy ndarray
tr = Trace(data=np.arange(4))
self.assertEqual(len(tr), 4)
# NumPy masked array
data = np.ma.array([0, 1, 2, 3], mask=[True, True, False, False])
tr = Trace(data=data)
self.assertEqual(len(tr), 4)
# other data types will raise
self.assertRaises(ValueError, Trace, data=[0, 1, 2, 3])
self.assertRaises(ValueError, Trace, data=(0, 1, 2, 3))
self.assertRaises(ValueError, Trace, data='1234')
def test_setattr(self):
"""
Tests the __setattr__ method of the Trace class.
"""
# NumPy ndarray
tr = Trace()
tr.data = np.arange(4)
self.assertEqual(len(tr), 4)
# NumPy masked array
tr = Trace()
tr.data = np.ma.array([0, 1, 2, 3], mask=[True, True, False, False])
self.assertEqual(len(tr), 4)
# other data types will raise
tr = Trace()
self.assertRaises(ValueError, tr.__setattr__, 'data', [0, 1, 2, 3])
self.assertRaises(ValueError, tr.__setattr__, 'data', (0, 1, 2, 3))
self.assertRaises(ValueError, tr.__setattr__, 'data', '1234')
def test_len(self):
"""
Tests the __len__ and count methods of the Trace class.
"""
trace = Trace(data=np.arange(1000))
self.assertEqual(len(trace), 1000)
self.assertEqual(trace.count(), 1000)
def test_mul(self):
"""
Tests the __mul__ method of the Trace class.
"""
tr = Trace(data=np.arange(10))
st = tr * 5
self.assertEqual(len(st), 5)
# you may only multiply using an integer
self.assertRaises(TypeError, tr.__mul__, 2.5)
self.assertRaises(TypeError, tr.__mul__, '1234')
def test_div(self):
"""
Tests the __div__ method of the Trace class.
"""
tr = Trace(data=np.arange(1000))
st = tr / 5
self.assertEqual(len(st), 5)
self.assertEqual(len(st[0]), 200)
# you may only divide using an integer
self.assertRaises(TypeError, tr.__div__, 2.5)
self.assertRaises(TypeError, tr.__div__, '1234')
def test_ltrim(self):
"""
Tests the ltrim method of the Trace class.
"""
# set up
trace = Trace(data=np.arange(1000))
start = UTCDateTime(2000, 1, 1, 0, 0, 0, 0)
trace.stats.starttime = start
trace.stats.sampling_rate = 200.0
end = UTCDateTime(2000, 1, 1, 0, 0, 4, 995000)
# verify
trace.verify()
# UTCDateTime/int/float required
self.assertRaises(TypeError, trace._ltrim, '1234')
self.assertRaises(TypeError, trace._ltrim, [1, 2, 3, 4])
# ltrim 100 samples
tr = deepcopy(trace)
tr._ltrim(0.5)
tr.verify()
np.testing.assert_array_equal(tr.data[0:5],
np.array([100, 101, 102, 103, 104]))
self.assertEqual(len(tr.data), 900)
self.assertEqual(tr.stats.npts, 900)
self.assertEqual(tr.stats.sampling_rate, 200.0)
self.assertEqual(tr.stats.starttime, start + 0.5)
self.assertEqual(tr.stats.endtime, end)
# ltrim 202 samples
tr = deepcopy(trace)
tr._ltrim(1.010)
tr.verify()
np.testing.assert_array_equal(tr.data[0:5],
np.array([202, 203, 204, 205, 206]))
self.assertEqual(len(tr.data), 798)
self.assertEqual(tr.stats.npts, 798)
self.assertEqual(tr.stats.sampling_rate, 200.0)
self.assertEqual(tr.stats.starttime, start + 1.010)
self.assertEqual(tr.stats.endtime, end)
# ltrim to UTCDateTime
tr = deepcopy(trace)
tr._ltrim(UTCDateTime(2000, 1, 1, 0, 0, 1, 10000))
tr.verify()
np.testing.assert_array_equal(tr.data[0:5],
np.array([202, 203, 204, 205, 206]))
self.assertEqual(len(tr.data), 798)
self.assertEqual(tr.stats.npts, 798)
self.assertEqual(tr.stats.sampling_rate, 200.0)
self.assertEqual(tr.stats.starttime, start + 1.010)
self.assertEqual(tr.stats.endtime, end)
# some sanity checks
# negative start time as datetime
tr = deepcopy(trace)
tr._ltrim(start - 1, pad=True)
tr.verify()
self.assertEqual(tr.stats.starttime, start - 1)
np.testing.assert_array_equal(trace.data, tr.data[200:])
self.assertEqual(tr.stats.endtime, trace.stats.endtime)
# negative start time as integer
tr = deepcopy(trace)
tr._ltrim(-100, pad=True)
tr.verify()
self.assertEqual(tr.stats.starttime, start - 100)
delta = 100 * trace.stats.sampling_rate
np.testing.assert_array_equal(trace.data, tr.data[int(delta):])
self.assertEqual(tr.stats.endtime, trace.stats.endtime)
# start time > end time
tr = deepcopy(trace)
tr._ltrim(trace.stats.endtime + 100)
tr.verify()
self.assertEqual(tr.stats.starttime,
trace.stats.endtime + 100)
np.testing.assert_array_equal(tr.data, np.empty(0))
self.assertEqual(tr.stats.endtime, tr.stats.starttime)
# start time == end time
tr = deepcopy(trace)
tr._ltrim(5)
tr.verify()
self.assertEqual(tr.stats.starttime,
trace.stats.starttime + 5)
np.testing.assert_array_equal(tr.data, np.empty(0))
self.assertEqual(tr.stats.endtime, tr.stats.starttime)
# start time == end time
tr = deepcopy(trace)
tr._ltrim(5.1)
tr.verify()
self.assertEqual(tr.stats.starttime,
trace.stats.starttime + 5.1)
np.testing.assert_array_equal(tr.data, np.empty(0))
self.assertEqual(tr.stats.endtime, tr.stats.starttime)
def test_rtrim(self):
"""
Tests the rtrim method of the Trace class.
"""
# set up
trace = Trace(data=np.arange(1000))
start = UTCDateTime(2000, 1, 1, 0, 0, 0, 0)
trace.stats.starttime = start
trace.stats.sampling_rate = 200.0
end = UTCDateTime(2000, 1, 1, 0, 0, 4, 995000)
trace.verify()
# UTCDateTime/int/float required
self.assertRaises(TypeError, trace._rtrim, '1234')
self.assertRaises(TypeError, trace._rtrim, [1, 2, 3, 4])
# rtrim 100 samples
tr = deepcopy(trace)
tr._rtrim(0.5)
tr.verify()
np.testing.assert_array_equal(tr.data[-5:],
np.array([895, 896, 897, 898, 899]))
self.assertEqual(len(tr.data), 900)
self.assertEqual(tr.stats.npts, 900)
self.assertEqual(tr.stats.sampling_rate, 200.0)
self.assertEqual(tr.stats.starttime, start)
self.assertEqual(tr.stats.endtime, end - 0.5)
# rtrim 202 samples
tr = deepcopy(trace)
tr._rtrim(1.010)
tr.verify()
np.testing.assert_array_equal(tr.data[-5:],
np.array([793, 794, 795, 796, 797]))
self.assertEqual(len(tr.data), 798)
self.assertEqual(tr.stats.npts, 798)
self.assertEqual(tr.stats.sampling_rate, 200.0)
self.assertEqual(tr.stats.starttime, start)
self.assertEqual(tr.stats.endtime, end - 1.010)
# rtrim 1 minute via UTCDateTime
tr = deepcopy(trace)
tr._rtrim(UTCDateTime(2000, 1, 1, 0, 0, 3, 985000))
tr.verify()
np.testing.assert_array_equal(tr.data[-5:],
np.array([793, 794, 795, 796, 797]))
self.assertEqual(len(tr.data), 798)
self.assertEqual(tr.stats.npts, 798)
self.assertEqual(tr.stats.sampling_rate, 200.0)
self.assertEqual(tr.stats.starttime, start)
self.assertEqual(tr.stats.endtime, end - 1.010)
# some sanity checks
# negative end time
tr = deepcopy(trace)
t = UTCDateTime(1999, 12, 31)
tr._rtrim(t)
tr.verify()
self.assertEqual(tr.stats.endtime, t)
np.testing.assert_array_equal(tr.data, np.empty(0))
# negative end time with given seconds
tr = deepcopy(trace)
tr._rtrim(100)
tr.verify()
self.assertEqual(tr.stats.endtime, trace.stats.endtime - 100)
np.testing.assert_array_equal(tr.data, np.empty(0))
self.assertEqual(tr.stats.endtime, tr.stats.starttime)
# end time > start time
tr = deepcopy(trace)
t = UTCDateTime(2001)
tr._rtrim(t)
tr.verify()
self.assertEqual(tr.stats.endtime, t)
np.testing.assert_array_equal(tr.data, np.empty(0))
self.assertEqual(tr.stats.endtime, tr.stats.starttime)
# end time > start time given seconds
tr = deepcopy(trace)
tr._rtrim(5.1)
tr.verify()
delta = int(math.floor(round(5.1 * trace.stats.sampling_rate, 7)))
endtime = trace.stats.starttime + trace.stats.delta * \
(trace.stats.npts - delta - 1)
self.assertEqual(tr.stats.endtime, endtime)
np.testing.assert_array_equal(tr.data, np.empty(0))
# end time == start time
# returns one sample!
tr = deepcopy(trace)
tr._rtrim(4.995)
tr.verify()
np.testing.assert_array_equal(tr.data, np.array([0]))
self.assertEqual(len(tr.data), 1)
self.assertEqual(tr.stats.npts, 1)
self.assertEqual(tr.stats.sampling_rate, 200.0)
self.assertEqual(tr.stats.starttime, start)
self.assertEqual(tr.stats.endtime, start)
def test_rtrim_with_padding(self):
"""
Tests the _rtrim() method of the Trace class with padding. It has
already been tested in the two sided trimming tests. This is just to
have an explicit test. Also tests issue #429.
"""
# set up
trace = Trace(data=np.arange(10))
start = UTCDateTime(2000, 1, 1, 0, 0, 0, 0)
trace.stats.starttime = start
trace.stats.sampling_rate = 1.0
trace.verify()
# Pad with no fill_value will mask the additional values.
tr = trace.copy()
end = tr.stats.endtime
tr._rtrim(end + 10, pad=True)
self.assertEqual(tr.stats.endtime, trace.stats.endtime + 10)
np.testing.assert_array_equal(tr.data[0:10], np.arange(10))
# Check that the first couple of entries are not masked.
self.assertFalse(tr.data[0:10].mask.any())
# All the other entries should be masked.
self.assertTrue(tr.data[10:].mask.all())
# Pad with fill_value.
tr = trace.copy()
end = tr.stats.endtime
tr._rtrim(end + 10, pad=True, fill_value=-33)
self.assertEqual(tr.stats.endtime, trace.stats.endtime + 10)
# The first ten entries should not have changed.
np.testing.assert_array_equal(tr.data[0:10], np.arange(10))
# The rest should be filled with the fill_value.
np.testing.assert_array_equal(tr.data[10:], np.ones(10) * -33)
def test_trim(self):
"""
Tests the trim method of the Trace class.
"""
# set up
trace = Trace(data=np.arange(1001))
start = UTCDateTime(2000, 1, 1, 0, 0, 0, 0)
trace.stats.starttime = start
trace.stats.sampling_rate = 200.0
end = UTCDateTime(2000, 1, 1, 0, 0, 5, 0)
trace.verify()
# rtrim 100 samples
trace.trim(0.5, 0.5)
trace.verify()
np.testing.assert_array_equal(trace.data[-5:],
np.array([896, 897, 898, 899, 900]))
np.testing.assert_array_equal(trace.data[:5],
np.array([100, 101, 102, 103, 104]))
self.assertEqual(len(trace.data), 801)
self.assertEqual(trace.stats.npts, 801)
self.assertEqual(trace.stats.sampling_rate, 200.0)
self.assertEqual(trace.stats.starttime, start + 0.5)
self.assertEqual(trace.stats.endtime, end - 0.5)
# start time should be before end time
self.assertRaises(ValueError, trace.trim, end, start)
def test_trim_all_does_not_change_dtype(self):
"""
If a Trace is completely trimmed, e.g. no data samples are remaining,
the dtype should remain unchanged.
A trace with no data samples is not really sensible, but the dtype
should not be changed anyway.
"""
# Choose non native dtype.
tr = Trace(np.arange(100, dtype=np.int16))
tr.trim(UTCDateTime(10000), UTCDateTime(20000))
# Assert the result.
self.assertEqual(len(tr.data), 0)
self.assertEqual(tr.data.dtype, np.int16)
def test_add_trace_with_gap(self):
"""
Tests __add__ method of the Trace class.
"""
# set up
tr1 = Trace(data=np.arange(1000))
tr1.stats.sampling_rate = 200
start = UTCDateTime(2000, 1, 1, 0, 0, 0, 0)
tr1.stats.starttime = start
tr2 = Trace(data=np.arange(0, 1000)[::-1])
tr2.stats.sampling_rate = 200
tr2.stats.starttime = start + 10
# verify
tr1.verify()
tr2.verify()
# add
trace = tr1 + tr2
# stats
self.assertEqual(trace.stats.starttime, start)
self.assertEqual(trace.stats.endtime, start + 14.995)
self.assertEqual(trace.stats.sampling_rate, 200)
self.assertEqual(trace.stats.npts, 3000)
# data
self.assertEqual(len(trace), 3000)
self.assertEqual(trace[0], 0)
self.assertEqual(trace[999], 999)
self.assertTrue(ma.is_masked(trace[1000]))
self.assertTrue(ma.is_masked(trace[1999]))
self.assertEqual(trace[2000], 999)
self.assertEqual(trace[2999], 0)
# verify
trace.verify()
def test_add_trace_with_overlap(self):
"""
Tests __add__ method of the Trace class.
"""
# set up
tr1 = Trace(data=np.arange(1000))
tr1.stats.sampling_rate = 200
start = UTCDateTime(2000, 1, 1, 0, 0, 0, 0)
tr1.stats.starttime = start
tr2 = Trace(data=np.arange(0, 1000)[::-1])
tr2.stats.sampling_rate = 200
tr2.stats.starttime = start + 4
# add
trace = tr1 + tr2
# stats
self.assertEqual(trace.stats.starttime, start)
self.assertEqual(trace.stats.endtime, start + 8.995)
self.assertEqual(trace.stats.sampling_rate, 200)
self.assertEqual(trace.stats.npts, 1800)
# data
self.assertEqual(len(trace), 1800)
self.assertEqual(trace[0], 0)
self.assertEqual(trace[799], 799)
self.assertTrue(trace[800].mask)
self.assertTrue(trace[999].mask)
self.assertEqual(trace[1000], 799)
self.assertEqual(trace[1799], 0)
# verify
trace.verify()
def test_add_same_trace(self):
"""
Tests __add__ method of the Trace class.
"""
# set up
tr1 = Trace(data=np.arange(1001))
# add
trace = tr1 + tr1
# should return exactly the same values
self.assertEqual(trace.stats, tr1.stats)
np.testing.assert_array_equal(trace.data, tr1.data)
# verify
trace.verify()
def test_add_trace_within_trace(self):
"""
Tests __add__ method of the Trace class.
"""
# set up
tr1 = Trace(data=np.arange(1001))
tr1.stats.sampling_rate = 200
start = UTCDateTime(2000, 1, 1, 0, 0, 0, 0)
tr1.stats.starttime = start
tr2 = Trace(data=np.arange(201))
tr2.stats.sampling_rate = 200
tr2.stats.starttime = start + 1
# add
trace = tr1 + tr2
# should return exactly the same values as trace 1
self.assertEqual(trace.stats, tr1.stats)
mask = np.zeros(len(tr1)).astype(np.bool_)
mask[200:401] = True
np.testing.assert_array_equal(trace.data.mask, mask)
np.testing.assert_array_equal(trace.data.data[:200], tr1.data[:200])
np.testing.assert_array_equal(trace.data.data[401:], tr1.data[401:])
# add the other way around
trace = tr2 + tr1
# should return exactly the same values as trace 1
self.assertEqual(trace.stats, tr1.stats)
np.testing.assert_array_equal(trace.data.mask, mask)
np.testing.assert_array_equal(trace.data.data[:200], tr1.data[:200])
np.testing.assert_array_equal(trace.data.data[401:], tr1.data[401:])
# verify
trace.verify()
def test_add_gap_and_overlap(self):
"""
Test order of merging traces.
"""
# set up
tr1 = Trace(data=np.arange(1000))
tr1.stats.sampling_rate = 200
start = UTCDateTime(2000, 1, 1, 0, 0, 0, 0)
tr1.stats.starttime = start
tr2 = Trace(data=np.arange(1000)[::-1])
tr2.stats.sampling_rate = 200
tr2.stats.starttime = start + 4
tr3 = Trace(data=np.arange(1000)[::-1])
tr3.stats.sampling_rate = 200
tr3.stats.starttime = start + 12
# overlap
overlap = tr1 + tr2
self.assertEqual(len(overlap), 1800)
mask = np.zeros(1800).astype(np.bool_)
mask[800:1000] = True
np.testing.assert_array_equal(overlap.data.mask, mask)
np.testing.assert_array_equal(overlap.data.data[:800], tr1.data[:800])
np.testing.assert_array_equal(overlap.data.data[1000:], tr2.data[200:])
# overlap + gap
overlap_gap = overlap + tr3
self.assertEqual(len(overlap_gap), 3400)
mask = np.zeros(3400).astype(np.bool_)
mask[800:1000] = True
mask[1800:2400] = True
np.testing.assert_array_equal(overlap_gap.data.mask, mask)
np.testing.assert_array_equal(overlap_gap.data.data[:800],
tr1.data[:800])
np.testing.assert_array_equal(overlap_gap.data.data[1000:1800],
tr2.data[200:])
np.testing.assert_array_equal(overlap_gap.data.data[2400:], tr3.data)
# gap
gap = tr2 + tr3
self.assertEqual(len(gap), 2600)
mask = np.zeros(2600).astype(np.bool_)
mask[1000:1600] = True
np.testing.assert_array_equal(gap.data.mask, mask)
np.testing.assert_array_equal(gap.data.data[:1000], tr2.data)
np.testing.assert_array_equal(gap.data.data[1600:], tr3.data)
def test_add_into_gap(self):
"""
Test __add__ method of the Trace class
Adding a trace that fits perfectly into a gap in a trace
"""
my_array = np.arange(6, dtype=np.int32)
stats = Stats()
stats.network = 'VI'
stats['starttime'] = UTCDateTime(2009, 8, 5, 0, 0, 0)
stats['npts'] = 0
stats['station'] = 'IKJA'
stats['channel'] = 'EHZ'
stats['sampling_rate'] = 1
bigtrace = Trace(data=np.array([], dtype=np.int32), header=stats)
bigtrace_sort = bigtrace.copy()
stats['npts'] = len(my_array)
my_trace = Trace(data=my_array, header=stats)
stats['npts'] = 2
trace1 = Trace(data=my_array[0:2].copy(), header=stats)
stats['starttime'] = UTCDateTime(2009, 8, 5, 0, 0, 2)
trace2 = Trace(data=my_array[2:4].copy(), header=stats)
stats['starttime'] = UTCDateTime(2009, 8, 5, 0, 0, 4)
trace3 = Trace(data=my_array[4:6].copy(), header=stats)
tr1 = bigtrace
tr2 = bigtrace_sort
for method in [0, 1]:
# Random
bigtrace = tr1.copy()
bigtrace = bigtrace.__add__(trace1, method=method)
bigtrace = bigtrace.__add__(trace3, method=method)
bigtrace = bigtrace.__add__(trace2, method=method)
# Sorted
bigtrace_sort = tr2.copy()
bigtrace_sort = bigtrace_sort.__add__(trace1, method=method)
bigtrace_sort = bigtrace_sort.__add__(trace2, method=method)
bigtrace_sort = bigtrace_sort.__add__(trace3, method=method)
for tr in (bigtrace, bigtrace_sort):
self.assertTrue(isinstance(tr, Trace))
self.assertFalse(isinstance(tr.data, np.ma.masked_array))
self.assertTrue((bigtrace_sort.data == my_array).all())
fail_pattern = "\n\tExpected %s\n\tbut got %s"
failinfo = fail_pattern % (my_trace, bigtrace_sort)
failinfo += fail_pattern % (my_trace.data, bigtrace_sort.data)
self.assertEqual(bigtrace_sort, my_trace, failinfo)
failinfo = fail_pattern % (my_array, bigtrace.data)
self.assertTrue((bigtrace.data == my_array).all(), failinfo)
failinfo = fail_pattern % (my_trace, bigtrace)
failinfo += fail_pattern % (my_trace.data, bigtrace.data)
self.assertEqual(bigtrace, my_trace, failinfo)
for array_ in (bigtrace.data, bigtrace_sort.data):
failinfo = fail_pattern % (my_array.dtype, array_.dtype)
self.assertEqual(my_array.dtype, array_.dtype, failinfo)
def test_slice(self):
"""
Tests the slicing of trace objects.
"""
tr = Trace(data=np.arange(10, dtype=np.int32))
mempos = tr.data.ctypes.data
t = tr.stats.starttime
tr1 = tr.slice(t + 2, t + 8)
tr1.data[0] = 10
self.assertEqual(tr.data[2], 10)
self.assertEqual(tr.data.ctypes.data, mempos)
self.assertEqual(tr.data[2:9].ctypes.data, tr1.data.ctypes.data)
self.assertEqual(tr1.data.ctypes.data - 8, mempos)
# Test the processing information for the slicing. The sliced trace
# should have a processing information showing that it has been
# trimmed. The original trace should have nothing.
tr = Trace(data=np.arange(10, dtype=np.int32))
tr2 = tr.slice(tr.stats.starttime)
self.assertNotIn("processing", tr.stats)
self.assertIn("processing", tr2.stats)
self.assertIn("trim", tr2.stats.processing[0])
def test_slice_no_starttime_or_endtime(self):
"""
Tests the slicing of trace objects with no start time or end time
provided. Compares results against the equivalent trim() operation
"""
tr_orig = Trace(data=np.arange(10, dtype=np.int32))
tr = tr_orig.copy()
# two time points outside the trace and two inside
t1 = tr.stats.starttime - 2
t2 = tr.stats.starttime + 2
t3 = tr.stats.endtime - 3
t4 = tr.stats.endtime + 2
# test 1: only removing data at left side
tr_trim = tr_orig.copy()
tr_trim.trim(starttime=t2)
self.assertEqual(tr_trim, tr.slice(starttime=t2))
tr2 = tr.slice(starttime=t2, endtime=t4)
self.__remove_processing(tr_trim)
self.__remove_processing(tr2)
self.assertEqual(tr_trim, tr2)
# test 2: only removing data at right side
tr_trim = tr_orig.copy()
tr_trim.trim(endtime=t3)
self.assertEqual(tr_trim, tr.slice(endtime=t3))
tr2 = tr.slice(starttime=t1, endtime=t3)
self.__remove_processing(tr_trim)
self.__remove_processing(tr2)
self.assertEqual(tr_trim, tr2)
# test 3: not removing data at all
tr_trim = tr_orig.copy()
tr_trim.trim(starttime=t1, endtime=t4)
tr2 = tr.slice()
self.__remove_processing(tr_trim)
self.__remove_processing(tr2)
self.assertEqual(tr_trim, tr2)
tr2 = tr.slice(starttime=t1)
self.__remove_processing(tr_trim)
self.__remove_processing(tr2)
self.assertEqual(tr_trim, tr2)
tr2 = tr.slice(endtime=t4)
self.__remove_processing(tr2)
self.assertEqual(tr_trim, tr2)
tr2 = tr.slice(starttime=t1, endtime=t4)
self.__remove_processing(tr2)
self.assertEqual(tr_trim, tr2)
tr_trim.trim()
tr2 = tr.slice()
self.__remove_processing(tr_trim)
self.__remove_processing(tr2)
self.assertEqual(tr_trim, tr2)
tr2 = tr.slice(starttime=t1)
self.__remove_processing(tr_trim)
self.__remove_processing(tr2)
self.assertEqual(tr_trim, tr2)
tr2 = tr.slice(endtime=t4)
self.__remove_processing(tr_trim)
self.__remove_processing(tr2)
self.assertEqual(tr_trim, tr2)
tr2 = tr.slice(starttime=t1, endtime=t4)
self.__remove_processing(tr_trim)
self.__remove_processing(tr2)
self.assertEqual(tr_trim, tr2)
# test 4: removing data at left and right side
tr_trim = tr_orig.copy()
tr_trim.trim(starttime=t2, endtime=t3)
self.assertEqual(tr_trim, tr.slice(t2, t3))
self.assertEqual(tr_trim, tr.slice(starttime=t2, endtime=t3))
# test 5: no data left after operation
tr_trim = tr_orig.copy()
tr_trim.trim(starttime=t4)
tr2 = tr.slice(starttime=t4)
self.__remove_processing(tr_trim)
self.__remove_processing(tr2)
self.assertEqual(tr_trim, tr2)
tr2 = tr.slice(starttime=t4, endtime=t4 + 1)
self.__remove_processing(tr_trim)
self.__remove_processing(tr2)
self.assertEqual(tr_trim, tr2)
def test_slice_nearest_sample(self):
"""
Tests slicing with the nearest sample flag set to on or off.
"""
tr = Trace(data=np.arange(6))
# Samples at:
# 0 10 20 30 40 50
tr.stats.sampling_rate = 0.1
# Nearest sample flag defaults to true.
tr2 = tr.slice(UTCDateTime(4), UTCDateTime(44))
self.assertEqual(tr2.stats.starttime, UTCDateTime(0))
self.assertEqual(tr2.stats.endtime, UTCDateTime(40))
tr2 = tr.slice(UTCDateTime(8), UTCDateTime(48))
self.assertEqual(tr2.stats.starttime, UTCDateTime(10))
self.assertEqual(tr2.stats.endtime, UTCDateTime(50))
# Setting it to False changes the returned values.
tr2 = tr.slice(UTCDateTime(4), UTCDateTime(44), nearest_sample=False)
self.assertEqual(tr2.stats.starttime, UTCDateTime(10))
self.assertEqual(tr2.stats.endtime, UTCDateTime(40))
tr2 = tr.slice(UTCDateTime(8), UTCDateTime(48), nearest_sample=False)
self.assertEqual(tr2.stats.starttime, UTCDateTime(10))
self.assertEqual(tr2.stats.endtime, UTCDateTime(40))
def test_trim_floating_point(self):
"""
Tests the slicing of trace objects.
"""
# Create test array that allows for easy testing.
tr = Trace(data=np.arange(11))
org_stats = deepcopy(tr.stats)
org_data = deepcopy(tr.data)
# Save memory position of array.
mem_pos = tr.data.ctypes.data
# Just some sanity tests.
self.assertEqual(tr.stats.starttime, UTCDateTime(0))
self.assertEqual(tr.stats.endtime, UTCDateTime(10))
# Create temp trace object used for testing.
st = tr.stats.starttime
# This is supposed to include the start and end times and should
# therefore cut right at 2 and 8.
temp = deepcopy(tr)
temp.trim(st + 2.1, st + 7.1)
# Should be identical.
temp2 = deepcopy(tr)
temp2.trim(st + 2.0, st + 8.0)
self.assertEqual(temp.stats.starttime, UTCDateTime(2))
self.assertEqual(temp.stats.endtime, UTCDateTime(7))
self.assertEqual(temp.stats.npts, 6)
self.assertEqual(temp2.stats.npts, 7)
# self.assertEqual(temp.stats, temp2.stats)
np.testing.assert_array_equal(temp.data, temp2.data[:-1])
# Create test array that allows for easy testing.
# Check if the data is the same.
self.assertNotEqual(temp.data.ctypes.data, tr.data[2:9].ctypes.data)
np.testing.assert_array_equal(tr.data[2:8], temp.data)
# Using out of bounds times should not do anything but create
# a copy of the stats.
temp = deepcopy(tr)
temp.trim(st - 2.5, st + 200)
# The start and end times should not change.
self.assertEqual(temp.stats.starttime, UTCDateTime(0))
self.assertEqual(temp.stats.endtime, UTCDateTime(10))
self.assertEqual(temp.stats.npts, 11)
# Alter the new stats to make sure the old one stays intact.
temp.stats.starttime = UTCDateTime(1000)
self.assertEqual(org_stats, tr.stats)
# Check if the data address is not the same, that is it is a copy
self.assertNotEqual(temp.data.ctypes.data, tr.data.ctypes.data)
np.testing.assert_array_equal(tr.data, temp.data)
# Make sure the original Trace object did not change.
np.testing.assert_array_equal(tr.data, org_data)
self.assertEqual(tr.data.ctypes.data, mem_pos)
self.assertEqual(tr.stats, org_stats)
# Use more complicated times and sampling rate.
tr = Trace(data=np.arange(111))
tr.stats.starttime = UTCDateTime(111.11111)
tr.stats.sampling_rate = 50.0
org_stats = deepcopy(tr.stats)
org_data = deepcopy(tr.data)
# Save memory position of array.
mem_pos = tr.data.ctypes.data
# Create temp trace object used for testing.
temp = deepcopy(tr)
temp.trim(UTCDateTime(111.22222), UTCDateTime(112.99999),
nearest_sample=False)
# Should again be identical. XXX NOT!
temp2 = deepcopy(tr)
temp2.trim(UTCDateTime(111.21111), UTCDateTime(113.01111),
nearest_sample=False)
np.testing.assert_array_equal(temp.data, temp2.data[1:-1])
# Check stuff.
self.assertEqual(temp.stats.starttime, UTCDateTime(111.23111))
self.assertEqual(temp.stats.endtime, UTCDateTime(112.991110))
# Check if the data is the same.
temp = deepcopy(tr)
temp.trim(UTCDateTime(0), UTCDateTime(1000 * 1000))
self.assertNotEqual(temp.data.ctypes.data, tr.data.ctypes.data)
# starttime must be in conformance with sampling rate
t = UTCDateTime(111.11111)
self.assertEqual(temp.stats.starttime, t)
delta = int((tr.stats.starttime - t) * tr.stats.sampling_rate + .5)
np.testing.assert_array_equal(tr.data, temp.data[delta:delta + 111])
# Make sure the original Trace object did not change.
np.testing.assert_array_equal(tr.data, org_data)
self.assertEqual(tr.data.ctypes.data, mem_pos)
self.assertEqual(tr.stats, org_stats)
def test_trim_floating_point_with_padding_1(self):
"""
Tests the slicing of trace objects with the use of the padding option.
"""
# Create test array that allows for easy testing.
tr = Trace(data=np.arange(11))
org_stats = deepcopy(tr.stats)
org_data = deepcopy(tr.data)
# Save memory position of array.
mem_pos = tr.data.ctypes.data
# Just some sanity tests.
self.assertEqual(tr.stats.starttime, UTCDateTime(0))
self.assertEqual(tr.stats.endtime, UTCDateTime(10))
# Create temp trace object used for testing.
st = tr.stats.starttime
# Using out of bounds times should not do anything but create
# a copy of the stats.
temp = deepcopy(tr)
temp.trim(st - 2.5, st + 200, pad=True)
self.assertEqual(temp.stats.starttime.timestamp, -2.0)
self.assertEqual(temp.stats.endtime.timestamp, 200)
self.assertEqual(temp.stats.npts, 203)
mask = np.zeros(203).astype(np.bool_)
mask[:2] = True
mask[13:] = True
np.testing.assert_array_equal(temp.data.mask, mask)
# Alter the new stats to make sure the old one stays intact.
temp.stats.starttime = UTCDateTime(1000)
self.assertEqual(org_stats, tr.stats)
# Check if the data address is not the same, that is it is a copy
self.assertNotEqual(temp.data.ctypes.data, tr.data.ctypes.data)
np.testing.assert_array_equal(tr.data, temp.data[2:13])
# Make sure the original Trace object did not change.
np.testing.assert_array_equal(tr.data, org_data)
self.assertEqual(tr.data.ctypes.data, mem_pos)
self.assertEqual(tr.stats, org_stats)
def test_trim_floating_point_with_padding_2(self):
"""
Use more complicated times and sampling rate.
"""
tr = Trace(data=np.arange(111))
tr.stats.starttime = UTCDateTime(111.11111)
tr.stats.sampling_rate = 50.0
org_stats = deepcopy(tr.stats)
org_data = deepcopy(tr.data)
# Save memory position of array.
mem_pos = tr.data.ctypes.data
# Create temp trace object used for testing.
temp = deepcopy(tr)
temp.trim(UTCDateTime(111.22222), UTCDateTime(112.99999),
nearest_sample=False)
# Should again be identical.#XXX not
temp2 = deepcopy(tr)
temp2.trim(UTCDateTime(111.21111), UTCDateTime(113.01111),
nearest_sample=False)
np.testing.assert_array_equal(temp.data, temp2.data[1:-1])
# Check stuff.
self.assertEqual(temp.stats.starttime, UTCDateTime(111.23111))
self.assertEqual(temp.stats.endtime, UTCDateTime(112.991110))
# Check if the data is the same.
temp = deepcopy(tr)
temp.trim(UTCDateTime(0), UTCDateTime(1000 * 1000), pad=True)
self.assertNotEqual(temp.data.ctypes.data, tr.data.ctypes.data)
# starttime must be in conformance with sampling rate
t = UTCDateTime(1969, 12, 31, 23, 59, 59, 991110)
self.assertEqual(temp.stats.starttime, t)
delta = int((tr.stats.starttime - t) * tr.stats.sampling_rate + .5)
np.testing.assert_array_equal(tr.data, temp.data[delta:delta + 111])
# Make sure the original Trace object did not change.
np.testing.assert_array_equal(tr.data, org_data)
self.assertEqual(tr.data.ctypes.data, mem_pos)
self.assertEqual(tr.stats, org_stats)
def test_add_sanity(self):
"""
Test sanity checks in __add__ method of the Trace object.
"""
tr = Trace(data=np.arange(10))
# you may only add a Trace object
self.assertRaises(TypeError, tr.__add__, 1234)
self.assertRaises(TypeError, tr.__add__, '1234')
self.assertRaises(TypeError, tr.__add__, [1, 2, 3, 4])
# trace id
tr2 = Trace()
tr2.stats.station = 'TEST'
self.assertRaises(TypeError, tr.__add__, tr2)
# sample rate
tr2 = Trace()
tr2.stats.sampling_rate = 20
self.assertRaises(TypeError, tr.__add__, tr2)
# calibration factor
tr2 = Trace()
tr2.stats.calib = 20
self.assertRaises(TypeError, tr.__add__, tr2)
# data type
tr2 = Trace()
tr2.data = np.arange(10, dtype=np.float32)
self.assertRaises(TypeError, tr.__add__, tr2)
def test_add_overlaps_default_method(self):
"""
Test __add__ method of the Trace object.
"""
# 1
# overlapping trace with differing data
# Trace 1: 0000000
# Trace 2: 1111111
tr1 = Trace(data=np.zeros(7))
tr2 = Trace(data=np.ones(7))
tr2.stats.starttime = tr1.stats.starttime + 5
# 1 + 2 : 00000--11111
tr = tr1 + tr2
self.assertTrue(isinstance(tr.data, np.ma.masked_array))
self.assertEqual(tr.data.tolist(),
[0, 0, 0, 0, 0, None, None, 1, 1, 1, 1, 1])
# 2 + 1 : 00000--11111
tr = tr2 + tr1
self.assertTrue(isinstance(tr.data, np.ma.masked_array))
self.assertEqual(tr.data.tolist(),
[0, 0, 0, 0, 0, None, None, 1, 1, 1, 1, 1])
# 2
# overlapping trace with same data
# Trace 1: 0000000
# Trace 2: 0000000
tr1 = Trace(data=np.zeros(7))
tr2 = Trace(data=np.zeros(7))
tr2.stats.starttime = tr1.stats.starttime + 5
# 1 + 2 : 000000000000
tr = tr1 + tr2
self.assertTrue(isinstance(tr.data, np.ndarray))
np.testing.assert_array_equal(tr.data, np.zeros(12))
# 2 + 1 : 000000000000
tr = tr2 + tr1
self.assertTrue(isinstance(tr.data, np.ndarray))
np.testing.assert_array_equal(tr.data, np.zeros(12))
# 3
# contained trace with same data
# Trace 1: 1111111111
# Trace 2: 11
tr1 = Trace(data=np.ones(10))
tr2 = Trace(data=np.ones(2))
tr2.stats.starttime = tr1.stats.starttime + 5
# 1 + 2 : 1111111111
tr = tr1 + tr2
self.assertTrue(isinstance(tr.data, np.ndarray))
np.testing.assert_array_equal(tr.data, np.ones(10))
# 2 + 1 : 1111111111
tr = tr2 + tr1
self.assertTrue(isinstance(tr.data, np.ndarray))
np.testing.assert_array_equal(tr.data, np.ones(10))
# 4
# contained trace with differing data
# Trace 1: 0000000000
# Trace 2: 11
tr1 = Trace(data=np.zeros(10))
tr2 = Trace(data=np.ones(2))
tr2.stats.starttime = tr1.stats.starttime + 5
# 1 + 2 : 00000--000
tr = tr1 + tr2
self.assertTrue(isinstance(tr.data, np.ma.masked_array))
self.assertEqual(tr.data.tolist(),
[0, 0, 0, 0, 0, None, None, 0, 0, 0])
# 2 + 1 : 00000--000
tr = tr2 + tr1
self.assertTrue(isinstance(tr.data, np.ma.masked_array))
self.assertEqual(tr.data.tolist(),
[0, 0, 0, 0, 0, None, None, 0, 0, 0])
# 5
# completely contained trace with same data until end
# Trace 1: 1111111111
# Trace 2: 1111111111
tr1 = Trace(data=np.ones(10))
tr2 = Trace(data=np.ones(10))
# 1 + 2 : 1111111111
tr = tr1 + tr2
self.assertTrue(isinstance(tr.data, np.ndarray))
np.testing.assert_array_equal(tr.data, np.ones(10))
# 6
# completely contained trace with differing data
# Trace 1: 0000000000
# Trace 2: 1111111111
tr1 = Trace(data=np.zeros(10))
tr2 = Trace(data=np.ones(10))
# 1 + 2 : ----------
tr = tr1 + tr2
self.assertTrue(isinstance(tr.data, np.ma.masked_array))
self.assertEqual(tr.data.tolist(), [None] * 10)
def test_add_with_different_sampling_rates(self):
"""
Test __add__ method of the Trace object.
"""
# 1 - different sampling rates for the same channel should fail
tr1 = Trace(data=np.zeros(5))
tr1.stats.sampling_rate = 200
tr2 = Trace(data=np.zeros(5))
tr2.stats.sampling_rate = 50
self.assertRaises(TypeError, tr1.__add__, tr2)
self.assertRaises(TypeError, tr2.__add__, tr1)
# 2 - different sampling rates for the different channels works
tr1 = Trace(data=np.zeros(5))
tr1.stats.sampling_rate = 200
tr1.stats.channel = 'EHE'
tr2 = Trace(data=np.zeros(5))
tr2.stats.sampling_rate = 50
tr2.stats.channel = 'EHZ'
tr3 = Trace(data=np.zeros(5))
tr3.stats.sampling_rate = 200
tr3.stats.channel = 'EHE'
tr4 = Trace(data=np.zeros(5))
tr4.stats.sampling_rate = 50
tr4.stats.channel = 'EHZ'
# same sampling rate and ids should not fail
tr1 + tr3
tr3 + tr1
tr2 + tr4
tr4 + tr2
def test_add_with_different_datatypes_or_id(self):
"""
Test __add__ method of the Trace object.
"""
# 1 - different data types for the same channel should fail
tr1 = Trace(data=np.zeros(5, dtype=np.int32))
tr2 = Trace(data=np.zeros(5, dtype=np.float32))
self.assertRaises(TypeError, tr1.__add__, tr2)
self.assertRaises(TypeError, tr2.__add__, tr1)
# 2 - different sampling rates for the different channels works
tr1 = Trace(data=np.zeros(5, dtype=np.int32))
tr1.stats.channel = 'EHE'
tr2 = Trace(data=np.zeros(5, dtype=np.float32))
tr2.stats.channel = 'EHZ'
tr3 = Trace(data=np.zeros(5, dtype=np.int32))
tr3.stats.channel = 'EHE'
tr4 = Trace(data=np.zeros(5, dtype=np.float32))
tr4.stats.channel = 'EHZ'
# same data types and ids should not fail
tr1 + tr3
tr3 + tr1
tr2 + tr4
tr4 + tr2
# adding traces with different ids should raise
self.assertRaises(TypeError, tr1.__add__, tr2)
self.assertRaises(TypeError, tr3.__add__, tr4)
self.assertRaises(TypeError, tr2.__add__, tr1)
self.assertRaises(TypeError, tr4.__add__, tr3)
def test_comparisons(self):
"""
Tests all rich comparison operators (==, !=, <, <=, >, >=).
The latter four are not implemented due to their ambiguous meaning and
raise an error.
"""
# create test traces
tr0 = Trace(np.arange(3))
tr1 = Trace(np.arange(3))
tr2 = Trace(np.arange(3), {'station': 'X'})
tr3 = Trace(np.arange(3), {'processing':
["filter:lowpass:{'freq': 10}"]})
tr4 = Trace(np.arange(5))
tr5 = Trace(np.arange(5), {'station': 'X'})
tr6 = Trace(np.arange(5), {'processing':
["filter:lowpass:{'freq': 10}"]})
tr7 = Trace(np.array([1, 1, 1]))
# tests that should raise a NotImplementedError (i.e. <=, <, >=, >)
self.assertRaises(NotImplementedError, tr1.__lt__, tr1)
self.assertRaises(NotImplementedError, tr1.__le__, tr1)
self.assertRaises(NotImplementedError, tr1.__gt__, tr1)
self.assertRaises(NotImplementedError, tr1.__ge__, tr1)
self.assertRaises(NotImplementedError, tr1.__lt__, tr2)
self.assertRaises(NotImplementedError, tr1.__le__, tr2)
self.assertRaises(NotImplementedError, tr1.__gt__, tr2)
self.assertRaises(NotImplementedError, tr1.__ge__, tr2)
# normal tests
self.assertEqual(tr0 == tr0, True)
self.assertEqual(tr0 == tr1, True)
self.assertEqual(tr0 == tr2, False)
self.assertEqual(tr0 == tr3, False)
self.assertEqual(tr0 == tr4, False)
self.assertEqual(tr0 == tr5, False)
self.assertEqual(tr0 == tr6, False)
self.assertEqual(tr0 == tr7, False)
self.assertEqual(tr5 == tr0, False)
self.assertEqual(tr5 == tr1, False)
self.assertEqual(tr5 == tr2, False)
self.assertEqual(tr5 == tr3, False)
self.assertEqual(tr5 == tr4, False)
self.assertEqual(tr5 == tr5, True)
self.assertEqual(tr5 == tr6, False)
self.assertEqual(tr3 == tr6, False)
self.assertEqual(tr0 != tr0, False)
self.assertEqual(tr0 != tr1, False)
self.assertEqual(tr0 != tr2, True)
self.assertEqual(tr0 != tr3, True)
self.assertEqual(tr0 != tr4, True)
self.assertEqual(tr0 != tr5, True)
self.assertEqual(tr0 != tr6, True)
self.assertEqual(tr0 != tr7, True)
self.assertEqual(tr5 != tr0, True)
self.assertEqual(tr5 != tr1, True)
self.assertEqual(tr5 != tr2, True)
self.assertEqual(tr5 != tr3, True)
self.assertEqual(tr5 != tr4, True)
self.assertEqual(tr5 != tr5, False)
self.assertEqual(tr5 != tr6, True)
self.assertEqual(tr3 != tr6, True)
# some weirder tests against non-Trace objects
for object in [0, 1, 0.0, 1.0, "", "test", True, False, [], [tr0],
set(), set(tr0), {}, {"test": "test"}, [], None, ]:
self.assertEqual(tr0 == object, False)
self.assertEqual(tr0 != object, True)
def test_nearest_sample(self):
"""
This test case shows that libmseed is actually flooring the
starttime to the next sample value, regardless of whether it is the nearest
sample. The flag nearest_sample=True tries to avoid this and
rounds it to the next actual possible sample point.
"""
# set up
trace = Trace(data=np.empty(10000))
trace.stats.starttime = UTCDateTime("2010-06-20T20:19:40.000000Z")
trace.stats.sampling_rate = 200.0
# ltrim
tr = deepcopy(trace)
t = UTCDateTime("2010-06-20T20:19:51.494999Z")
tr._ltrim(t - 3, nearest_sample=True)
# see that it is actually rounded to the next sample point
self.assertEqual(tr.stats.starttime,
UTCDateTime("2010-06-20T20:19:48.495000Z"))
# Lots of tests follow that thoroughly check the cutting behavior
# using nearest_sample=True/False
# rtrim
tr = deepcopy(trace)
t = UTCDateTime("2010-06-20T20:19:51.494999Z")
tr._rtrim(t + 7, nearest_sample=True)
# see that it is actually rounded to the next sample point
self.assertEqual(tr.stats.endtime,
UTCDateTime("2010-06-20T20:19:58.495000Z"))
tr = deepcopy(trace)
t = UTCDateTime("2010-06-20T20:19:51.495000Z")
tr._rtrim(t + 7, nearest_sample=True)
# see that it is actually rounded to the next sample point
self.assertEqual(tr.stats.endtime,
UTCDateTime("2010-06-20T20:19:58.495000Z"))
tr = deepcopy(trace)
t = UTCDateTime("2010-06-20T20:19:51.495111Z")
tr._rtrim(t + 7, nearest_sample=True)
# see that it is actually rounded to the next sample point
self.assertEqual(tr.stats.endtime,
UTCDateTime("2010-06-20T20:19:58.495000Z"))
tr = deepcopy(trace)
t = UTCDateTime("2010-06-20T20:19:51.497501Z")
tr._rtrim(t + 7, nearest_sample=True)
# see that it is actually rounded to the next sample point
self.assertEqual(tr.stats.endtime,
UTCDateTime("2010-06-20T20:19:58.500000Z"))
# rtrim
tr = deepcopy(trace)
t = UTCDateTime("2010-06-20T20:19:51.494999Z")
tr._rtrim(t + 7, nearest_sample=False)
# see that it is actually rounded to the next sample point
self.assertEqual(tr.stats.endtime,
UTCDateTime("2010-06-20T20:19:58.490000Z"))
tr = deepcopy(trace)
t = UTCDateTime("2010-06-20T20:19:51.495000Z")
tr._rtrim(t + 7, nearest_sample=False)
# see that it is actually rounded to the next sample point
self.assertEqual(tr.stats.endtime,
UTCDateTime("2010-06-20T20:19:58.495000Z"))
tr = deepcopy(trace)
t = UTCDateTime("2010-06-20T20:19:51.495111Z")
tr._rtrim(t + 7, nearest_sample=False)
# see that it is actually rounded to the next sample point
self.assertEqual(tr.stats.endtime,
UTCDateTime("2010-06-20T20:19:58.495000Z"))
tr = deepcopy(trace)
t = UTCDateTime("2010-06-20T20:19:51.497500Z")
tr._rtrim(t + 7, nearest_sample=False)
# see that it is actually rounded to the next sample point
self.assertEqual(tr.stats.endtime,
UTCDateTime("2010-06-20T20:19:58.495000Z"))
def test_masked_array_to_string(self):
"""
Masked arrays should be marked using __str__.
"""
st = read()
overlaptrace = st[0].copy()
overlaptrace.stats.starttime += 1
st.append(overlaptrace)
st.merge()
out = st[0].__str__()
self.assertTrue(out.endswith('(masked)'))
def test_detrend(self):
"""
Test detrend method of trace
"""
t = np.arange(10)
data = 0.1 * t + 1.
tr = Trace(data=data.copy())
tr.detrend(type='simple')
np.testing.assert_array_almost_equal(tr.data, np.zeros(10))
tr.data = data.copy()
tr.detrend(type='linear')
np.testing.assert_array_almost_equal(tr.data, np.zeros(10))
data = np.zeros(10)
data[3:7] = 1.
tr.data = data.copy()
tr.detrend(type='simple')
np.testing.assert_almost_equal(tr.data[0], 0.)
np.testing.assert_almost_equal(tr.data[-1], 0.)
tr.data = data.copy()
tr.detrend(type='linear')
np.testing.assert_almost_equal(tr.data[0], -0.4)
np.testing.assert_almost_equal(tr.data[-1], -0.4)
def test_differentiate(self):
"""
Test differentiation method of trace
"""
t = np.linspace(0., 1., 11)
data = 0.1 * t + 1.
tr = Trace(data=data)
tr.stats.delta = 0.1
tr.differentiate(method='gradient')
np.testing.assert_array_almost_equal(tr.data, np.ones(11) * 0.1)
def test_integrate(self):
"""
Test integration method of trace
"""
data = np.ones(101) * 0.01
tr = Trace(data=data)
tr.stats.delta = 0.1
tr.integrate()
# Assert time and length of resulting array.
self.assertEqual(tr.stats.starttime, UTCDateTime(0))
self.assertEqual(tr.stats.npts, 101)
np.testing.assert_array_almost_equal(
tr.data, np.concatenate([[0.0], np.cumsum(data)[:-1] * 0.1]))
def test_issue_317(self):
"""
Tests times after breaking a stream into parts and merging it again.
"""
# create a sample trace
org_trace = Trace(data=np.arange(22487))
org_trace.stats.starttime = UTCDateTime()
org_trace.stats.sampling_rate = 0.999998927116
num_pakets = 10
# break org_trace into set of contiguous packet data
traces = []
packet_length = int(np.size(org_trace.data) / num_pakets)
delta_time = org_trace.stats.delta
tstart = org_trace.stats.starttime
tend = tstart + delta_time * float(packet_length - 1)
for i in range(num_pakets):
tr = Trace(org_trace.data, org_trace.stats)
tr = tr.slice(tstart, tend)
traces.append(tr)
tstart = tr.stats.endtime + delta_time
tend = tstart + delta_time * float(packet_length - 1)
# reconstruct original trace by adding together packet traces
sum_trace = traces[0].copy()
npts = traces[0].stats.npts
for i in range(1, len(traces)):
sum_trace = sum_trace.__add__(traces[i].copy(), method=0,
interpolation_samples=0,
fill_value='latest',
sanity_checks=True)
# check npts
self.assertEqual(traces[i].stats.npts, npts)
self.assertEqual(sum_trace.stats.npts, (i + 1) * npts)
# check data
np.testing.assert_array_equal(traces[i].data,
np.arange(i * npts, (i + 1) * npts))
np.testing.assert_array_equal(sum_trace.data,
np.arange(0, (i + 1) * npts))
# check delta
self.assertEqual(traces[i].stats.delta, org_trace.stats.delta)
self.assertEqual(sum_trace.stats.delta, org_trace.stats.delta)
# check sampling rates
self.assertAlmostEqual(traces[i].stats.sampling_rate,
org_trace.stats.sampling_rate)
self.assertAlmostEqual(sum_trace.stats.sampling_rate,
org_trace.stats.sampling_rate)
# check end times
self.assertEqual(traces[i].stats.endtime, sum_trace.stats.endtime)
def test_verify(self):
"""
Tests verify method.
"""
# empty Trace
tr = Trace()
tr.verify()
# Trace with a single sample (issue #357)
tr = Trace(data=np.array([1]))
tr.verify()
# example Trace
tr = read()[0]
tr.verify()
def test_percent_in_str(self):
"""
Tests if __str__ method is working with percent sign (%).
"""
tr = Trace()
tr.stats.station = '%t3u'
self.assertTrue(tr.__str__().startswith(".%t3u.. | 1970"))
def test_taper(self):
"""
Test taper method of trace
"""
data = np.ones(10)
tr = Trace(data=data)
tr.taper(max_percentage=0.05, type='cosine')
for i in range(len(data)):
self.assertLessEqual(tr.data[i], 1.)
self.assertGreaterEqual(tr.data[i], 0.)
def test_taper_onesided(self):
"""
Test onesided taper method of trace
"""
data = np.ones(11)
tr = Trace(data=data)
# overlong taper - raises UserWarning - ignoring
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always", UserWarning)
tr.taper(max_percentage=None, side="left")
self.assertEqual(len(w), 1)
self.assertEqual(w[0].category, UserWarning)
self.assertTrue(tr.data[:5].sum() < 5.)
self.assertEqual(tr.data[6:].sum(), 5.)
data = np.ones(11)
tr = Trace(data=data)
# overlong taper - raises UserWarning - ignoring
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always", UserWarning)
tr.taper(max_percentage=None, side="right")
self.assertEqual(len(w), 1)
self.assertEqual(w[0].category, UserWarning)
self.assertEqual(tr.data[:5].sum(), 5.)
self.assertTrue(tr.data[6:].sum() < 5.)
def test_taper_length(self):
npts = 11
type_ = "hann"
data = np.ones(npts)
tr = Trace(data=data, header={'sampling': 1.})
# test an overlong taper request, still works but raises UserWarning
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always", UserWarning)
tr.taper(max_percentage=0.7, max_length=int(npts / 2) + 1)
self.assertEqual(len(w), 1)
self.assertEqual(w[0].category, UserWarning)
data = np.ones(npts)
tr = Trace(data=data, header={'sampling': 1.})
# first 3 samples get tapered
tr.taper(max_percentage=None, type=type_, side="left", max_length=3)
# last 5 samples get tapered
tr.taper(max_percentage=0.5, type=type_, side="right", max_length=None)
self.assertTrue(np.all(tr.data[:3] < 1.))
self.assertTrue(np.all(tr.data[3:6] == 1.))
self.assertTrue(np.all(tr.data[6:] < 1.))
data = np.ones(npts)
tr = Trace(data=data, header={'sampling': 1.})
# first 3 samples get tapered
tr.taper(max_percentage=0.5, type=type_, side="left", max_length=3)
# last 3 samples get tapered
tr.taper(max_percentage=0.3, type=type_, side="right", max_length=5)
self.assertTrue(np.all(tr.data[:3] < 1.))
self.assertTrue(np.all(tr.data[3:8] == 1.))
self.assertTrue(np.all(tr.data[8:] < 1.))
def test_times(self):
"""
Test if the correct times array is returned for normal traces and
traces with gaps.
"""
tr = Trace(data=np.ones(100))
tr.stats.sampling_rate = 20
delta = tr.stats.delta
start = UTCDateTime(2000, 1, 1, 0, 0, 0, 0)
tr.stats.starttime = start
tm = tr.times()
self.assertAlmostEqual(tm[-1], tr.stats.endtime - tr.stats.starttime)
tr.data = np.ma.ones(100)
tr.data[30:40] = np.ma.masked
tm = tr.times()
self.assertTrue(np.alltrue(tr.data.mask == tm.mask))
# test relative with reftime
tr.data = np.ones(100)
shift = 9.5
reftime = start - shift
got = tr.times(reftime=reftime)
self.assertEqual(len(got), tr.stats.npts)
expected = np.arange(shift, shift + 4.5 * delta, delta)
np.testing.assert_allclose(got[:5], expected, rtol=1e-8)
# test other options
got = tr.times("utcdatetime")
expected = np.array([
UTCDateTime(2000, 1, 1, 0, 0),
UTCDateTime(2000, 1, 1, 0, 0, 0, 50000),
UTCDateTime(2000, 1, 1, 0, 0, 0, 100000),
UTCDateTime(2000, 1, 1, 0, 0, 0, 150000),
UTCDateTime(2000, 1, 1, 0, 0, 0, 200000)], dtype=UTCDateTime)
self.assertTrue(isinstance(got[0], UTCDateTime))
np.testing.assert_allclose(
[t_.timestamp for t_ in got[:5]],
[t_.timestamp for t_ in expected], rtol=1e-17)
got = tr.times("timestamp")
expected = np.arange(0, 4.5 * delta, delta) + 946684800.0
np.testing.assert_allclose(got[:5], expected, rtol=1e-17)
got = tr.times("matplotlib")
expected = np.array([
730120.00000000000000000000, 730120.00000057870056480169,
730120.00000115740112960339, 730120.00000173610169440508,
730120.00000231480225920677])
np.testing.assert_allclose(got[:5], expected, rtol=1e-17)
def test_modulo_operation(self):
"""
Method for testing the modulo operation. Mainly tests parts not covered
by the doctests.
"""
tr = Trace(data=np.arange(25))
# Wrong type raises.
self.assertRaises(TypeError, tr.__mod__, 5.0)
self.assertRaises(TypeError, tr.__mod__, "123")
# Needs to be a positive integer.
self.assertRaises(ValueError, tr.__mod__, 0)
self.assertRaises(ValueError, tr.__mod__, -11)
# If num is more than the number of samples, a copy will be returned.
st = tr % 500
self.assertEqual(tr, st[0])
self.assertEqual(len(st), 1)
self.assertFalse(tr.data is st[0].data)
def test_plot(self):
"""
Tests plot method if matplotlib is installed
"""
tr = Trace(data=np.arange(25))
tr.plot(show=False)
def test_spectrogram(self):
"""
Tests spectrogram method if matplotlib is installed
"""
tr = Trace(data=np.arange(25))
tr.stats.sampling_rate = 20
tr.spectrogram(show=False)
def test_raise_masked(self):
"""
Tests that detrend() raises in case of a masked array. (see #498)
"""
x = np.arange(10)
x = np.ma.masked_inside(x, 3, 4)
tr = Trace(x)
self.assertRaises(NotImplementedError, tr.detrend)
def test_split(self):
"""
Tests split method of the Trace class.
"""
# set up
tr1 = Trace(data=np.arange(1000))
tr1.stats.sampling_rate = 200
start = UTCDateTime(2000, 1, 1, 0, 0, 0, 0)
tr1.stats.starttime = start
tr2 = Trace(data=np.arange(0, 1000)[::-1])
tr2.stats.sampling_rate = 200
tr2.stats.starttime = start + 10
# add will create new trace with masked array
trace = tr1 + tr2
self.assertTrue(isinstance(trace.data, np.ma.masked_array))
# split
self.assertTrue(isinstance(trace, Trace))
st = trace.split()
self.assertTrue(isinstance(st, Stream))
self.assertEqual(len(st[0]), 1000)
self.assertEqual(len(st[1]), 1000)
# check if have no masked arrays
self.assertFalse(isinstance(st[0].data, np.ma.masked_array))
self.assertFalse(isinstance(st[1].data, np.ma.masked_array))
def test_split_empty_masked_array(self):
"""
Test split method with a masked array without any data.
"""
tr = Trace(data=np.ma.masked_all(100))
self.assertTrue(isinstance(tr.data, np.ma.masked_array))
self.assertTrue(isinstance(tr, Trace))
st = tr.split()
self.assertTrue(isinstance(st, Stream))
self.assertEqual(len(st), 0)
def test_split_masked_array_without_actually_masked_values(self):
"""
Tests splitting a masked array without actually masked data.
"""
# First non masked.
tr = Trace(data=np.arange(100))
st = tr.copy().split()
self.assertEqual(len(st), 1)
self.assertEqual(tr, st[0])
self.assertFalse(isinstance(st[0].data, np.ma.masked_array))
# Now the same thing but with an initially masked array but no
# masked values.
tr = Trace(data=np.ma.arange(100))
self.assertFalse(tr.data.mask)
st = tr.copy().split()
self.assertEqual(len(st), 1)
self.assertEqual(tr, st[0])
self.assertFalse(isinstance(st[0].data, np.ma.masked_array))
def test_simulate_evalresp(self):
"""
Tests that trace.simulate calls evalresp with the correct network,
station, location and channel information.
"""
tr = read()[0]
# Wrap in try/except as it of course will fail because the mocked
# function returns None.
try:
with mock.patch("obspy.signal.invsim.evalresp") as patch:
tr.simulate(seedresp={"filename": "RESP.dummy",
"units": "VEL",
"date": tr.stats.starttime})
except Exception:
pass
self.assertEqual(patch.call_count, 1)
_, kwargs = patch.call_args
# Make sure that every item of the trace is passed to the evalresp
# function.
for key in ["network", "station", "location", "channel"]:
self.assertEqual(
kwargs[key if key != "location" else "locid"], tr.stats[key],
msg="'%s' did not get passed on to evalresp" % key)
def test_issue_540(self):
"""
Trim with pad=True and given fill value should not return a masked
NumPy array.
"""
# fill_value = None
tr = read()[0]
self.assertEqual(len(tr), 3000)
tr.trim(starttime=tr.stats.starttime - 0.01,
endtime=tr.stats.endtime + 0.01, pad=True, fill_value=None)
self.assertEqual(len(tr), 3002)
self.assertTrue(isinstance(tr.data, np.ma.masked_array))
self.assertIs(tr.data[0], np.ma.masked)
self.assertTrue(tr.data[1] is not np.ma.masked)
self.assertTrue(tr.data[-2] is not np.ma.masked)
self.assertIs(tr.data[-1], np.ma.masked)
# fill_value = 999
tr = read()[0]
self.assertEqual(len(tr), 3000)
tr.trim(starttime=tr.stats.starttime - 0.01,
endtime=tr.stats.endtime + 0.01, pad=True, fill_value=999)
self.assertEqual(len(tr), 3002)
self.assertFalse(isinstance(tr.data, np.ma.masked_array))
self.assertEqual(tr.data[0], 999)
self.assertEqual(tr.data[-1], 999)
# given fill_value but actually no padding at all
tr = read()[0]
self.assertEqual(len(tr), 3000)
tr.trim(starttime=tr.stats.starttime,
endtime=tr.stats.endtime, pad=True, fill_value=-999)
self.assertEqual(len(tr), 3000)
self.assertFalse(isinstance(tr.data, np.ma.masked_array))
def test_resample(self):
"""
Tests the resampling of traces.
"""
tr = read()[0]
self.assertEqual(tr.stats.sampling_rate, 100.0)
self.assertEqual(tr.stats.npts, 3000)
tr_2 = tr.copy().resample(sampling_rate=50.0)
self.assertEqual(tr_2.stats.endtime, tr.stats.endtime - 1.0 / 100.0)
self.assertEqual(tr_2.stats.sampling_rate, 50.0)
self.assertEqual(tr_2.stats.starttime, tr.stats.starttime)
tr_3 = tr.copy().resample(sampling_rate=10.0)
self.assertEqual(tr_3.stats.endtime, tr.stats.endtime - 9.0 / 100.0)
self.assertEqual(tr_3.stats.sampling_rate, 10.0)
self.assertEqual(tr_3.stats.starttime, tr.stats.starttime)
tr_4 = tr.copy()
tr_4.data = np.require(tr_4.data,
dtype=tr_4.data.dtype.newbyteorder('>'))
tr_4 = tr_4.resample(sampling_rate=10.0)
self.assertEqual(tr_4.stats.endtime, tr.stats.endtime - 9.0 / 100.0)
self.assertEqual(tr_4.stats.sampling_rate, 10.0)
self.assertEqual(tr_4.stats.starttime, tr.stats.starttime)
def test_method_chaining(self):
"""
Tests that method chaining works for all methods on the Trace object
where it is sensible.
"""
# This essentially just checks that the methods are chainable. The
# methods are tested elsewhere and a full test would be a lot of work
# with questionable return.
tr = read()[0]
temp_tr = tr.trim(tr.stats.starttime + 1)\
.verify()\
.filter("lowpass", freq=2.0)\
.simulate(paz_remove={'poles': [-0.037004 + 0.037016j,
-0.037004 - 0.037016j,
-251.33 + 0j],
'zeros': [0j, 0j],
'gain': 60077000.0,
'sensitivity': 2516778400.0})\
.trigger(type="zdetect", nsta=20)\
.decimate(factor=2, no_filter=True)\
.resample(tr.stats.sampling_rate / 2.0)\
.differentiate()\
.integrate()\
.detrend()\
.taper(max_percentage=0.05, type='cosine')\
.normalize()
self.assertIs(temp_tr, tr)
self.assertTrue(isinstance(tr, Trace))
self.assertGreater(tr.stats.npts, 0)
# Use the processing chain to check the results. The trim() methods
# does not have an entry in the processing chain.
pr = tr.stats.processing
self.assertIn("trim", pr[0])
self.assertTrue("filter" in pr[1] and "lowpass" in pr[1])
self.assertIn("simulate", pr[2])
self.assertIn("trigger", pr[3])
self.assertIn("decimate", pr[4])
self.assertIn("resample", pr[5])
self.assertIn("differentiate", pr[6])
self.assertIn("integrate", pr[7])
self.assertIn("detrend", pr[8])
self.assertIn("taper", pr[9])
self.assertIn("normalize", pr[10])
def test_skip_empty_trace(self):
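        """
        Processing methods (detrend, resample, differentiate, integrate, taper)
        should run without error on a trace trimmed down to zero samples.
        """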
tr = read()[0]
t = tr.stats.endtime + 10
tr.trim(t, t + 10)
tr.detrend()
tr.resample(400)
tr.differentiate()
tr.integrate()
tr.taper(max_percentage=0.1)
def test_issue_695(self):
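        """
        Trace should raise a ValueError for data that is not a 1-D array
        (see #695).
        """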
x = np.zeros(12)
data = [x.reshape((12, 1)),
x.reshape((1, 12)),
x.reshape((2, 6)),
x.reshape((6, 2)),
x.reshape((2, 2, 3)),
x.reshape((1, 2, 2, 3)),
x[0][()], # 0-dim array
]
for d in data:
self.assertRaises(ValueError, Trace, data=d)
def test_remove_response(self):
"""
Test remove_response() method against simulate() with equivalent
parameters to check response removal from Response object read from
StationXML against pure evalresp providing an external RESP file.
"""
tr1 = read()[0]
tr2 = tr1.copy()
# deconvolve from dataless with simulate() via Parser from
# dataless/RESP
parser = Parser("/path/to/dataless.seed.BW_RJOB")
tr1.simulate(seedresp={"filename": parser, "units": "VEL"},
water_level=60, pre_filt=(0.1, 0.5, 30, 50), sacsim=True,
pitsasim=False)
# deconvolve from StationXML with remove_response()
tr2.remove_response(pre_filt=(0.1, 0.5, 30, 50))
np.testing.assert_array_almost_equal(tr1.data, tr2.data)
def test_remove_polynomial_response(self):
"""
"""
from obspy import read_inventory
path = os.path.dirname(__file__)
# blockette 62, stage 0
tr = read()[0]
tr.stats.network = 'IU'
tr.stats.station = 'ANTO'
tr.stats.location = '30'
tr.stats.channel = 'LDO'
tr.stats.starttime = UTCDateTime("2010-07-23T00:00:00")
# remove response
del tr.stats.response
filename = os.path.join(path, 'data', 'stationxml_IU.ANTO.30.LDO.xml')
inv = read_inventory(filename, format='StationXML')
tr.attach_response(inv)
tr.remove_response()
# blockette 62, stage 1 + blockette 58, stage 2
tr = read()[0]
tr.stats.network = 'BK'
tr.stats.station = 'CMB'
tr.stats.location = ''
tr.stats.channel = 'LKS'
tr.stats.starttime = UTCDateTime("2004-06-16T00:00:00")
# remove response
del tr.stats.response
filename = os.path.join(path, 'data', 'stationxml_BK.CMB.__.LKS.xml')
inv = read_inventory(filename, format='StationXML')
tr.attach_response(inv)
# raises UserWarning: Stage gain not defined - ignoring
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always", UserWarning)
tr.remove_response()
self.assertEqual(len(w), 1)
self.assertEqual(w[0].category, UserWarning)
def test_processing_info_remove_response_and_sensitivity(self):
"""
Tests adding processing info for remove_response() and
remove_sensitivity().
See #1247.
"""
# remove_sensitivity() with response object attached to the trace.
tr = read()[0]
self.assertNotIn("processing", tr.stats)
tr.remove_sensitivity()
self.assertIn("processing", tr.stats)
self.assertEqual(len(tr.stats.processing), 1)
self.assertTrue(tr.stats.processing[0].endswith(
"remove_sensitivity(inventory=None)"))
# With passed inventory object.
tr = read()[0]
self.assertNotIn("processing", tr.stats)
tr.remove_sensitivity(inventory=read_inventory())
self.assertIn("processing", tr.stats)
self.assertEqual(len(tr.stats.processing), 1)
self.assertIn("remove_sensitivity(inventory=<obspy.core.inventory."
"inventory.Inventory object ", tr.stats.processing[0])
# remove_response()
tr = read()[0]
self.assertNotIn("processing", tr.stats)
tr.remove_response()
self.assertIn("processing", tr.stats)
self.assertEqual(len(tr.stats.processing), 1)
self.assertIn("remove_response(", tr.stats.processing[0])
self.assertIn("inventory=None", tr.stats.processing[0])
# With passed inventory object.
tr = read()[0]
self.assertNotIn("processing", tr.stats)
tr.remove_response(inventory=read_inventory())
self.assertIn("processing", tr.stats)
self.assertEqual(len(tr.stats.processing), 1)
self.assertIn("remove_response(", tr.stats.processing[0])
self.assertIn("inventory=<obspy.core.inventory.inventory.Inventory "
"object", tr.stats.processing[0])
def test_processing_information(self):
"""
Test case for the automatic processing information.
"""
tr = read()[0]
trimming_starttime = tr.stats.starttime + 1
tr.trim(trimming_starttime)
tr.filter("lowpass", freq=2.0)
tr.simulate(paz_remove={
'poles': [-0.037004 + 0.037016j, -0.037004 - 0.037016j,
-251.33 + 0j],
'zeros': [0j, 0j],
'gain': 60077000.0,
'sensitivity': 2516778400.0})
tr.trigger(type="zdetect", nsta=20)
tr.decimate(factor=2, no_filter=True)
tr.resample(tr.stats.sampling_rate / 2.0)
tr.differentiate()
tr.integrate()
tr.detrend()
tr.taper(max_percentage=0.05, type='cosine')
tr.normalize()
pr = tr.stats.processing
self.assertIn("trim", pr[0])
self.assertEqual(
"ObsPy %s: trim(endtime=None::fill_value=None::"
"nearest_sample=True::pad=False::starttime=%s)" % (
__version__, repr(trimming_starttime)),
pr[0])
self.assertIn("filter", pr[1])
self.assertIn("simulate", pr[2])
self.assertIn("trigger", pr[3])
self.assertIn("decimate", pr[4])
self.assertIn("resample", pr[5])
self.assertIn("differentiate", pr[6])
self.assertIn("integrate", pr[7])
self.assertIn("detrend", pr[8])
self.assertIn("taper", pr[9])
self.assertIn("normalize", pr[10])
def test_no_processing_info_for_failed_operations(self):
"""
If an operation fails, no processing information should be attached
to the Trace object.
"""
# create test Trace
tr = Trace(data=np.arange(20))
self.assertFalse("processing" in tr.stats)
# This decimation by a factor of 7 in this case would change the
# end time of the time series. Therefore it fails.
self.assertRaises(ValueError, tr.decimate, 7, strict_length=True)
# No processing should be applied yet.
self.assertFalse("processing" in tr.stats)
# Test the same but this time with an already existing processing
# information.
tr = Trace(data=np.arange(20))
tr.detrend()
self.assertEqual(len(tr.stats.processing), 1)
info = tr.stats.processing[0]
self.assertRaises(ValueError, tr.decimate, 7, strict_length=True)
self.assertEqual(tr.stats.processing, [info])
def test_meta(self):
"""
        Tests Trace.meta, an alternative to Trace.stats
"""
tr = Trace()
tr.meta = Stats({'network': 'NW'})
self.assertEqual(tr.stats.network, 'NW')
tr.stats = Stats({'network': 'BW'})
self.assertEqual(tr.meta.network, 'BW')
def test_interpolate(self):
"""
Tests the interpolate function.
This also tests the interpolation in obspy.signal. No need to repeat
the same test twice I guess.
"""
# Load the prepared data. The data has been created using SAC.
file_ = "interpolation_test_random_waveform_delta_0.01_npts_50.sac"
org_tr = read("/path/to/%s" % file_)[0]
file_ = "interpolation_test_interpolated_delta_0.003.sac"
interp_delta_0_003 = read("/path/to/%s" % file_)[0]
file_ = "interpolation_test_interpolated_delta_0.077.sac"
interp_delta_0_077 = read("/path/to/%s" % file_)[0]
# Perform the same interpolation as in Python with ObsPy.
int_tr = org_tr.copy().interpolate(sampling_rate=1.0 / 0.003,
method="weighted_average_slopes")
# Assert that the sampling rate has been set correctly.
self.assertEqual(int_tr.stats.delta, 0.003)
# Assert that the new end time is smaller than the old one. SAC at
# times performs some extrapolation which we do not want to do here.
self.assertLessEqual(int_tr.stats.endtime, org_tr.stats.endtime)
# SAC extrapolates a bit which we don't want here. The deviations
# to SAC are likely due to the fact that we use double precision
# math while SAC uses single precision math.
self.assertTrue(np.allclose(
int_tr.data,
interp_delta_0_003.data[:int_tr.stats.npts],
rtol=1E-3))
int_tr = org_tr.copy().interpolate(sampling_rate=1.0 / 0.077,
method="weighted_average_slopes")
# Assert that the sampling rate has been set correctly.
self.assertEqual(int_tr.stats.delta, 0.077)
# Assert that the new end time is smaller than the old one. SAC
# calculates one sample less in this case.
self.assertLessEqual(int_tr.stats.endtime, org_tr.stats.endtime)
self.assertTrue(np.allclose(
int_tr.data[:interp_delta_0_077.stats.npts],
interp_delta_0_077.data,
rtol=1E-5))
# Also test the other interpolation methods mainly by assuring the
# correct SciPy function is called and everything stays internally
# consistent. SciPy's functions are tested enough to be sure that
# they work.
for inter_type in ["linear", "nearest", "zero"]:
with mock.patch("scipy.interpolate.interp1d") as patch:
patch.return_value = lambda x: x
org_tr.copy().interpolate(sampling_rate=0.5, method=inter_type)
self.assertEqual(patch.call_count, 1)
self.assertEqual(patch.call_args[1]["kind"], inter_type)
int_tr = org_tr.copy().interpolate(sampling_rate=0.5,
method=inter_type)
self.assertEqual(int_tr.stats.delta, 2.0)
self.assertLessEqual(int_tr.stats.endtime, org_tr.stats.endtime)
for inter_type in ["slinear", "quadratic", "cubic", 1, 2, 3]:
with mock.patch("scipy.interpolate.InterpolatedUnivariateSpline") \
as patch:
patch.return_value = lambda x: x
org_tr.copy().interpolate(sampling_rate=0.5, method=inter_type)
s_map = {
"slinear": 1,
"quadratic": 2,
"cubic": 3
}
if inter_type in s_map:
inter_type = s_map[inter_type]
self.assertEqual(patch.call_count, 1)
self.assertEqual(patch.call_args[1]["k"], inter_type)
int_tr = org_tr.copy().interpolate(sampling_rate=0.5,
method=inter_type)
self.assertEqual(int_tr.stats.delta, 2.0)
self.assertLessEqual(int_tr.stats.endtime, org_tr.stats.endtime)
def test_interpolation_time_shift(self):
"""
Tests the time shift of the interpolation.
"""
tr = read()[0]
tr.stats.sampling_rate = 1.0
tr.data = tr.data[:500]
tr.interpolate(method="lanczos", sampling_rate=10.0, a=20)
tr.stats.sampling_rate = 1.0
tr.data = tr.data[:500]
tr.stats.starttime = UTCDateTime(0)
org_tr = tr.copy()
# Now this does not do much for now but actually just shifts the
# samples.
tr.interpolate(method="lanczos", sampling_rate=1.0, a=1,
time_shift=0.2)
self.assertEqual(tr.stats.starttime, org_tr.stats.starttime + 0.2)
self.assertEqual(tr.stats.endtime, org_tr.stats.endtime + 0.2)
np.testing.assert_allclose(tr.data, org_tr.data, atol=1E-9)
tr.interpolate(method="lanczos", sampling_rate=1.0, a=1,
time_shift=0.4)
self.assertEqual(tr.stats.starttime, org_tr.stats.starttime + 0.6)
self.assertEqual(tr.stats.endtime, org_tr.stats.endtime + 0.6)
np.testing.assert_allclose(tr.data, org_tr.data, atol=1E-9)
tr.interpolate(method="lanczos", sampling_rate=1.0, a=1,
time_shift=-0.6)
self.assertEqual(tr.stats.starttime, org_tr.stats.starttime)
self.assertEqual(tr.stats.endtime, org_tr.stats.endtime)
np.testing.assert_allclose(tr.data, org_tr.data, atol=1E-9)
# This becomes more interesting when also fixing the sample
# positions. Then one can shift by subsample accuracy while leaving
# the sample positions intact. Note that there naturally are some
# boundary effects and as the interpolation method does not deal
# with any kind of extrapolation you will lose the first or last
# samples.
# This is a fairly extreme example but of course there are errors
# when doing an interpolation - a shift using an FFT is more accurate.
tr.interpolate(method="lanczos", sampling_rate=1.0, a=50,
starttime=tr.stats.starttime + tr.stats.delta,
time_shift=0.2)
# The sample point did not change but we lost the first sample,
# as we shifted towards the future.
self.assertEqual(tr.stats.starttime, org_tr.stats.starttime + 1.0)
self.assertEqual(tr.stats.endtime, org_tr.stats.endtime)
# The data naturally also changed.
self.assertRaises(AssertionError, np.testing.assert_allclose,
tr.data, org_tr.data[1:], atol=1E-9)
# Shift back. This time we will lose the last sample.
tr.interpolate(method="lanczos", sampling_rate=1.0, a=50,
starttime=tr.stats.starttime,
time_shift=-0.2)
self.assertEqual(tr.stats.starttime, org_tr.stats.starttime + 1.0)
self.assertEqual(tr.stats.endtime, org_tr.stats.endtime - 1.0)
# But the data (aside from edge effects - we are going forward and
# backwards again so they go twice as far!) should now again be the
# same as we started out with.
np.testing.assert_allclose(
tr.data[100:-100], org_tr.data[101:-101], atol=1E-9, rtol=1E-4)
def test_interpolation_arguments(self):
"""
Test case for the interpolation arguments.
"""
tr = read()[0]
tr.stats.sampling_rate = 1.0
tr.data = tr.data[:50]
for inter_type in ["linear", "nearest", "zero", "slinear",
"quadratic", "cubic", 1, 2, 3,
"weighted_average_slopes"]:
# If only the sampling rate is specified, the end time will be very
# close to the original end time but never bigger.
interp_tr = tr.copy().interpolate(sampling_rate=0.3,
method=inter_type)
self.assertEqual(tr.stats.starttime, interp_tr.stats.starttime)
self.assertTrue(tr.stats.endtime >= interp_tr.stats.endtime >=
tr.stats.endtime - (1.0 / 0.3))
# If the starttime is modified the new starttime will be used but
# the end time will again be modified as little as possible.
interp_tr = tr.copy().interpolate(sampling_rate=0.3,
method=inter_type,
starttime=tr.stats.starttime +
5.0)
self.assertEqual(tr.stats.starttime + 5.0,
interp_tr.stats.starttime)
self.assertTrue(tr.stats.endtime >= interp_tr.stats.endtime >=
tr.stats.endtime - (1.0 / 0.3))
# If npts is given it will be used to modify the end time.
interp_tr = tr.copy().interpolate(sampling_rate=0.3,
method=inter_type, npts=10)
self.assertEqual(tr.stats.starttime,
interp_tr.stats.starttime)
self.assertEqual(interp_tr.stats.npts, 10)
# If npts and starttime are given, both will be modified.
interp_tr = tr.copy().interpolate(sampling_rate=0.3,
method=inter_type,
starttime=tr.stats.starttime +
5.0, npts=10)
self.assertEqual(tr.stats.starttime + 5.0,
interp_tr.stats.starttime)
self.assertEqual(interp_tr.stats.npts, 10)
# An earlier starttime will raise an exception. No extrapolation
# is supported
self.assertRaises(ValueError, tr.copy().interpolate,
sampling_rate=1.0,
starttime=tr.stats.starttime - 10.0)
# As will too many samples that would overstep the end time bound.
self.assertRaises(ValueError, tr.copy().interpolate,
sampling_rate=1.0,
npts=tr.stats.npts * 1E6)
# A negative or zero desired sampling rate should raise.
self.assertRaises(ValueError, tr.copy().interpolate,
sampling_rate=0.0)
self.assertRaises(ValueError, tr.copy().interpolate,
sampling_rate=-1.0)
def test_resample_new(self):
"""
Tests if Trace.resample works as expected and test that issue #857 is
resolved.
"""
starttime = UTCDateTime("1970-01-01T00:00:00.000000Z")
tr0 = Trace(np.sin(np.linspace(0, 2 * np.pi, 10)),
{'sampling_rate': 1.0,
'starttime': starttime})
# downsample
tr = tr0.copy()
tr.resample(0.5, window='hanning', no_filter=True)
self.assertEqual(len(tr.data), 5)
expected = np.array([0.19478735, 0.83618307, 0.32200221,
-0.7794053, -0.57356732])
self.assertTrue(np.all(np.abs(tr.data - expected) < 1e-7))
self.assertEqual(tr.stats.sampling_rate, 0.5)
self.assertEqual(tr.stats.delta, 2.0)
self.assertEqual(tr.stats.npts, 5)
self.assertEqual(tr.stats.starttime, starttime)
self.assertEqual(tr.stats.endtime,
starttime + tr.stats.delta * (tr.stats.npts - 1))
# upsample
tr = tr0.copy()
tr.resample(2.0, window='hanning', no_filter=True)
self.assertEqual(len(tr.data), 20)
self.assertEqual(tr.stats.sampling_rate, 2.0)
self.assertEqual(tr.stats.delta, 0.5)
self.assertEqual(tr.stats.npts, 20)
self.assertEqual(tr.stats.starttime, starttime)
self.assertEqual(tr.stats.endtime,
starttime + tr.stats.delta * (tr.stats.npts - 1))
# downsample with non integer ratio
tr = tr0.copy()
tr.resample(0.75, window='hanning', no_filter=True)
self.assertEqual(len(tr.data), int(10 * .75))
expected = np.array([0.15425413, 0.66991128, 0.74610418, 0.11960477,
-0.60644662, -0.77403839, -0.30938935])
self.assertTrue(np.all(np.abs(tr.data - expected) < 1e-7))
self.assertEqual(tr.stats.sampling_rate, 0.75)
self.assertEqual(tr.stats.delta, 1 / 0.75)
self.assertEqual(tr.stats.npts, int(10 * .75))
self.assertEqual(tr.stats.starttime, starttime)
self.assertEqual(tr.stats.endtime,
starttime + tr.stats.delta * (tr.stats.npts - 1))
# downsample without window
tr = tr0.copy()
tr.resample(0.5, window=None, no_filter=True)
self.assertEqual(len(tr.data), 5)
self.assertEqual(tr.stats.sampling_rate, 0.5)
self.assertEqual(tr.stats.delta, 2.0)
self.assertEqual(tr.stats.npts, 5)
self.assertEqual(tr.stats.starttime, starttime)
self.assertEqual(tr.stats.endtime,
starttime + tr.stats.delta * (tr.stats.npts - 1))
# downsample with window and automatic filtering
tr = tr0.copy()
tr.resample(0.5, window='hanning', no_filter=False)
self.assertEqual(len(tr.data), 5)
self.assertEqual(tr.stats.sampling_rate, 0.5)
self.assertEqual(tr.stats.delta, 2.0)
self.assertEqual(tr.stats.npts, 5)
self.assertEqual(tr.stats.starttime, starttime)
self.assertEqual(tr.stats.endtime,
starttime + tr.stats.delta * (tr.stats.npts - 1))
# downsample with custom window
tr = tr0.copy()
window = np.ones((tr.stats.npts))
tr.resample(0.5, window=window, no_filter=True)
# downsample with bad window
tr = tr0.copy()
window = np.array([0, 1, 2, 3])
self.assertRaises(ValueError, tr.resample,
sampling_rate=0.5, window=window, no_filter=True)
def test_slide(self):
"""
Tests for sliding a window across a trace object.
"""
tr = Trace(data=np.linspace(0, 100, 101))
tr.stats.starttime = UTCDateTime(0.0)
tr.stats.sampling_rate = 5.0
# First slice it in 4 pieces. Window length is in seconds.
slices = []
for window_tr in tr.slide(window_length=5.0, step=5.0):
slices.append(window_tr)
self.assertEqual(len(slices), 4)
self.assertEqual(slices[0],
tr.slice(UTCDateTime(0), UTCDateTime(5)))
self.assertEqual(slices[1],
tr.slice(UTCDateTime(5), UTCDateTime(10)))
self.assertEqual(slices[2],
tr.slice(UTCDateTime(10), UTCDateTime(15)))
self.assertEqual(slices[3],
tr.slice(UTCDateTime(15), UTCDateTime(20)))
# Different step which is the distance between two windows measured
# from the start of the first window in seconds.
slices = []
for window_tr in tr.slide(window_length=5.0, step=10.0):
slices.append(window_tr)
self.assertEqual(len(slices), 2)
self.assertEqual(slices[0],
tr.slice(UTCDateTime(0), UTCDateTime(5)))
self.assertEqual(slices[1],
tr.slice(UTCDateTime(10), UTCDateTime(15)))
# Offset determines the initial starting point. It defaults to zero.
slices = []
for window_tr in tr.slide(window_length=5.0, step=6.5, offset=8.5):
slices.append(window_tr)
self.assertEqual(len(slices), 2)
self.assertEqual(slices[0],
tr.slice(UTCDateTime(8.5), UTCDateTime(13.5)))
self.assertEqual(slices[1],
tr.slice(UTCDateTime(15.0), UTCDateTime(20.0)))
# By default only full length windows will be returned so any
# remainder that can no longer make up a full window will not be
# returned.
slices = []
for window_tr in tr.slide(window_length=15.0, step=15.0):
slices.append(window_tr)
self.assertEqual(len(slices), 1)
self.assertEqual(slices[0],
tr.slice(UTCDateTime(0.0), UTCDateTime(15.0)))
# But it can optionally be returned.
slices = []
for window_tr in tr.slide(window_length=15.0, step=15.0,
include_partial_windows=True):
slices.append(window_tr)
self.assertEqual(len(slices), 2)
self.assertEqual(slices[0],
tr.slice(UTCDateTime(0.0), UTCDateTime(15.0)))
self.assertEqual(slices[1],
tr.slice(UTCDateTime(15.0), UTCDateTime(20.0)))
# Negative step lengths work together with an offset.
slices = []
for window_tr in tr.slide(window_length=5.0, step=-5.0, offset=20.0):
slices.append(window_tr)
self.assertEqual(len(slices), 4)
self.assertEqual(slices[0],
tr.slice(UTCDateTime(15), UTCDateTime(20)))
self.assertEqual(slices[1],
tr.slice(UTCDateTime(10), UTCDateTime(15)))
self.assertEqual(slices[2],
tr.slice(UTCDateTime(5), UTCDateTime(10)))
self.assertEqual(slices[3],
tr.slice(UTCDateTime(0), UTCDateTime(5)))
def test_slide_nearest_sample(self):
"""
Tests that the nearest_sample argument is correctly passed to the
slice function calls.
"""
tr = Trace(data=np.linspace(0, 100, 101))
tr.stats.starttime = UTCDateTime(0.0)
tr.stats.sampling_rate = 5.0
# It defaults to True.
with mock.patch("obspy.core.trace.Trace.slice") as patch:
patch.return_value = tr
list(tr.slide(5, 5))
self.assertEqual(patch.call_count, 4)
for arg in patch.call_args_list:
self.assertTrue(arg[1]["nearest_sample"])
# Force True.
with mock.patch("obspy.core.trace.Trace.slice") as patch:
patch.return_value = tr
list(tr.slide(5, 5, nearest_sample=True))
self.assertEqual(patch.call_count, 4)
for arg in patch.call_args_list:
self.assertTrue(arg[1]["nearest_sample"])
# Set to False.
with mock.patch("obspy.core.trace.Trace.slice") as patch:
patch.return_value = tr
list(tr.slide(5, 5, nearest_sample=False))
self.assertEqual(patch.call_count, 4)
for arg in patch.call_args_list:
self.assertFalse(arg[1]["nearest_sample"])
def test_remove_response_plot(self):
"""
Tests the plotting option of remove_response().
"""
tr = read("/path/to/IU_ULN_00_LH1_2015-07-18T02.mseed")[0]
inv = read_inventory("/path/to/IU_ULN_00_LH1.xml")
tr.attach_response(inv)
pre_filt = [0.001, 0.005, 10, 20]
image_dir = os.path.join(os.path.dirname(__file__), 'images')
with ImageComparison(image_dir, "trace_remove_response.png",
reltol=1.5) as ic:
tr.remove_response(pre_filt=pre_filt, output="DISP",
water_level=60, end_stage=None, plot=ic.name)
def test_normalize(self):
"""
Tests the normalize() method on normal and edge cases.
"""
# Nothing should happen with ones.
tr = Trace(data=np.ones(5))
tr.normalize()
np.testing.assert_allclose(tr.data, np.ones(5))
# 10s should be normalized to all ones.
tr = Trace(data=10 * np.ones(5))
tr.normalize()
np.testing.assert_allclose(tr.data, np.ones(5))
# Negative 10s should be normalized to negative ones.
tr = Trace(data=-10 * np.ones(5))
tr.normalize()
np.testing.assert_allclose(tr.data, -np.ones(5))
# 10s and a couple of 5s should be normalized to 1s and a couple of
# 0.5s.
tr = Trace(data=np.array([10.0, 10.0, 5.0, 5.0]))
tr.normalize()
np.testing.assert_allclose(tr.data, np.array([1.0, 1.0, 0.5, 0.5]))
# Same but negative values.
tr = Trace(data=np.array([-10.0, -10.0, -5.0, -5.0]))
tr.normalize()
np.testing.assert_allclose(tr.data, np.array([-1.0, -1.0, -0.5, -0.5]))
# Mixed values.
tr = Trace(data=np.array([-10.0, -10.0, 5.0, 5.0]))
tr.normalize()
np.testing.assert_allclose(tr.data, np.array([-1.0, -1.0, 0.5, 0.5]))
# Mixed values.
tr = Trace(data=np.array([-10.0, 10.0, -5.0, 5.0]))
tr.normalize()
np.testing.assert_allclose(tr.data, np.array([-1.0, 1.0, -0.5, 0.5]))
# Mixed values.
tr = Trace(data=np.array([-10.0, -10.0, 0.0, 0.0]))
tr.normalize()
np.testing.assert_allclose(tr.data, np.array([-1.0, -1.0, 0.0, 0.0]))
# Mixed values.
        tr = Trace(data=np.array([10.0, 10.0, 0.0, 0.0]))
        tr.normalize()
        np.testing.assert_allclose(tr.data, np.array([1.0, 1.0, 0.0, 0.0]))
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
import functools
import cv2
import math
import numpy as np
import imageio
from glob import glob
import os
import copy
import shutil
import skimage.metrics
import pandas as pd
import pylab as plt
import fairseq.distributed_utils as du
from plyfile import PlyData, PlyElement
from fairseq.meters import StopwatchMeter
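# Thin wrappers around fairseq's distributed utilities: when the distributed
# backend is not initialised they raise an AssertionError, so fall back to
# single-process defaults (rank 0, world size 1).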
def get_rank():
try:
return du.get_rank()
except AssertionError:
return 0
def get_world_size():
try:
return du.get_world_size()
except AssertionError:
return 1
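# Parse a view specification string of the form "ids[:step]", where ids is a
# comma-separated list of indices and "a..b" ranges, and an optional ":step"
# suffix subsamples the resulting list (e.g. "0..100:10").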
def parse_views(view_args):
output = []
try:
xx = view_args.split(':')
ids = xx[0].split(',')
for id in ids:
if '..' in id:
a, b = id.split('..')
output += list(range(int(a), int(b)))
else:
output += [int(id)]
if len(xx) > 1:
output = output[::int(xx[-1])]
except Exception as e:
raise Exception("parse view args error: {}".format(e))
return output
def get_uv(H, W, h, w):
"""
H, W: real image (intrinsics)
h, w: resized image
"""
uv = np.flip(np.mgrid[0: h, 0: w], axis=0).astype(np.float32)
uv[0] = uv[0] * float(W / w)
uv[1] = uv[1] * float(H / h)
return uv, [float(H / h), float(W / w)]
def load_rgb(
path,
resolution=None,
with_alpha=True,
bg_color=[1.0, 1.0, 1.0],
min_rgb=-1,
interpolation='AREA'):
if with_alpha:
img = imageio.imread(path) # RGB-ALPHA
else:
img = imageio.imread(path)[:, :, :3]
img = skimage.img_as_float32(img).astype('float32')
H, W, D = img.shape
h, w = resolution
if D == 3:
img = np.concatenate([img, np.ones((img.shape[0], img.shape[1], 1))], -1).astype('float32')
uv, ratio = get_uv(H, W, h, w)
if (h < H) or (w < W):
# img = cv2.resize(img, (w, h), interpolation=cv2.INTER_NEAREST).astype('float32')
img = cv2.resize(img, (w, h), interpolation=cv2.INTER_AREA).astype('float32')
if min_rgb == -1: # 0, 1 --> -1, 1
img[:, :, :3] -= 0.5
img[:, :, :3] *= 2.
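    # Composite the RGB channels onto the background colour using alpha, then keep
    # alpha only where the composited pixel differs from the background colour.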
img[:, :, :3] = img[:, :, :3] * img[:, :, 3:] + np.asarray(bg_color)[None, None, :] * (1 - img[:, :, 3:])
img[:, :, 3] = img[:, :, 3] * (img[:, :, :3] != np.asarray(bg_color)[None, None, :]).any(-1)
img = img.transpose(2, 0, 1)
return img, uv, ratio
def load_depth(path, resolution=None, depth_plane=5):
if path is None:
return None
img = cv2.imread(path, cv2.IMREAD_UNCHANGED).astype(np.float32)
# ret, img = cv2.threshold(img, depth_plane, depth_plane, cv2.THRESH_TRUNC)
H, W = img.shape[:2]
h, w = resolution
if (h < H) or (w < W):
img = cv2.resize(img, (w, h), interpolation=cv2.INTER_NEAREST).astype('float32')
#img = cv2.resize(img, (w, h), interpolation=cv2.INTER_LINEAR)
if len(img.shape) ==3:
img = img[:,:,:1]
img = img.transpose(2,0,1)
else:
img = img[None,:,:]
return img
def load_mask(path, resolution=None):
if path is None:
return None
img = cv2.imread(path, cv2.IMREAD_GRAYSCALE).astype(np.float32)
h, w = resolution
H, W = img.shape[:2]
if (h < H) or (w < W):
img = cv2.resize(img, (w, h), interpolation=cv2.INTER_NEAREST).astype('float32')
img = img / (img.max() + 1e-7)
return img
def load_matrix(path):
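    # Read a whitespace-separated numeric matrix from a text file; a leading or
    # trailing line holding only two values (e.g. a shape header) is dropped.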
lines = [[float(w) for w in line.strip().split()] for line in open(path)]
if len(lines[0]) == 2:
lines = lines[1:]
if len(lines[-1]) == 2:
lines = lines[:-1]
return np.array(lines).astype(np.float32)
def load_intrinsics(filepath, resized_width=None, invert_y=False):
try:
intrinsics = load_matrix(filepath)
if intrinsics.shape[0] == 3 and intrinsics.shape[1] == 3:
            _intrinsics = np.zeros((4, 4), np.float32)
"""
<NAME>
<NAME>
Classes walk and dla
"""
import numpy as np
import numba as nb
from scipy import stats
import matplotlib.pyplot as plt
class walk:
"""
Random walk
"""
def __init__(self, start_point):
"""
Define variables
"""
self.start_point = np.asarray(start_point)
self.walk_length = 1
self.points = None
self.dist = None
def random(self, walk_length):
"""
Find points on a random walk of given length on a square lattice
"""
walk_length = int(walk_length)
self.walk_length = walk_length
# empty variable for points
points = np.empty((walk_length, 2))
# populate first column with random numbers
points[:, 0] = np.random.randint(0, 4, walk_length)
# map random numbers to a direction
points[np.where(points[:, 0] == 0)[0]] = [-1, 0]
points[np.where(points[:, 0] == 1)[0]] = [1, 0]
points[np.where(points[:, 0] == 2)[0]] = [0, 1]
points[np.where(points[:, 0] == 3)[0]] = [0, -1]
# set start point
points[0] = self.start_point
# cumulatively sum points
points_sum = np.cumsum(points, axis=0)
self.points = points_sum
return points_sum
def random_loop(self, walk_length):
"""
Find points on a random walk of given length using a loop
        Kept for reference; random() should be preferred over this loop-based version
"""
self.walk_length = walk_length
points = np.zeros([walk_length, 2])
points[0] = self.start_point
# iterate over walk length
for i in range(1, walk_length):
# random direction (right, down, left, up)
direction = {0:np.array([1, 0]),
1:np.array([0, -1]),
2:np.array([-1, 0]),
3:np.array([0, 1])}.get(np.random.randint(0, 4))
# append to list
points[i] = points[i-1] + direction
self.points = points
return points
def random_3d(self, walk_length):
"""
Find points on a random walk of given length on a simple cubic lattice
"""
walk_length = int(walk_length)
# empty variable for points
points = np.empty((walk_length, 3))
# populate first column with random numbers
points[:, 0] = np.random.randint(0, 6, walk_length)
# map random numbers to a direction
points[np.where(points[:, 0] == 0)[0]] = [-1, 0, 0]
points[np.where(points[:, 0] == 1)[0]] = [1, 0, 0]
points[np.where(points[:, 0] == 2)[0]] = [0, 1, 0]
points[np.where(points[:, 0] == 3)[0]] = [0, -1, 0]
points[np.where(points[:, 0] == 4)[0]] = [0, 0, 1]
        points[np.where(points[:, 0] == 5)[0]] = [0, 0, -1]
import os
import sys
import json
import datetime
import numpy as np
import pandas as pd
import statistics
import cv2
import skimage.draw
import tensorflow as tf
import keras
import time
import glob
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.metrics import confusion_matrix, accuracy_score
from sklearn.model_selection import RandomizedSearchCV
import joblib
from .mrcnn.config import Config
from .mrcnn import model as modellib, utils
from .mask_saving import Masked_Image
from .Preprocessing_opt import PreprocessImages
class Sputum:
"""
    Class Sputum contains all the necessary methods for the Mask R-CNN implementation,
    including training, inference and prediction, as well as the machine learning model
    used for evaluation and a scoring method for the final evaluation.
Parameters
----------
root_dir : str
        Path to the root directory. The algorithm saves all important files here, such
        as the COCO weight file; the trained weight files are saved inside the logs
        folder after every epoch. Setting root_dir is mandatory.
"""
def __init__(self,root_dir=None):
self.root_dir = root_dir
def train_mask_rcnn(self,dataset_path,weights,epochs):
"""
This function trains the mask rcnn model based on the dataset and the weights.
Parameters
----------
dataset_path : str
            Path to the dataset. This path should contain two folders: train and val.
            Inside each of these folders the corresponding annotation file must be
            saved as "train.json".
weights : str, options= "coco" or path to the saved weight file
            Path to the weight file. If a pretrained weight file exists, give that path; otherwise
            give "coco", which will download the COCO weight file from the internet and save it
            inside the root directory.
epochs : int
Number of epochs for training.
Returns
-------
Weight Files
Trained weight files will be saved inside the root directory.
"""
# Root directory of the project
ROOT_DIR = os.path.abspath(self.root_dir)
print("Root Directory is: ",ROOT_DIR)
# Import Mask RCNN
sys.path.append(ROOT_DIR) # To find local version of the library
MODEL_DIR = os.path.join(ROOT_DIR, "logs")
print("Model Directory is: ",MODEL_DIR)
# Path to trained weights file
if weights=='coco':
COCO_WEIGHTS_PATH = os.path.join(ROOT_DIR, "mask_rcnn_coco.h5")
# Download COCO trained weights from Releases if needed
if not os.path.exists(COCO_WEIGHTS_PATH):
utils.download_trained_weights(COCO_WEIGHTS_PATH)
else:
COCO_WEIGHTS_PATH=weights
if not os.path.exists(COCO_WEIGHTS_PATH):
print("Invalid Path to weights file")
# Directory to save logs and model checkpoints, if not provided
DEFAULT_LOGS_DIR = os.path.join(ROOT_DIR, "logs")
############################################################
# Configurations
############################################################
class SputumConfig(Config):
"""Configuration for training on the hail, hand dataset.
Derives from the base Config class and overrides some values.
"""
# Give the configuration a recognizable name
NAME = "Sputum"
# We use a GPU with 12GB memory, which can fit two images.
# Adjust down if you use a smaller GPU.
IMAGES_PER_GPU = 1
# Number of classes (including background)
            NUM_CLASSES = 1 + 1  # Background + sputum
# Number of training steps per epoch
STEPS_PER_EPOCH = 100
# Skip detections with < 80% confidence
DETECTION_MIN_CONFIDENCE = 0.9
config = SputumConfig()
############################################################
# Dataset
############################################################
class SputumDataset(utils.Dataset):
def load_sputum(self, dataset_dir, subset):
"""Load a subset of the hail dataset.
dataset_dir: Root directory of the dataset.
subset: Subset to load: train or val
"""
# Add classes. We have only one classes to add.
self.add_class("Sputum", 1, "sputum")
# Train or validation dataset?
assert subset in ["train", "val"]
dataset_dir = os.path.join(dataset_dir, subset)
annotations = json.load(open(os.path.join(dataset_dir, "train.json")))
annotations = list(annotations.values()) # don't need the dict keys
# The VIA tool saves images in the JSON even if they don't have any
# annotations. Skip unannotated images.
annotations = [a for a in annotations if a['regions']]
# Add images
for a in annotations:
polygons = [r['shape_attributes'] for r in a['regions']]
objects = [s['region_attributes'] for s in a['regions']]
class_ids = [int(n['Sputum']) for n in objects]
image_path = os.path.join(dataset_dir, a['filename'])
image = skimage.io.imread(image_path)
height, width = image.shape[:2]
###changed###
for i,p in enumerate(polygons):
all_p_x=np.array(p['all_points_x'])
all_p_y=np.array(p['all_points_y'])
all_p_x[all_p_x>=width]=width-1
all_p_y[all_p_y>=height]=height-1
polygons[i]['all_points_x']=list(all_p_x)
polygons[i]['all_points_y']=list(all_p_y)
self.add_image(
"Sputum",
image_id=a['filename'], # use file name as a unique image id
path=image_path,
width=width, height=height,
polygons=polygons,
class_ids=class_ids)
def load_mask(self, image_id):
"""Generate instance masks for an image.
Returns:
masks: A bool array of shape [height, width, instance count] with
one mask per instance.
class_ids: a 1D array of class IDs of the instance masks.
"""
                # If not a sputum dataset image, delegate to parent class.
image_info = self.image_info[image_id]
if image_info["source"] != "Sputum":
return super(self.__class__, self).load_mask(image_id)
class_ids = image_info['class_ids']
# Convert polygons to a bitmap mask of shape
# [height, width, instance_count]
info = self.image_info[image_id]
mask = np.zeros([info["height"], info["width"], len(info["polygons"])],
dtype=np.uint8)
for i, p in enumerate(info["polygons"]):
# Get indexes of pixels inside the polygon and set them to 1
rr, cc = skimage.draw.polygon(p['all_points_y'], p['all_points_x'])
mask[rr, cc, i] = 1
                # Return the mask and an array of class IDs for each instance.
print("info['class_ids']=", info['class_ids'])
class_ids = np.array(class_ids, dtype=np.int32)
return mask, class_ids#[mask.shape[-1]] #np.ones([mask.shape[-1]], dtype=np.int32)#class_ids.astype(np.int32)
def image_reference(self, image_id):
info = self.image_info[image_id]
if info["source"] == "Sputum":
return info["path"]
else:
super(self.__class__, self).image_reference(image_id)
# Training dataset.
dataset_train = SputumDataset()
dataset_train.load_sputum(dataset_path, "train")
dataset_train.prepare()
# Validation dataset
dataset_val = SputumDataset()
dataset_val.load_sputum(dataset_path, "val")
dataset_val.prepare()
# Create model in training mode
model = modellib.MaskRCNN(mode="training", config=config,
model_dir=MODEL_DIR)
print("Training network heads")
model.train(dataset_train, dataset_val,
learning_rate=config.LEARNING_RATE,
epochs=epochs,
layers='heads')
print("Training is Complete.")
print("Model Saved at :",MODEL_DIR)
def crop_maskrcnn(self,model_dir, coco_model_path, image_path, image_name, output_path):
"""This function crops the bounding box image based on the maskrcnn model output. Mask RCNN
algorithm detects the object of interest in the imagine and the shaded part detected is cropped and
saved to the output_path.
Parameters
----------
model_dir : str
Path to the model directory.
        coco_model_path : str
Path to the trained model or coco model.
image_path : str
Path to the image for which cropping is to be done.
image_name : str
Name of the image.
output_path : str
Path to the output directory where the image is to be saved.
Returns
-------
Images
The Predicted part will be cropped and saved in the output directory.
"""
class SputumConfig(Config):
"""Configuration for training on the hail, hand dataset.
Derives from the base Config class and overrides some values.
"""
# Give the configuration a recognizable name
NAME = "Sputum"
# We use a GPU with 12GB memory, which can fit two images.
# Adjust down if you use a smaller GPU.
IMAGES_PER_GPU = 1
# Number of classes (including background)
            NUM_CLASSES = 1 + 1  # Background + sputum
# Number of training steps per epoch
STEPS_PER_EPOCH = 100
# Skip detections with < 80% confidence
DETECTION_MIN_CONFIDENCE = 0.9
class InferenceConfig(SputumConfig):
# Set batch size to 1 since we'll be running inference on
# one image at a time. Batch size = GPU_COUNT * IMAGES_PER_GPU
GPU_COUNT = 1
IMAGES_PER_GPU = 1
session = tf.Session()
keras.backend.set_session(session)
configs = InferenceConfig()
with modellib.MaskRCNN(mode="inference", model_dir=model_dir, config=configs) as models:
models.load_weights(coco_model_path, by_name=True)
print('Sputum weight file loaded')
preprocess_obj = PreprocessImages()
preprocess_obj.crop_run(image_path,image_name,output_path,models,session)
def mask_saving(self,model_dir, coco_model_path, image_path, image_name, output_path):
"""The Mask RCNN model when given a image detects the area of interest. This method
saves that predictions as a image file which can be used for evaluating how good the
model is. The Image is saved in the output_path given.
Parameters
----------
model_dir : str
Path to the model directory.
        coco_model_path : str
Path to the trained model or coco model.
image_path : str
Path to the image for which cropping is to be done.
image_name : str
Name of the image.
output_path : str
Path to the output directory where the image is to be saved.
Returns
-------
Images
The Predicted part will be saved in the output directory.
"""
class SputumConfig(Config):
"""Configuration for training on the hail, hand dataset.
Derives from the base Config class and overrides some values.
"""
# Give the configuration a recognizable name
NAME = "Sputum"
# We use a GPU with 12GB memory, which can fit two images.
# Adjust down if you use a smaller GPU.
IMAGES_PER_GPU = 1
# Number of classes (including background)
            NUM_CLASSES = 1 + 1  # Background + sputum
# Number of training steps per epoch
STEPS_PER_EPOCH = 100
# Skip detections with < 80% confidence
DETECTION_MIN_CONFIDENCE = 0.9
class InferenceConfig(SputumConfig):
# Set batch size to 1 since we'll be running inference on
# one image at a time. Batch size = GPU_COUNT * IMAGES_PER_GPU
GPU_COUNT = 1
IMAGES_PER_GPU = 1
session = tf.Session()
keras.backend.set_session(session)
configs = InferenceConfig()
with modellib.MaskRCNN(mode="inference", model_dir=model_dir, config=configs) as models:
models.load_weights(coco_model_path, by_name=True)
print('Sputum weight file loaded')
preprocess_obj = Masked_Image()
preprocess_obj.sputum_mask_segment(image_path,image_name,output_path,models,session)
def sputum_ml_feature_extract(self, path_to_csv, crop_dir, feature_folder):
"""Based on the cropped images, this method extracts the features from the images. This
feature extraction method is for training the Machine Learning model. There is 24 features
extracted without including the score.
Parameters
----------
path_to_csv : str
Path to csv files containing the labels and other attributes.
crop_dir : str
Path to the directory where the cropped images are saved.
feature_folder : str
Path to the directory where the features are to be saved.
Returns
-------
Parquet file
            A Parquet file containing the extracted features will be saved inside the feature folder.
            The file name includes a timestamp.
"""
df=pd.read_csv(path_to_csv)
result=[]
for i in range(len(df)):
s = df['Score'][i]
image_name=df['image_name'][i]
img_path=os.path.join(crop_dir,image_name)
img=cv2.imread(img_path)
img_resized=cv2.resize(img,(110,70))
b,g,r=cv2.split(img_resized)
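            # Flatten each colour channel so per-channel statistics (mean, max, median
            # and percentiles) can be computed below.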
b_list = []
for i in range(len(b)):
for j in range(len(b[i])):
b_list.append(b[i][j])
g_list = []
for i in range(len(g)):
for j in range(len(g[i])):
g_list.append(g[i][j])
r_list = []
for i in range(len(r)):
for j in range(len(r[i])):
                    r_list.append(r[i][j])
b_a = np.array(b_list)
g_a = np.array(g_list)
r_a = np.array(r_list)
if len(b_a)!=0:
b_mean = cv2.mean(b)
b_max = np.max(b_a)
b_median = statistics.median(b_a)
b_10 = np.percentile(b_a, 10)
b_25 = np.percentile(b_a, 25)
b_50 = np.percentile(b_a, 50)
b_75 = np.percentile(b_a, 75)
b_100 = np.percentile(b_a, 100)
else:
b_mean = [0]
b_max = 0
b_median = 0
b_10 = 0
b_25 = 0
b_50 = 0
b_75 = 0
b_100 = 0
if len(g_a)!=0:
g_mean = cv2.mean(g)
g_median = statistics.median(g_a)
g_max = np.max(g_a)
g_10 = np.percentile(g_a, 10)
g_25 = np.percentile(g_a, 25)
g_50 = np.percentile(g_a, 50)
g_75 = np.percentile(g_a, 75)
g_100 = np.percentile(g_a, 100)
else:
g_mean = [0]
g_median = 0
g_max = 0
g_10 = 0
g_25 = 0
g_50 = 0
g_75 = 0
g_100 = 0
if len(r_a)!=0:
r_mean = cv2.mean(r)
r_max = np.max(r_a)
r_median = statistics.median(r_a)
                r_10 = np.percentile(r_a, 10)
import base64
import functools
import logging
import pickle
from io import BytesIO
import numpy as np
import pytest
from scipy import stats
from emgdecomp.decomposition import EmgDecomposition, compute_percentage_coincident
from emgdecomp.parameters import EmgDecompositionParams
from ._simulations import simulate_emg
NUM_SYMBOLS = 3
NUM_CHANNELS = 3
NUM_SAMPLES_PER_SYMBOL = 9
def _idfn(key, val):
return f'{key}={str(val)}'
class TestEmgDecomposition(object):
@staticmethod
def _generate_simulated_data():
JITTER_RANGE = 20
INTERPULSE_INTERVAL = 100
SYMBOL_SCALE = 1.0 * 1000
NOISE_STD = 0.05 * 1000
NUM_REPS = 200
NUM_SAMPLES = NUM_REPS * INTERPULSE_INTERVAL
np.random.seed(1)
state = np.random.get_state()
encoded = base64.b64encode(pickle.dumps(state)).decode('ascii')
print('To reproduce an error, base64 decode, unpickle & set the numpy random state to')
print(encoded)
# np.random.set_state(pickle.loads(base64.b64decode('<paste in>')))
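        # Build NUM_SYMBOLS impulse trains on a fixed inter-pulse interval with random
        # jitter, convolve each with a random per-channel waveform, sum the sources per
        # channel and finally add Gaussian noise to obtain the simulated EMG data.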
data = np.zeros((NUM_CHANNELS, NUM_SAMPLES))
impulses_no_jitter_indices = np.tile(np.arange(NUM_REPS) * INTERPULSE_INTERVAL, (NUM_SYMBOLS, 1))
impulses_indices = impulses_no_jitter_indices + np.random.randint(low=-JITTER_RANGE, high=JITTER_RANGE,
size=impulses_no_jitter_indices.shape)
impulses_indices[impulses_indices < 0] = 0
impulses = np.zeros((NUM_SYMBOLS, NUM_SAMPLES))
for symidx in range(NUM_SYMBOLS):
impulses[symidx, impulses_indices[symidx, :]] = 1
waveforms = np.random.normal(loc=0.0, scale=SYMBOL_SCALE,
size=(NUM_SYMBOLS, NUM_CHANNELS, NUM_SAMPLES_PER_SYMBOL))
sources = np.empty((NUM_SYMBOLS, NUM_CHANNELS, NUM_SAMPLES))
for chidx in range(NUM_CHANNELS):
for symidx in range(NUM_SYMBOLS):
sources[symidx, chidx, :] = np.convolve(impulses[symidx, :], waveforms[symidx, chidx, :], mode='same')
for chidx in range(NUM_CHANNELS):
for symidx in range(NUM_SYMBOLS):
data[chidx, :] = data[chidx, :] + sources[symidx, chidx, :]
noise = np.random.normal(scale=NOISE_STD, size=data.shape)
data_power = np.divide(np.sum(np.power(data, 2), axis=1), data.shape[1])
noise_var = np.var(noise, axis=1)
snr = np.divide(data_power, noise_var)
print('Noiseless power of data {}, noise var of data {}, SNR={}'.format(
data_power, noise_var, 10 * np.log10(snr)))
data = data + noise
return data, impulses_indices, waveforms
@pytest.fixture
def parameters(self):
return EmgDecompositionParams(
extension_factor=30,
maximum_num_sources=50,
sampling_rate=1000.0,
max_similarity=0.95,
sil_threshold=0.9,
contrast_function='cube',
)
@pytest.mark.parametrize(
'contrast_function', ['cube', 'logcosh', 'square'], ids=functools.partial(_idfn, 'contrast_function'))
def test_simulated_data_contrast_functions(self, contrast_function, parameters):
data, impulses_indices, _ = self._generate_simulated_data()
parameters.contrast_function = contrast_function
decomp = EmgDecomposition(
params=parameters,
use_dask=False,
use_cuda=False)
firings = decomp.decompose(data)
num_sources = decomp.num_sources()
if num_sources < NUM_SYMBOLS:
pytest.fail('3 deduped sources were not found; only {} were found.'.format(num_sources))
try:
self._assert_decomp_successful(decomp, data, firings, impulses_indices)
except AssertionError:
if contrast_function == 'logcosh':
pytest.skip('logcosh test doesnt pass on this simulated data but seems to work on real data, so '
'skipping this test.')
return
raise
@pytest.mark.parametrize('use_dask', [False, True], ids=functools.partial(_idfn, 'use_dask'))
@pytest.mark.parametrize('use_cuda', [False, True], ids=functools.partial(_idfn, 'use_cuda'))
def test_simulated_data_dask_cuda(self, use_dask, use_cuda, parameters):
# Tests different combinations of dask and cuda, if available on this machine.
if use_cuda:
try:
import cupy
except (ModuleNotFoundError, ImportError) as e:
pytest.skip(f'Could not test CUDA; cupy failed to import. {e}')
return
if use_dask:
try:
from distributed import Client
client = Client(processes=False)
except (ModuleNotFoundError, ImportError) as e:
pytest.skip(f'Could not test DASK; dask failed to import. {e}')
return
data, impulses_indices, _ = self._generate_simulated_data()
decomp = EmgDecomposition(
params=parameters,
use_dask=use_dask,
use_cuda=use_cuda)
firings = decomp.decompose(data)
num_sources = decomp.num_sources()
if num_sources < NUM_SYMBOLS:
pytest.fail('3 deduped sources were not found; only {} were found.'.format(num_sources))
self._assert_decomp_successful(decomp, data, firings, impulses_indices)
# Assert saving / loading the entire EmgDecomposition object works.
io = BytesIO()
decomp.save(io)
io.seek(0)
decomp_rt = EmgDecomposition.load(io)
firings_rt = decomp_rt.transform(data)
self._assert_decomp_successful(decomp_rt, data, firings_rt, impulses_indices)
def _assert_decomp_successful(self, decomp, data, peaks, impulses_indices):
extension_factor = decomp.params.extension_factor
num_sources = decomp.num_sources()
print(np.unique(peaks['source_idx']))
identified = {sidx: set() for sidx in range(num_sources)}
percentages = dict()
for sidx in range(num_sources):
p = peaks[peaks['source_idx'] == sidx]['discharge_samples']
# Find the actual source we're closest to
closest_sidxs = np.empty((impulses_indices.shape[0],))
percentage = np.empty((impulses_indices.shape[0],))
for actual_sidx in range(impulses_indices.shape[0]):
nearests = []
for detected_peak in p:
deltas = impulses_indices[actual_sidx, :] - detected_peak
arg_min = np.argmin(np.abs(deltas))
nearests.append(deltas[arg_min])
mode, count = stats.mode(nearests)
closest_sidxs[actual_sidx] = mode[0]
percentage[actual_sidx] = 100.0 * count[0] / len(nearests)
closest_sidx = np.argmax(percentage)
identified[closest_sidx].add(sidx)
percentages[sidx] = percentage[closest_sidx]
unaccounted = impulses_indices.shape[1] - len(p)
print('Estimated source {} was closest to actual source {}: mean/STD {}, {} [unaccounted={}]'.format(
sidx, closest_sidx, closest_sidxs[closest_sidx], percentage[closest_sidx],
unaccounted))
# Assert that we have at least one matching estimated source to the actual source
for actual_sidx in range(NUM_SYMBOLS):
assert len(identified[actual_sidx]) > 0
ps = [percentages[sidx] for sidx in identified[actual_sidx]]
assert np.max(ps) > 93.0
waveforms_by_source = decomp.muap_waveforms(data, peaks)
assert len(waveforms_by_source) == decomp.num_sources()
for wfs in waveforms_by_source.values():
assert wfs.shape[0] > 0
assert wfs.shape[1] == NUM_CHANNELS
assert wfs.shape[2] == extension_factor
def test_testing_performance(self, parameters):
np.random.seed(1)
num_units = 5
tot_time = 120.
firing_rate = 10.
sampling_rate = 1000.
n_chans = 20
params = parameters
params.sampling_rate = sampling_rate
params.maximum_num_sources = 30
_data, _spike_indices = simulate_emg(num_units, tot_time, firing_rate, sampling_rate, n_chans)
split_index = int(_data.shape[1] / 2)
train_data = _data[:, :split_index]
train_spike_indices = [indices[indices < split_index] for indices in _spike_indices]
test_data = _data[:, split_index:]
test_spike_indices = [indices[indices >= split_index] - split_index for indices in _spike_indices]
decomp = EmgDecomposition(params=params)
train_data = np.float32(train_data)
peaks_train = decomp.decompose(train_data)
estimated_train = decomp.projected_data(train_data)
peaks_test = decomp.transform(np.float32(test_data))
estimated_test = decomp.projected_data(test_data)
n_sources = estimated_train.shape[0]
if n_sources < num_units:
pytest.fail('{} deduped sources were not found; only {} were found.'.format(num_units, n_sources))
for mode, peaks, spike_indices in [('train', peaks_train, train_spike_indices),
('test', peaks_test, test_spike_indices)]:
source_indexes = np.unique(peaks['source_idx'])
coincidence = np.empty((num_units, n_sources))
for unit_idx in range(num_units):
for j, source_idx in enumerate(source_indexes):
p = peaks[peaks['source_idx'] == source_idx]['discharge_samples']
coincidence[unit_idx, j] = compute_percentage_coincident(spike_indices[unit_idx], p)
max_perc_detected = 100 * np.max(coincidence, axis=1)
best_sources = np.argmax(coincidence, axis=1)
assert np.all(np.max(coincidence, axis=1) > 0.95)
logging.info('\n\n')
for unit_idx in range(num_units):
n_detected = len(
peaks[peaks['source_idx'] == source_indexes[best_sources[unit_idx]]]['discharge_samples'])
logging.info(f'% spikes detected for unit {unit_idx}: {max_perc_detected[unit_idx]}'
f'; best source is source {best_sources[unit_idx]};'
f' N spikes detected {n_detected} over {len(spike_indices[unit_idx])}.')
def test_batch_is_adding_sources(self, parameters):
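        """
        Batch decomposition with an existing model should only add sources:
        previously found sources, thresholds and waveforms must be preserved.
        """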
np.random.seed(2)
num_units = 3
tot_time = 30.
firing_rate = 10.
sampling_rate = 2000.
n_chans = 10
parameters.sampling_rate = sampling_rate
parameters.waveform_duration_ms = 30
parameters.pre_spike_waveform_duration_ms = 10
data, spike_indices = simulate_emg(num_units, tot_time, firing_rate, sampling_rate, n_chans)
# 1) First normal decomposition
decomp = EmgDecomposition(params=parameters)
decomp.decompose(data)
# 2) Batch decomposition on new different data
num_units = 3
tot_time = 60.
model = decomp.model
old_sources = model.components.get_sources()
old_thresholds = model.components.get_thresholds()
old_waveforms = model.components.get_waveforms()
del decomp
new_data, new_spike_indices = simulate_emg(num_units, tot_time, firing_rate, sampling_rate, n_chans)
batch_decomp = EmgDecomposition(params=parameters)
batch_decomp.model = model
batch_decomp.decompose_batch(data=new_data)
n_old_sources = old_sources.shape[1]
n_sources = len(batch_decomp.model.components)
assert n_sources >= n_old_sources
np.testing.assert_array_almost_equal(batch_decomp.model.components.get_thresholds()[:n_old_sources],
old_thresholds)
waveforms = batch_decomp.model.components.get_waveforms()
for idx, waveform in old_waveforms.items():
np.testing.assert_array_almost_equal(waveforms[idx], waveform)
np.testing.assert_array_almost_equal(batch_decomp.model.components.get_sources()[:, :n_old_sources],
old_sources)
def test_decompose_and_batch_performance(self, parameters):
np.random.seed(2)
num_units = 3
tot_time = 60.
firing_rate = 10.
sampling_rate = 2000.
n_chans = 20
extension_factor = 30
parameters.extension_factor = extension_factor
parameters.sampling_rate = sampling_rate
data, spike_indices = simulate_emg(num_units, tot_time, firing_rate, sampling_rate, n_chans)
# 1) First normal decomposition
decomp = EmgDecomposition(params=parameters)
peaks = decomp.decompose(data)
num_sources = decomp.num_sources()
if num_sources < num_units:
pytest.fail('{} deduped sources were not found; only {} were found.'.format(num_units, num_sources))
source_indexes = np.unique(peaks['source_idx'])
        coincidence = np.empty((num_units, num_sources))
import unittest
import numpy as np
from classical_plate_theory import Ply, InputError, Laminae, Laminate
class TestCompositePlateClasses(unittest.TestCase):
""" Defines a series of tests for the CompositePlate Module
"""
def test_ply_E1_value(self):
""" Checks to see if the input value for E1 is taken by the class
"""
test_ply = Ply(E1=1.0,E2=1.0,G12=1.0,nu12=1.0,h=1.0);
self.assertEqual(test_ply.E1, 1.0)
def test_ply_h_gt_0(self):
""" Checks to see that an InputError is raised for a h=0 input
"""
with self.assertRaises(InputError):
test_ply = Ply(E1=1.0,E2=1.0,G12=1.0,nu12=1.0,h=0.0);
def test_laminae_matrix_orotropic(self):
""" Test the laminae matrix against a known example
'Fiber-Reinforced Composites' by Mallick (3rd edition),
Example 3.6
Test will check to see that the laminae stiffness matrix
matches the expected stiffness matrix from Example 3.6 with
a maximum error of 1 decimal place (values in book given to
1 decimal place).
"""
ply = Ply(E1=133.4, E2=8.78, nu12=0.26, G12=3.254, h=1.0) # h is not used, just adding 1 as a placeholder
laminae = Laminae(ply=ply, theta_rad=(45.0*np.pi/180.0))
Q_bar = laminae.Q_bar
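        # Q_bar is the ply's reduced stiffness matrix transformed into the laminate
        # x-y axes; the expected values are from the textbook example, in the same
        # units as the moduli above.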
Q_bar_expected = np.matrix([[40.11, 33.61, 31.3],
[33.61, 40.11, 31.3],
[31.3, 31.3, 34.57]])
Q_max_diff = np.max(np.abs(Q_bar -Q_bar_expected))
self.assertAlmostEqual(Q_max_diff,0,places=1)
def test_laminate_matrix_angleply(self):
""" Test an angle ply laminate matrix against a known example.
'Fiber-Reinforced Composites' by Mallick (3rd edition),
Example 3.7a
Test will check that the laminate stiffness matrix matches
the expected stiffness matrices from example 3.7a with a
maximum normalized error of 3 decimal places (< 0.1% error)
This test will throw a 'RuntimeWarning: invalid value encountered
in divide' because of the zero elements in the A, B and D
matrices, this is okay, ignore the warning.
"""
        ply = Ply(E1=133.4e9, E2=8.78e9, nu12=0.26, G12=3.254e9, h=0.006) # h is in [m] (6 mm per ply)
laminae_pos45 = Laminae(ply=ply, theta_rad=(45.0*np.pi/180.0))
laminae_neg45 = Laminae(ply=ply, theta_rad=(-45.0*np.pi/180.0))
laminae_list = [laminae_pos45, laminae_neg45]
laminate = Laminate(laminae_list)
A_expected = np.power(10.0,6.0)*np.matrix([[481.32, 403.32, 0.0],
[403.32, 481.32, 0.0],
[0.0, 0.0, 414.84]]);
A_diff_norm_max = np.nanmax(np.abs(A_expected -laminate.A)/laminate.A)
B_expected = np.power(10.0,3.0)*np.matrix([[0.0, 0.0, -1126.8],
[0.0, 0.0, -1126.8],
[-1126.8, -1126.8, 0.0]])
B_diff_norm_max = np.nanmax(np.abs(B_expected -laminate.B)/laminate.B)
D_expected = np.matrix([[5775.84, 4839.84, 0.0],
[4839.84, 5775.84, 0.0],
[0.0, 0.0, 4978.08]])
D_diff_norm_max = np.nanmax(np.abs(D_expected -laminate.D)/laminate.D)
max_norm_diff = np.max([A_diff_norm_max,B_diff_norm_max,D_diff_norm_max])
self.assertAlmostEqual(max_norm_diff,0.0,places=3)
def test_laminate_matrix_symmetricbalanced(self):
""" Test a symmetric balanced ply laminate matrix against a
known example.
'Fiber-Reinforced Composites' by Mallick (3rd edition),
Example 3.7b
Test will check that the laminate stiffness matrix matches
the expected stiffness matrices from example 3.7b with a
maximum normalized error of 3 decimal places (< 0.1% error)
Symmetric Laminate should have A16, A26 = 0, B = 0
"""
        ply = Ply(E1=133.4e9, E2=8.78e9, nu12=0.26, G12=3.254e9, h=0.006) # h is in [m] (6 mm per ply)
laminae_pos45 = Laminae(ply=ply, theta_rad=(45.0*np.pi/180.0))
laminae_neg45 = Laminae(ply=ply, theta_rad=(-45.0*np.pi/180.0))
laminae_list = [laminae_pos45, laminae_neg45,
laminae_neg45, laminae_pos45]
laminate = Laminate(laminae_list)
A_expected = np.power(10.0,6.0)*np.matrix([[962.64, 806.64, 0.0],
[806.64, 962.64, 0.0],
[0.0, 0.0, 829.68]]);
A_diff_norm_max = np.nanmax(np.abs(A_expected -laminate.A)/laminate.A)
B_expected = np.matrix([[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0]])
B_diff_norm_max = np.max(np.abs(B_expected -laminate.B))
D_expected = np.power(10.,3.)*np.matrix([[46.21, 38.72, 27.04],
[38.72, 46.21, 27.04],
[27.04, 27.04, 39.82]])
D_diff_norm_max = np.nanmax(np.abs(D_expected -laminate.D)/laminate.D)
max_norm_diff = np.max([A_diff_norm_max,B_diff_norm_max,D_diff_norm_max])
self.assertAlmostEqual(max_norm_diff,0.0,places=3)
def test_laminate_matrix_inverse(self):
"""Checks the inverse (A1,B1,C1,D1) matrices to see they form properly.
'Fiber-Reinforced Composites' by Mallick (3rd edition),
Example 3.13
Test will use data from expample 3.13 to check the "inverse"
relationship stiffness matrices with a maximum normalized
error of 2 decimal places (< 1% error).
"""
        ply = Ply(E1=133.4e9, E2=8.78e9, nu12=0.26, G12=3.254e9, h=0.006) # h is in [m] (6 mm per ply)
laminae_pos45 = Laminae(ply=ply, theta_rad=(45.0*np.pi/180.0))
laminae_neg45 = Laminae(ply=ply, theta_rad=(-45.0*np.pi/180.0))
laminae_list = [laminae_pos45, laminae_neg45]
laminate = Laminate(laminae_list)
A1_expected = np.power(10.0,-9.0)*np.matrix([[7.7385, -5.0715, 0.0],
[-5.0715, 7.7385, 0.0],
[0.0, 0.0, 5.683]]);
A1_diff_norm_max = np.nanmax(np.abs(A1_expected -laminate.A_1)/laminate.A_1)
B1_expected = np.power(10.0,-9.0)*np.matrix([[0.0, 0.0, 603.54],
[0.0, 0.0, 603.54],
[602.74, 602.74, 0.0]])
B1_diff_norm_max = np.nanmax(np.abs(B1_expected -laminate.B_1)/laminate.B_1)
C1_expected = np.power(10.0,-9.0)*np.matrix([[0.0, 0.0, 602.74],
[0.0, 0.0, 602.74],
[603.54, 603.54, 0.0]])
C1_diff_norm_max = np.nanmax(np.abs(C1_expected -laminate.C_1)/laminate.C_1)
D1_expected = np.power(10.,-4.0)*np.matrix([[6.45, -4.23, 0.0],
[-4.23, 6.45, 0.0],
[0.0, 0.0, 4.74]])
D1_diff_norm_max = np.nanmax(np.abs(D1_expected -laminate.D_1)/laminate.D_1)
max_norm_diff = np.nanmax([A1_diff_norm_max,B1_diff_norm_max,C1_diff_norm_max,D1_diff_norm_max])
self.assertAlmostEqual(max_norm_diff,0.0,places=2)
def test_laminate_applied_force_for_strains(self):
"""Apply a force and calculate the resultant laminate midplane strains.
'Fiber-Reinforced Composites' by Mallick (3rd edition),
Example 3.13
Test will check that the resultant strains match the expected
strains from example 3.13 with a maximum normalized error of
2 decimal places (< 1% error)
"""
        ply = Ply(E1=133.4e9, E2=8.78e9, nu12=0.26, G12=3.254e9, h=0.006) # h is in [m] (6 mm per ply)
laminae_pos45 = Laminae(ply=ply, theta_rad=(45.0*np.pi/180.0))
laminae_neg45 = Laminae(ply=ply, theta_rad=(-45.0*np.pi/180.0))
laminae_list = [laminae_pos45, laminae_neg45]
laminate = Laminate(laminae_list)
N = np.matrix.transpose(np.matrix([100.0e3, 0.0, 0.0])); # N[0] is in [N/m]
M = np.matrix.transpose(np.matrix([0.0, 0.0, 0.0]));
strain_dictionary = laminate.applied_stress(N,M)
Epsilon = strain_dictionary['Epsilon']
Kappa = strain_dictionary['Kappa']
Epsilon_expected = np.matrix.transpose(np.matrix([77.385e-5, -50.715e-5, 0.0]))
Kappa_expected = np.matrix.transpose(np.matrix([0.0, 0.0, 0.060354]))
Epsilon_diff_norm_max = np.nanmax(np.abs(Epsilon_expected -Epsilon)/Epsilon)
Kappa_diff_norm_max = np.nanmax(np.abs(Kappa_expected -Kappa)/Kappa)
        max_norm_diff = np.nanmax([Epsilon_diff_norm_max, Kappa_diff_norm_max])
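        # Assumed completion (not part of the original file): the assertion below
        # mirrors the other tests, using the 2-decimal-place tolerance stated in
        # the docstring above.
        self.assertAlmostEqual(max_norm_diff, 0.0, places=2)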
# -*- coding: utf-8 -*-
_show_plots_ = False
print("""
***********************************************************
*
*
* Integrodifferential Propagator Demo
*
*
***********************************************************
""")
import time
import matplotlib.pyplot as plt
import quantarhei as qr
import numpy
##########################################################
#
# DEMONSTRATION OF THE FFT METHOD
#
##########################################################
Om = 2.0*numpy.pi/30.0
g1 = 1.0/100.0
a0 = 1.0
#
# This is how the problem is solved
#
tt = qr.TimeAxis(0.0, 300, 1.0)
gg = 5.0/tt.data[tt.length-1]
#
om = (2.0*numpy.pi)*numpy.fft.fftfreq(tt.length, tt.step)
aom = numpy.zeros(tt.length, dtype=qr.COMPLEX)
for ii in range(tt.length):
aom[ii] = a0/(-1j*om[ii] + 1j*Om + g1 + gg)
at = numpy.fft.fft(aom)*numpy.exp(gg*tt.data)/tt.data[tt.length-1]
#
# Expected solution
#
atsol = a0*numpy.exp(-1j*Om*tt.data)*numpy.exp(-g1*tt.data)
at[0] = a0
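# Note on the FFT trick above: the target solution a(t) = a0*exp(-1j*Om*t - g1*t)
# decays only slowly, so an artificial damping gg is added to make a(t)*exp(-gg*t)
# negligible at the end of the time window. Its frequency-domain form is the simple
# algebraic expression aom = a0/(-1j*om + 1j*Om + g1 + gg); transforming back with
# the FFT and multiplying by exp(gg*t) removes the artificial damping again.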
#
# Plot the results
#
if _show_plots_:
tshow = tt.data[:tt.length//2]
atshow = at[:tt.length//2]
atsolshow = atsol[:tt.length//2]
plt.plot(tshow, numpy.real(atshow))
plt.plot(tshow, numpy.real(atsolshow))
plt.show()
###############################################################
#
# SOLVING LIOUVILLE-VON NEUMANN EQUATION BY FFT METHOD
#
###############################################################
def test_kernel(timeaxis, ham, operators, rates, ctime):
"""Returns a simple kernel for tests
"""
dim = ham.dim
Nt = timeaxis.length
MM = numpy.zeros((Nt, dim, dim, dim, dim), dtype=qr.COMPLEX)
gamma = 1.0/ctime
if dim == 2:
sys_ops = operators
sbi = qr.qm.SystemBathInteraction(sys_operators=sys_ops, rates=rates)
lbf = qr.qm.LindbladForm(ham, sbi, as_operators=False)
#return lbf.data
for ti in range(Nt):
tm = timeaxis.data[ti]
MM[ti,:,:,:,:] = -lbf.data*numpy.exp(-gamma*tm)
return MM
from quantarhei.qm.liouvillespace.integrodiff.integrodiff \
import IntegrodiffPropagator
timea = qr.TimeAxis(0.0, 200, 0.5)
Nt = timea.length
ham = qr.Hamiltonian(data=[[0.0, 0.1],
[0.1, 0.01]])
#
# Propagation without relaxation
#
ip1 = IntegrodiffPropagator(timea, ham,
timefac=3, decay_fraction=2.0)
prop = qr.ReducedDensityMatrixPropagator(timea, ham)
rhoi = qr.ReducedDensityMatrix(data=[[0.0, 0.0],[0.0, 1.0]])
t1 = time.time()
rhot_i = ip1.propagate(rhoi)
t2 = time.time()
print("Propagated in frequency domain in:", t2-t1)
t1 = time.time()
rhot_n = prop.propagate(rhoi)
t2 = time.time()
print("Propagated in time domain in:", t2-t1)
if _show_plots_:
plt.plot(timea.data, numpy.real(rhot_n.data[:,0,0]),"-b")
plt.plot(timea.data, numpy.real(rhot_n.data[:,1,1]),"-r")
plt.plot(timea.data, numpy.real(rhot_i.data[:,0,0]),"--g")
plt.plot(timea.data, numpy.real(rhot_i.data[:,1,1]),"--k")
#plt.axis([0,10,0,1])
plt.show()
#rhot_i.plot(coherences=False, show=True)
#rhot_n.plot(coherences=False, show=True)
#
# Propagation with relaxation kernel
#
K01 = qr.qm.ProjectionOperator(0,1,ham.dim)
K10 = qr.qm.ProjectionOperator(1,0,ham.dim)
sys_ops = [K01, K10]
rates = [1.0/30.0, 1.0/20.0]
ker = test_kernel(timea, ham, sys_ops, rates, ctime=20.0)
# time domain propagator
ip2 = IntegrodiffPropagator(timea, ham, kernel=ker,
fft=False, cutoff_time=80)
# frequency domain propagator
ip3 = IntegrodiffPropagator(timea, ham, kernel=ker,
fft=True, timefac=3, decay_fraction=2.0)
t1 = time.time()
rhot_k = ip2.propagate(rhoi)
t2 = time.time()
print("Propagated in time domain in:", t2-t1)
t1 = time.time()
rhot_k3 = ip3.propagate(rhoi)
t2 = time.time()
print("Propagated in frequency domain in:", t2-t1)
#plotit = True
if _show_plots_:
plt.plot(timea.data, numpy.real(rhot_k.data[:,0,0]),"-b")
plt.plot(timea.data, numpy.real(rhot_k.data[:,1,1]),"-r")
    plt.plot(timea.data, numpy.real(rhot_k3.data[:,0,0]),"--g")
import os
import shutil
import argparse
import enum
import torch
from torch import nn
from torchvision.utils import save_image, make_grid
import matplotlib.pyplot as plt
import numpy as np
import cv2 as cv
import utils.utils as utils
from utils.constants import *
class GenerationMode(enum.Enum):
SINGLE_IMAGE = 0,
INTERPOLATION = 1,
VECTOR_ARITHMETIC = 2
def postprocess_generated_img(generated_img_tensor):
assert isinstance(generated_img_tensor, torch.Tensor), f'Expected PyTorch tensor but got {type(generated_img_tensor)}.'
# Move the tensor from GPU to CPU, convert to numpy array, extract 0th batch, move the image channel
# from 0th to 2nd position (CHW -> HWC)
generated_img = np.moveaxis(generated_img_tensor.to('cpu').numpy()[0], 0, 2)
# If grayscale image repeat 3 times to get RGB image (for generators trained on MNIST)
if generated_img.shape[2] == 1:
generated_img = np.repeat(generated_img, 3, axis=2)
# Imagery is in the range [-1, 1] (generator has tanh as the output activation) move it into [0, 1] range
generated_img -= np.min(generated_img)
generated_img /= np.max(generated_img)
return generated_img
def generate_from_random_latent_vector(generator, cgan_digit=None):
with torch.no_grad():
latent_vector = utils.get_gaussian_latent_batch(1, next(generator.parameters()).device)
if cgan_digit is None:
generated_img = postprocess_generated_img(generator(latent_vector))
else: # condition and generate the digit specified by cgan_digit
ref_label = torch.tensor([cgan_digit], dtype=torch.int64)
ref_label_one_hot_encoding = torch.nn.functional.one_hot(ref_label, MNIST_NUM_CLASSES).type(torch.FloatTensor).to(next(generator.parameters()).device)
generated_img = postprocess_generated_img(generator(latent_vector, ref_label_one_hot_encoding))
return generated_img, latent_vector.to('cpu').numpy()[0]
def generate_from_specified_numpy_latent_vector(generator, latent_vector):
assert isinstance(latent_vector, np.ndarray), f'Expected latent vector to be numpy array but got {type(latent_vector)}.'
with torch.no_grad():
latent_vector_tensor = torch.unsqueeze(torch.tensor(latent_vector, device=next(generator.parameters()).device), dim=0)
return postprocess_generated_img(generator(latent_vector_tensor))
def linear_interpolation(t, p0, p1):
return p0 + t * (p1 - p0)
def spherical_interpolation(t, p0, p1):
""" Spherical interpolation (slerp) formula: https://en.wikipedia.org/wiki/Slerp
Found inspiration here: https://github.com/soumith/ganhacks
but I didn't get any improvement using it compared to linear interpolation.
Args:
t (float): has [0, 1] range
p0 (numpy array): First n-dimensional vector
p1 (numpy array): Second n-dimensional vector
Result:
Returns spherically interpolated vector.
"""
if t <= 0:
return p0
elif t >= 1:
return p1
elif np.allclose(p0, p1):
return p0
# Convert p0 and p1 to unit vectors and find the angle between them (omega)
omega = np.arccos(np.dot(p0 / np.linalg.norm(p0), p1 / np.linalg.norm(p1)))
sin_omega = np.sin(omega) # syntactic sugar
return np.sin((1.0 - t) * omega) / sin_omega * p0 + np.sin(t * omega) / sin_omega * p1
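# Illustrative usage of the two interpolation helpers (the vectors below are
# hypothetical, not part of the original script): both functions take t in
# [0, 1] and two latent vectors of equal length.
#
#   p0 = np.random.randn(100)
#   p1 = np.random.randn(100)
#   lerp_mid = linear_interpolation(0.5, p0, p1)       # straight-line midpoint
#   slerp_mid = spherical_interpolation(0.5, p0, p1)   # arc midpoint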
def display_vector_arithmetic_results(imgs_to_display):
fig = plt.figure(figsize=(6, 6))
title_fontsize = 'x-small'
num_display_imgs = 7
titles = ['happy women', 'happy woman (avg)', 'neutral women', 'neutral woman (avg)', 'neutral men', 'neutral man (avg)', 'result - happy man']
ax = np.zeros(num_display_imgs, dtype=object)
assert len(imgs_to_display) == num_display_imgs, f'Expected {num_display_imgs} got {len(imgs_to_display)} images.'
gs = fig.add_gridspec(5, 4, left=0.02, right=0.98, wspace=0.05, hspace=0.3)
ax[0] = fig.add_subplot(gs[0, :3])
ax[1] = fig.add_subplot(gs[0, 3])
ax[2] = fig.add_subplot(gs[1, :3])
ax[3] = fig.add_subplot(gs[1, 3])
ax[4] = fig.add_subplot(gs[2, :3])
ax[5] = fig.add_subplot(gs[2, 3])
ax[6] = fig.add_subplot(gs[3:, 1:3])
for i in range(num_display_imgs):
ax[i].imshow(cv.resize(imgs_to_display[i], (0, 0), fx=3, fy=3, interpolation=cv.INTER_NEAREST))
ax[i].set_title(titles[i], fontsize=title_fontsize)
ax[i].tick_params(which='both', bottom=False, left=False, labelleft=False, labelbottom=False)
plt.show()
def generate_new_images(model_name, cgan_digit=None, generation_mode=True, slerp=True, a=None, b=None, should_display=True):
""" Generate imagery using pre-trained generator (using vanilla_generator_000000.pth by default)
Args:
model_name (str): model name you want to use (default lookup location is BINARIES_PATH).
cgan_digit (int): if specified generate that exact digit.
generation_mode (enum): generate a single image from a random vector, interpolate between the 2 chosen latent
vectors, or perform arithmetic over latent vectors (note: not every mode is supported for every model type)
slerp (bool): if True use spherical interpolation otherwise use linear interpolation.
a, b (numpy arrays): latent vectors, if set to None you'll be prompted to choose images you like,
and use corresponding latent vectors instead.
should_display (bool): Display the generated images before saving them.
"""
model_path = os.path.join(BINARIES_PATH, model_name)
assert os.path.exists(model_path), f'Could not find the model {model_path}. You first need to train your generator.'
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# Prepare the correct (vanilla, cGAN, DCGAN, ...) model, load the weights and put the model into evaluation mode
model_state = torch.load(model_path)
gan_type = model_state["gan_type"]
print(f'Found {gan_type} GAN!')
_, generator = utils.get_gan(device, gan_type)
generator.load_state_dict(model_state["state_dict"], strict=True)
generator.eval()
# Generate a single image, save it and potentially display it
if generation_mode == GenerationMode.SINGLE_IMAGE:
generated_imgs_path = os.path.join(DATA_DIR_PATH, 'generated_imagery')
os.makedirs(generated_imgs_path, exist_ok=True)
generated_img, _ = generate_from_random_latent_vector(generator, cgan_digit if gan_type == GANType.CGAN.name else None)
utils.save_and_maybe_display_image(generated_imgs_path, generated_img, should_display=should_display)
# Pick 2 images you like between which you'd like to interpolate (by typing 'y' into console)
elif generation_mode == GenerationMode.INTERPOLATION:
        assert gan_type == GANType.VANILLA.name or gan_type == GANType.DCGAN.name, f'Got {gan_type} but only VANILLA/DCGAN are supported for the interpolation mode.'
interpolation_name = "spherical" if slerp else "linear"
interpolation_fn = spherical_interpolation if slerp else linear_interpolation
grid_interpolated_imgs_path = os.path.join(DATA_DIR_PATH, 'interpolated_imagery') # combined results dir
decomposed_interpolated_imgs_path = os.path.join(grid_interpolated_imgs_path, f'tmp_{gan_type}_{interpolation_name}_dump') # dump separate results
if os.path.exists(decomposed_interpolated_imgs_path):
shutil.rmtree(decomposed_interpolated_imgs_path)
os.makedirs(grid_interpolated_imgs_path, exist_ok=True)
os.makedirs(decomposed_interpolated_imgs_path, exist_ok=True)
latent_vector_a, latent_vector_b = [None, None]
# If a and b were not specified loop until the user picked the 2 images he/she likes.
found_good_vectors_flag = False
if a is None or b is None:
while not found_good_vectors_flag:
generated_img, latent_vector = generate_from_random_latent_vector(generator)
plt.imshow(generated_img); plt.title('Do you like this image?'); plt.show()
user_input = input("Do you like this generated image? [y for yes]:")
if user_input == 'y':
if latent_vector_a is None:
latent_vector_a = latent_vector
print('Saved the first latent vector.')
elif latent_vector_b is None:
latent_vector_b = latent_vector
print('Saved the second latent vector.')
found_good_vectors_flag = True
else:
                    print("Well, let's generate a new one!")
continue
else:
print('Skipping latent vectors selection section and using cached ones.')
latent_vector_a, latent_vector_b = [a, b]
# Cache latent vectors
if a is None or b is None:
np.save(os.path.join(grid_interpolated_imgs_path, 'a.npy'), latent_vector_a)
np.save(os.path.join(grid_interpolated_imgs_path, 'b.npy'), latent_vector_b)
print(f'Lets do some {interpolation_name} interpolation!')
interpolation_resolution = 47 # number of images between the vectors a and b
num_interpolated_imgs = interpolation_resolution + 2 # + 2 so that we include a and b
generated_imgs = []
for i in range(num_interpolated_imgs):
t = i / (num_interpolated_imgs - 1) # goes from 0. to 1.
current_latent_vector = interpolation_fn(t, latent_vector_a, latent_vector_b)
generated_img = generate_from_specified_numpy_latent_vector(generator, current_latent_vector)
print(f'Generated image [{i+1}/{num_interpolated_imgs}].')
utils.save_and_maybe_display_image(decomposed_interpolated_imgs_path, generated_img, should_display=should_display)
            # Move from channel last to channel first (HWC -> CHW), PyTorch's save_image function expects BCHW format
            generated_imgs.append(torch.tensor(np.moveaxis(generated_img, 2, 0)))
"""
.. moduleauthor:: <NAME> <<EMAIL>>
"""
from numpy import exp, log, mean, std, sqrt, tanh, cos, cov
from numpy import array, linspace, sort, searchsorted, pi, argmax, argsort, logaddexp
from numpy.random import random
from scipy.integrate import quad, simps
from scipy.optimize import minimize, minimize_scalar, differential_evolution
from warnings import warn
from itertools import product
from functools import reduce
import matplotlib.pyplot as plt
class DensityEstimator(object):
"""
Parent class for the 1D density estimation classes GaussianKDE and UnimodalPdf.
"""
def __init__(self):
self.lwr_limit = None
self.upr_limit = None
self.mode = None
def __call__(self, x):
return None
def interval(self, frac=0.95):
p_max = self(self.mode)
p_conf = self.binary_search(
self.interval_prob, frac, [0.0, p_max], uphill=False
)
return self.get_interval(p_conf)
def get_interval(self, z):
lwr = self.binary_search(self, z, [self.lwr_limit, self.mode], uphill=True)
upr = self.binary_search(self, z, [self.mode, self.upr_limit], uphill=False)
return lwr, upr
def interval_prob(self, z):
lwr, upr = self.get_interval(z)
return quad(self, lwr, upr, limit=100)[0]
def moments(self):
pass
def plot_summary(self, filename=None, show=True, label=None):
"""
Plot the estimated PDF along with summary statistics.
:keyword str filename: Filename to which the plot will be saved. If unspecified, the plot will not be saved.
:keyword bool show: Boolean value indicating whether the plot should be displayed in a window. (Default is True)
:keyword str label: The label to be used for the x-axis on the plot as a string.
"""
def ensure_is_nested_list(var):
if not isinstance(var[0], (list, tuple)):
var = [var]
return var
sigma_1 = ensure_is_nested_list(self.interval(frac=0.68268))
sigma_2 = ensure_is_nested_list(self.interval(frac=0.95449))
sigma_3 = ensure_is_nested_list(self.interval(frac=0.9973))
mu, var, skw, kur = self.moments()
if type(self) is GaussianKDE:
lwr = sigma_3[0][0] - 5 * self.h
upr = sigma_3[0][1] + 5 * self.h
else:
s_min = sigma_3[0][0]
s_max = sigma_3[-1][1]
lwr = s_min - 0.1 * (s_max - s_min)
upr = s_max + 0.1 * (s_max - s_min)
axis = linspace(lwr, upr, 500)
fig, ax = plt.subplots(
nrows=1,
ncols=2,
figsize=(10, 6),
gridspec_kw={"width_ratios": [2, 1]},
)
ax[0].plot(axis, self(axis), lw=1, c="C0")
ax[0].fill_between(axis, self(axis), color="C0", alpha=0.1)
ax[0].plot([self.mode, self.mode], [0.0, self(self.mode)], c="red", ls="dashed")
ax[0].set_xlabel(label or "argument", fontsize=13)
ax[0].set_ylabel("probability density", fontsize=13)
ax[0].set_ylim([0.0, None])
ax[0].grid()
gap = 0.05
h = 0.95
x1 = 0.35
x2 = 0.40
def section_title(height, name):
ax[1].text(0.0, height, name, horizontalalignment="left", fontweight="bold")
return height - gap
def write_quantity(height, name, value):
ax[1].text(x1, height, f"{name}:", horizontalalignment="right")
ax[1].text(x2, height, f"{value:.5G}", horizontalalignment="left")
return height - gap
h = section_title(h, "Basics")
h = write_quantity(h, "Mode", self.mode)
h = write_quantity(h, "Mean", mu)
h = write_quantity(h, "Standard dev", sqrt(var))
h -= gap
h = section_title(h, "Highest-density intervals")
def write_sigma(height, name, sigma):
ax[1].text(x1, height, name, horizontalalignment="right")
for itvl in sigma:
ax[1].text(
x2,
height,
rf"{itvl[0]:.5G} $\rightarrow$ {itvl[1]:.5G}",
horizontalalignment="left",
)
height -= gap
return height
h = write_sigma(h, "1-sigma:", sigma_1)
h = write_sigma(h, "2-sigma:", sigma_2)
h = write_sigma(h, "3-sigma:", sigma_3)
h -= gap
h = section_title(h, "Higher moments")
h = write_quantity(h, "Variance", var)
h = write_quantity(h, "Skewness", skw)
h = write_quantity(h, "Kurtosis", kur)
ax[1].axis("off")
plt.tight_layout()
if filename is not None:
plt.savefig(filename)
if show:
plt.show()
return fig, ax
@staticmethod
def binary_search(func, value, bounds, uphill=True):
x_min, x_max = bounds
x = (x_min + x_max) * 0.5
converged = False
while not converged:
f = func(x)
if f > value:
if uphill:
x_max = x
else:
x_min = x
else:
if uphill:
x_min = x
else:
x_max = x
x = (x_min + x_max) * 0.5
if abs((x_max - x_min) / x) < 1e-3:
converged = True
# now linearly interpolate as a polish step
f_max = func(x_max)
f_min = func(x_min)
df = f_max - f_min
return x_min * ((f_max - value) / df) + x_max * ((value - f_min) / df)
class UnimodalPdf(DensityEstimator):
"""
Construct a UnimodalPdf object, which can be called as a function to
return the estimated PDF of the given sample.
The UnimodalPdf class is designed to robustly estimate univariate, unimodal probability
distributions given a sample drawn from that distribution. This is a parametric method
based on an heavily modified student-t distribution, which is extremely flexible.
:param sample: 1D array of samples from which to estimate the probability distribution
"""
def __init__(self, sample):
self.sample = array(sample)
self.n_samps = len(sample)
        # Chebyshev quadrature weights and axes
self.sd = 0.2
self.n_nodes = 128
k = linspace(1, self.n_nodes, self.n_nodes)
t = cos(0.5 * pi * ((2 * k - 1) / self.n_nodes))
self.u = t / (1.0 - t**2)
self.w = (pi / self.n_nodes) * (1 + t**2) / (self.sd * (1 - t**2) ** 1.5)
# first minimise based on a slice of the sample, if it's large enough
self.cutoff = 2000
self.skip = max(self.n_samps // self.cutoff, 1)
self.x = self.sample[:: self.skip]
self.n = len(self.x)
# makes guesses based on sample moments
guesses = self.generate_guesses()
# sort the guesses by the lowest score
guesses = sorted(guesses, key=self.minfunc)
# minimise based on the best guess
self.min_result = minimize(self.minfunc, guesses[0], method="Nelder-Mead")
self.MAP = self.min_result.x
self.mode = self.MAP[0]
# if we were using a reduced sample, use full sample
if self.skip > 1:
self.x = self.sample
self.n = self.n_samps
self.min_result = minimize(self.minfunc, self.MAP, method="Nelder-Mead")
self.MAP = self.min_result.x
self.mode = self.MAP[0]
# normalising constant for the MAP estimate curve
self.map_lognorm = log(self.norm(self.MAP))
# set some bounds for the confidence limits calculation
x0, s0, v, f, k, q = self.MAP
self.upr_limit = x0 + s0 * (4 * exp(f) + 1)
self.lwr_limit = x0 - s0 * (4 * exp(-f) + 1)
def generate_guesses(self):
mu, sigma, skew = self.sample_moments()
x0 = [mu, mu - sigma * skew * 0.15, mu - sigma * skew * 0.3]
v = [0, 5.0]
s0 = [sigma, sigma * 2]
f = [0.5 * skew, skew]
k = [1.0, 4.0, 8.0]
q = [2.0]
return [array(i) for i in product(x0, s0, v, f, k, q)]
def sample_moments(self):
mu = mean(self.x)
x2 = self.x**2
x3 = x2 * self.x
sig = sqrt(mean(x2) - mu**2)
skew = (mean(x3) - 3 * mu * sig**2 - mu**3) / sig**3
return mu, sig, skew
def __call__(self, x):
"""
Evaluate the PDF estimate at a set of given axis positions.
:param x: axis location(s) at which to evaluate the estimate.
:return: values of the PDF estimate at the specified locations.
"""
return exp(self.log_pdf_model(x, self.MAP) - self.map_lognorm)
def posterior(self, paras):
x0, s0, v, f, k, q = paras
# prior checks
if (s0 > 0) & (0 < k < 20) & (1 < q < 6):
normalisation = self.n * log(self.norm(paras))
return self.log_pdf_model(self.x, paras).sum() - normalisation
else:
return -1e50
def minfunc(self, paras):
return -self.posterior(paras)
def norm(self, pvec):
v = self.pdf_model(self.u, [0.0, self.sd, *pvec[2:]])
integral = (self.w * v).sum() * pvec[1]
return integral
def pdf_model(self, x, pvec):
return exp(self.log_pdf_model(x, pvec))
def log_pdf_model(self, x, pvec):
x0, s0, v, f, k, q = pvec
v = exp(v) + 1
z0 = (x - x0) / s0
ds = exp(f * tanh(z0 / k))
z = z0 / ds
log_prob = -(0.5 * (1 + v)) * log(1 + (abs(z) ** q) / v)
return log_prob
def moments(self):
"""
        Calculate the mean, variance, skewness and excess kurtosis of the estimated PDF.
:return: mean, variance, skewness, ex-kurtosis
"""
s = self.MAP[1]
f = self.MAP[3]
lwr = self.mode - 5 * max(exp(-f), 1.0) * s
upr = self.mode + 5 * max(exp(f), 1.0) * s
x = linspace(lwr, upr, 1000)
p = self(x)
mu = simps(p * x, x=x)
var = simps(p * (x - mu) ** 2, x=x)
        skw = simps(p * (x - mu) ** 3, x=x) / var ** 1.5
kur = (simps(p * (x - mu) ** 4, x=x) / var**2) - 3.0
return mu, var, skw, kur
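# Minimal usage sketch for UnimodalPdf (illustrative only; the synthetic sample
# below is not part of the original module):
#
#   sample = exp(0.5 * random(size=5000))    # any skewed, unimodal sample
#   pdf = UnimodalPdf(sample)
#   density = pdf(1.0)                       # evaluate the estimate at x = 1
#   lwr, upr = pdf.interval(frac=0.95)       # 95% highest-density interval
#   mu, var, skw, kur = pdf.moments()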
class GaussianKDE(DensityEstimator):
"""
Construct a GaussianKDE object, which can be called as a function to
return the estimated PDF of the given sample.
GaussianKDE uses Gaussian kernel-density estimation to estimate the PDF
associated with a given sample.
:param sample: \
1D array of samples from which to estimate the probability distribution
:param float bandwidth: \
Width of the Gaussian kernels used for the estimate. If not specified,
an appropriate width is estimated based on sample data.
:param bool cross_validation: \
Indicate whether or not cross-validation should be used to estimate
the bandwidth in place of the simple 'rule of thumb' estimate which
is normally used.
:param int max_cv_samples: \
The maximum number of samples to be used when estimating the bandwidth
via cross-validation. The computational cost scales roughly quadratically
with the number of samples used, and can become prohibitive for samples of
size in the tens of thousands and up. Instead, if the sample size is greater
than *max_cv_samples*, the cross-validation is performed on a sub-sample of
this size.
"""
def __init__(
self, sample, bandwidth=None, cross_validation=False, max_cv_samples=5000
):
self.s = sort(array(sample).flatten()) # sorted array of the samples
self.max_cvs = (
max_cv_samples # maximum number of samples to be used for cross-validation
)
if self.s.size < 3:
raise ValueError(
"""
[ GaussianKDE error ]
Not enough samples were given to estimate the PDF.
At least 3 samples are required.
"""
)
if bandwidth is None:
self.h = self.simple_bandwidth_estimator() # very simple bandwidth estimate
if cross_validation:
self.h = self.cross_validation_bandwidth_estimator(self.h)
else:
self.h = bandwidth
# define some useful constants
self.norm = 1.0 / (len(self.s) * sqrt(2 * pi) * self.h)
self.cutoff = self.h * 4
self.q = 1.0 / (sqrt(2) * self.h)
self.lwr_limit = self.s[0] - self.cutoff * 0.5
self.upr_limit = self.s[-1] + self.cutoff * 0.5
# decide how many regions the axis should be divided into
n = int(log((self.s[-1] - self.s[0]) / self.h) / log(2)) + 1
# now generate midpoints of these regions
mids = linspace(self.s[0], self.s[-1], 2**n + 1)
mids = 0.5 * (mids[1:] + mids[:-1])
# get the cutoff indices
lwr_inds = searchsorted(self.s, mids - self.cutoff)
upr_inds = searchsorted(self.s, mids + self.cutoff)
slices = [slice(l, u) for l, u in zip(lwr_inds, upr_inds)]
# now build a dict that maps midpoints to the slices
self.slice_map = dict(zip(mids, slices))
# build a binary tree which allows fast look-up of which
# region contains a given value
self.tree = BinaryTree(n, (self.s[0], self.s[-1]))
#: The mode of the pdf, calculated automatically when an instance of GaussianKDE is created.
self.mode = self.locate_mode()
def __call__(self, x_vals):
"""
Evaluate the PDF estimate at a set of given axis positions.
:param x_vals: axis location(s) at which to evaluate the estimate.
:return: values of the PDF estimate at the specified locations.
"""
if hasattr(x_vals, "__iter__"):
return [self.density(x) for x in x_vals]
else:
return self.density(x_vals)
def density(self, x):
# look-up the region
region = self.tree.lookup(x)
# look-up the cutting points
slc = self.slice_map[region[2]]
# evaluate the density estimate from the slice
return self.norm * exp(-(((x - self.s[slc]) * self.q) ** 2)).sum()
def simple_bandwidth_estimator(self):
# A simple estimate which assumes the distribution close to a Gaussian
return 1.06 * std(self.s) / (len(self.s) ** 0.2)
def cross_validation_bandwidth_estimator(self, initial_h):
"""
Selects the bandwidth by maximising a log-probability derived
using a 'leave-one-out cross-validation' approach.
"""
# first check if we need to sub-sample for computational cost reduction
if len(self.s) > self.max_cvs:
scrambler = argsort(random(size=len(self.s)))
samples = (self.s[scrambler])[: self.max_cvs]
else:
samples = self.s
# create a grid in log-bandwidth space and evaluate the log-prob across it
dh = 0.5
log_h = [initial_h + m * dh for m in (-2, -1, 0, 1, 2)]
log_p = [self.cross_validation_logprob(samples, exp(h)) for h in log_h]
# if the maximum log-probability is at the edge of the grid, extend it
for i in range(5):
# stop when the maximum is not at the edge
max_ind = argmax(log_p)
if 0 < max_ind < len(log_h) - 1:
break
if max_ind == 0: # extend grid to lower bandwidths
new_h = log_h[0] - dh
new_lp = self.cross_validation_logprob(samples, exp(new_h))
log_h.insert(0, new_h)
log_p.insert(0, new_lp)
else: # extend grid to higher bandwidths
new_h = log_h[-1] + dh
new_lp = self.cross_validation_logprob(samples, exp(new_h))
log_h.append(new_h)
log_p.append(new_lp)
# cost of evaluating the cross-validation is expensive, so we want to
# minimise total evaluations. Here we assume the CV score has only one
        # maximum, and use recursive grid refinement to rapidly find it.
for refine in range(6):
max_ind = int(argmax(log_p))
lwr_h = 0.5 * (log_h[max_ind - 1] + log_h[max_ind])
upr_h = 0.5 * (log_h[max_ind] + log_h[max_ind + 1])
lwr_lp = self.cross_validation_logprob(samples, exp(lwr_h))
upr_lp = self.cross_validation_logprob(samples, exp(upr_h))
log_h.insert(max_ind, lwr_h)
log_p.insert(max_ind, lwr_lp)
log_h.insert(max_ind + 2, upr_h)
log_p.insert(max_ind + 2, upr_lp)
h_estimate = exp(log_h[argmax(log_p)])
return h_estimate
def cross_validation_logprob(self, samples, width, c=0.99):
"""
This function uses a 'leave-one-out cross-validation' (LOO-CV)
approach to calculate a log-probability associated with the
density estimate - the bandwidth can be selected by maximising
this log-probability.
"""
# evaluate the log-pdf estimate at each sample point
log_pdf = self.log_evaluation(samples, samples, width)
# remove the contribution at each sample due to itself
d = log(c) - log(width * len(samples) * sqrt(2 * pi)) - log_pdf
loo_adjustment = log(1 - exp(d))
log_probs = log_pdf + loo_adjustment
return log_probs.sum() # sum to find the overall log-probability
@staticmethod
def log_kernel(x, c, h):
z = (x - c) / h
return -0.5 * z**2 - log(h)
def log_evaluation(self, points, samples, width):
# evaluate the log-pdf in a way which prevents underflow
generator = (self.log_kernel(points, s, width) for s in samples)
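        # reduce(logaddexp, ...) below is a running log-sum-exp over the sample
        # kernels: it accumulates log(sum_s exp(log_kernel(points, s, width)))
        # pairwise without ever forming the individual exponentials, so the
        # result does not underflow for points far from the bulk of the samples.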
return reduce(logaddexp, generator) - log(len(samples) * sqrt(2 * pi))
def locate_mode(self):
# if there are enough samples, use the 20% HDI to bound the search for the mode
if self.s.size > 50:
lwr, upr = sample_hdi(self.s, 0.2)
else: # else just use the entire range of the samples
lwr, upr = self.s[0], self.s[-1]
result = minimize_scalar(
lambda x: -self(x), bounds=[lwr, upr], method="bounded"
)
return result.x
def moments(self):
"""
        Calculate the mean, variance, skewness and excess kurtosis of the estimated PDF.
:return: mean, variance, skewness, ex-kurtosis
Note that these quantities are calculated directly from the estimated PDF, and
not from the sample values.
"""
N = 1000
        x = linspace(self.lwr_limit, self.upr_limit, N)
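        # Assumed completion (not part of the original file): mirror
        # UnimodalPdf.moments above by integrating the estimated PDF on the grid.
        p = array(self(x))
        mu = simps(p * x, x=x)
        var = simps(p * (x - mu) ** 2, x=x)
        skw = simps(p * (x - mu) ** 3, x=x) / var ** 1.5
        kur = (simps(p * (x - mu) ** 4, x=x) / var ** 2) - 3.0
        return mu, var, skw, kur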
import hashlib
import os
import os.path as osp
import cv2
import fastfunc
import mmcv
import numpy as np
import pyassimp
import pyassimp.postprocess
# import optimesh
import scipy
from meshplex import MeshTri
from plyfile import PlyData, PlyElement
from scipy.spatial.distance import pdist
from skimage import measure
from tqdm import tqdm
from vispy import gloo
from lib.pysixd import inout
from lib.utils.utils import iprint
"""
modified
support model_loadfn to load objects
* pyassimp
* pysixd: the bop toolkit implementation
* plydata: the original implementation
"""
class Model3D:
""" """
def __init__(
self,
file_to_load=None,
center=False,
scale_to_meter=1.0,
finalize=True,
):
"""
finalize:
"""
self.scale_to_meter = scale_to_meter
self.vertices = None
self.centroid = None
self.indices = None
self.colors = None
self.texcoord = None
self.texture = None
self.collated = None
self.vertex_buffer = None
self.index_buffer = None
self.bb = None
self.bb_vbuffer = None
self.bb_ibuffer = None
self.diameter = None
if file_to_load:
self.load(file_to_load, center, scale_to_meter)
if finalize:
self.finalize()
def _compute_bbox(self):
self.bb = []
minx, maxx = min(self.vertices[:, 0]), max(self.vertices[:, 0])
miny, maxy = min(self.vertices[:, 1]), max(self.vertices[:, 1])
minz, maxz = min(self.vertices[:, 2]), max(self.vertices[:, 2])
avgx = np.average(self.vertices[:, 0])
avgy = np.average(self.vertices[:, 1])
avgz = np.average(self.vertices[:, 2])
self.bb.append([minx, miny, minz])
self.bb.append([minx, maxy, minz])
self.bb.append([minx, miny, maxz])
self.bb.append([minx, maxy, maxz])
self.bb.append([maxx, miny, minz])
self.bb.append([maxx, maxy, minz])
self.bb.append([maxx, miny, maxz])
self.bb.append([maxx, maxy, maxz])
self.bb.append([avgx, avgy, avgz])
self.bb = np.asarray(self.bb, dtype=np.float32)
self.diameter = max(pdist(self.bb, "euclidean"))
# Set up rendering data
# fmt: off
colors = [[1, 0, 0], [1, 1, 0], [0, 1, 0], [0, 1, 1], [0, 0, 1], [0, 1, 0], [0.5, 0, 0.5], [0, 0.5, 0.5]]
# fmt: on
        indices = [0, 1, 0, 2, 3, 1, 3, 2,
                   4, 5, 4, 6, 7, 5, 7, 6,
                   0, 4, 1, 5, 2, 6, 3, 7]
vertices_type = [
("a_position", np.float32, 3),
("a_color", np.float32, 3),
]
collated = np.asarray(list(zip(self.bb, colors)), vertices_type)
# self.bb_vbuffer = gloo.VertexBuffer(collated)
# self.bb_ibuffer = gloo.IndexBuffer(indices)
self.bb_vbuffer = collated
self.bb_ibuffer = indices
def load(
self,
path,
center=False,
scale_to_meter=1.0,
flip_opengl=False,
texture_path=None,
):
suffix = osp.basename(path).split(".")[-1]
if suffix.lower() == "ply":
self.load_ply(path)
elif suffix.lower() == "obj":
self.load_obj(path, tex=texture_path)
else:
raise ValueError("Cannot load models with ending {}".format(suffix))
self.scale_to_meter = scale_to_meter
self.vertices *= self.scale_to_meter
if center:
xmin, xmax = np.amin(self.vertices[:, 0]), np.amax(self.vertices[:, 0])
ymin, ymax = np.amin(self.vertices[:, 1]), np.amax(self.vertices[:, 1])
zmin, zmax = np.amin(self.vertices[:, 2]), np.amax(self.vertices[:, 2])
self.vertices[:, 0] += -xmin - (xmax - xmin) * 0.5
self.vertices[:, 1] += -ymin - (ymax - ymin) * 0.5
self.vertices[:, 2] += -zmin - (zmax - zmin) * 0.5
if flip_opengl:
self.yz_flip = np.eye(4, dtype=np.float32)
# self.yz_flip[1, 1], self.yz_flip[2, 2] = -1, -1
# self.yz_flip[0, 0], self.yz_flip[1, 1], self.yz_flip[2, 2] = -1, -1, -1
# self.vertices = np.matmul(self.yz_flip[:3, :3], self.vertices.T).T
# self.vertices[:, 1] *= -1
def finalize(self, center=False):
xmin, xmax = np.amin(self.vertices[:, 0]), np.amax(self.vertices[:, 0])
ymin, ymax = np.amin(self.vertices[:, 1]), np.amax(self.vertices[:, 1])
zmin, zmax = np.amin(self.vertices[:, 2]), np.amax(self.vertices[:, 2])
self.xsize = xmax - xmin
self.ysize = ymax - ymin
self.zsize = zmax - zmin
if center:
# fmt: off
self.vertices[:, 0] -= (xmax + xmin) * 0.5
self.vertices[:, 1] -= (ymax + ymin) * 0.5
self.vertices[:, 2] -= (zmax + zmin) * 0.5
# fmt: on
self.centroid = np.mean(self.vertices, 0)
self._compute_bbox()
if self.colors is None: # gray color
self.colors = 0.5 * np.ones((self.vertices.shape[0], 3))
if self.texcoord is not None:
vertices_type = [
("a_position", np.float32, 3),
("a_texcoord", np.float32, 2),
]
self.collated = np.asarray(list(zip(self.vertices, self.texcoord)), vertices_type)
else:
vertices_type = [
("a_position", np.float32, 3),
("a_color", np.float32, 3),
]
self.collated = np.asarray(list(zip(self.vertices, self.colors)), vertices_type)
# self.vertex_buffer = gloo.VertexBuffer(self.collated)
# self.index_buffer = gloo.IndexBuffer(self.indices.flatten())
self.vertex_buffer = self.collated
self.index_buffer = self.indices.tolist()
def normalize(self, scale):
xmin, xmax = np.amin(self.vertices[:, 0]), np.amax(self.vertices[:, 0])
ymin, ymax = np.amin(self.vertices[:, 1]), np.amax(self.vertices[:, 1])
zmin, zmax = np.amin(self.vertices[:, 2]), np.amax(self.vertices[:, 2])
self.xsize = xmax - xmin
self.ysize = ymax - ymin
self.zsize = zmax - zmin
self.vertices[:, 0] -= (xmax + xmin) * 0.5
self.vertices[:, 1] -= (ymax + ymin) * 0.5
self.vertices[:, 2] -= (zmax + zmin) * 0.5
self.vertices = (self.vertices / np.max(self.vertices, axis=0)) * scale
# print(scale, np.max(self.vertices, axis=0), np.min(self.vertices, axis=0))
def load_obj(self, path, tex=None):
"""Loads a Wavefront OBJ file."""
self.path = path
scene = pyassimp.load(path, processing=pyassimp.postprocess.aiProcess_Triangulate)
self.vertices = np.asarray([]).reshape(0, 3)
self.normals = np.asarray([]).reshape(0, 3)
self.texcoord = np.asarray([]).reshape(0, 2)
self.indices = np.asarray([], dtype=np.uint32).reshape(0, 3)
for mesh_id, mesh in enumerate(scene.meshes):
iprint(
mesh.texturecoords.shape,
mesh.vertices.shape,
mesh.faces.shape,
mesh.normals.shape,
self.vertices.shape[0],
)
if mesh.texturecoords.shape[0] == 0 or mesh.texturecoords.shape[1] != mesh.vertices.shape[0]:
continue
# print(self.indices.shape, (self.vertices.shape[0] + np.asarray(mesh.faces, dtype=np.uint32)).shape)
self.indices = np.concatenate(
[
self.indices,
self.vertices.shape[0] + np.asarray(mesh.faces, dtype=np.uint32),
]
)
self.vertices = np.concatenate([self.vertices, mesh.vertices])
self.normals = np.concatenate([self.normals, mesh.normals])
if mesh.texturecoords.shape[0] == 0:
                self.texcoord = np.concatenate([self.texcoord, np.zeros((mesh.vertices.shape[0], 2))])
else:
self.texcoord = np.concatenate([self.texcoord, mesh.texturecoords[0, :, :2]])
# print(mesh_id, self.indices[:-10])
if tex is not None:
image = cv2.flip(cv2.imread(tex, cv2.IMREAD_UNCHANGED), 0)
if False:
cv2.imshow("tex", image)
cv2.waitKey()
# self.texture = gloo.Texture2D(image, resizable=False)
self.texture = image
iprint(np.min(self.texcoord, axis=0), np.max(self.texcoord, axis=0))
# if (np.max(self.texcoord, axis=0) > 1).any() or (np.min(self.texcoord, axis=0) < 0).any():
# self.texcoord -= np.min(self.texcoord, axis=0)
# self.texcoord /= np.max(self.texcoord, axis=0)
# self.texcoord = [2.5, 1.5] * self.texcoord
if self.vertices.shape[0] < 100:
return False
return True
def load_ply(self, path):
data = PlyData.read(path)
self.vertices = np.zeros((data["vertex"].count, 3))
self.vertices[:, 0] = np.array(data["vertex"]["x"])
self.vertices[:, 1] = np.array(data["vertex"]["y"])
self.vertices[:, 2] = np.array(data["vertex"]["z"])
self.indices = np.asarray(list(data["face"]["vertex_indices"]), np.uint32)
# Look for texture map as jpg or png
filename = osp.basename(path)
abs_path = path[: path.find(filename)]
tex_to_load = None
if osp.exists(abs_path + filename[:-4] + ".jpg"):
tex_to_load = abs_path + filename[:-4] + ".jpg"
elif osp.exists(abs_path + filename[:-4] + ".png"):
tex_to_load = abs_path + filename[:-4] + ".png"
# Try to read out texture coordinates
if tex_to_load is not None:
iprint("Loading {} with texture {}".format(osp.normpath(filename), tex_to_load))
# Must be flipped because of OpenGL
image = cv2.flip(cv2.imread(tex_to_load, cv2.IMREAD_UNCHANGED), 0)
# self.texture = gloo.Texture2D(image)
self.texture = image
# If texcoords are face-wise
if "texcoord" in str(data):
self.texcoord = np.asarray(list(data["face"]["texcoord"]))
# Check same face count
assert self.indices.shape[0] == self.texcoord.shape[0]
temp = np.zeros((data["vertex"].count, 2))
temp[self.indices.flatten()] = self.texcoord.reshape((-1, 2))
self.texcoord = temp
# If texcoords are vertex-wise
elif "texture_u" in str(data):
self.texcoord = np.zeros((data["vertex"].count, 2))
self.texcoord[:, 0] = np.array(data["vertex"]["texture_u"])
self.texcoord[:, 1] = np.array(data["vertex"]["texture_v"])
# If no texture coords loaded, fall back to vertex colors
if self.texcoord is None:
self.colors = 0.5 * np.ones((data["vertex"].count, 3))
if "blue" in str(data):
iprint("Loading {} with vertex colors".format(filename))
self.colors[:, 0] = np.array(data["vertex"]["blue"])
self.colors[:, 1] = np.array(data["vertex"]["green"])
self.colors[:, 2] = np.array(data["vertex"]["red"])
self.colors /= 255.0
else:
iprint("Loading {} without any colors!!".format(filename))
def _smooth_laplacian(self, vertices, faces, iterations):
mesh = MeshTri(vertices, faces)
# move interior points into average of their neighbors
num_neighbors = np.zeros(len(mesh.node_coords), dtype=int)
idx = mesh.edges["nodes"]
fastfunc.add.at(num_neighbors, idx, np.ones(idx.shape, dtype=int))
new_points = np.zeros(mesh.node_coords.shape)
fastfunc.add.at(new_points, idx[:, 0], mesh.node_coords[idx[:, 1]])
fastfunc.add.at(new_points, idx[:, 1], mesh.node_coords[idx[:, 0]])
new_points /= num_neighbors[:, None]
idx = mesh.is_boundary_node
new_points[idx] = mesh.node_coords[idx]
return new_points
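    # Minimal usage sketch (illustrative; the file path below is a placeholder):
    #
    #   model = Model3D("models/obj_000001.ply", center=True, scale_to_meter=0.001)
    #   model.diameter       # object diameter estimated from bounding-box corners
    #   model.vertex_buffer  # collated position/colour (or texcoord) vertex data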
# Takes sdf and extends to return a Model3D
def load_from_tsdf(
self,
sdf,
extends=[1, 1, 1],
spacing=(2.0, 2.0, 2.0),
step_size=2,
laplacian_smoothing=False,
color=None,
image_colors=None,
pose=None,
cam=None,
points_and_colors=None,
):
# Use marching cubes to obtain the surface mesh of these ellipsoids
verts, faces, normals, _ = measure.marching_cubes_lewiner(sdf, 0, spacing=spacing, step_size=step_size)
        # scale it into the [0, 1] range
for i in range(3):
verts[:, i] = verts[:, i] - np.min(verts[:, i])
verts[:, i] /= np.max(verts[:, i])
# tsdf is upside down
# verts[:, 1] *= -1
# scale mesh to correct size of gt
verts = verts * np.asarray(extends)
# load vertices, faces, normals and finalize
self.indices = np.asarray(faces, dtype=np.uint32)
self.vertices = np.asarray(verts)
xmin, xmax = np.amin(self.vertices[:, 0]), np.amax(self.vertices[:, 0])
ymin, ymax = np.amin(self.vertices[:, 1]), np.amax(self.vertices[:, 1])
zmin, zmax = np.amin(self.vertices[:, 2]), np.amax(self.vertices[:, 2])
self.xsize = xmax - xmin
self.ysize = ymax - ymin
self.zsize = zmax - zmin
self.vertices[:, 0] += -xmin - (xmax - xmin) * 0.5
self.vertices[:, 1] += -ymin - (ymax - ymin) * 0.5
self.vertices[:, 2] += -zmin - (zmax - zmin) * 0.5
self.normals = np.asarray(normals)
if image_colors is not None and pose is not None and cam is not None:
verts_up_down = self.vertices.copy()
vertices = np.matmul(pose[:3, :3], verts_up_down.T).T + pose[:3, 3]
rgb = image_colors[:, :, ::-1].copy()
# take scene colors to make it look better
# version two
img_pt = np.matmul(cam, vertices.T).T
img_pt[:, :2] /= img_pt[:, 2:3]
img_pt = np.asarray(img_pt[:, :2], dtype=np.int32)
color = []
for p in img_pt:
if p[0] > 0 and p[0] < image_colors.shape[1] and p[1] > 0 and p[1] < image_colors.shape[0]:
color.append(rgb[p[1], p[0]])
else:
color.append([1, 0, 0])
self.colors = np.asarray(color)
elif points_and_colors is not None and pose is not None:
verts_up_down = self.vertices.copy()
vertices = np.matmul(pose[:3, :3], verts_up_down.T).T + pose[:3, 3]
near_points = []
near_colors = []
for point, color in zip(points_and_colors[0], points_and_colors[1]):
centroid = pose[:3, 3]
if np.linalg.norm(point - centroid) < 6.0:
near_points.append(point)
near_colors.append(color)
near_points = np.asarray(near_points)
near_colors = np.asarray(near_colors)
dist = scipy.spatial.distance.cdist(vertices, near_points)
colors_idx = np.argmin(dist, axis=1)
self.colors = near_colors[colors_idx][:, :3]
else:
if color is None:
                self.colors = 0.5 * np.ones((self.vertices.shape[0], 3))
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import (division, print_function, absolute_import,
unicode_literals)
import numpy as np
from radial import orbit, dataset, rv_model, prior
import scipy.signal as ss
import matplotlib.pyplot as plt
import matplotlib.markers as mrk
import lmfit
import corner
import emcee
import astropy.units as u
import astropy.constants as c
"""
This code contains routines to estimate the orbital parameters of a binary
system by means of maximum likelihood estimation or a Markov-Chain Monte Carlo
estimation using the code emcee. Before using it, it is highly recommended to
read the documentation of emcee in order to understand what are priors,
probabilities, sampling and other jargon. Check it out at
http://dan.iel.fm/emcee/current/
"""
# Estimate orbital parameters from radial velocity data comprising at least one
# orbit.
class FullOrbit(object):
"""
A class that computes the orbital parameters of a binary system given its
radial velocities (and their uncertainties) in function of time. This class
is optimized for time series that contain at least one full or almost full
orbital period.
Parameters
----------
datasets : sequence or ``radial.dataset.RVDataSet``
A list of ``RVDataSet`` objects or one ``RVDataSet`` object that
contains the data to be fit. If a sequence is passed, the order that
the data sets in the sequence will dictate which instrumental parameter
(gamma, sigma) index correspond to each data set.
guess : ``dict``
First guess of the orbital parameters. The keywords must match to the
names of the parameters to be fit. These names are: ``'k'``,
``'period'``, ``'t0'``, ``'omega'``, ``'ecc'``, ``'gamma_X'``,
``'sigma_X'`` (and so forth), where 'X' is the index of the data set.
use_add_sigma : ``bool``, optional
        If ``True``, the code will use an additional parameter to estimate an extra
uncertainty term for each RV data set. Default is ``False``.
parametrization : ``str``, optional
The parametrization for the orbital parameter search. Currently
available options are ``'mc10'`` and ``'exofast'``. Default is
``'mc10'``.
"""
def __init__(self, datasets, guess, use_add_sigma=False,
parametrization='mc10'):
self.datasets = datasets
self.guess = guess
if parametrization == 'mc10' or parametrization == 'exofast':
self.parametrization = parametrization
else:
raise ValueError('parametrization must be "mc10" or "exofast".')
if isinstance(datasets, dataset.RVDataSet):
self.n_ds = 1
else:
self.n_ds = len(datasets)
# Check if the datasets are passed as RVDataSet objects
for dsk in self.datasets:
assert isinstance(dsk,
dataset.RVDataSet), 'The datasets must be ' \
'passed as RVDataSet ' \
'objects.'
# Read the data
self.t = []
self.rv = []
self.rv_unc = []
self.meta = []
for dsk in self.datasets:
self.t.append(dsk.t.to(u.d).value)
self.rv.append(dsk.rv.to(u.m / u.s).value)
self.rv_unc.append(dsk.rv_unc.to(u.m / u.s).value)
self.meta.append(dsk.table.meta)
self.use_add_sigma = use_add_sigma
# The global parameter keywords to be used in the code
self.keys = list(self.guess.keys())
# Initializing useful global variables
self.lmfit_result = None
self.lmfit_chisq = None
self.residuals = None
self.sampler = None
self.emcee_chains = None
self.ndim = None
self.best_params = {}
for key in self.keys:
self.best_params[key] = None
# Compute a periodogram of a data set
def lomb_scargle(self, dset_index, freqs):
"""
Compute a Lomb-Scargle periodogram for a given data set using
``scipy.signal.lombscargle``.
Parameters
----------
dset_index : ``int``
Index of the data set to have the periodogram calculated for.
freqs : ``array_like``
Angular frequencies for output periodogram.
Returns
-------
pgram : ``array_like``
Lomb-Scargle periodogram.
fig : ``matplotlib.pyplot.figure``
Periodogram plot.
"""
x_array = self.t[dset_index]
y_array = self.rv[dset_index]
pgram = ss.lombscargle(x=x_array, y=y_array, freqs=freqs)
fig = plt.figure(figsize=(6, 5))
ax = fig.add_subplot(111)
ax.semilogx(freqs, pgram)
return pgram, fig
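    # Illustrative call for the periodogram above (the grid is a placeholder):
    # scipy's lombscargle expects *angular* frequencies, so a grid covering
    # periods from ~2 to ~1000 days could be built as
    #
    #   freqs = 2.0 * np.pi / np.linspace(2.0, 1000.0, 5000)
    #   pgram, fig = full_orbit.lomb_scargle(0, freqs)
    #
    # where full_orbit is a FullOrbit instance and 0 indexes the data set.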
# Plot the data sets
def plot_rvs(self, legend_loc=None, symbols=None, plot_guess=False,
plot_samples=False, fold=False, numpoints=1000):
"""
Plot the data sets.
Parameters
----------
legend_loc : ``int`` or ``None``, optional
Location of the legend. If ``None``, use the default from
``matplotlib``. Default is ``None``.
symbols : sequence or ``None``, optional
List of symbols for each data set in the plot. If ``None``, use
the default list from ``matplotlib`` markers. Default is ``None``.
plot_guess : ``bool``, optional
If ``True``, also plots the guess as a black curve, and the RVs of
each data set is shifted by its respective gamma value.
plot_samples : ``bool``, optional
If ``True``, also plots the samples obtained with the ``emcee``
estimation. Default is ``False``.
fold : ``bool``, optional
If ``True``, plot the radial velocities by folding them around the
estimated orbital period. Default is ``False``.
numpoints : ``int``, optional
Number of points to compute the radial velocities curve. Default is
``1000``.
"""
# Plot of emcee samples is not implemented yet.
if plot_samples is True:
            raise NotImplementedError('Plot of emcee samples is not supported '
                                      'yet.')
# Use matplotlib's default symbols if ``None`` is passed.
if symbols is None:
markers = mrk.MarkerStyle()
symbols = markers.filled_markers
fig = plt.figure(figsize=(6, 5))
gs = plt.GridSpec(2, 1, height_ratios=(4, 1))
ax_fit = fig.add_subplot(gs[0])
self.residuals = []
for i in range(self.n_ds):
if plot_guess is True:
ax_res = fig.add_subplot(gs[1], sharex=ax_fit)
# First we figure out the bounds of the plot
t_min = min([min(tk) for tk in self.t])
t_max = max([max(tk) for tk in self.t])
if fold is False:
t_guess = np.linspace(t_min, t_max, numpoints)
else:
t_guess = np.linspace(0, 1, numpoints)
# Compute the radial velocities for the guess
system = orbit.BinarySystem(k=self.guess['k'],
period=self.guess['period'],
t0=self.guess['t0'],
omega=self.guess['omega'],
ecc=self.guess['ecc'],
gamma=0)
if fold is False:
rv_guess = system.get_rvs(ts=t_guess)
else:
rv_guess = system.get_rvs(ts=t_guess * self.guess['period'])
rv_guess_samepoints = system.get_rvs(ts=self.t[i])
# Shift the radial velocities with the provided gamma
rvs = self.rv[i] - self.guess['gamma_{}'.format(i)]
# Compute residuals
res = rv_guess_samepoints - rvs
self.residuals.append(res)
# And finally
if fold is False:
# Plot the data and the curve
ax_fit.errorbar(self.t[i], rvs, yerr=self.rv_unc[i],
fmt=symbols[i],
label=self.meta[i]['Instrument'])
ax_fit.plot(t_guess, rv_guess, color='k')
# Plot the residuals
ax_res.errorbar(self.t[i], res, yerr=self.rv_unc[i],
fmt=symbols[i])
ax_res.set_ylabel('Residuals\n(m s$^{-1}$)')
# Trick to make y-axis in residuals symmetric
y_min = abs(np.min(res - self.rv_unc[i]))
y_max = abs(np.max(res + self.rv_unc[i]))
y_limit = max([y_min, y_max]) * 1.1
ax_res.set_ylim(-y_limit, y_limit)
ax_res.axhline(y=0.0, linewidth=1, color='k', ls='--')
plt.setp(ax_res.get_xticklabels(), visible=False)
else:
# Plot the data and the curve
phase = (self.t[i] / self.guess['period']) % 1
ax_fit.errorbar(phase, rvs, yerr=self.rv_unc[i],
fmt=symbols[i],
label=self.meta[i]['Instrument'])
ax_fit.plot(t_guess, rv_guess, color='k')
ax_fit.set_xlim(0.0, 1.0)
# Plot the residuals
ax_res.errorbar(phase, res, yerr=self.rv_unc[i],
fmt=symbols[i])
ax_res.set_ylabel('Residuals\n(m s$^{-1}$)')
# Trick to make y-axis in residuals symmetric
y_min = abs(np.min(res - self.rv_unc[i]))
y_max = abs(np.max(res + self.rv_unc[i]))
y_limit = max([y_min, y_max]) * 1.1
ax_res.set_ylim(-y_limit, y_limit)
ax_res.axhline(y=0.0, linewidth=1, color='k', ls='--')
plt.setp(ax_res.get_xticklabels(), visible=False)
else:
if fold is False:
ax_fit.errorbar(self.t[i], self.rv[i], yerr=self.rv_unc[i],
fmt=symbols[i],
label=self.meta[i]['Instrument'])
else:
phase = (self.t[i] / self.guess['period']) % 1
ax_fit.errorbar(phase, self.rv[i], yerr=self.rv_unc[i],
fmt=symbols[i],
label=self.meta[i]['Instrument'])
# Show the plot
if fold is False:
ax_fit.set_xlabel('Time (d)')
else:
ax_fit.set_xlabel('Phase')
ax_fit.set_ylabel('Radial velocities (m s$^{-1}$)')
ax_fit.set_title('{}'.format(self.meta[0]['Target']))
ax_fit.legend(loc=legend_loc, numpoints=1)
plt.tight_layout()
if plot_guess is False:
return ax_fit
else:
return fig, gs
# The log-likelihood
def lnlike(self, theta):
"""
Log-likelihood of a given set of parameters to adequately describe the
observed data.
Parameters
----------
theta : ``dict`` or ``lmfit.Parameters``
The orbital parameters.
Returns
-------
sum_res : scalar
            Minus twice the log-likelihood (the quantity minimized by the fitting routines).
"""
try:
# If ``theta`` is an ``lmfit.Parameters`` object
v = theta.valuesdict()
except AttributeError:
# If ``theta`` is a regular ``dict`` object
v = theta
sum_res = 0
for i in range(self.n_ds):
# Compute the RVs using the appropriate model
if self.parametrization == 'mc10':
rvs = rv_model.mc10(self.t[i], v['log_k'], v['log_period'],
v['t0'], v['omega'], v['ecc'],
v['gamma_{}'.format(i)])
elif self.parametrization == 'exofast':
rvs = rv_model.exofast(self.t[i], v['log_k'], v['log_period'],
v['t0'], v['sqe_cosw'], v['sqe_sinw'],
v['gamma_{}'.format(i)])
# If user wants to estimate additional sigma
if self.use_add_sigma is False:
inv_sigma2 = (1. / (self.rv_unc[i] ** 2))
elif self.use_add_sigma is True:
log_sigma_j = theta['log_sigma_{}'.format(i)]
inv_sigma2 = (1. / (self.rv_unc[i] ** 2 +
(10 ** log_sigma_j) ** 2))
# The log-likelihood
sum_res += np.sum((self.rv[i] - rvs) ** 2 * inv_sigma2 +
np.log(2. * np.pi / inv_sigma2))
return sum_res
# Prepare an ``lmfit.Parameters`` object
def prepare_params(self, theta, vary_param=None):
"""
Prepare a ``lmfit.Parameters`` object to be used in the ``lmfit``
estimation.
Parameters
----------
theta : ``dict``
The current orbital parameters.
vary_param : ``dict``
Dictionary that says which parameters will vary in the estimation.
By default, all parameters vary. A parameter can be fixed by setting
its key to ``False``.
Returns
-------
params : ``lmfit.Parameters``
The ``lmfit.Parameters`` object for the estimation.
"""
# Setting up the vary params dict
vary = {}
keys = list(theta.keys())
for key in keys:
vary[key] = True
if vary_param is not None:
for key in keys:
try:
vary[key] = vary_param[key]
except KeyError:
pass
params = lmfit.Parameters()
# Set the bounds for omega and ecc or sqe_cosw and sqe_sinw
if self.parametrization == 'mc10':
bounds = {'omega': (-np.pi, np.pi), 'ecc': (1E-6, 0.99999)}
elif self.parametrization == 'exofast':
bounds = {'sqe_cosw': (-1, 1), 'sqe_sinw': (-1, 1)}
# Generate the parameters object
for key in keys:
try:
params.add(key, theta[key], vary=vary[key],
min=bounds[key][0], max=bounds[key][1])
except KeyError:
params.add(key, theta[key], vary=vary[key])
return params
# Estimation using lmfit
def lmfit_orbit(self, vary=None, verbose=True, update_guess=False,
minimize_mode='Nelder'):
"""
Perform a fit to the radial velocities datasets using
``lmfit.minimize``.
Parameters
----------
vary : ``dict`` or ``None``, optional
Dictionary with keywords corresponding to each parameter, and
entries that are ``True`` if the parameter is to be left to vary, or
``False`` if the parameter is to be fixed in the value provided by
the guess. If ``None``, all parameters will vary. Default is
``None``.
verbose : ``bool``, optional
If ``True``, print output in the screen. Default is ``False``.
update_guess : ``bool``, optional
If ``True``, updates ``~estimate.FullOrbit.guess`` with the
estimated values from the minimization. Default is ``False``.
minimize_mode : ``str``, optional
The minimization algorithm string. See the documentation of
``lmfit.minimize`` for a list of options available. Default is
``'Nelder'``.
Returns
-------
result : ``lmfit.MinimizerResult``
The resulting ``MinimizerResult`` object.
"""
# Prepare the ``lmfit.Parameters`` object
guess = {'log_k': np.log10(self.guess['k']),
'log_period': np.log10(self.guess['period']),
't0': self.guess['t0']}
if self.parametrization == 'mc10':
guess['omega'] = self.guess['omega']
guess['ecc'] = self.guess['ecc']
elif self.parametrization == 'exofast':
guess['sqe_cosw'] = np.sqrt(self.guess['ecc']) * \
np.cos(self.guess['omega'])
guess['sqe_sinw'] = np.sqrt(self.guess['ecc']) * \
np.sin(self.guess['omega'])
for i in range(self.n_ds):
guess['gamma_{}'.format(i)] = self.guess['gamma_{}'.format(i)]
if self.use_add_sigma is True:
guess['log_sigma_{}'.format(i)] = \
np.log10(self.guess['sigma_{}'.format(i)])
params = self.prepare_params(guess, vary)
# Perform minimization
self.lmfit_result = lmfit.minimize(self.lnlike, params,
method=minimize_mode)
self.lmfit_chisq = self.lmfit_result.chisqr
# Updating global variable best_params
keys = list(guess.keys())
for key in keys:
self.best_params[key] = self.lmfit_result.params[key].value
if update_guess is True:
self.guess['k'] = 10 ** self.best_params['log_k']
self.guess['period'] = 10 ** self.best_params['log_period']
self.guess['t0'] = self.best_params['t0']
if self.parametrization == 'mc10':
self.guess['omega'] = self.best_params['omega']
self.guess['ecc'] = self.best_params['ecc']
elif self.parametrization == 'exofast':
self.guess['omega'] = np.arctan2(self.best_params['sqe_sinw'],
self.best_params['sqe_cosw'])
self.guess['ecc'] = self.best_params['sqe_sinw'] ** 2 + \
self.best_params['sqe_cosw'] ** 2
for i in range(self.n_ds):
self.guess['gamma_{}'.format(i)] = \
self.best_params['gamma_{}'.format(i)]
if self.use_add_sigma is True:
self.guess['sigma_{}'.format(i)] = \
10 ** self.best_params['log_sigma_{}'.format(i)]
if verbose is True:
for key in keys:
print('{} = {}'.format(key, self.best_params[key]))
return self.best_params
def compute_dynamics(self, main_body_mass=1.0):
"""
Compute the mass and semi-major axis of the companion defined by the
orbital parameters.
Parameters
----------
main_body_mass : ``float``, optional
The mass of the main body which the companion orbits, in units of
solar masses. Default is 1.0.
"""
mbm = main_body_mass * u.solMass
k = self.guess['k'] * u.m / u.s
period = self.guess['period'] * u.d
ecc = self.guess['ecc']
# Compute mass function f
f = (period * k ** 3 * (1 - ecc ** 2) ** (3 / 2) /
(2 * np.pi * c.G)).to(u.solMass)
# Compute msini
msini = abs(np.roots([1, -f.value, -2 * mbm.value * f.value,
-mbm.value ** 2 * f.value])[0]) * u.solMass
# Compute the semi-major axis in km and convert to AU
semi_a = (np.sqrt(c.G * msini * period / k / (2 * np.pi) /
np.sqrt(1. - ecc ** 2))).to(u.AU)
return msini.value, semi_a.value
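    # Added illustrative check (assumed guess values, not from this module):
    # for a 51 Peg b-like guess of k ~ 56 m/s, period ~ 4.23 d and ecc ~ 0,
    # compute_dynamics(main_body_mass=1.1) gives a mass function
    # f ~ 7.7e-11 solar masses, msini ~ 4.5e-4 solar masses (about 0.47
    # Jupiter masses) and a semi-major axis of roughly 0.05 AU.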
# The probability
def lnprob(self, theta_list):
"""
This function calculates the ln of the probabilities to be used in the
MCMC estimation.
Parameters
----------
theta_list: sequence
Returns
-------
The probability of the signal rv being the result of a model with the
parameters theta
"""
# The common parameters
theta = {'log_k': theta_list[0],
'log_period': theta_list[1],
't0': theta_list[2]}
# Parametrization option-specific parameters
if self.parametrization == 'mc10':
theta['omega'] = theta_list[3]
theta['ecc'] = theta_list[4]
elif self.parametrization == 'exofast':
theta['sqe_cosw'] = theta_list[3]
theta['sqe_sinw'] = theta_list[4]
# Instrumental parameters
for i in range(self.n_ds):
theta['gamma_{}'.format(i)] = theta_list[5 + i]
if self.use_add_sigma is True:
theta['log_sigma_{}'.format(i)] = theta_list[5 + self.n_ds + i]
lp = prior.flat(theta, self.parametrization)
params = self.prepare_params(theta)
if not np.isfinite(lp):
return -np.inf
return lp - 0.5 * self.lnlike(params)
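    # Added note: ``theta_list`` is unpacked above in the fixed order
    # [log_k, log_period, t0, omega | sqe_cosw, ecc | sqe_sinw,
    #  gamma_0 ... gamma_{n_ds-1}, log_sigma_0 ... log_sigma_{n_ds-1}],
    # where the log_sigma terms are present only when ``use_add_sigma`` is
    # True. Walker initializations (as in ``emcee_orbit`` below) must follow
    # the same ordering.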
# Using emcee to estimate the orbital parameters
def emcee_orbit(self, nwalkers=20, nsteps=1000, p_scale=2.0, nthreads=1,
ballsizes=None):
"""
Calculates samples of parameters that best fit the signal rv.
Parameters
----------
nwalkers : ``int``
Number of walkers
nsteps : ``int``
            Number of burn-in steps.
p_scale : ``float``, optional
The proposal scale parameter. Default is 2.0.
nthreads : ``int``
Number of threads in your machine
ballsizes : ``dict``
The one-dimensional size of the volume from which to generate a
first position to start the chain.
Returns
-------
sampler : ``emcee.EnsembleSampler``
The resulting sampler object.
"""
if ballsizes is None:
ballsizes = {'log_k': 1E-4, 'log_period': 1E-4, 't0': 1E-4,
'omega': 1E-4, 'ecc': 1E-4, 'sqe_cosw': 1E-4,
'sqe_sinw': 1E-4, 'gamma': 1E-4, 'log_sigma': 1E-4}
# The labels
if self.parametrization == 'mc10':
self.labels = ['\log{K}', '\log{T}', 't_0', '\omega',
'e']
elif self.parametrization == 'exofast':
self.labels = ['\log{K}', '\log{T}', 't_0',
'\sqrt{e} \cos{\omega}',
'\sqrt{e} \sin{\omega}']
for i in range(self.n_ds):
self.labels.append('\gamma_{}'.format(i))
if self.use_add_sigma is True:
self.labels.append('\log{\sigma_%s}' % str(i))
# Creating the pos array
pos = []
for n in range(nwalkers):
pos_n = [np.log10(self.guess['k']) + ballsizes['log_k'] *
np.random.normal(),
np.log10(self.guess['period']) + ballsizes['log_period'] *
np.random.normal(),
self.guess['t0'] + ballsizes['t0'] * np.random.normal()]
if self.parametrization == 'mc10':
pos_n.append(self.guess['omega'] + ballsizes['omega'] *
np.random.normal())
pos_n.append(self.guess['ecc'] + ballsizes['ecc'] *
np.random.normal())
elif self.parametrization == 'exofast':
sqe_cosw = np.sqrt(self.guess['ecc']) * \
np.cos(self.guess['omega'])
sqe_sinw = np.sqrt(self.guess['ecc']) * \
                           np.sin(self.guess['omega'])
from collections import *
from itertools import *
import numpy as np
from .timeout_error import timeout, TimeoutError
def is_valid(i, j, domain_metadata, peptide_metadata, models):
di, pi = domain_metadata.get(i, list()), peptide_metadata.get(i, list())
dj, pj = domain_metadata.get(j, list()), peptide_metadata.get(j, list())
def _is_valid_oneway(domains, peptides):
dtypes = set(d[0] for d in domains)
valid_ptypes = set(p for d in dtypes for p in models.models[d].keys())
ptypes = set(p[0] for p in peptides)
return len(ptypes.intersection(valid_ptypes)) > 0
return _is_valid_oneway(di,pj) or _is_valid_oneway(dj,pi)
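# Hedged usage sketch (added; the names and tuple layouts below are made up,
# not taken from this package): with a registry like
#     models.models = {'SH3': {'PxxP': some_model}}
# and metadata such as
#     domain_metadata = {1: [('SH3', 10, 60)]}
#     peptide_metadata = {2: [('PxxP', 5, 12)]}
# is_valid(1, 2, domain_metadata, peptide_metadata, models) returns True,
# because at least one domain type on one side has a trained model for a
# peptide type present on the other side.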
@timeout(2)
def _predict_ppi(interactions, max_ensemble_size):
def _valid_ensembles(ensemble_size, interactions=interactions):
for idxes in combinations(range(len(interactions)), r=ensemble_size):
peptides = set(tuple(interactions[i][1]) for i in idxes)
domains = set(tuple(interactions[i][0]) for i in idxes)
if len(peptides) == len(domains) == ensemble_size:
yield list(idxes)
binding = np.array([i[-1] for i in interactions])
not_bound = np.prod(1-binding)
bound = 0
for ensemble_size in range(1, max_ensemble_size + 1):
changed = False
for ensemble_idxes in _valid_ensembles(ensemble_size):
            factor = np.prod(binding[ensemble_idxes])
import numpy as np
import os
import lzma
from tqdm import tqdm
import json
CKPT = 100
vols = [os.path.join('segmentation', o) for o in os.listdir(
'segmentation') if os.path.isdir(os.path.join('segmentation', o))]
try:
with open('done_vols_downsampled.json') as f:
done = json.load(f)
except:
done = []
for vol in vols:
if os.path.exists(os.path.join(vol, 'segmentation_new.lzma')):
done.append(vol)
have_yet_tasks = [vol for vol in vols if vol not in done]
def downsample(vol_path):
with lzma.open(os.path.join(vol_path, "segmentation.lzma")) as lzma_file:
segmentation = np.frombuffer(lzma_file.read(),dtype=np.uint16).reshape(256,256,256)
total_unique, total_counts = np.unique(segmentation, return_counts=True)
total_counts_dict = dict(zip(total_unique, total_counts))
downsampled_img = np.zeros((64,64,64), dtype=np.uint16)
for i in range(64):
for j in range(64):
for k in range(64):
subset = segmentation[i*4:i*4+4, j*4:j*4+4, k*4:k*4+4]
                unique, counts = np.unique(subset, return_counts=True)
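# Hedged sketch (added; the original file is truncated above): a majority-vote
# downsample would typically finish by writing the most frequent label of each
# 4x4x4 block into the output voxel, e.g.
#     downsampled_img[i, j, k] = unique[np.argmax(counts)]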
import os
import cv2
import nrrd
import torch
import random
import pydicom
import numpy as np
import pandas as pd
from PIL import Image
from tqdm import tqdm
from torch.utils import data
from matplotlib import pyplot as plt
from skimage.transform import resize
from torchvision import transforms as T
from config import opt
import data_maker as dm
def number_parameters(Net, type_size=8):
para = sum([np.prod(list(p.size())) for p in Net.parameters()])
return para / 1024 * type_size / 1024
class Counter:
def __init__(self):
self.count, self.sum, self.avg = 0, 0, 0
return
def updata(self, value, num_updata=1):
self.count += num_updata
self.sum += value * num_updata
self.avg = self.sum / self.count
return
def clear(self):
self.count, self.sum, self.avg = 0, 0, 0
return
def get_root_file(root_file):
list_file = os.listdir(root_file)
list_file.sort(key=lambda x: str(x.split('.')[0]))
return list_file
def create_root(root_name):
if os.path.exists(root_name):
os.makedirs(root_name)
return
def read_txt_file(txt_root):
txt_file = open(txt_root).read()
data_list = []
for row in txt_file.split("\n"):
if row != '':
data_list += [row.split(" ")]
return data_list
def add_window(image, WL=200, WW=1000):
WLL = WL - (WW / 2)
image = (image - WLL) / WW * 255
image[image < 0] = 0
image[image > 255] = 255
image2 = np.zeros([image.shape[0], image.shape[1], image.shape[2]], dtype=np.uint8)
image2[:, :, :] = image[:, :, :]
return image2
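# Hedged usage sketch (added example; values below just use the defaults):
# add_window linearly maps the window [WL - WW/2, WL + WW/2] onto 0-255 and
# clips everything outside it, e.g.
# >>> ct = np.array([[[-1000., 200., 700.]]])   # air, window level, bright voxel
# >>> add_window(ct, WL=200, WW=1000)[0, 0]
# array([  0, 127, 255], dtype=uint8)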
def read_dicom(root_dicom, dim_change=False, window_add=False):
list_dicom = get_root_file(root_dicom)
num_dicom = len(list_dicom)
dicom_data = np.zeros([num_dicom, 512, 512])
for i in range(num_dicom):
file_root = root_dicom + list_dicom[i]
dcm = pydicom.dcmread(file_root)
matrix = dcm.pixel_array * dcm.RescaleSlope + dcm.RescaleIntercept
dicom_data[i, :, :] = matrix
if dim_change:
dicom_data = dicom_data.transpose((2, 1, 0))
if window_add:
dicom_data = add_window(dicom_data)
return dicom_data
def info_data(matrix_data):
type_data = type(matrix_data)
if type(matrix_data) is torch.Tensor:
if matrix_data.is_cuda:
matrix_data = matrix_data.detach().cpu()
matrix_data = np.array(matrix_data)
print('data_type/dtype/shape: ', type_data, matrix_data.dtype, matrix_data.shape)
print('min/max: ', np.min(matrix_data), np.max(matrix_data), 'num</=/>zero: ', np.sum(matrix_data < 0),
np.sum(matrix_data == 0), np.sum(matrix_data > 0))
return
def connect_volume_views(volume_views):
ret_volume_views = volume_views[0]
for i in range(1, 4):
ret_volume_views = np.concatenate((ret_volume_views, volume_views[i]), axis=0)
return ret_volume_views
def check_cubic_sequence(cubic_sequence):
ret_cubic_sequence = connect_volume_views(longitudinal_mpr(cubic_sequence[0]))
for i in range(1, cubic_sequence.shape[0]):
tmp_volume_views = connect_volume_views(longitudinal_mpr(cubic_sequence[i]))
ret_cubic_sequence = np.concatenate((ret_cubic_sequence, tmp_volume_views), axis=1)
return ret_cubic_sequence
def get_world_position(dicom_root):
return pydicom.dcmread(dicom_root)[0X0020, 0X0032].value
# dicom_position = pydicom.dcmread(dicom_root)[0X0020, 0X0032].value
# return np.array([dicom_position[0], dicom_position[1], dicom_position[2]])
def world2voxel(centerpoint, world_position, label_threshold=0):
return np.array([int((float(centerpoint[2]) - world_position[2]) / (world_position[2] * 2)),
int((float(centerpoint[1]) - world_position[1]) / (world_position[0] * 2)),
int((float(centerpoint[0]) - world_position[0]) / (world_position[0] * 2)),
int(1 if int(centerpoint[6]) > label_threshold else 0)], dtype=np.int16)
def deduplication(center_info):
ret_center_info, last_point = [center_info[0]], [center_info[0, 0], center_info[0, 1], center_info[0, 2]]
for i in range(1, center_info.shape[0]):
now_point = [center_info[i, 0], center_info[i, 1], center_info[i, 2]]
if last_point != now_point:
ret_center_info += [center_info[i]]
last_point = now_point
return np.array(ret_center_info, dtype=np.int16)
def calculate_distance(point1, point2):
tmp1 = (point1[0] - point2[0]) * (point1[0] - point2[0])
tmp2 = (point1[1] - point2[1]) * (point1[1] - point2[1])
tmp3 = (point1[2] - point2[2]) * (point1[2] - point2[2])
return (tmp1 + tmp2 + tmp3) ** 0.5
def check_neighbor(point1, point2):
move = np.array([[-1, -1, -1, -1, -1, -1, -1, -1, -1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[-1, -1, -1, 0, 0, 0, 1, 1, 1, -1, -1, -1, 0, 0, 1, 1, 1, -1, -1, -1, 0, 0, 0, 1, 1, 1],
[-1, 0, 1, -1, 0, 1, -1, 0, 1, -1, 0, 1, -1, 1, -1, 0, 1, -1, 0, 1, -1, 0, 1, -1, 0, 1]],
dtype=int)
for j in range(26):
if point1[0] - point2[0] == move[0, j] and point1[1] - point2[1] == move[1, j] and point1[2] - point2[2] == \
move[2, j]:
return True
return False
def connection_point(point1, point2):
move = np.array([[-1, -1, -1, -1, -1, -1, -1, -1, -1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[-1, -1, -1, 0, 0, 0, 1, 1, 1, -1, -1, -1, 0, 0, 1, 1, 1, -1, -1, -1, 0, 0, 0, 1, 1, 1],
[-1, 0, 1, -1, 0, 1, -1, 0, 1, -1, 0, 1, -1, 1, -1, 0, 1, -1, 0, 1, -1, 0, 1, -1, 0, 1]],
dtype=int)
add_point, tmp_bestp = [], point1
while (check_neighbor(tmp_bestp, point2) == False):
tmp_direction, tmp_mindis = -1, -1
for i in range(26):
tmp_point = [tmp_bestp[0] + move[0, i], tmp_bestp[1] + move[1, i], tmp_bestp[2] + move[2, i]]
tmp_distance = calculate_distance(tmp_point, point2)
if tmp_mindis == -1 or tmp_distance < tmp_mindis:
tmp_direction, tmp_mindis = i, tmp_distance
tmp_bestp = [tmp_bestp[0] + move[0, tmp_direction], tmp_bestp[1] + move[1, tmp_direction],
tmp_bestp[2] + move[2, tmp_direction]]
add_point += [tmp_bestp]
return add_point
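# Added usage example: connection_point greedily walks from point1 towards
# point2 through the 26-neighborhood and returns the voxels in between, e.g.
# >>> connection_point([0, 0, 0], [3, 0, 0])
# [[1, 0, 0], [2, 0, 0]]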
def center_continuity(center_info):
last_point = center_info[0].tolist()
ret_center_info = [last_point]
for i in range(1, center_info.shape[0]):
now_point = center_info[i].tolist()
if not check_neighbor(now_point[:3], last_point[:3]):
add_point = connection_point(last_point[:3], now_point[:3])
for tmp_point in add_point:
ret_center_info += [[tmp_point[0], tmp_point[1], tmp_point[2], max(now_point[3], last_point[3])]]
ret_center_info += [now_point]
last_point = now_point
return np.array(ret_center_info, dtype=np.int16)
def voxel_position(centerline, world_position):
    aaa = np.array(centerline, dtype=float)
ret_centerline = np.zeros([len(centerline), 4], dtype=np.int16)
for i in range(len(centerline)):
ret_centerline[i] = world2voxel(centerline[i], world_position)
return center_continuity(deduplication(ret_centerline))
def check_continuity(center_info):
last_point = center_info[0].tolist()
for i in range(1, center_info.shape[0]):
now_point = center_info[i].tolist()
if not check_neighbor(now_point[:3], last_point[:3]):
return False
last_point = now_point
return True
def check_rationality(dicom, center_info):
if not check_continuity(center_info):
return False
if np.max(center_info[:, 0]) >= dicom.shape[0] or np.max(center_info[:, 1]) >= dicom.shape[1] or np.max(
center_info[:, 2]) >= dicom.shape[2]:
return False
    if np.sum(center_info[:, 3])
import numpy as np
from collections import namedtuple
from sklearn.utils.extmath import cartesian
from scipy.stats import mode
from scipy.stats import itemfreq
from attrdict import AttrDict
import pdb
def max_product_update_var(state, messages, sender_id, recipient_id):
variable_index = sender_id[1:]
factor_index = recipient_id[1:]
outgoing_message = MaxProductVariableNode(sender_id,
messages).update_edge_message(recipient_id)
return outgoing_message
def max_product_update_fac(state, messages, sender_id, recipient_id):
variable_index = sender_id[1:]
factor_index = recipient_id[1:]
outgoing_message = MaxProductFactorNode(sender_id,
messages).update_edge_message(recipient_id)
return outgoing_message
class MaxProductNode():
def __init__(self, node_id, incoming_messages):
self.node_id = node_id
self.incoming_messages = [AttrDict({'message':
np.array([1-neighbor_message, neighbor_message]), 'variable_cost': 1,
'node_id': neighbor_id}) for neighbor_id, neighbor_message in
incoming_messages.items()]
class MaxProductVariableNode(MaxProductNode):
def __init__(self, variable_id, incoming_messages):
MaxProductNode.__init__(self, variable_id, incoming_messages)
def update_edge_message(self, neighbor_to_update):
updated_edges = self.update_edges()
return [edge.message for edge in updated_edges if edge.node_id ==
neighbor_to_update][0][1]
def update_edges(self):
edges = self.incoming_messages
node_state = self.__node_state_from_edges(edges)
new_edges = self.__edges_from_node_state(node_state, edges)
return new_edges
def update_edge_marginals(self, edges):
marginal = self.__marginals_from_edges(edges)
edges_with_marginals = self.__edges_from_marginals(marginal, edges)
return edges_with_marginals
def __node_state_from_edges(self, edges):
variable_cost_mean = edges[0].variable_cost
variable_cost = variable_cost_mean#np.sign(variable_cost_mean)*np.random.exponential(np.abs(variable_cost_mean))
message_product = np.array([1, np.exp(-1*variable_cost)])*self.__compute_message_product(edges)
return self.__normalize_message(message_product)
def __edges_from_node_state(self, node_state, edges):
return [self.__compute_new_neighbor_message(node_state, edge) for edge in edges]
def __marginals_from_edges(self, edges):
unnormalized_marginal = self.__node_state_from_edges(edges)
marginal = self.__normalize_message(unnormalized_marginal)
return marginal
def __edges_from_marginals(self, marginal, edges):
[setattr(edge, 'message', marginal) for edge in edges]
return edges
# Helper Methods
def __compute_message_product(self, edges):
edge_array = np.array([edge.message for edge in edges])
message_product = np.prod(edge_array, axis=0)
return message_product
def __compute_new_neighbor_message(self, message_product, edge):
new_edge_message = \
self.__normalize_message(np.nan_to_num(message_product/edge.message))
edge.message = new_edge_message
return edge
def __normalize_message(self, message):
noise = 1#np.array([0,1])*np.exp(np.random.normal())
        return message/float(message.sum()) if message.sum() > 0 else np.array([0.5, 0.5])
import dash
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output, State
import plotly.express as px
import dash_bootstrap_components as dbc
import pandas as pd
import pickle as pk
import plotly.graph_objects as go
import numpy as np
from sklearn.preprocessing import minmax_scale
from Saving import *
from tsmoothie.smoother import *
from block_dev import *
fc = pd.read_csv('full_corpus.csv', index_col='Unnamed: 0')
pypl = fc.loc[(fc.Comp == 'pypl') & (fc.message != 'n')].iloc[-5000:]
embs = openPk('full_embs.pkl')
embs = embs[5]
t_vecs = openPk('topNg_vecs.pkl')
t_vecs = t_vecs[5]
t_vecs_plot = openPk('top_vecs4plot.pkl')
comps = ['aapl', 'abb', 'amzn', 'aon', 'bmy', 'cern', 'csco', 'ebay', 'hsbc', 'jpm', 'mmm', 'nflx', 'pypl']
tg_words = pd.read_csv('topic_words_wGram.csv', index_col='Topic').iloc[1:]
tNg_words = pd.read_csv('topic_words_noGram.csv', index_col='Topic').iloc[1:]
fig = go.Figure(data=go.Scatter(x=pypl['createdAt'], y=pypl['deviation'], hovertext=pypl['message']))
Deviation = [
dbc.CardHeader(html.H5('Measuring average devtiation over time - PayPal')),
dbc.CardBody([
dbc.Row([
dcc.Graph(id='topic-deviation', figure = fig)
]),
dbc.Row([
dbc.Col([
html.Div(id='wind-num', children='hdbdhbs'),
dcc.Slider(id='wind-slider', min = 2, max = 100, step=1, value=4)
])
])
])
]
Top_vec_comp = [
dbc.CardHeader(html.H5('Comparing Topic Vectors')),
dbc.CardBody([
dbc.Row([
dbc.Col(
dcc.Dropdown(
id = 't_set1',
options = [
{'label': 'No-Grams 2D', 'value': 'ng2D'},
{'label': 'No-Grams 5D r-dims', 'value': 'ng5D'},
{'label': 'With-Grams 2D', 'value': 'g2D'},
{'label': 'With-Grams 5D r-dims', 'value': 'g5D'},
],
placeholder = 'Graph 1',
value = 'ng2D',
), width={'size': 3}
),
dbc.Col(
dcc.Dropdown(
id = 't_set2',
options = [
{'label': 'No-Grams 2D', 'value': 'ng2D'},
{'label': 'No-Grams 5D r-dims', 'value': 'ng5D'},
{'label': 'With-Grams 2D', 'value': 'g2D'},
{'label': 'With-Grams 5D r-dims', 'value': 'g5D'},
],
placeholder = 'Graph 1',
value = 'ng5D',
), width={'size': 3}
)
]),
dbc.Row([
dbc.Col(
dcc.Graph(id='t_graph1')
),
dbc.Col(
dcc.Graph(id='t_graph2')
)
])
])
]
dev_from_start = [
dbc.CardHeader(html.H5('Measuring deviation from start of chat')),
dbc.CardBody([
dbc.Row([
dcc.Graph(id='dev_s_g')
]),
dbc.Row([
dbc.Col([
html.Div(id='window-size', children='Enter starting window size:'),
dcc.Slider(id='dev-wind-slider', min = 1, max = 100, step=1, value=4)
]),
dbc.Col(
dcc.Dropdown(
id = 'ds_comp',
options = [{'label': i, 'value': i} for i in comps],
placeholder = 'Company',
value = 'aapl',
), width={'size': 3}
)
])
])
]
dev_from_main = [
dbc.CardHeader(html.H5('Measuring deviation from main topic')),
dbc.CardBody([
dbc.Row([
dcc.Graph(id='dev_m_g')
]),
dbc.Row([
dbc.Col(
dcc.Dropdown(
id = 'dm_comp',
options = [{'label': i, 'value': i} for i in comps],
placeholder = 'Company',
value = 'aapl'
), width={'size': 3}
),
dbc.Col(
dcc.Dropdown(
id = 'smoother',
options = [
{'label': 'No Smoothing', 'value': 'n'},
{'label': 'Exponential', 'value': 'exp'},
{'label': 'Convolutional', 'value': 'conv'},
{'label': 'Kalman', 'value': 'k'}
],
placeholder = 'Smoother',
value = 'n'
), width={'size': 3, 'offset': 3}
)
]),
dbc.Row(
dbc.Col(
dcc.Markdown(id='stats', style={'white-space': 'pre-wrap'})
)
)
])
]
block_devs = [
dbc.CardHeader(html.H5('Deviation Scores per Block')),
dbc.CardBody([
dbc.Row([
dcc.Graph(id='block_dev')
]),
dbc.Row([
dbc.Col([
html.Div(id='b_wind', children='Enter block window size:'),
dcc.Slider(id='b-wind-slider', min = 1, max = 200, step=2, value=50),
html.Div(id='b-disc', children='Discount factor:'),
dcc.Slider('b_disc_f', min=0, max = 1, step=0.02, value=0.3)
]),
dbc.Col([
dcc.Dropdown(
id = 'bd_comp',
options = [{'label': i, 'value': i} for i in comps],
placeholder = 'Company',
value = ['aapl'],
multi = True
),
dcc.Checklist(
id='auto',
options = [{'label': 'Auto-Block', 'value': 'ab'}],
value = ['ab']
)], width={'size': 3}
)
]),
dbc.Row([
dbc.Col(
dcc.Markdown(id='block_stats', style={'white-space': 'pre-wrap'})
),
dbc.Col(
dcc.Dropdown(
id = 'block_smoother',
options = [
{'label': 'No Smoothing', 'value': 'n'},
{'label': 'Convolutional', 'value': 'conv'},
{'label': 'Kalman', 'value': 'k'}
],
placeholder = 'Smoother',
value = 'n'
), width={'size': 3, 'offset': 3})
])
])
]
app = dash.Dash(__name__, external_stylesheets=[dbc.themes.BOOTSTRAP])
app.layout = html.Div(children=[
html.H1('Topic Modelling', style={'align': 'center'}),
dbc.Container([
dbc.Row([
dbc.Col(dbc.Card(Deviation))
], style={'marginTop': 10}),
dbc.Row([
dbc.Col(dbc.Card(Top_vec_comp))
], style={'marginTop': 10}),
dbc.Row([
dbc.Col(dbc.Card(dev_from_start))
], style={'marginTop': 10}),
dbc.Row([
dbc.Col(dbc.Card(dev_from_main))
], style={'marginTop': 10}),
dbc.Row([
dbc.Col(dbc.Card(block_devs))
], style={'marginTop': 10})
])
])
# Block-level Deviation
@app.callback(
Output('b-wind-slider', 'value'),
[Input('auto', 'value'), Input('bd_comp', 'value')]
)
def check(val, comps):
if val == ['ab']:
winds = []
for comp in comps:
our_df = fc.loc[(fc.Comp == comp) & (fc.t2v != 'n')]
_, wind = split_df_auto(our_df, 0.05)
winds.append(wind)
return max(winds)
return 50
@app.callback(
Output('b-disc', 'children'),
[Input('b_disc_f', 'value')]
)
def show_df(val):
return 'Enter discount size: ' + str(val)
@app.callback(
Output('b_wind', 'children'),
[Input('b-wind-slider', 'value')]
)
def show_bs(w):
return 'Block Size: {} hours'.format(w)
@app.callback(
[Output('block_dev', 'figure'), Output('block_stats', 'children')],
[Input('bd_comp', 'value'), Input('b-wind-slider', 'value'), Input('block_smoother', 'value'), Input('b_disc_f', 'value')]
)
def block_main(comps, wind, s, disc_f):
fig = go.Figure()
stats = ''
main_ts = []
for comp in comps:
main_top, stat = get_block_devs(fig, comp, wind, s, disc_f)
main_ts.append(main_top)
stats += stat
fig.update_layout(
title="Deviation per block for {}".format(', '.join(comps)),
xaxis_title="Block Number",
yaxis_title="Deviation",
legend_title="Legend"
)
return fig, stats
# Dev from main callbacks
@app.callback(
Output('dev_m_g', 'figure'),
Output('stats', 'children'),
[Input('dm_comp', 'value'), Input('smoother', 'value')]
)
def dev_from_main(comp, s):
our_df = fc.loc[(fc.Comp == comp) & (fc.t2v != 'n')]
main_top = our_df['top_Ng'].value_counts().index.values[0]
if main_top == -1:
main_top = our_df['top_Ng'].value_counts().index.values[1]
main_top_vec = t_vecs[main_top, :]
nTops = t_vecs.shape[0]
main_vec = t_vecs[main_top, :]
dist_dict = {}
dist_dict[-1] = 0
for i in range(nTops):
cur_top = t_vecs[i, :]
        similarity = np.linalg.norm(cur_top - main_vec)
import contextlib
import math
from collections import defaultdict
from time import perf_counter
from warnings import filterwarnings
import numpy
import dask
from dask.base import tokenize
from dask.dataframe.core import new_dd_object
from dask.distributed import Client, performance_report, wait
from dask.utils import format_bytes, format_time, parse_bytes
from dask_cuda.benchmarks.utils import (
get_cluster_options,
get_scheduler_workers,
parse_benchmark_args,
plot_benchmark,
setup_memory_pool,
)
from dask_cuda.utils import all_to_all
# Benchmarking cuDF merge operation based on
# <https://gist.github.com/rjzamora/0ffc35c19b5180ab04bbf7c793c45955>
def generate_chunk(i_chunk, local_size, num_chunks, chunk_type, frac_match, gpu):
# Setting a seed that triggers max amount of comm in the two-GPU case.
if gpu:
import cupy as xp
import cudf as xdf
else:
import numpy as xp
import pandas as xdf
xp.random.seed(2 ** 32 - 1)
chunk_type = chunk_type or "build"
frac_match = frac_match or 1.0
if chunk_type == "build":
# Build dataframe
#
# "key" column is a unique sample within [0, local_size * num_chunks)
#
# "shuffle" column is a random selection of partitions (used for shuffle)
#
# "payload" column is a random permutation of the chunk_size
start = local_size * i_chunk
stop = start + local_size
parts_array = xp.arange(num_chunks, dtype="int64")
        shuffle_array = xp.repeat(parts_array, math.ceil(local_size / num_chunks))
df = xdf.DataFrame(
{
"key": xp.arange(start, stop=stop, dtype="int64"),
"shuffle": xp.random.permutation(suffle_array)[:local_size],
"payload": xp.random.permutation(xp.arange(local_size, dtype="int64")),
}
)
else:
# Other dataframe
#
# "key" column matches values from the build dataframe
# for a fraction (`frac_match`) of the entries. The matching
# entries are perfectly balanced across each partition of the
# "base" dataframe.
#
# "payload" column is a random permutation of the chunk_size
# Step 1. Choose values that DO match
sub_local_size = local_size // num_chunks
sub_local_size_use = max(int(sub_local_size * frac_match), 1)
arrays = []
for i in range(num_chunks):
bgn = (local_size * i) + (sub_local_size * i_chunk)
end = bgn + sub_local_size
ar = xp.arange(bgn, stop=end, dtype="int64")
            arrays.append(xp.random.permutation(ar))
# coding: utf-8
# Data utility script
# Mostly self-contained helper functions with no external dependencies
import chainer
import chainer.links as L
import chainer.functions as F
from skimage import draw
from skimage import transform
import numpy as np
import cupy as cp
import math
import pickle
from bokeh.plotting import figure
from bokeh.layouts import gridplot
from bokeh.io import push_notebook, output_notebook
from PIL import Image
### Bokeh helpers ###
def get_bokeh_view(imgs, plot_w =512, plot_h =512):#
    """ imgs = [[1/3ch][H][W], ……] (0-255 fp)(np or cp))
    return : Bv (global dict object that holds the Bokeh views) """
_, imgH, imgW = imgs[0].shape
Bv = {}
Bv['p'] = []
for i in range(len(imgs)):
Bv['p'].append({})
        if i == 0:  # first image
            Bv['p'][i]['fig'] = figure(title = '', x_range=[0, imgW], y_range=[imgH, 0])  # y_range=[imgH, 0] flips the y-axis
        else :  # second and later images
            Bv['p'][i]['fig'] = figure(title = '', x_range=Bv['p'][0]['fig'].x_range, y_range=Bv['p'][0]['fig'].y_range)
        v_img = bokeh_conv_view_img(imgs[i])  # convert [1/3ch][H][W] to a Bokeh image
        Bv['p'][i]['img']= Bv['p'][i]['fig'].image_rgba(image=[v_img],x=[0], y=[imgH], dw=[imgW], dh=[imgH])  # y=[imgH] because the axis is flipped
gplots = [Bv['p'][i]['fig'] for i in range( len(imgs))]
Bv['gp'] = gridplot( [gplots], plot_width = plot_w, plot_height = plot_h)
output_notebook()
from bokeh.io import show
Bv['handle'] = show(Bv['gp'], notebook_handle=True)
return Bv
def bokeh_conv_view_img(img):
""" [1/3ch][H][W] 0-255⇒ bokehのイメージに変換 """
if type(img) == cp.core.core.ndarray:#cupy配列の場合
img =cp.asnumpy(img)
ch, imgW, imgH= img.shape
    if ch == 1:  # if 1ch, broadcast to 3ch
img = np.broadcast_to(img, (3, imgH, imgW))
img = np.clip(img, 0, 255).transpose((1, 2, 0))
img_plt = np.empty((imgH,imgW), dtype="uint32")
view = img_plt.view(dtype=np.uint8).reshape((imgH, imgW, 4))
    view[:, :, 0:3] = np.flipud(img[:, :, 0:3])  # flipped vertically
view[:, :, 3] = 255
return img_plt
def bokeh_update_img(imgs, infos = []):#
""" アップデート表示 imgs = [[1/3ch][H][W], ……] (0-1 or 0-255 fp)(np or cp) """
for i in range(len(imgs)):
v_img = bokeh_conv_view_img(imgs[i])
Bv['p'][i]['img'].data_source.data['image'] = [v_img]
for i in range(len(infos)):
Bv['p'][i]['fig'].title.text= infos[i]
push_notebook(handle = Bv['handle'])
def bokeh_save_img(fname = 'out.png'):
"""bokeh画像を保存"""
from bokeh.io import export_png
export_png(Bv['gp'], filename = fname)
### Image file helpers ###
def load_img(fpath = 'imgA1.png'):
    """ Image file path ⇒ returns [1][ch][imgH][imgW] (0-255 fp32) """
img = np.asarray(Image.open(fpath))
img = img.astype("float32").transpose((2, 0, 1))
img = img.reshape((1,*img.shape))
return img
def save_png_img(img, fpath = 'result.png'):
"""PNG保存 img =[1][1 or 3ch][imgH][imgW] (0-255 fp32) ,fpath = 画像ファイルパス"""
if img.shape[1] == 1:#1ch
img = img[0][0]
elif img.shape[1] == 3:#3ch
img = img[0].transpose(1,2,0)
else :
        print('Input channel error')
return
img = (np.clip(img,0,255)).astype(np.uint8)
img = Image.fromarray(img)
img.save(fpath, format ='PNG')
def load_resize_img(fpath = 'imgA1.png', opath = 'resize.png', out_size = 512):
""" 画像ファイルパス ⇒ センターリサイズ ⇒ 一時保存 ⇒[1][ch][imgH][imgW] (0-1 or 0-255, fp32) を返却 """
#画像をRGB化、センター加工、リサイズ
def conv_RBG_squre_resize(img, out_size = 512):#imgはpil Image
img = img.convert('RGB')
w,h = img.size
if w > h :
box = ((w-h)//2, 0, (w-h)//2 + h, h)
elif h > w :
box = (0, (h-w)//2, w, (h-w)//2 + w)
else :
box = (0, 0, w, h)
img = img.resize((out_size, out_size), box = box)
return img
img = Image.open(fpath)
    img = conv_RBG_squre_resize(img, out_size = out_size)  # RGB conversion, center crop, resize
    img.save(opath, format ='PNG')  # temporary save (img is still a PIL Image)
    img = load_img(fpath = opath )  # reload the image file as [1][ch][imgH][imgW]
return img
### Data processing helpers ###
def get_data_N_rand(DataO, N_pic =1, imgH = 256, imgW = 256, keys =['x','t_core']):
    """ Randomly crop patches from the data. Input: Data ⇒ return: a new Data dict with the cropped patches """
Data={}
    # Destination for the cropped data, dim=[N][ch][imgH][imgW], float32
for key in keys:
Data[key] = np.zeros((N_pic, DataO[key].shape[1], imgH, imgW), dtype = "float32")
    # Set the crop limits
xlim = DataO[keys[0]].shape[3] - imgW + 1
ylim = DataO[keys[0]].shape[2] - imgH + 1
    im_num =np.random.randint(0, DataO[keys[0]].shape[0], size=N_pic)  # which of the images to crop from
    rotNo = np.random.randint(4, size=N_pic)  # rotation index
    flipNo = np.random.randint(2, size=N_pic)  # flip index
cutx = np.random.randint(0, xlim, size=N_pic)
    cuty = np.random.randint(0, ylim, size=N_pic)
# coding=utf-8
import flask
from flask import request,jsonify
import werkzeug
import os
import tensorflow as tf
import getConfig
import numpy as np
import pickle
import requests
import json
from PIL import Image
gConfig = {}
gConfig = getConfig.get_config(config_file='config.ini')
app = flask.Flask("imgClassifierWeb")
def CNN_predict():
global secure_filename
file = gConfig['dataset_path'] + "batches.meta"
patch_bin_file = open(file, 'rb')
label_names_dict = pickle.load(patch_bin_file)["label_names"]
img = Image.open(os.path.join(app.root_path, secure_filename))
img = img.convert("RGB")
r, g, b = img.split()
    r_arr = np.array(r)
import numpy as np
from scipy import stats
from sklearn import metrics
import torch
def d_prime(auc):
standard_normal = stats.norm()
d_prime = standard_normal.ppf(auc) * np.sqrt(2.0)
return d_prime
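# Added worked example: an AUC of 0.85 corresponds to
# d' = ppf(0.85) * sqrt(2) ~= 1.036 * 1.414 ~= 1.47.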
def calculate_stats(output, target):
"""Calculate statistics including mAP, AUC, etc.
Args:
output: 2d array, (samples_num, classes_num)
target: 2d array, (samples_num, classes_num)
Returns:
stats: list of statistic of each class.
"""
classes_num = target.shape[-1]
stats = []
# Accuracy, only used for single-label classification such as esc-50, not for multiple label one such as AudioSet
    acc = metrics.accuracy_score(np.argmax(target, 1), np.argmax(output, 1))
# ------------------------------------------------------------
# Copyright (c) 2017-present, SeetaTech, Co.,Ltd.
#
# Licensed under the BSD 2-Clause License.
# You should have received a copy of the BSD 2-Clause License
# along with the software. If not, See,
#
# <https://opensource.org/licenses/BSD-2-Clause>
#
# ------------------------------------------------------------
"""Test the ops module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
import math
import os
import unittest
import numpy as np
from dragon.core.util import nest
from dragon.core.testing.unittest.common_utils import run_tests
from dragon.vm import torch
# Fix the duplicate linked omp runtime
os.environ["KMP_DUPLICATE_LIB_OK"] = "TRUE"
# Fix the numpy seed
np.random.seed(1337)
class OpTestCase(unittest.TestCase):
"""The base test case."""
precision = 1e-5
def __init__(self, method_name='runTest'):
super(OpTestCase, self).__init__(method_name)
def assertEqual(
self,
first,
second,
msg=None,
prec=None,
):
if prec is None:
prec = self.precision
inputs = nest.flatten(first)
num_first = len(inputs)
inputs += nest.flatten(second)
num_second = len(inputs) - num_first
for i, input in enumerate(inputs):
if isinstance(input, torch.Tensor):
inputs[i] = input.numpy()
first = inputs[:num_first] if num_first > 1 else inputs[0]
second = inputs[num_first:len(inputs)] if num_second > 1 else inputs[num_first]
if isinstance(first, np.ndarray) and isinstance(second, np.ndarray):
super(OpTestCase, self).assertEqual(first.shape, second.shape)
if first.dtype == bool and second.dtype == bool:
diff = first ^ second
num_unique = len(np.unique(diff))
self.assertLessEqual(num_unique, 1, msg)
else:
diff = np.abs(first - second)
max_err = diff.max()
self.assertLessEqual(max_err, prec, msg)
elif nest.is_sequence(first) and nest.is_sequence(second):
for a, b in zip(first, second):
self.assertEqual(a, b, msg, prec)
else:
super(OpTestCase, self).assertEqual(first, second, msg)
class TestTensorOps(OpTestCase):
"""Test the tensor ops."""
# Testing shapes for binary ops
unary_test_shapes = [(2,)]
# Testing shapes for binary ops
binary_test_shapes = [((2,), (2,)), ((2, 3), (3,)), ((2, 3), (2, 1))]
def test_abs(self):
data = np.array([-1., 0., 1.], 'float32')
x = new_tensor(data)
self.assertEqual(x.abs(), np.abs(data))
def test_add(self):
for a_shape, b_shape in self.binary_test_shapes:
data1, data2 = arange(a_shape), arange(b_shape, 1)
a, b = new_tensor(data1, False), new_tensor(data2, False)
self.assertEqual(a + b, data1 + data2)
self.assertEqual(1 + a, 1 + data1)
a += b
self.assertEqual(a, data1 + data2)
def test_addmm(self):
entries = [((2, 3), (3, 4), (2, 4))]
for a_shape, b_shape, c_shape in entries:
data1, data2 = arange(a_shape), arange(b_shape)
data3 = arange(c_shape)
a, b = new_tensor(data1), new_tensor(data2)
c = new_tensor(data3)
y = c.addmm(a, b)
self.assertEqual(y, np.matmul(data1, data2) + data3)
def test_argmax(self):
entries = [(0, True), (0, False), (1, True), (1, False)]
for axis, keepdims in entries:
data = arange((2, 3))
x = new_tensor(data)
result = np.argmax(data, axis)
if keepdims:
result = np.expand_dims(result, axis)
self.assertEqual(x.argmax(axis, keepdims), result)
def test_argmin(self):
entries = [(0, True), (0, False), (1, True), (1, False)]
for axis, keepdims in entries:
data = arange((2, 3))
x = new_tensor(data)
result = np.argmin(data, axis)
if keepdims:
result = np.expand_dims(result, axis)
self.assertEqual(x.argmin(axis, keepdims), result)
def test_atan2(self):
for a_shape, b_shape in self.binary_test_shapes:
data1, data2 = arange(a_shape), arange(b_shape, 1)
a, b = new_tensor(data1, False), new_tensor(data2, False)
self.assertEqual(a.atan2(b), np.arctan2(data1, data2))
def test_baddbmm(self):
entries = [((2, 2, 3), (2, 3, 4), (2, 2, 4))]
for a_shape, b_shape, c_shape in entries:
data1, data2 = arange(a_shape), arange(b_shape)
data3 = arange(c_shape)
a, b = new_tensor(data1), new_tensor(data2)
c = new_tensor(data3)
y = c.baddbmm(a, b)
self.assertEqual(y, np.matmul(data1, data2) + data3)
c.baddbmm_(a, b)
self.assertEqual(c, np.matmul(data1, data2) + data3)
def test_bitwise_and(self):
for a_shape, b_shape in self.binary_test_shapes:
data1 = arange(a_shape, dtype='int32')
data2 = arange(b_shape, 1, dtype='int32')
a, b = new_tensor(data1, False), new_tensor(data2, False)
self.assertEqual(a & b, np.bitwise_and(data1, data2))
a &= b
self.assertEqual(a, np.bitwise_and(data1, data2))
def test_bitwise_not(self):
for shape in self.unary_test_shapes:
data = np.random.binomial(1, 0.5, shape).astype('bool')
x = new_tensor(data)
self.assertEqual(~x, np.invert(data))
x.bitwise_not_()
self.assertEqual(x, np.invert(data))
def test_bitwise_or(self):
for a_shape, b_shape in self.binary_test_shapes:
data1 = arange(a_shape, dtype='int32')
data2 = arange(b_shape, 1, dtype='int32')
a, b = new_tensor(data1, False), new_tensor(data2, False)
self.assertEqual(a | b, np.bitwise_or(data1, data2))
a |= b
            self.assertEqual(a, np.bitwise_or(data1, data2))
import numpy as np
import imageio
from PoissonTemperature import FiniteDifferenceMatrixConstruction
def ind_sub_conversion(img, ind2sub_fn, sub2ind_fn):
rows, cols = img.shape[:2]
num = rows*cols
arange = np.arange(rows*cols, dtype=np.int32)
ind2sub = np.empty((num, 2), dtype=np.int32)
ind2sub[:, 0] = np.floor(arange/cols)
    ind2sub[:, 1] = np.remainder(arange, cols)
import numpy as np
from scipy.sparse import csr_matrix
from scipy.sparse.csgraph import dijkstra
from settings import (SHIPS_PER_BOUNTY, HUNTING_MAX_RATIO, HUNTING_STEP,
HUNT_WEIGHT, HUNT_RADIUS, HUNT_NEARBY, STEPS_SPIKE)
class Bounties:
def __init__(self, state, ship_target_memory):
# number of bounties to set
self.num_targets = int(state.my_ship_pos.size // SHIPS_PER_BOUNTY)
# calculate position, halite, and vulnerability for all opponent ships
# vulnerability is the ratio of distance to the nearest friendly yard
# on the weighted graph over distance to the nearest friendly yard on
# a graph with constant weights equal to mean_weight. a vulnerability
# greater than one means we have hunters obstructing the path to the
# nearest yard...
opp_ship_pos = np.array([], dtype=int)
opp_ship_hal = np.array([], dtype=int)
opp_ship_vul = np.array([], dtype=float)
opp_ship_dis = np.array([], dtype=int)
graph = self.make_graph(state)
for opp in state.opp_data.values():
yards, ship_pos, ship_hal = opp[1:4]
if yards.size == 0:
ship_vul = np.full_like(ship_pos, 10)
ship_dis = np.full_like(ship_pos, 10)
else:
graph_dist = dijkstra(graph, indices=yards, min_only=True)
graph_dist = graph_dist[ship_pos]
ship_dis = np.amin(state.dist[np.ix_(yards, ship_pos)], axis=0)
ship_vul = (1 + graph_dist) / (1 + ship_dis)
opp_ship_pos = np.append(opp_ship_pos, ship_pos)
opp_ship_hal = np.append(opp_ship_hal, ship_hal)
opp_ship_vul = np.append(opp_ship_vul, ship_vul)
opp_ship_dis = np.append(opp_ship_dis, ship_dis)
# nearby contains the number of hunters within distance 3
# that also have strictly less cargo than the ship
nearby = state.dist[np.ix_(state.my_ship_pos, opp_ship_pos)] <= 3
less_hal = state.my_ship_hal[:, np.newaxis] < opp_ship_hal
nearby = np.sum(nearby & less_hal, axis=0)
# store current positions of previous targets that are still alive
prev = np.array([val[0] for key, val in state.opp_ships.items()
if key in ship_target_memory], dtype=int)
# get the indices of the ships that are already targeted
# if a ship is too close to a friendly yard, it will probably escape
# so we remove such ships from the targets
target_bool = np.in1d(opp_ship_pos, prev) & (opp_ship_dis > 2)
target_inds = np.flatnonzero(target_bool)
# the pool of possible new targets consists of non-targeted ships
# that are trapped (vulnerability > 1), have at least one hunter
# nearby, and aren't too close to a friendly yard
candidates = ~target_bool & (opp_ship_vul > 1)
candidates &= (opp_ship_dis > 2) & (nearby >= HUNT_NEARBY)
# we compute scores for each of the candidate ships indicating
# the risk/reward of attacking them
# make the scores of ships that are not candidates negative
opp_ship_score = opp_ship_hal * opp_ship_vul
opp_ship_score[~candidates] = -1
# determine how many targets we would like to have and how many
# new targets we should/can build. we only set new targets if
# there is not a lot of halite left that we can mine - however
# we always hunt after the first part of the game
ratio = np.sum(state.halite_map) / state.starting_halite
if (ratio > HUNTING_MAX_RATIO) and (state.step < HUNTING_STEP):
num_new_targets = 0
else:
num_new_targets = max(self.num_targets - target_inds.size, 0)
num_new_targets = min(num_new_targets, np.sum(candidates))
# we can take those num_new_targets ships with maximum score
# since scores are >= 0 and we forced the scores of non-candidate
# ships to equal -1
new_inds = np.argpartition(-opp_ship_score, num_new_targets - 1)
target_inds = np.append(target_inds, new_inds[0:num_new_targets])
# set position/halite/rewards for the targets
self.ship_targets_pos = opp_ship_pos[target_inds]
self.ship_targets_hal = opp_ship_hal[target_inds]
self.ship_targets_rew = np.full_like(self.ship_targets_pos,
state.max_halite)
# write the new targets in the ship_targets list
self.target_list = [key for key, val in state.opp_ships.items()
if val[0] in self.ship_targets_pos]
return
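    # Added note on the vulnerability score used above: it compares the
    # hunter-weighted graph distance to the plain board distance. For example,
    # a ship whose weighted shortest path to the nearest yard is 12 steps
    # while its direct distance is 5 gets vulnerability (1 + 12) / (1 + 5)
    # ~= 2.2, i.e. well above 1, making it a candidate bounty.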
def get_ship_targets(self, ship, state):
full_pos = np.array([], dtype=int)
full_rew = np.array([], dtype=int)
# stop hunting ships after the interest rate spike
if state.total_steps - state.step < STEPS_SPIKE:
return full_pos, full_rew
# find targets that we can attack
ship_hal = state.my_ships[ship][1]
attackable = self.ship_targets_hal > ship_hal
targets_pos = self.ship_targets_pos[attackable]
targets_hal = self.ship_targets_hal[attackable]
targets_rew = self.ship_targets_rew[attackable]
# place bounties on sites around the target ships
for pos, hal, rew in zip(targets_pos, targets_hal, targets_rew):
hood = np.flatnonzero(state.dist[pos, :] <= 1)
hood_rew = np.full_like(hood, rew)
full_pos = np.append(full_pos, hood)
full_rew = np.append(full_rew, hood_rew)
# remove any duplicate indices and rewards
full_pos, inds = np.unique(full_pos, return_index=True)
full_rew = full_rew[inds]
return full_pos, full_rew
def make_graph(self, state):
nsites = state.map_size ** 2
        weights = np.ones(nsites)
# Copyright 2017 Division of Medical Image Computing, German Cancer Research Center (DKFZ)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
from builtins import range, zip
import random
import numpy as np
from copy import deepcopy
from scipy.ndimage import map_coordinates
from scipy.ndimage.filters import gaussian_filter, gaussian_gradient_magnitude
from scipy.ndimage.morphology import grey_dilation
from skimage.transform import resize
from scipy.ndimage.measurements import label as lb
def generate_elastic_transform_coordinates(shape, alpha, sigma):
n_dim = len(shape)
offsets = []
for _ in range(n_dim):
offsets.append(gaussian_filter((np.random.random(shape) * 2 - 1), sigma, mode="constant", cval=0) * alpha)
tmp = tuple([np.arange(i) for i in shape])
coords = np.meshgrid(*tmp, indexing='ij')
indices = [np.reshape(i + j, (-1, 1)) for i, j in zip(offsets, coords)]
return indices
def create_zero_centered_coordinate_mesh(shape):
tmp = tuple([np.arange(i) for i in shape])
coords = np.array(np.meshgrid(*tmp, indexing='ij')).astype(float)
for d in range(len(shape)):
coords[d] -= ((np.array(shape).astype(float) - 1) / 2.)[d]
return coords
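# Added example: for shape (3, 3) the mesh is centered on the middle pixel:
# >>> create_zero_centered_coordinate_mesh((3, 3))[0]
# array([[-1., -1., -1.],
#        [ 0.,  0.,  0.],
#        [ 1.,  1.,  1.]])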
def convert_seg_image_to_one_hot_encoding(image, classes=None):
'''
Takes as input an nd array of a label map (any dimension). Outputs a one hot encoding of the label map.
Example (3D): if input is of shape (x, y, z), the output will ne of shape (n_classes, x, y, z)
'''
if classes is None:
classes = np.unique(image)
out_image = np.zeros([len(classes)]+list(image.shape), dtype=image.dtype)
for i, c in enumerate(classes):
out_image[i][image == c] = 1
return out_image
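# Added example mirroring the docstring: a (2, 2) label map with labels 0, 1, 2
# becomes a (3, 2, 2) one-hot volume:
# >>> seg = np.array([[0, 1], [2, 1]])
# >>> convert_seg_image_to_one_hot_encoding(seg).shape
# (3, 2, 2)
# >>> convert_seg_image_to_one_hot_encoding(seg)[1]
# array([[0, 1], [0, 1]])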
def elastic_deform_coordinates(coordinates, alpha, sigma):
n_dim = len(coordinates)
offsets = []
for _ in range(n_dim):
offsets.append(
gaussian_filter((np.random.random(coordinates.shape[1:]) * 2 - 1), sigma, mode="constant", cval=0) * alpha)
offsets = np.array(offsets)
indices = offsets + coordinates
return indices
def rotate_coords_3d(coords, angle_x, angle_y, angle_z):
rot_matrix = np.identity(len(coords))
rot_matrix = create_matrix_rotation_x_3d(angle_x, rot_matrix)
rot_matrix = create_matrix_rotation_y_3d(angle_y, rot_matrix)
rot_matrix = create_matrix_rotation_z_3d(angle_z, rot_matrix)
coords = np.dot(coords.reshape(len(coords), -1).transpose(), rot_matrix).transpose().reshape(coords.shape)
return coords
def rotate_coords_2d(coords, angle):
rot_matrix = create_matrix_rotation_2d(angle)
coords = np.dot(coords.reshape(len(coords), -1).transpose(), rot_matrix).transpose().reshape(coords.shape)
return coords
def scale_coords(coords, scale):
return coords * scale
def uncenter_coords(coords):
shp = coords.shape[1:]
coords = deepcopy(coords)
for d in range(coords.shape[0]):
coords[d] += (shp[d] - 1) / 2.
return coords
def interpolate_img(img, coords, order=3, mode='nearest', cval=0.0, is_seg=False):
if is_seg and order != 0:
unique_labels = np.unique(img)
result = np.zeros(coords.shape[1:], img.dtype)
for i, c in enumerate(unique_labels):
res_new = map_coordinates((img == c).astype(float), coords, order=order, mode=mode, cval=cval)
result[res_new >= 0.5] = c
return result
else:
return map_coordinates(img.astype(float), coords, order=order, mode=mode, cval=cval).astype(img.dtype)
def generate_noise(shape, alpha, sigma):
noise = np.random.random(shape) * 2 - 1
noise = gaussian_filter(noise, sigma, mode="constant", cval=0) * alpha
return noise
def find_entries_in_array(entries, myarray):
entries = np.array(entries)
values = np.arange(np.max(myarray) + 1)
lut = np.zeros(len(values), 'bool')
lut[entries.astype("int")] = True
return np.take(lut, myarray.astype(int))
def center_crop_3D_image(img, crop_size):
center = np.array(img.shape) / 2.
if type(crop_size) not in (tuple, list):
center_crop = [int(crop_size)] * len(img.shape)
else:
center_crop = crop_size
assert len(center_crop) == len(
img.shape), "If you provide a list/tuple as center crop make sure it has the same len as your data has dims (3d)"
return img[int(center[0] - center_crop[0] / 2.):int(center[0] + center_crop[0] / 2.),
int(center[1] - center_crop[1] / 2.):int(center[1] + center_crop[1] / 2.),
int(center[2] - center_crop[2] / 2.):int(center[2] + center_crop[2] / 2.)]
def center_crop_3D_image_batched(img, crop_size):
# dim 0 is batch, dim 1 is channel, dim 2, 3 and 4 are x y z
center = np.array(img.shape[2:]) / 2.
if type(crop_size) not in (tuple, list):
center_crop = [int(crop_size)] * (len(img.shape) - 2)
else:
center_crop = crop_size
assert len(center_crop) == (len(
img.shape) - 2), "If you provide a list/tuple as center crop make sure it has the same len as your data has dims (3d)"
return img[:, :, int(center[0] - center_crop[0] / 2.):int(center[0] + center_crop[0] / 2.),
int(center[1] - center_crop[1] / 2.):int(center[1] + center_crop[1] / 2.),
int(center[2] - center_crop[2] / 2.):int(center[2] + center_crop[2] / 2.)]
def center_crop_2D_image(img, crop_size):
center = np.array(img.shape) / 2.
if type(crop_size) not in (tuple, list):
center_crop = [int(crop_size)] * len(img.shape)
else:
center_crop = crop_size
assert len(center_crop) == len(
img.shape), "If you provide a list/tuple as center crop make sure it has the same len as your data has dims (2d)"
return img[int(center[0] - center_crop[0] / 2.):int(center[0] + center_crop[0] / 2.),
int(center[1] - center_crop[1] / 2.):int(center[1] + center_crop[1] / 2.)]
def center_crop_2D_image_batched(img, crop_size):
# dim 0 is batch, dim 1 is channel, dim 2 and 3 are x y
    center = np.array(img.shape[2:]) / 2.
import sys
sys.path.insert(0, "/home/emmanuel/code/kernellib")
sys.path.insert(0, "/home/jovyan/work/workspace/software/kernellib")
from kernellib.dependence import HSIC, RHSIC
import numpy as np
import warnings
warnings.filterwarnings('ignore')
class HSICDependence:
def __init__(
self, model="linear", random_state=123, subsample=1000, n_features=2000
):
self.model = model
self.random_state = random_state
self.subsample = subsample
self.n_features = n_features
self.fit = False
def fit_model(self, X, Y):
if self.model == "linear":
self.model = HSIC(kernel="lin", random_state=self.random_state)
elif self.model == "rbf":
self.model = HSIC(
kernel="rbf", random_state=1234, sub_sample=self.subsample
)
elif self.model == "rff":
self.model = RHSIC(
kernel_approx="rff",
n_features=self.n_features,
random_state=self.random_state,
sub_sample=self.subsample,
)
else:
raise ValueError("Unrecognized model.")
# Fit model
self.model.fit(X, Y)
self.fit = True
return self
def get_hsic(self):
if not self.fit:
raise ValueError("Unfit model. Need data first.")
return self.model.hsic_value
def get_derivative(self):
if not self.fit:
raise ValueError("Unfit model. Need data first.")
if not hasattr(self, "derX"):
self.derX, self.derY = self.model.derivative()
return self.derX, self.derY
def get_mod(self):
if not hasattr(self, "derX"):
self.derX, self.derY = self.model.derivative()
return np.sqrt(np.abs(self.derX) + np.abs(self.derY))
def get_angle(self):
if not hasattr(self, "derX"):
self.derX, self.derY = self.model.derivative()
        return np.rad2deg(np.arctan2(self.derY, self.derX))
import numpy as np
import scipy as sp
import scipy.optimize
import matplotlib.pyplot as plt
from dimension_reduction import get_rank_reduction_matrix
import idw
import time
import glpk
glpk.env.term_on = False
#Given a data matrix X [dxn, columns are samples]
#a d-dimensional starting vector z and a d-dimensional
#direction vector [not necessarily normalized] v,
#compute the next iterate for the hit-and-run algorithm
def hit_and_run_iter(X, z, v):
D, N = X.shape
res_one = lin_prog_query(X, z, v)
res_two = lin_prog_query(X, z, -v)
#Interpret the optimization result, and return the next vector
maximal_a = res_one[N]
minimal_a = -res_two[N]
picked_a = np.random.uniform(low=minimal_a, high=maximal_a)
return z + v * picked_a
#Implements the method from
#https://arxiv.org/pdf/1402.4670.pdf
def overrelaxed_hit_and_run_iter(X, z, v):
D, N = X.shape
res_one = lin_prog_query(X, z, v)
res_two = lin_prog_query(X, z, -v)
#Interpret the optimization result, and return the next vector
maximal_a = res_one[N]
minimal_a = -res_two[N]
L = maximal_a - minimal_a
t_zero = -minimal_a
A = 2.0 * (t_zero / L) - 1.0
R = np.random.uniform(low=0.0, high=1.0)
A_plus_one = 1.0 + A
under_radical = A_plus_one * A_plus_one - 4.0 * A * R
numerator = A_plus_one - np.sqrt(under_radical)
t_one = L * (numerator / (2.0 * A))
picked_a = minimal_a + t_one
return z + v * picked_a
#Given a data matrix X [dxn, columns are samples]
#a d-dimensional starting vector z
#and the (dists, vecs) vector
#that one gets from "get_maximal_vertex_direction",
#perform one iteration of schmancy hit-and-run
def schmancy_hit_and_run_iter(X, z, dist_vec_pair):
dists, vecs = dist_vec_pair
D, N = X.shape
X_relativized = X - z.reshape((D, 1))
#The way we pick a direction is through rejection sampling
#keep trying to pick until we get something
while True:
v = np.random.normal(size=D)
v = v / np.linalg.norm(v)
#project down the data matrix onto the hyperplane,
#as this will be used to determine
#proximity weights to each vertex
X_proj = project_hyperplane(X_relativized, v)
p = D - 1
W = idw.get_idw_weights(np.transpose(X_proj))
#Compute relativized estimated dists
#for the candidate hyperplane
#by measuring agreement of vecs with dists
rel_dists = dists * np.abs(np.matmul(np.transpose(vecs), v))
#Okay, now with the relativized estimated dists
#in hand, compute the dist estimate using the weights
est_dist = np.dot(W, rel_dists)
max_dist = np.amax(rel_dists)
r = est_dist / max_dist
#Now, with probability r, accept the choice of v
#otherwise, keep spinning.
if (np.random.uniform() <= r):
break
return overrelaxed_hit_and_run_iter(X, z, v)
#Given a data matrix X [dxn, columns are samples],
#return a pair (dists, vecs)
#where dists is an array of n numbers, and vecs is a dxn array
#of unit vectors such that they are distances, directions
#of the paths to the furthest vertex from each vertex in X
def get_maximal_vertex_directions(X):
X_T = np.transpose(X)
dist_mat = sp.spatial.distance_matrix(X_T, X_T)
max_dist_indices = np.argmax(dist_mat, axis=1)
opp_vertices = X[:, max_dist_indices]
unnorm_vecs = opp_vertices - X
norms = np.linalg.norm(unnorm_vecs, axis=0, keepdims=True)
vecs = unnorm_vecs / norms
return (norms.reshape(-1), vecs)
#Given a data matrix X [dxn, columns are samples],
#project the data onto the plane normal to the unit vector
#v, and return the result
def project_hyperplane(X, v):
#n-vector of projections
projs = np.dot(np.transpose(X), v)
    sub = np.outer(v, projs)
import numpy as np
#import numba
def get_particles(lattice):
"""
return the number of particles, regarless of
its state.
occupied state == lattice[k] > 0
empty state == lattice[k] = 0
"""
    return np.sum(lattice > 0)
from typing import Tuple
import numba as nb
import numpy as np
from nptyping import NDArray
SQRT2 = np.float32(np.sqrt(2))
"""
Adapted from https://github.com/simbilod/optio
SMF specs from photonics.byu.edu/FiberOpticConnectors.parts/images/smf28.pdf
MFD:
- 10.4 for Cband
- 9.2 for Oband
"""
import hashlib
from typing import Any, Dict, Optional
import meep as mp
import numpy as np
from gdsfactory.serialization import clean_value_name
from gdsfactory.types import Floats
nm = 1e-3
nSi = 3.47
nSiO2 = 1.44
def fiber_ncore(fiber_numerical_aperture, fiber_nclad):
return (fiber_numerical_aperture**2 + fiber_nclad**2) ** 0.5
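# Added worked example: with the defaults used below (NA = 0.14,
# fiber cladding index 1.44) the fiber core index is
# fiber_ncore(0.14, 1.44) = (0.14**2 + 1.44**2) ** 0.5 ~= 1.4468.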
def get_simulation_grating_fiber(
period: float = 0.66,
fill_factor: float = 0.5,
n_periods: int = 30,
widths: Optional[Floats] = None,
gaps: Optional[Floats] = None,
slab_thickness: float = 150 * nm,
fiber_angle_deg: float = 20.0,
fiber_xposition: float = 1.0,
fiber_core_diameter: float = 10.4,
fiber_numerical_aperture: float = 0.14,
fiber_nclad: float = nSiO2,
nwg: float = nSi,
nslab: Optional[float] = None,
nclad: float = nSiO2,
nbox: float = nSiO2,
nsubstrate: float = nSi,
pml_thickness: float = 1.0,
substrate_thickness: float = 1.0,
box_thickness: float = 2.0,
wg_thickness: float = 220 * nm,
top_clad_thickness: float = 2.0,
air_gap_thickness: float = 1.0,
fiber_thickness: float = 2.0,
resolution: int = 64, # pixels/um
wavelength_start: float = 1.4,
wavelength_stop: float = 1.7,
wavelength_points: int = 150,
eps_averaging: bool = False,
fiber_port_y_offset_from_air: float = 1,
waveguide_port_x_offset_from_grating_start: float = 10,
fiber_port_x_size: Optional[float] = None,
xmargin: float = 10.0,
) -> Dict[str, Any]:
r"""Returns simulation results from grating coupler with fiber.
na**2 = ncore**2 - nclad**2
    ncore = sqrt(na**2 + nclad**2)
Args:
period: fiber grating period
fill_factor: fraction of the grating period filled with the grating material.
n_periods: number of periods
widths: Optional list of widths. Overrides period, fill_factor, n_periods
gaps: Optional list of gaps. Overrides period, fill_factor, n_periods
fiber_angle_deg: fiber angle in degrees
fiber_xposition: xposition
fiber_core_diameter: fiber diameter
fiber_numerical_aperture: NA
fiber_nclad: fiber cladding index.
fiber_ncore: fiber core index
nwg: waveguide index.
nclad: top cladding index.
nbox: box index bottom.
nsubstrate: index substrate.
pml_thickness: pml_thickness (um)
substrate_thickness: substrate_thickness (um)
box_thickness: thickness for bottom cladding (um)
wg_thickness: wg_thickness (um)
top_clad_thickness: thickness of the top cladding.
air_gap_thickness: air gap thickness.
fiber_thickness: fiber_thickness
resolution: resolution pixels/um
wavelength_start: min wavelength (um)
wavelength_stop: max wavelength (um)
wavelength_points: wavelength points.
eps_averaging: epsilon averaging.
fiber_port_y_offset_from_air: y_offset from fiber to air (um).
waveguide_port_x_offset_from_grating_start: x offset of the waveguide port from the grating start (um).
fiber_port_x_size: x extent of the fiber port monitor (um).
xmargin: margin from PML to grating end
.. code::
fiber_xposition
|
fiber_core_diameter
/ / / / |
/ / / / | fiber_thickness
/ / / / _ _ _| _ _ _ _ _ _ _
|
| air_gap_thickness
_ _ _| _ _ _ _ _ _ _
|
nclad | top_clad_thickness
_ _ _ _ _ _| _ _ _ _ _ _ _
nwg _| |_| |_| |__________| _
| |
nslab |wg_thickness | slab_thickness
______________ _ _ _|_ _ _ _ _ _ _ _|
|
nbox |box_thickness
______________ _ _ _|_ _ _ _ _ _ _ _
|
nsubstrate |substrate_thickness
______________ _ _ _|
|--------------------|<-------->
xmargin
"""
wavelengths = np.linspace(wavelength_start, wavelength_stop, wavelength_points)
wavelength = np.mean(wavelengths)
freqs = 1 / wavelengths
widths = widths or n_periods * [period * fill_factor]
gaps = gaps or n_periods * [period * (1 - fill_factor)]
nslab = nslab or nwg
settings = dict(
widths=widths,
gaps=gaps,
n_periods=n_periods,
nslab=nslab,
fiber_angle_deg=fiber_angle_deg,
fiber_xposition=fiber_xposition,
fiber_core_diameter=fiber_core_diameter,
fiber_numerical_aperture=fiber_numerical_aperture,
fiber_nclad=fiber_nclad,
nwg=nwg,
nclad=nclad,
nbox=nbox,
nsubstrate=nsubstrate,
pml_thickness=pml_thickness,
substrate_thickness=substrate_thickness,
box_thickness=box_thickness,
wg_thickness=wg_thickness,
top_clad_thickness=top_clad_thickness,
air_gap_thickness=air_gap_thickness,
fiber_thickness=fiber_thickness,
resolution=resolution,
wavelength_start=wavelength_start,
wavelength_stop=wavelength_stop,
wavelength_points=wavelength_points,
eps_averaging=eps_averaging,
fiber_port_y_offset_from_air=fiber_port_y_offset_from_air,
waveguide_port_x_offset_from_grating_start=waveguide_port_x_offset_from_grating_start,
fiber_port_x_size=fiber_port_x_size,
)
settings_string = clean_value_name(settings)
settings_hash = hashlib.md5(settings_string.encode()).hexdigest()[:8]
# Angle in radians
fiber_angle = np.radians(fiber_angle_deg)
# Z (Y)-domain
sz = (
+pml_thickness
+ substrate_thickness
+ box_thickness
+ wg_thickness
+ top_clad_thickness
+ air_gap_thickness
+ fiber_thickness
+ pml_thickness
)
# XY (X)-domain
# Assume fiber port dominates
fiber_port_y = (
-sz / 2
+ wg_thickness
+ top_clad_thickness
+ air_gap_thickness
+ fiber_port_y_offset_from_air
)
fiber_port_x_offset_from_angle = np.abs(fiber_port_y * np.tan(fiber_angle))
from datetime import datetime
import json
import glob
import os
from pathlib import Path
from multiprocessing.pool import ThreadPool
from typing import Dict
import numpy as np
import pandas as pd
from scipy.stats.mstats import gmean
import torch
from torch import nn
from torch.utils.data import DataLoader
ON_KAGGLE: bool = 'KAGGLE_WORKING_DIR' in os.environ
def gmean_df(df: pd.DataFrame) -> pd.DataFrame:
return df.groupby(level=0).agg(lambda x: gmean(list(x)))
def mean_df(df: pd.DataFrame) -> pd.DataFrame:
return df.groupby(level=0).mean()
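# Hedged illustration (added): with test-time augmentation the prediction frame has
# repeated ids in the index; mean_df collapses them to one averaged row per id.
assert abs(float(mean_df(pd.DataFrame({'p': [0.2, 0.4]}, index=['id1', 'id1'])).loc['id1', 'p']) - 0.3) < 1e-9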
def load_model(model: nn.Module, path: Path) -> Dict:
state = torch.load(str(path))
model.load_state_dict(state['model'])
print('Loaded model from epoch {epoch}, step {step:,}'.format(**state))
return state
class ThreadingDataLoader(DataLoader):
def __iter__(self):
sample_iter = iter(self.batch_sampler)
if self.num_workers == 0:
for indices in sample_iter:
yield self.collate_fn([self._get_item(i) for i in indices])
else:
prefetch = 1
with ThreadPool(processes=self.num_workers) as pool:
futures = []
for indices in sample_iter:
futures.append([pool.apply_async(self._get_item, args=(i,))
for i in indices])
if len(futures) > prefetch:
yield self.collate_fn([f.get() for f in futures.pop(0)])
# items = pool.map(lambda i: self.dataset[i], indices)
# yield self.collate_fn(items)
for batch_futures in futures:
yield self.collate_fn([f.get() for f in batch_futures])
def _get_item(self, i):
return self.dataset[i]
def write_event(log, step: int, **data):
data['step'] = step
data['dt'] = datetime.now().isoformat()
log.write(json.dumps(data, sort_keys=True))
log.write('\n')
log.flush()
def _smooth(ys, indices):
return [np.mean(ys[idx: indices[i + 1]])
for i, idx in enumerate(indices[:-1])]
import random
import math

import numpy as np
import pandas as pd
from PIL import Image, ImageOps
from torchvision.transforms import (
    ToTensor, Normalize, Compose, Resize, CenterCrop, RandomCrop,
    RandomHorizontalFlip)
# class RandomSizedCrop:
# """Random crop the given PIL.Image to a random size
# of the original size and and a random aspect ratio
# of the original aspect ratio.
# size: size of the smaller edge
# interpolation: Default: PIL.Image.BILINEAR
# """
# def __init__(self, size, interpolation=Image.BILINEAR,
# min_aspect=4/5, max_aspect=5/4,
# min_area=0.25, max_area=1):
# self.size = size
# self.interpolation = interpolation
# self.min_aspect = min_aspect
# self.max_aspect = max_aspect
# self.min_area = min_area
# self.max_area = max_area
# def __call__(self, img):
# for attempt in range(10):
# area = img.size[0] * img.size[1]
# target_area = random.uniform(self.min_area, self.max_area) * area
# aspect_ratio = random.uniform(self.min_aspect, self.max_aspect)
# w = int(round(math.sqrt(target_area * aspect_ratio)))
# h = int(round(math.sqrt(target_area / aspect_ratio)))
# if random.random() < 0.5:
# w, h = h, w
# if w <= img.size[0] and h <= img.size[1]:
# x1 = random.randint(0, img.size[0] - w)
# y1 = random.randint(0, img.size[1] - h)
# img = img.crop((x1, y1, x1 + w, y1 + h))
# assert(img.size == (w, h))
# return img.resize((self.size, self.size), self.interpolation)
# # Fallback
# scale = Resize(self.size, interpolation=self.interpolation)
# crop = CenterCrop(self.size)
# return crop(scale(img))
class RandomSizedCrop:
"""Random crop the given PIL.Image to a random size
of the original size and a random aspect ratio
of the original aspect ratio.
size: size of the smaller edge
interpolation: Default: PIL.Image.BICUBIC
"""
def __init__(self, size, interpolation=Image.BICUBIC,
min_aspect=4/5, max_aspect=5/4,
min_area=0.25, max_area=1):
self.size = size
self.interpolation = interpolation
self.min_aspect = min_aspect
self.max_aspect = max_aspect
self.min_area = min_area
self.max_area = max_area
def __call__(self, img):
size_0 = img.size[0]
size_1 = img.size[1]
print(size_0, size_1)
img_data = np.array(img)
if ((size_0/size_1>=1.3) or (size_1/size_0>=1.3)):
w_resized = int(img.size[0] * 300 / img.size[1])
h_resized = int(img.size[1] * 300 / img.size[0])
if size_0 < size_1:
resized = img.resize((w_resized ,300))
pad_width = 300 - w_resized
df = pd.DataFrame(img_data[0,:,:])
padding = (pad_width // 2, 0, pad_width-(pad_width//2), 0)
else:
resized = img.resize((300, h_resized))
pad_height = 300 - h_resized
df = pd.DataFrame(img_data[:,0,:])
padding = (0, pad_height // 2, 0, pad_height-(pad_height//2))
AvgColour = tuple([int(i) for i in df.mean()])
resized_w_pad = ImageOps.expand(resized, padding, fill=AvgColour)
# plt.figure(figsize=(8,8))
# plt.subplot(133)
# plt.imshow(resized_w_pad)
# plt.axis('off')
# plt.title('Padded Image',fontsize=15)
# plt.show()
else:
for attempt in range(10):
print(attempt)
area = img.size[0] * img.size[1]
target_area = random.uniform(self.min_area, self.max_area) * area
aspect_ratio = random.uniform(self.min_aspect, self.max_aspect)
w = int(round(math.sqrt(target_area * aspect_ratio)))
h = int(round(math.sqrt(target_area / aspect_ratio)))
if random.random() < 0.5:
w, h = h, w
if w <= img.size[0] and h <= img.size[1]:
x1 = random.randint(0, img.size[0] - w)
y1 = random.randint(0, img.size[1] - h)
img = img.crop((x1, y1, x1 + w, y1 + h))
assert(img.size == (w, h))
return img.resize((self.size, self.size), self.interpolation)
scale = Resize(self.size, interpolation=self.interpolation)
crop = CenterCrop(self.size)
resized_w_pad = crop(scale(img))
# Fallback
return resized_w_pad
train_transform = Compose([
RandomCrop(288),
RandomHorizontalFlip(),
])
test_transform = Compose([
RandomCrop(288),
RandomHorizontalFlip(),
])
tensor_transform = Compose([
ToTensor(),
Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
])
from pathlib import Path
from typing import Callable, List
import cv2
import pandas as pd
from PIL import Image
import torch
from torch.utils.data import Dataset
N_CLASSES = 1103
DATA_ROOT = Path('../input/imet-2019-fgvc6' if ON_KAGGLE else '/nfsshare/home/white-hearted-orange/data')
class TrainDataset(Dataset):
def __init__(self, root: Path, df: pd.DataFrame,
image_transform: Callable, debug: bool = True):
super().__init__()
self._root = root
self._df = df
self._image_transform = image_transform
self._debug = debug
def __len__(self):
return len(self._df)
def __getitem__(self, idx: int):
item = self._df.iloc[idx]
image = load_transform_image(
item, self._root, self._image_transform, debug=self._debug)
target = torch.zeros(N_CLASSES)
for cls in item.attribute_ids.split():
target[int(cls)] = 1
return image, target
class TTADataset:
def __init__(self, root: Path, df: pd.DataFrame,
image_transform: Callable, tta: int):
self._root = root
self._df = df
self._image_transform = image_transform
self._tta = tta
def __len__(self):
return len(self._df) * self._tta
def __getitem__(self, idx):
item = self._df.iloc[idx % len(self._df)]
image = load_transform_image(item, self._root, self._image_transform)
return image, item.id
def load_transform_image(
item, root: Path, image_transform: Callable, debug: bool = False):
image = load_image(item, root)
image = image_transform(image)
if debug:
image.save('_debug.png')
return tensor_transform(image)
def load_image(item, root: Path) -> Image.Image:
image = cv2.imread(str(root / f'{item.id}.png'))
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
return Image.fromarray(image)
def get_ids(root: Path) -> List[str]:
return sorted({p.name.split('_')[0] for p in root.glob('*.png')})
import argparse
from collections import defaultdict, Counter
import random
import pandas as pd
import tqdm
def make_folds(n_folds: int) -> pd.DataFrame:
df = pd.read_csv(DATA_ROOT / 'train.csv')
cls_counts = Counter(cls for classes in df['attribute_ids'].str.split()
for cls in classes)
fold_cls_counts = defaultdict(int)
folds = [-1] * len(df)
for item in tqdm.tqdm(df.sample(frac=1, random_state=42).itertuples(),
total=len(df)):
cls = min(item.attribute_ids.split(), key=lambda cls: cls_counts[cls])
fold_counts = [(f, fold_cls_counts[f, cls]) for f in range(n_folds)]
min_count = min([count for _, count in fold_counts])
random.seed(item.Index)
fold = random.choice([f for f, count in fold_counts
if count == min_count])
folds[item.Index] = fold
for cls in item.attribute_ids.split():
fold_cls_counts[fold, cls] += 1
df['fold'] = folds
return df
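# Hedged usage sketch (added for illustration): build stratified folds and hold out
# fold 0 for validation, mirroring what train() does below. Only runs if the
# train.csv file is actually available.
if __name__ == '__main__' and (DATA_ROOT / 'train.csv').exists():
    folds_df = make_folds(n_folds=5)
    print(folds_df['fold'].value_counts())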
###########################models###############################
from functools import partial
import torch
from torch import nn
from torch.nn import functional as F
import torchvision.models as M
class AvgPool(nn.Module):
def forward(self, x):
return F.avg_pool2d(x, x.shape[2:])
def create_net(net_cls, pretrained: bool):
if ON_KAGGLE:
net = net_cls()
model_name = net_cls.__name__
weights_path = f'../input/{model_name}/{model_name}.pth'
net.load_state_dict(torch.load(weights_path))
else:
#net = net_cls(pretrained=pretrained)
net = net_cls()
model_name = net_cls.__name__
net.load_state_dict(torch.load(f'/nfsshare/home/white-hearted-orange/kaggle-imet-2019-master/imet/{model_name}.pth'))
print(model_name)
return net
class ResNet(nn.Module):
def __init__(self, num_classes,
pretrained=False, net_cls=M.resnet50, dropout=False):
super().__init__()
self.net = create_net(net_cls, pretrained=pretrained)
self.net.avgpool = AvgPool()
n = self.net.fc.in_features
#print(n)
self.net = nn.Sequential(*list(self.net.children())[:-1])
if dropout:
self.culture = nn.Sequential(
nn.Dropout(0.2),
nn.Linear(n, 398)
)
self.hidden = nn.Linear(398,50)
self.relu = nn.ReLU()
self.tag = nn.Sequential(
nn.Dropout(0.2),
nn.Linear(n+50, num_classes - 398)
)
else:
self.culture = nn.Linear(n, 398)
self.hidden = nn.Linear(398, 50)
self.relu = nn.ReLU()
self.tag = nn.Linear(n + 50, num_classes - 398)
def forward(self, x):
h0 = self.net(x)
h0 = h0.view(h0.size()[:2])
#print(h0.size())
h1 = self.culture(h0)
h2 = self.relu(self.hidden(h1))
#print(h2.size())
h3 = self.tag(torch.cat([h0,h2],1))
prediction = torch.cat([h1,h3],1)
return h1,h3,prediction
resnet50 = partial(ResNet, net_cls = M.resnet50)
resnet101 = partial(ResNet, net_cls = M.resnet101)
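# Hedged sketch (added; shapes inferred from the heads above, not from the original
# source): the two-head model returns culture logits (398), tag logits (705) and
# their concatenation (1103 == N_CLASSES). Instantiating it requires the checkpoint
# files referenced in create_net, so this is left as a comment.
# model = resnet50(num_classes=N_CLASSES, pretrained=True)
# h_culture, h_tag, prediction = model(torch.zeros(1, 3, 288, 288))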
########################main.py########################################################
import argparse
from itertools import islice
import json
from pathlib import Path
import shutil
import warnings
from typing import Dict
import numpy as np
import pandas as pd
from sklearn.metrics import fbeta_score
from sklearn.exceptions import UndefinedMetricWarning
import torch
from torch import nn, cuda
from torch.optim import Adam,SGD,lr_scheduler
import tqdm
def predict(model, root: Path, df: pd.DataFrame, out_path: Path,
batch_size: int, tta: int, workers: int, use_cuda: bool):
loader = DataLoader(
dataset=TTADataset(root, df, test_transform, tta=tta),
shuffle=False,
batch_size=batch_size,
num_workers=workers,
)
model.eval()
all_outputs, all_ids = [], []
with torch.no_grad():
for inputs, ids in tqdm.tqdm(loader, desc='Predict'):
if use_cuda:
inputs = inputs.cuda()
_,_, out = model(inputs)
outputs = torch.sigmoid(out)
all_outputs.append(outputs.data.cpu().numpy())
all_ids.extend(ids)
df = pd.DataFrame(
data=np.concatenate(all_outputs),
index=all_ids,
columns=map(str, range(N_CLASSES)))
df = mean_df(df)
df.to_hdf(out_path, 'prob', index_label='id')
print(f'Saved predictions to {out_path}')
def train(args, model: nn.Module, criterion, *, params,folds,
init_optimizer, use_cuda,
n_epochs=None, patience=2, max_lr_changes=2) -> bool:
lr = args.lr
n_epochs = n_epochs or args.n_epochs
params = list(params)
optimizer = init_optimizer(params, lr)
run_root = Path(args.run_root)
model_path = run_root / 'model.pt'
best_model_path = run_root / 'best-model.pt'
# pretrain_path = Path('../input/model1')/'best-model.pt'
if best_model_path.exists():
state = load_model(model,best_model_path)
epoch = state['epoch']
step = state['step']
best_valid_loss = state['best_valid_loss']
else:
epoch = 1
step = 0
best_valid_loss = float('inf')
lr_changes = 0
save = lambda ep: torch.save({
'model': model.state_dict(),
'epoch': ep,
'step': step,
'best_valid_loss': best_valid_loss
}, str(model_path))
report_each = 10
log = run_root.joinpath('train.log').open('at', encoding='utf8')
valid_losses = []
lr_reset_epoch = epoch
### doing cv
train_fold = folds[folds['fold'] != 0]
valid_fold = folds[folds['fold'] == 0]
def make_loader(df: pd.DataFrame, image_transform) -> DataLoader:
return DataLoader(
TrainDataset(train_root, df, image_transform, debug=args.debug),
shuffle=True,
batch_size=args.batch_size,
num_workers=args.workers,
)
train_loader = make_loader(train_fold, train_transform)
valid_loader = make_loader(valid_fold, test_transform)
##############
#validation(model, criterion, valid_loader, use_cuda)
#validation2(model, criterion, valid_loader, use_cuda)
cultureloss = nn.BCEWithLogitsLoss(reduction = 'none',pos_weight = torch.ones([398]))
tagloss = nn.BCEWithLogitsLoss(reduction='none',pos_weight = torch.ones([705]))
cultureloss = cultureloss.cuda()
tagloss = tagloss.cuda()
scheduler = lr_scheduler.CosineAnnealingLR(optimizer, T_max = 20)
for epoch in range(epoch, n_epochs + 1):
scheduler.step()
model.train()
losses = []
tq = tqdm.tqdm(total=(len(train_loader) * args.batch_size))
tq.set_description(f'Epoch {epoch}, lr {lr}')
tl = train_loader
try:
mean_loss = 0
for i, (inputs, targets) in enumerate(tl):
if use_cuda:
inputs, targets = inputs.cuda(), targets.cuda()
#label smoothing
batch_size = inputs.size(0)
#smoothed_labels =0.9*targets + 0.1*(torch.ones((batch_size,N_CLASSES)).cuda()-targets)
#smoothed_labels = smoothed_labels.cuda()
h1, h3,outputs = model(inputs)
loss = 10*_reduce_loss(cultureloss(h1, targets[:,:398])) + _reduce_loss(tagloss(h3, targets[:,398:])) + _reduce_loss(criterion(outputs, targets))
batch_size = inputs.size(0)
totalloss = _reduce_loss(criterion(outputs, targets))
(batch_size * loss).backward()
if (i + 1) % args.step == 0:
optimizer.step()
optimizer.zero_grad()
step += 1
tq.update(batch_size)
losses.append(totalloss.item())
mean_loss = np.mean(losses[-report_each:])
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Oct 3 21:31:48 2019
@author: bill
This contains all the functions needed to execute the main NMF Analysis strategy as contained in the NMF_Analysis class.
"""
import pickle
import numpy as np
import scipy.sparse
from sklearn.decomposition import NMF
import sklearn.preprocessing
import scipy
'''
Modifications to H that ensure each topic is mapped to a unit vector in the term space.
'''
def norm_fun(vector):
return np.linalg.norm(vector) #Length (2-norm) of the vector, used to rescale each topic to unit length in term space.
def b_mat(H):
num_topics = np.shape(H)[0]
B = np.zeros((num_topics,num_topics), dtype = float)
B_inv = np.zeros((num_topics,num_topics), dtype = float)
for topic in range(num_topics):
norm = norm_fun(H[topic])
B[topic,topic] = 1/norm
B_inv[topic,topic] = norm
return B, B_inv
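# Hedged check (added for illustration): B rescales each row of H to unit 2-norm and
# B_inv undoes it, so (W @ B_inv) @ (B @ H) reproduces W @ H exactly.
def _demo_b_mat():
    H = np.abs(np.random.rand(3, 10)) + 1e-9
    B, B_inv = b_mat(H)
    H_unit = np.matmul(B, H)
    assert np.allclose(np.linalg.norm(H_unit, axis=1), 1.0)
    assert np.allclose(np.matmul(B_inv, H_unit), H)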
'''
The main function to run NMF on the desired number of topics.
'''
def run_ensemble_NMF_strategy(num_topics, num_folds, num_runs, num_docs, doc_term_matrix):
#Defines the number of elements in each fold and ensures that the total sums correctly
fold_sizes = (num_docs // num_folds) * np.ones(num_folds, dtype=int)
fold_sizes[:num_docs % num_folds] += 1
#Creates a list that will save all the final H matrices for the last NMF application.
H_list = []
#For every run over all folds
for run in range(num_runs):
doc_ids = np.arange(num_docs)
np.random.shuffle(doc_ids)
current_fold = 0
for fold, fold_size in enumerate(fold_sizes):
#Updates the currentfold in the process
start, stop = current_fold, current_fold+fold_size
current_fold = stop
#Removes the current fold
sample_ids = list(doc_ids)
for id in doc_ids[start:stop]:
sample_ids.remove(id)
#
sample_doc_ids = []
for doc_index in sample_ids:
sample_doc_ids.append(doc_ids[doc_index])
S = doc_term_matrix[sample_ids,:]
S = scipy.sparse.csr_matrix(S)
model = NMF(init="nndsvd", n_components=num_topics)
W = model.fit_transform(S)  # fit each base NMF on the subsample with the current fold held out
H = model.components_
H_list.append(H)
H = 0.0
W = 0.0
model = 0.0
M = np.vstack(H_list)
model = NMF( init="nndsvd", n_components = num_topics )
W = model.fit_transform(M)
ensemble_H = model.components_
HT = sklearn.preprocessing.normalize( ensemble_H.T, "l2", axis=0 )
ensemble_W = doc_term_matrix.dot(HT)
#Updating the W and H matrices to normalize H.
B,B_inv = b_mat(ensemble_H)
ensemble_H = np.matmul(B,ensemble_H)
ensemble_W = np.matmul(ensemble_W, B_inv)
#!/usr/bin/env python
# Copyright 2014-2018 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: <NAME> <<EMAIL>>
#
'''
Integral transformation with analytic Fourier transformation
'''
import time
import numpy
from pyscf import lib
from pyscf import ao2mo
from pyscf.ao2mo import _ao2mo
from pyscf.ao2mo.incore import iden_coeffs, _conc_mos
from pyscf.lib import logger
from pyscf.pbc import tools
from pyscf.pbc.df.df_jk import zdotNN, zdotCN, zdotNC
from pyscf.pbc.df.fft_ao2mo import _format_kpts, _iskconserv
from pyscf.pbc.df.df_ao2mo import _mo_as_complex, _dtrans, _ztrans
from pyscf.pbc.df.df_ao2mo import warn_pbc2d_eri
from pyscf.pbc.lib.kpts_helper import is_zero, gamma_point
from pyscf import __config__
def get_eri(mydf, kpts=None,
compact=getattr(__config__, 'pbc_df_ao2mo_get_eri_compact', True)):
cell = mydf.cell
nao = cell.nao_nr()
kptijkl = _format_kpts(kpts)
if not _iskconserv(cell, kptijkl):
lib.logger.warn(cell, 'aft_ao2mo: momentum conservation not found in '
'the given k-points %s', kptijkl)
return numpy.zeros((nao,nao,nao,nao))
kpti, kptj, kptk, kptl = kptijkl
q = kptj - kpti
mesh = mydf.mesh
coulG = mydf.weighted_coulG(q, False, mesh)
nao_pair = nao * (nao+1) // 2
max_memory = max(2000, (mydf.max_memory - lib.current_memory()[0]) * .8)
####################
# gamma point, the integral is real and with s4 symmetry
if gamma_point(kptijkl):
eriR = numpy.zeros((nao_pair,nao_pair))
for pqkR, pqkI, p0, p1 \
in mydf.pw_loop(mesh, kptijkl[:2], q, max_memory=max_memory,
aosym='s2'):
vG = numpy.sqrt(coulG[p0:p1])
pqkR *= vG
pqkI *= vG
lib.ddot(pqkR, pqkR.T, 1, eriR, 1)
lib.ddot(pqkI, pqkI.T, 1, eriR, 1)
pqkR = pqkI = None
if not compact:
eriR = ao2mo.restore(1, eriR, nao).reshape(nao**2,-1)
return eriR
####################
# (kpt) i == j == k == l != 0
# (kpt) i == l && j == k && i != j && j != k =>
#
# complex integrals, N^4 elements
elif is_zero(kpti-kptl) and is_zero(kptj-kptk):
eriR = numpy.zeros((nao**2,nao**2))
eriI = numpy.zeros((nao**2,nao**2))
for pqkR, pqkI, p0, p1 \
in mydf.pw_loop(mesh, kptijkl[:2], q, max_memory=max_memory):
vG = numpy.sqrt(coulG[p0:p1])
pqkR *= vG
pqkI *= vG
# rho_pq(G+k_pq) * conj(rho_rs(G-k_rs))
zdotNC(pqkR, pqkI, pqkR.T, pqkI.T, 1, eriR, eriI, 1)
pqkR = pqkI = None
pqkR = pqkI = coulG = None
# transpose(0,1,3,2) because
# j == k && i == l =>
# (L|ij).transpose(0,2,1).conj() = (L^*|ji) = (L^*|kl) => (M|kl)
# rho_rs(-G+k_rs) = conj(transpose(rho_sr(G+k_sr), (0,2,1)))
eri = lib.transpose((eriR+eriI*1j).reshape(-1,nao,nao), axes=(0,2,1))
return eri.reshape(nao**2,-1)
####################
# aosym = s1, complex integrals
#
# If kpti == kptj, (kptl-kptk)*a has to be multiples of 2pi because of the wave
# vector symmetry. k is a fraction of reciprocal basis, 0 < k/b < 1, by definition.
# So kptl/b - kptk/b must be -1 < k/b < 1. => kptl == kptk
#
else:
eriR = numpy.zeros((nao**2,nao**2))
from abc import ABC, abstractmethod
import numpy as np
from scipy.stats import poisson
UP = 0
RIGHT = 1
DOWN = 2
LEFT = 3
class DiscreteEnvironment(ABC):
@abstractmethod
def __init__(self):
pass
@abstractmethod
def step(self,state,action):
pass
@abstractmethod
def reset(self):
pass
class Gambler_env(DiscreteEnvironment):
def __init__(self):
self.nstates = 101
self.p_h = 0.3
self.nactions = 0
self.P = {}
self.rewards = np.zeros(self.nstates)
self.rewards[100] = 1
def step(self,state,action):
pass
def reset(self):
pass
def set_p_h(self,p_h):
self.p_h = p_h
class Jack_env(DiscreteEnvironment):
def __init__(self):
self.max_cars = 20
self.max_move = 5
self.mu_rent_first_location = 3
self.mu_rent_second_location = 4
self.mu_return_first_location = 3
self.mu_return_second_location = 2
self.profit = 10
self.loss = 2
self.nstates = self.max_cars*self.max_cars
self.store_rent_1 = []
self.store_rent_2 = []
self.store_return_1 = []
self.store_return_2 = []
for i in range(11):
self.store_rent_1.append(poisson.pmf(i,self.mu_rent_first_location))
self.store_rent_2.append(poisson.pmf(i,self.mu_rent_second_location))
self.store_return_1.append(poisson.pmf(i,self.mu_return_first_location))
self.store_return_2.append(poisson.pmf(i,self.mu_return_second_location))
self.actions = np.arange(-self.max_move, self.max_move + 1)
def step(self,state,action):
pass
def reset(self):
pass
class Grid_1(DiscreteEnvironment):
def __init__(self):
self.shape = [8,8]
self.nstates = 64
self.nactions = 4
P = {}
itr = 0
temp = [(0 - 1) * np.random.random()]
"""
Credits : https://medium.com/@kennethjiang/calibrate-fisheye-lens-using-opencv-333b05afa0b0
"""
import numpy as np
import cv2
from tqdm.notebook import tqdm
import pickle
# You should replace these 3 lines with the output in calibration step
DIM = (1088, 1080)
K = np.array([[773.6719811071623, 0.0, 532.3446174857597], [0.0, 774.3187867828567, 565.9954169588382], [0.0, 0.0, 1.0]])
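# Hedged sketch (added; D below is a placeholder distortion vector, not taken from
# the original calibration output): typical fisheye undistortion using the DIM/K
# values above, following the credited tutorial.
# D = np.array([[0.0], [0.0], [0.0], [0.0]])
# map1, map2 = cv2.fisheye.initUndistortRectifyMap(K, D, np.eye(3), K, DIM, cv2.CV_16SC2)
# undistorted = cv2.remap(img, map1, map2, interpolation=cv2.INTER_LINEAR, borderMode=cv2.BORDER_CONSTANT)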
## IMPORT USEFUL PACKAGES
import os
import pandas as pd
import numpy as np
import cv2
import math
import sklearn
import csv
from sklearn.model_selection import train_test_split
from random import shuffle
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation,Lambda
from keras.layers import ELU
from keras.optimizers import Adam
from keras.layers import Conv2D, MaxPooling2D, Flatten
from keras.callbacks import ModelCheckpoint
import time
### LOADING IN AND READING DATA:
dataPath = '../udacity-track1-data/driving_log.csv'
### Read CSV File
# Read Data from CSV File
data_full = pd.read_csv(dataPath,
index_col = False)
data_full['direction'] = pd.Series('s', index=data_full.index)
print('Sucessfully accessed csv file')
# Define a image function loading the input image.
# *Note, will not normalize the image input until put image through pipeline. No point in doing it until then.
# Output: Image in RGB
def getImg(path):
img = cv2.imread(path)
img = cv2.cvtColor(img,cv2.COLOR_BGR2RGB)
return img
# Edit the path for reading the images in the right directory
def getPath(local_path):
filename = local_path.split("/")[-1]
host_path = '../udacity-track1-data/IMG/'+filename
# print(host_path)
return host_path
### Data Processing Scripts:
def brightness_augment(img):
# convert to HSV
img_aug = cv2.cvtColor(img,cv2.COLOR_RGB2HSV)
img_aug = np.array(img_aug, dtype = np.float64) # convert to float64 for proper HSV conversion
# random brightness
random_bright = 0.35+np.random.uniform()
# apply brightness augmentation
img_aug[:,:,2] = img_aug[:,:,2]*random_bright
img_aug[:,:,2][img_aug[:,:,2] > 254] = 255 # cap the maximum brightness to 255
#img_aug[:,:,2][img_aug[:,:,2] < 30] = 30 # limit the darkest pixels
#img_aug[:,:,2] = 50 # testing
# convert image back to RGB image
img_aug = np.array(img_aug, dtype = np.uint8) # needed for proper conversion back to RGB or else it wont work
img_aug = cv2.cvtColor(img_aug, cv2.COLOR_HSV2RGB)
return img_aug
def translate_img(img,steer,trans_range):
trans_range_y = 10
# Get Translations
trans_x = trans_range*np.random.uniform() - trans_range/2 # x - Left/Right
trans_y = trans_range_y*np.random.uniform() - trans_range_y/2 # y - Up/Down
# Update Steering Value
steer_trans = steer + trans_x/trans_range*2*0.2
# Create Translation Matrix
T_M = np.float32([[1,0,trans_x],[0,1,trans_y]])
# Apply Translation
img_trans = cv2.warpAffine(img,T_M,(img.shape[1],img.shape[0])) # translate image. Need (col, row) parameter.
return steer_trans, img_trans
def preprocess_img(img):
img_shape = img.shape # Shape output: [row,col,channel]
top_crop = 0.35 # % of top of image to crop out
bottom_crop = 25 # number of bottom pixles to crop out
# Crop out unwanted sections of image:
pp_img = img[math.floor(img_shape[0]*top_crop):(img_shape[0]-bottom_crop),0:img_shape[1]]
new_col = 64
new_row = 64
# Resize Image
pp_img = cv2.resize(pp_img,(new_col,new_row), interpolation=cv2.INTER_AREA)
return pp_img
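# Hedged check (added for illustration): a 160x320 simulator frame loses the top 35%
# and the bottom 25 px, then is resized to the 64x64 network input.
assert preprocess_img(np.zeros((160, 320, 3), dtype=np.uint8)).shape == (64, 64, 3)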
# Input is a row from the data_full csv file.
# Returns 2 of 3 images and steering values
# For training data
# *Note: <NAME> added an additional correction factor to y_steer (multiplied all steering outputs by 1.2).
# During testing, see if adding such a correction will improve accuracy.
def process_train_img(data_row, use_brightness, use_translation, trans_range):
correction_factor = 0.25
# Random combination of left, right, and center images
rand_combo = np.random.randint(3)
if (rand_combo == 0): # Left and Center
path_1 = getPath(data_row['left'][0].strip())
path_2 = getPath(data_row['center'][0].strip())
steer_cf_1 = correction_factor
steer_cf_2 = 0.0
if (rand_combo == 1): # Right and Center
path_1 = getPath(data_row['right'][0].strip())
path_2 = getPath(data_row['center'][0].strip())
steer_cf_1 = -correction_factor
steer_cf_2 = 0.0
if (rand_combo == 2): # Left and Right
path_1 = getPath(data_row['left'][0].strip())
path_2 = getPath(data_row['right'][0].strip())
steer_cf_1 = correction_factor
steer_cf_2 = -correction_factor
# Get Images
x_img_1 = getImg(path_1)
x_img_2 = getImg(path_2)
# Update Steering
y_steer_1 = data_row.steering[0] + steer_cf_1
y_steer_2 = data_row.steering[0] + steer_cf_2
# Brightness
if use_brightness:
x_img_1 = brightness_augment(x_img_1)
x_img_2 = brightness_augment(x_img_2)
# Translation
if use_translation:
y_steer_1, x_img_1 = translate_img(x_img_1, y_steer_1, trans_range)
y_steer_2, x_img_2 = translate_img(x_img_2, y_steer_2, trans_range)
# Preprocess
x_img_1 = np.array(preprocess_img(x_img_1))
x_img_2 = np.array(preprocess_img(x_img_2))
# Flip
flip_1, flip_2 = np.random.randint(2), np.random.randint(2)
if flip_1 == 0:
x_img_1 = cv2.flip(x_img_1,1)
y_steer_1 = -y_steer_1
if flip_2 == 0:
x_img_2 = cv2.flip(x_img_2,1)
y_steer_2 = -y_steer_2
return x_img_1, x_img_2, y_steer_1, y_steer_2
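# Hedged usage sketch (added; requires the simulator images on disk, so left as a comment):
# row = data_full.iloc[[0]].reset_index()
# x1, x2, y1, y2 = process_train_img(row, use_brightness=True, use_translation=True, trans_range=50)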
# Used for validation or testing data of data_full format. In my case, I only use this for training.
# There is a seperate preprocessing script within drive.py for preprocessing images from the simulator before they
# are put in the neural net!
# Input Images: Center images from Frame.
def process_predict_img(data_row):
# Get Image and Steer
x_img = getImg(getPath(data_row['center'][0].strip()))
y_steer = data_row.steering[0]
# Preprocess
x_img = np.array(preprocess_img(x_img))
return x_img, y_steer
## SPLIT DATA INTO TRAINING AND VALIDATION DATA SETS
# Create Index List of Training and Validation Frames
input_list = np.linspace(0,data_full.shape[0]-1,data_full.shape[0],dtype = int)
# Split Data
train_list, valid_list = train_test_split(input_list,test_size = 0.2)
## KERAS GENERATOR AND SUBSAMPLING
# Note that the number of images within the batch will be double of the batch size input because 2 images are pulled
# for each frame
def generate_train_batch(data, train_list, pr_keep, use_brightness, use_translation, trans_range, batch_size = 32):
new_row = 64
new_col = 64
thresh = 0.15
batch_size_n = int(np.round(2*batch_size))
# Create placeholder outputs (np Arrays)
batch_img = np.zeros((batch_size_n, new_row, new_col,3))
batch_steering = np.zeros(batch_size_n)
# Start infinate loop
while 1:
# Shuffle list each time batch is called
shuffle(train_list)
for i_batch in range(batch_size):
cont = True
# Continue Loop Until Pick Values Which Work:
while cont:
# Get Random data_row from training list
i = np.random.randint(len(train_list)) # Pull Index from List
index_train = train_list[i] # Get data_row with pulled index
data_row = data.iloc[[index_train]].reset_index()
# Process Images
x1, x2, y1, y2 = process_train_img(data_row, use_brightness, use_translation, trans_range)
# Generate random num and check if steering values are above threshold.
randVal = np.random.uniform()
if ((abs(float(y1)) < thresh) or (abs(float(y2)) < thresh)):
# if randVal is above probability thresh, throw away selection
if randVal > pr_keep:
cont = True
else:
cont = False
else:
cont = False
# Add images and steering values to batch
batch_img[(2*i_batch)] = x1
batch_img[(2*i_batch)+1] = x2
batch_steering[(2*i_batch)] = y1
batch_steering[(2*i_batch)+1] = y2
yield batch_img, batch_steering
# Note that the number of images within the batch will be double of the batch size input because 2 images are pulled
# for each frame
def generate_train_1img_batch(data, train_list, pr_keep, use_brightness, use_translation, trans_range, batch_size = 32):
new_row = 64
new_col = 64
thresh = 0.15
batch_size_n = int(np.round(2*batch_size))
# Create placeholder outputs (np Arrays)
batch_img = np.zeros((batch_size_n, new_row, new_col,3))
batch_steering = np.zeros(batch_size_n)
#!/usr/bin/python
# -*- coding: utf-8 -*-
""" Compute an optimal storage control policy
to smooth out the SEAREV power production fluctuations
<NAME> — June 2013
"""
from __future__ import division, print_function, unicode_literals
import sys
from datetime import datetime
import numpy as np
import scipy.stats as stats
import matplotlib as mpl
import matplotlib.pyplot as plt
# Load Searev model data:
from searev_data import searev_power, power_max, dt
# Tweak how images are plotted with imshow
mpl.rcParams['image.interpolation'] = 'none' # no interpolation
mpl.rcParams['image.origin'] = 'lower' # origin at lower left corner
mpl.rcParams['image.aspect'] = 'auto'
try:
from stodynprog import SysDescription, DPSolver
except ImportError:
sys.path.append('..')
from stodynprog import SysDescription, DPSolver
### SEAREV+storage dynamics description
# Searev AR(2) model at 0.1 s :
c1 = 1.9799
c2 = -0.9879
innov_std = 0.00347
innov_law = stats.norm(loc=0, scale=innov_std)
# Storage rated energy and power:
E_rated = 10 # [MJ]
P_rated = 1.1 # [MW]
a = 0.00 # loss factor
print('Storage ratings: {:.2f} MW / {:.2f} MJ ({:.2f} kWh)'.format(P_rated,
E_rated, E_rated/3.6))
def dyn_searev_sto(E_sto, Speed, Accel, P_sto, innov):
'''state transition of the "SEAREV + storage" system
State variables :
* E_sto
* Speed
* Accel
Control:
* P_sto
'''
# Stored energy:
E_sto_n = E_sto + (P_sto - a*abs(P_sto))*dt
# AR(2) model of the SEAREV:
Speed_n = (c1+c2)*Speed - dt*c2*Accel + innov
Accel_n = (c1+c2-1)/dt*Speed - c2*Accel + innov/dt
return (E_sto_n, Speed_n, Accel_n)
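# Hedged example (added for illustration): one storage/SEAREV step from a half-full
# store at rest, charging at 0.5 MW; with the loss factor a = 0 the stored energy
# simply increases by P_sto*dt.
if __name__ == '__main__':
    E1, spd1, acc1 = dyn_searev_sto(E_sto=5.0, Speed=0.0, Accel=0.0, P_sto=0.5, innov=0.0)
    assert abs(E1 - (5.0 + 0.5 * dt)) < 1e-12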
def admissible_controls(E_sto, Speed, Accel):
'''set of admissible control U(x_k) of an Energy storage
Controls is the stored power P_sto
Contrainsts of the Energy Storage are:
1) Energy stock boundaries : 0 ≤ E(k + 1) ≤ E_rated
2) Power limitation : -P_rated ≤ P_sto ≤ P_rated
'''
# 1) Constraints on P_sto:
P_neg = np.max(( -E_sto/(1+a)/dt, -P_rated))
P_pos = np.min(((E_rated - E_sto)/(1-a)/dt, P_rated))
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Obtaining the trajectory of the SO(3) N=1 vacuum on SL(2)x7.
Producing all the artefacts:
python3 -i -m dim4.papers.bfis2021.so3n1_on_sl2x7 all
"""
import os
# For interactive debugging only.
import pdb # pylint:disable=unused-import
import pprint
from dim4.papers.bfis2021 import analyze_sl2x7
from dim4.papers.bfis2021 import u4xr12_boundary_equilibrium
from dim4.so8.src import dyonic
from dim4.theta.src import gaugings
from m_theory_lib import algebra
from m_theory_lib import m_util as mu
from matplotlib import pyplot
import numpy
def get_theta_u4xr12(c=1.0):
"""Returns the Dyonic-U(4)|xR12 Theta-tensor."""
spin8 = algebra.g.spin8
su8 = algebra.g.su8
theta = numpy.zeros([56, 133])
d6 = numpy.diag([0.0] * 6 + [1.0] * 2)
cd6 = numpy.eye(8) - d6
theta[:28, 105:] += (-1 / 64.0) * mu.nsum(
'Iij,Kkl,ijab,klcd,ac,bd->IK',
su8.m_28_8_8, su8.m_28_8_8,
spin8.gamma_vvss, spin8.gamma_vvss,
cd6, numpy.eye(8))
theta[28:, 105:] += (-1 / 64.0) * mu.nsum(
'Iij,Kkl,ijab,klcd,ac,bd->IK',
su8.m_28_8_8, su8.m_28_8_8,
spin8.gamma_vvss, spin8.gamma_vvss,
c * d6, numpy.eye(8))
theta[:28, :35] += -(1 / 16.0) * mu.nsum(
'Iij,Aab,ijcd,ac,bd->IA',
su8.m_28_8_8,
su8.m_35_8_8,
spin8.gamma_vvss,
numpy.eye(8), d6)
theta[28:, :35] += -(1 / 16.0) * mu.nsum(
'Iij,Aab,ijcd,ac,bd->IA',
su8.m_28_8_8,
su8.m_35_8_8,
spin8.gamma_vvss,
numpy.eye(8), c * d6)
return theta
if __name__ == '__main__':
target_dir = mu.home_relative('tmp/traj_so3n1')
trajectory_npy_filename = os.path.join(
target_dir, 'trajectory_so3n1.npy')
sl2x7 = algebra.g.e7.sl2x7[:2, :, :].reshape(14, 133)
subspace_an = sl2x7[:, :70].T
sugra = dyonic.SO8c_SUGRA(subspace_an=subspace_an)
ds_step = 0.003
scan_boundary_gauging_num_samples = 50
scan_file = os.path.join(target_dir, 'u4xr12_equilibria.csv')
analyzed_file = os.path.join(target_dir, 'u4xr12_equilibria_analyzed.pytxt')
os.makedirs(target_dir, exist_ok=True)
if mu.arg_enabled(__name__, 'compute_trajectory'):
print('# Computing SO(3) N=1 trajectory on SL2x7...')
v14 = analyze_sl2x7.v14_from_7z(analyze_sl2x7.get_7z_from_bfp_z123(
# Numbers match Eq. (4.31) in BFP, https://arxiv.org/abs/1909.10969
(0.1696360+0.1415740j, 0.4833214+0.3864058j, -0.3162021-0.5162839j)))
v70_so3n1 = subspace_an.dot(v14)
# Check that we do have the correct equilibrium.
pot, stat = sugra.potential_and_stationarity(v70_so3n1,
t_omega=mu.tff64(0.0))
assert abs(-13.84096 - pot) < 1e-4 and stat < 1e-8
dyonic.analyze_omega_deformation(
mu.home_relative(target_dir),
v70_so3n1,
ds=ds_step)
glob_pos, glob_neg = (
os.path.join(target_dir, f'S1384096/omega_0.0000_{tag}_*.log')
for tag in ('pos', 'neg'))
tdata = dyonic.collect_trajectory_logs(glob_pos, glob_neg)
numpy.save(trajectory_npy_filename, tdata)
if mu.arg_enabled(__name__, 'extrapolate_and_plot'):
print('# Extrapolating trajectory and plotting...')
tdata = numpy.load(trajectory_npy_filename)
omega_min, omega_max = (-0.25 * numpy.pi), (0.5 * numpy.pi)
pot_stat_zs_js_by_omega = (
analyze_sl2x7.get_pot_stat_zs_js_by_omega_from_trajectory_data(tdata))
trajectory_fn_zs = analyze_sl2x7.get_trajectory_fn_zs(
sugra,
{omega: psz[2] for omega, psz in pot_stat_zs_js_by_omega.items()},
omega_min, omega_max)
figs, singular_values = analyze_sl2x7.plot_trajectory(
sugra,
trajectory_fn_zs,
numpy.linspace(omega_min, omega_max, 200))
# pylint: disable-msg=E1101,W0612
from datetime import datetime, timedelta
import os
import operator
import unittest
import cStringIO as StringIO
import nose
from numpy import nan
import numpy as np
import numpy.ma as ma
from pandas import Index, Series, TimeSeries, DataFrame, isnull, notnull
from pandas.core.index import MultiIndex
import pandas.core.datetools as datetools
from pandas.util import py3compat
from pandas.util.testing import assert_series_equal, assert_almost_equal
import pandas.util.testing as tm
#-------------------------------------------------------------------------------
# Series test cases
JOIN_TYPES = ['inner', 'outer', 'left', 'right']
class CheckNameIntegration(object):
def test_scalarop_preserve_name(self):
result = self.ts * 2
self.assertEquals(result.name, self.ts.name)
def test_copy_name(self):
result = self.ts.copy()
self.assertEquals(result.name, self.ts.name)
# def test_copy_index_name_checking(self):
# # don't want to be able to modify the index stored elsewhere after
# # making a copy
# self.ts.index.name = None
# cp = self.ts.copy()
# cp.index.name = 'foo'
# self.assert_(self.ts.index.name is None)
def test_append_preserve_name(self):
result = self.ts[:5].append(self.ts[5:])
self.assertEquals(result.name, self.ts.name)
def test_binop_maybe_preserve_name(self):
# names match, preserve
result = self.ts * self.ts
self.assertEquals(result.name, self.ts.name)
result = self.ts * self.ts[:-2]
self.assertEquals(result.name, self.ts.name)
# names don't match, don't preserve
cp = self.ts.copy()
cp.name = 'something else'
result = self.ts + cp
self.assert_(result.name is None)
def test_combine_first_name(self):
result = self.ts.combine_first(self.ts[:5])
self.assertEquals(result.name, self.ts.name)
def test_getitem_preserve_name(self):
result = self.ts[self.ts > 0]
self.assertEquals(result.name, self.ts.name)
result = self.ts[[0, 2, 4]]
self.assertEquals(result.name, self.ts.name)
result = self.ts[5:10]
self.assertEquals(result.name, self.ts.name)
def test_multilevel_name_print(self):
index = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux'],
['one', 'two', 'three']],
labels=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3],
[0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=['first', 'second'])
s = Series(range(0,len(index)), index=index, name='sth')
expected = ["first second",
"foo one 0",
" two 1",
" three 2",
"bar one 3",
" two 4",
"baz two 5",
" three 6",
"qux one 7",
" two 8",
" three 9",
"Name: sth"]
expected = "\n".join(expected)
self.assertEquals(repr(s), expected)
def test_multilevel_preserve_name(self):
index = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux'],
['one', 'two', 'three']],
labels=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3],
[0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=['first', 'second'])
s = Series(np.random.randn(len(index)), index=index, name='sth')
result = s['foo']
result2 = s.ix['foo']
self.assertEquals(result.name, s.name)
self.assertEquals(result2.name, s.name)
def test_name_printing(self):
# test small series
s = Series([0, 1, 2])
s.name = "test"
self.assert_("Name: test" in repr(s))
s.name = None
self.assert_(not "Name:" in repr(s))
# test big series (diff code path)
s = Series(range(0,1000))
s.name = "test"
self.assert_("Name: test" in repr(s))
s.name = None
self.assert_(not "Name:" in repr(s))
def test_pickle_preserve_name(self):
unpickled = self._pickle_roundtrip(self.ts)
self.assertEquals(unpickled.name, self.ts.name)
def _pickle_roundtrip(self, obj):
obj.save('__tmp__')
unpickled = Series.load('__tmp__')
os.remove('__tmp__')
return unpickled
def test_argsort_preserve_name(self):
result = self.ts.argsort()
self.assertEquals(result.name, self.ts.name)
def test_sort_index_name(self):
result = self.ts.sort_index(ascending=False)
self.assertEquals(result.name, self.ts.name)
def test_to_sparse_pass_name(self):
result = self.ts.to_sparse()
self.assertEquals(result.name, self.ts.name)
class SafeForSparse(object):
pass
class TestSeries(unittest.TestCase, CheckNameIntegration):
def setUp(self):
self.ts = tm.makeTimeSeries()
self.ts.name = 'ts'
self.series = tm.makeStringSeries()
self.series.name = 'series'
self.objSeries = tm.makeObjectSeries()
self.objSeries.name = 'objects'
self.empty = Series([], index=[])
def test_constructor(self):
# Recognize TimeSeries
self.assert_(isinstance(self.ts, TimeSeries))
# Pass in Series
derived = Series(self.ts)
self.assert_(isinstance(derived, TimeSeries))
self.assert_(tm.equalContents(derived.index, self.ts.index))
# Ensure new index is not created
self.assertEquals(id(self.ts.index), id(derived.index))
# Pass in scalar
scalar = Series(0.5)
self.assert_(isinstance(scalar, float))
# Mixed type Series
mixed = Series(['hello', np.NaN], index=[0, 1])
self.assert_(mixed.dtype == np.object_)
self.assert_(mixed[1] is np.NaN)
self.assert_(not isinstance(self.empty, TimeSeries))
self.assert_(not isinstance(Series({}), TimeSeries))
self.assertRaises(Exception, Series, np.random.randn(3, 3),
index=np.arange(3))
def test_constructor_empty(self):
empty = Series()
empty2 = Series([])
assert_series_equal(empty, empty2)
empty = Series(index=range(10))
empty2 = Series(np.nan, index=range(10))
assert_series_equal(empty, empty2)
def test_constructor_maskedarray(self):
data = ma.masked_all((3,), dtype=float)
result = Series(data)
expected = Series([nan, nan, nan])
assert_series_equal(result, expected)
data[0] = 0.0
data[2] = 2.0
index = ['a', 'b', 'c']
result = Series(data, index=index)
expected = Series([0.0, nan, 2.0], index=index)
assert_series_equal(result, expected)
def test_constructor_default_index(self):
s = Series([0, 1, 2])
assert_almost_equal(s.index, np.arange(3))
def test_constructor_corner(self):
df = tm.makeTimeDataFrame()
objs = [df, df]
s = Series(objs, index=[0, 1])
self.assert_(isinstance(s, Series))
def test_constructor_cast(self):
self.assertRaises(ValueError, Series, ['a', 'b', 'c'], dtype=float)
def test_constructor_dict(self):
d = {'a' : 0., 'b' : 1., 'c' : 2.}
result = Series(d, index=['b', 'c', 'd', 'a'])
expected = Series([1, 2, nan, 0], index=['b', 'c', 'd', 'a'])
assert_series_equal(result, expected)
def test_constructor_list_of_tuples(self):
data = [(1, 1), (2, 2), (2, 3)]
s = Series(data)
self.assertEqual(list(s), data)
def test_constructor_tuple_of_tuples(self):
data = ((1, 1), (2, 2), (2, 3))
s = Series(data)
self.assertEqual(tuple(s), data)
def test_fromDict(self):
data = {'a' : 0, 'b' : 1, 'c' : 2, 'd' : 3}
series = Series(data)
self.assert_(tm.is_sorted(series.index))
data = {'a' : 0, 'b' : '1', 'c' : '2', 'd' : datetime.now()}
series = Series(data)
self.assert_(series.dtype == np.object_)
data = {'a' : 0, 'b' : '1', 'c' : '2', 'd' : '3'}
series = Series(data)
self.assert_(series.dtype == np.object_)
data = {'a' : '0', 'b' : '1'}
series = Series(data, dtype=float)
self.assert_(series.dtype == np.float64)
def test_setindex(self):
# wrong type
series = self.series.copy()
self.assertRaises(TypeError, setattr, series, 'index', None)
# wrong length
series = self.series.copy()
self.assertRaises(AssertionError, setattr, series, 'index',
np.arange(len(series) - 1))
# works
series = self.series.copy()
series.index = np.arange(len(series))
self.assert_(isinstance(series.index, Index))
def test_array_finalize(self):
pass
def test_fromValue(self):
nans = Series(np.NaN, index=self.ts.index)
self.assert_(nans.dtype == np.float_)
self.assertEqual(len(nans), len(self.ts))
strings = Series('foo', index=self.ts.index)
self.assert_(strings.dtype == np.object_)
self.assertEqual(len(strings), len(self.ts))
d = datetime.now()
dates = Series(d, index=self.ts.index)
self.assert_(dates.dtype == np.object_)
self.assertEqual(len(dates), len(self.ts))
def test_contains(self):
tm.assert_contains_all(self.ts.index, self.ts)
def test_pickle(self):
unp_series = self._pickle_roundtrip(self.series)
unp_ts = self._pickle_roundtrip(self.ts)
assert_series_equal(unp_series, self.series)
assert_series_equal(unp_ts, self.ts)
def _pickle_roundtrip(self, obj):
obj.save('__tmp__')
unpickled = Series.load('__tmp__')
os.remove('__tmp__')
return unpickled
def test_getitem_get(self):
idx1 = self.series.index[5]
idx2 = self.objSeries.index[5]
self.assertEqual(self.series[idx1], self.series.get(idx1))
self.assertEqual(self.objSeries[idx2], self.objSeries.get(idx2))
self.assertEqual(self.series[idx1], self.series[5])
self.assertEqual(self.objSeries[idx2], self.objSeries[5])
self.assert_(self.series.get(-1) is None)
self.assertEqual(self.series[5], self.series.get(self.series.index[5]))
# missing
d = self.ts.index[0] - datetools.bday
self.assertRaises(KeyError, self.ts.__getitem__, d)
def test_iget(self):
s = Series(np.random.randn(10), index=range(0, 20, 2))
for i in range(len(s)):
result = s.iget(i)
exp = s[s.index[i]]
assert_almost_equal(result, exp)
# pass a slice
result = s.iget(slice(1, 3))
expected = s.ix[2:4]
assert_series_equal(result, expected)
def test_getitem_regression(self):
s = Series(range(5), index=range(5))
result = s[range(5)]
assert_series_equal(result, s)
def test_getitem_slice_bug(self):
s = Series(range(10), range(10))
result = s[-12:]
assert_series_equal(result, s)
result = s[-7:]
assert_series_equal(result, s[3:])
result = s[:-12]
assert_series_equal(result, s[:0])
def test_getitem_int64(self):
idx = np.int64(5)
self.assertEqual(self.ts[idx], self.ts[5])
def test_getitem_fancy(self):
slice1 = self.series[[1,2,3]]
slice2 = self.objSeries[[1,2,3]]
self.assertEqual(self.series.index[2], slice1.index[1])
self.assertEqual(self.objSeries.index[2], slice2.index[1])
self.assertEqual(self.series[2], slice1[1])
self.assertEqual(self.objSeries[2], slice2[1])
def test_getitem_boolean(self):
s = self.series
mask = s > s.median()
# passing list is OK
result = s[list(mask)]
expected = s[mask]
assert_series_equal(result, expected)
self.assert_(np.array_equal(result.index, s.index[mask]))
def test_getitem_generator(self):
gen = (x > 0 for x in self.series)
result = self.series[gen]
result2 = self.series[iter(self.series > 0)]
expected = self.series[self.series > 0]
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
def test_getitem_boolean_object(self):
# using column from DataFrame
s = self.series
mask = s > s.median()
omask = mask.astype(object)
# getitem
result = s[omask]
expected = s[mask]
assert_series_equal(result, expected)
# setitem
cop = s.copy()
cop[omask] = 5
s[mask] = 5
assert_series_equal(cop, s)
# nans raise exception
omask[5:10] = np.nan
self.assertRaises(Exception, s.__getitem__, omask)
self.assertRaises(Exception, s.__setitem__, omask, 5)
def test_getitem_setitem_boolean_corner(self):
ts = self.ts
mask_shifted = ts.shift(1, offset=datetools.bday) > ts.median()
self.assertRaises(Exception, ts.__getitem__, mask_shifted)
self.assertRaises(Exception, ts.__setitem__, mask_shifted, 1)
self.assertRaises(Exception, ts.ix.__getitem__, mask_shifted)
self.assertRaises(Exception, ts.ix.__setitem__, mask_shifted, 1)
def test_getitem_setitem_slice_integers(self):
s = Series(np.random.randn(8), index=[2, 4, 6, 8, 10, 12, 14, 16])
result = s[:4]
expected = s.reindex([2, 4, 6, 8])
assert_series_equal(result, expected)
s[:4] = 0
self.assert_((s[:4] == 0).all())
self.assert_(not (s[4:] == 0).any())
def test_getitem_out_of_bounds(self):
# don't segfault, GH #495
self.assertRaises(IndexError, self.ts.__getitem__, len(self.ts))
def test_getitem_box_float64(self):
value = self.ts[5]
self.assert_(isinstance(value, np.float64))
def test_getitem_ambiguous_keyerror(self):
s = Series(range(10), index=range(0, 20, 2))
self.assertRaises(KeyError, s.__getitem__, 1)
self.assertRaises(KeyError, s.ix.__getitem__, 1)
def test_setitem_ambiguous_keyerror(self):
s = Series(range(10), index=range(0, 20, 2))
self.assertRaises(KeyError, s.__setitem__, 1, 5)
self.assertRaises(KeyError, s.ix.__setitem__, 1, 5)
def test_slice(self):
numSlice = self.series[10:20]
numSliceEnd = self.series[-10:]
objSlice = self.objSeries[10:20]
self.assert_(self.series.index[9] not in numSlice.index)
self.assert_(self.objSeries.index[9] not in objSlice.index)
self.assertEqual(len(numSlice), len(numSlice.index))
self.assertEqual(self.series[numSlice.index[0]],
numSlice[numSlice.index[0]])
self.assertEqual(numSlice.index[1], self.series.index[11])
self.assert_(tm.equalContents(numSliceEnd,
np.array(self.series)[-10:]))
# test return view
sl = self.series[10:20]
sl[:] = 0
self.assert_((self.series[10:20] == 0).all())
def test_slice_can_reorder_not_uniquely_indexed(self):
s = Series(1, index=['a', 'a', 'b', 'b', 'c'])
result = s[::-1] # it works!
def test_setitem(self):
self.ts[self.ts.index[5]] = np.NaN
self.ts[[1,2,17]] = np.NaN
self.ts[6] = np.NaN
self.assert_(np.isnan(self.ts[6]))
self.assert_(np.isnan(self.ts[2]))
self.ts[np.isnan(self.ts)] = 5
self.assert_(not np.isnan(self.ts[2]))
# caught this bug when writing tests
series = Series(tm.makeIntIndex(20).astype(float),
index=tm.makeIntIndex(20))
series[::2] = 0
self.assert_((series[::2] == 0).all())
# set item that's not contained
self.assertRaises(Exception, self.series.__setitem__,
'foobar', 1)
def test_set_value(self):
idx = self.ts.index[10]
res = self.ts.set_value(idx, 0)
self.assert_(res is self.ts)
self.assertEqual(self.ts[idx], 0)
res = self.series.set_value('foobar', 0)
self.assert_(res is not self.series)
self.assert_(res.index[-1] == 'foobar')
self.assertEqual(res['foobar'], 0)
def test_setslice(self):
sl = self.ts[5:20]
self.assertEqual(len(sl), len(sl.index))
self.assertEqual(len(sl.index.indexMap), len(sl.index))
def test_basic_getitem_setitem_corner(self):
# invalid tuples, e.g. self.ts[:, None] vs. self.ts[:, 2]
self.assertRaises(Exception, self.ts.__getitem__,
(slice(None, None), 2))
self.assertRaises(Exception, self.ts.__setitem__,
(slice(None, None), 2), 2)
# weird lists. [slice(0, 5)] will work but not two slices
result = self.ts[[slice(None, 5)]]
expected = self.ts[:5]
assert_series_equal(result, expected)
# OK
self.assertRaises(Exception, self.ts.__getitem__,
[5, slice(None, None)])
self.assertRaises(Exception, self.ts.__setitem__,
[5, slice(None, None)], 2)
def test_basic_getitem_with_labels(self):
indices = self.ts.index[[5, 10, 15]]
result = self.ts[indices]
expected = self.ts.reindex(indices)
assert_series_equal(result, expected)
result = self.ts[indices[0]:indices[2]]
expected = self.ts.ix[indices[0]:indices[2]]
assert_series_equal(result, expected)
# integer indexes, be careful
s = Series(np.random.randn(10), index=range(0, 20, 2))
inds = [0, 2, 5, 7, 8]
arr_inds = np.array([0, 2, 5, 7, 8])
result = s[inds]
expected = s.reindex(inds)
assert_series_equal(result, expected)
result = s[arr_inds]
expected = s.reindex(arr_inds)
assert_series_equal(result, expected)
def test_basic_setitem_with_labels(self):
indices = self.ts.index[[5, 10, 15]]
cp = self.ts.copy()
exp = self.ts.copy()
cp[indices] = 0
exp.ix[indices] = 0
assert_series_equal(cp, exp)
cp = self.ts.copy()
exp = self.ts.copy()
cp[indices[0]:indices[2]] = 0
exp.ix[indices[0]:indices[2]] = 0
assert_series_equal(cp, exp)
# integer indexes, be careful
s = Series(np.random.randn(10), index=range(0, 20, 2))
inds = [0, 4, 6]
arr_inds = np.array([0, 4, 6])
cp = s.copy()
exp = s.copy()
s[inds] = 0
s.ix[inds] = 0
assert_series_equal(cp, exp)
cp = s.copy()
exp = s.copy()
s[arr_inds] = 0
s.ix[arr_inds] = 0
assert_series_equal(cp, exp)
inds_notfound = [0, 4, 5, 6]
arr_inds_notfound = np.array([0, 4, 5, 6])
self.assertRaises(Exception, s.__setitem__, inds_notfound, 0)
self.assertRaises(Exception, s.__setitem__, arr_inds_notfound, 0)
def test_ix_getitem(self):
inds = self.series.index[[3,4,7]]
assert_series_equal(self.series.ix[inds], self.series.reindex(inds))
assert_series_equal(self.series.ix[5::2], self.series[5::2])
# slice with indices
d1, d2 = self.ts.index[[5, 15]]
result = self.ts.ix[d1:d2]
expected = self.ts.truncate(d1, d2)
assert_series_equal(result, expected)
# boolean
mask = self.series > self.series.median()
assert_series_equal(self.series.ix[mask], self.series[mask])
# ask for index value
self.assertEquals(self.ts.ix[d1], self.ts[d1])
self.assertEquals(self.ts.ix[d2], self.ts[d2])
def test_ix_getitem_not_monotonic(self):
d1, d2 = self.ts.index[[5, 15]]
ts2 = self.ts[::2][::-1]
self.assertRaises(KeyError, ts2.ix.__getitem__, slice(d1, d2))
self.assertRaises(KeyError, ts2.ix.__setitem__, slice(d1, d2), 0)
def test_ix_getitem_setitem_integer_slice_keyerrors(self):
s = Series(np.random.randn(10), index=range(0, 20, 2))
# this is OK
cp = s.copy()
cp.ix[4:10] = 0
self.assert_((cp.ix[4:10] == 0).all())
# so is this
cp = s.copy()
cp.ix[3:11] = 0
self.assert_((cp.ix[3:11] == 0).values.all())
result = s.ix[4:10]
result2 = s.ix[3:11]
expected = s.reindex([4, 6, 8, 10])
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
# non-monotonic, raise KeyError
s2 = s[::-1]
self.assertRaises(KeyError, s2.ix.__getitem__, slice(3, 11))
self.assertRaises(KeyError, s2.ix.__setitem__, slice(3, 11), 0)
def test_ix_getitem_iterator(self):
idx = iter(self.series.index[:10])
result = self.series.ix[idx]
assert_series_equal(result, self.series[:10])
def test_ix_setitem(self):
inds = self.series.index[[3,4,7]]
result = self.series.copy()
result.ix[inds] = 5
expected = self.series.copy()
expected[[3,4,7]] = 5
assert_series_equal(result, expected)
result.ix[5:10] = 10
expected[5:10] = 10
assert_series_equal(result, expected)
# set slice with indices
d1, d2 = self.series.index[[5, 15]]
result.ix[d1:d2] = 6
expected[5:16] = 6 # because it's inclusive
assert_series_equal(result, expected)
# set index value
self.series.ix[d1] = 4
self.series.ix[d2] = 6
self.assertEquals(self.series[d1], 4)
self.assertEquals(self.series[d2], 6)
def test_ix_setitem_boolean(self):
mask = self.series > self.series.median()
result = self.series.copy()
result.ix[mask] = 0
expected = self.series
expected[mask] = 0
assert_series_equal(result, expected)
def test_ix_setitem_corner(self):
inds = list(self.series.index[[5, 8, 12]])
self.series.ix[inds] = 5
self.assertRaises(Exception, self.series.ix.__setitem__,
inds + ['foo'], 5)
def test_get_set_boolean_different_order(self):
ordered = self.series.order()
# setting
copy = self.series.copy()
copy[ordered > 0] = 0
expected = self.series.copy()
expected[expected > 0] = 0
assert_series_equal(copy, expected)
# getting
sel = self.series[ordered > 0]
exp = self.series[self.series > 0]
assert_series_equal(sel, exp)
def test_repr(self):
str(self.ts)
str(self.series)
str(self.series.astype(int))
str(self.objSeries)
str(Series(tm.randn(1000), index=np.arange(1000)))
str(Series(tm.randn(1000), index=np.arange(1000, 0, step=-1)))
"""."""
import math as _math
import numpy as _np
import mathphys as _mp
from .. import lattice as _lattice
from .. import accelerator as _accelerator
from .twiss import calc_twiss as _calc_twiss
from .miscellaneous import get_rf_voltage as _get_rf_voltage, \
get_revolution_frequency as _get_revolution_frequency, \
get_curlyh as _get_curlyh, get_mcf as _get_mcf
class EqParamsFromRadIntegrals:
"""."""
def __init__(self, accelerator, energy_offset=0.0):
"""."""
self._acc = _accelerator.Accelerator()
self._energy_offset = energy_offset
self._m66 = None
self._twi = None
self._alpha = 0.0
        self._integralsx = _np.zeros(7)
        self._integralsy = _np.zeros(7)
self.accelerator = accelerator
def __str__(self):
"""."""
rst = ''
fmti = '{:32s}: '
fmtr = '{:33s}: '
fmtn = '{:.4g}'
fmte = fmtr + fmtn
rst += fmte.format('\nEnergy [GeV]', self.accelerator.energy*1e-9)
rst += fmte.format('\nEnergy Deviation [%]', self.energy_offset*100)
ints = 'I1x,I4x,I5x,I6x'.split(',')
rst += '\n' + fmti.format(', '.join(ints))
rst += ', '.join([fmtn.format(getattr(self, x)) for x in ints])
ints = 'I1y,I4y,I5y,I6y'.split(',')
rst += '\n' + fmti.format(', '.join(ints))
rst += ', '.join([fmtn.format(getattr(self, x)) for x in ints])
ints = 'I2,I3,I3a'.split(',')
rst += '\n' + fmti.format(', '.join(ints))
rst += ', '.join([fmtn.format(getattr(self, x)) for x in ints])
ints = 'Jx,Jy,Je'.split(',')
rst += '\n' + fmti.format(', '.join(ints))
rst += ', '.join([fmtn.format(getattr(self, x)) for x in ints])
ints = 'taux,tauy,taue'.split(',')
rst += '\n' + fmti.format(', '.join(ints) + ' [ms]')
rst += ', '.join([fmtn.format(1000*getattr(self, x)) for x in ints])
ints = 'alphax,alphay,alphae'.split(',')
rst += '\n' + fmti.format(', '.join(ints) + ' [Hz]')
rst += ', '.join([fmtn.format(getattr(self, x)) for x in ints])
rst += fmte.format('\nmomentum compaction x 1e4', self.alpha*1e4)
rst += fmte.format('\nenergy loss [keV]', self.U0/1000)
rst += fmte.format('\novervoltage', self.overvoltage)
rst += fmte.format('\nsync phase [°]', self.syncphase*180/_math.pi)
rst += fmte.format('\nsync tune', self.synctune)
rst += fmte.format('\nhorizontal emittance [nm.rad]', self.emitx*1e9)
rst += fmte.format('\nvertical emittance [pm.rad]', self.emity*1e12)
rst += fmte.format('\nnatural emittance [nm.rad]', self.emit0*1e9)
rst += fmte.format('\nnatural espread [%]', self.espread0*100)
rst += fmte.format('\nbunch length [mm]', self.bunlen*1000)
rst += fmte.format('\nRF energy accep. [%]', self.rf_acceptance*100)
return rst
@property
def accelerator(self):
"""."""
return self._acc
@accelerator.setter
def accelerator(self, acc):
if isinstance(acc, _accelerator.Accelerator):
self._acc = acc
self._calc_radiation_integrals()
@property
def energy_offset(self):
"""."""
return self._energy_offset
@energy_offset.setter
def energy_offset(self, value):
self._energy_offset = float(value)
self._calc_radiation_integrals()
@property
def twiss(self):
"""."""
return self._twi
@property
def m66(self):
"""."""
return self._m66
@property
def I1x(self):
"""."""
return self._integralsx[0]
@property
def I2(self):
"""I2 is the same for x and y."""
return self._integralsx[1]
@property
def I3(self):
"""I3 is the same for x and y."""
return self._integralsx[2]
@property
def I3a(self):
"""I3a is the same for x and y."""
return self._integralsx[3]
@property
def I4x(self):
"""."""
return self._integralsx[4]
@property
def I5x(self):
"""."""
return self._integralsx[5]
@property
def I6x(self):
"""."""
return self._integralsx[6]
@property
def I1y(self):
"""."""
return self._integralsy[0]
@property
def I4y(self):
"""."""
return self._integralsy[4]
@property
def I5y(self):
"""."""
return self._integralsy[5]
@property
def I6y(self):
"""."""
return self._integralsy[6]
@property
def Jx(self):
"""."""
return 1.0 - self.I4x/self.I2
@property
def Jy(self):
"""."""
return 1.0 - self.I4y/self.I2
@property
def Je(self):
"""."""
return 2.0 + (self.I4x + self.I4y)/self.I2
@property
def alphax(self):
"""."""
Ca = _mp.constants.Ca
E0 = self._acc.energy / 1e9 # in GeV
E0 *= (1 + self._energy_offset)
leng = self._acc.length
return Ca * E0**3 * self.I2 * self.Jx / leng
@property
def alphay(self):
"""."""
Ca = _mp.constants.Ca
E0 = self._acc.energy / 1e9 # in GeV
E0 *= (1 + self._energy_offset)
leng = self._acc.length
return Ca * E0**3 * self.I2 * self.Jy / leng
@property
def alphae(self):
"""."""
Ca = _mp.constants.Ca
E0 = self._acc.energy / 1e9 # in GeV
E0 *= (1 + self._energy_offset)
leng = self._acc.length
return Ca * E0**3 * self.I2 * self.Je / leng
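    # alphax/alphay/alphae are the radiation damping rates [1/s],
    # alpha_i = Ca * E0[GeV]^3 * I2 * J_i / C, where C is the lattice length;
    # the damping times taux/tauy/taue below are their reciprocals.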
@property
def taux(self):
"""."""
return 1/self.alphax
@property
def tauy(self):
"""."""
return 1/self.alphay
@property
def taue(self):
"""."""
return 1/self.alphae
@property
def espread0(self):
"""."""
Cq = _mp.constants.Cq
gamma = self._acc.gamma_factor
gamma *= (1 + self._energy_offset)
return _math.sqrt(
Cq * gamma**2 * self.I3 / (2*self.I2 + self.I4x + self.I4y))
@property
def emitx(self):
"""."""
Cq = _mp.constants.Cq
gamma = self._acc.gamma_factor
gamma *= (1 + self._energy_offset)
return Cq * gamma**2 * self.I5x / (self.Jx*self.I2)
@property
def emity(self):
"""."""
Cq = _mp.constants.Cq
gamma = self._acc.gamma_factor
gamma *= (1 + self._energy_offset)
return Cq * gamma**2 * self.I5y / (self.Jy*self.I2)
@property
def emit0(self):
"""."""
return self.emitx + self.emity
@property
def U0(self):
"""."""
E0 = self._acc.energy / 1e9 # in GeV
E0 *= (1 + self._energy_offset)
rad_cgamma = _mp.constants.rad_cgamma
return rad_cgamma/(2*_math.pi) * E0**4 * self.I2 * 1e9 # in eV
@property
def overvoltage(self):
"""."""
v_cav = _get_rf_voltage(self._acc)
return v_cav/self.U0
@property
def syncphase(self):
"""."""
return _math.pi - _math.asin(1/self.overvoltage)
@property
def alpha(self):
"""."""
return self._alpha
@property
def etac(self):
"""."""
gamma = self._acc.gamma_factor
gamma *= (1 + self._energy_offset)
return 1/(gamma*gamma) - self.alpha
@property
def synctune(self):
"""."""
E0 = self._acc.energy
E0 *= (1 + self._energy_offset)
v_cav = _get_rf_voltage(self._acc)
harmon = self._acc.harmonic_number
return _math.sqrt(
self.etac*harmon*v_cav*_math.cos(self.syncphase)/(2*_math.pi*E0))
@property
def bunlen(self):
"""."""
vel = self._acc.velocity
rev_freq = _get_revolution_frequency(self._acc)
bunlen = vel * abs(self.etac) * self.espread0
bunlen /= 2*_math.pi * self.synctune * rev_freq
return bunlen
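    # Zero-current bunch length: sigma_s = v * |eta_c| * sigma_delta / (2*pi*Q_s*f_rev)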
@property
def rf_acceptance(self):
"""."""
E0 = self._acc.energy
sph = self.syncphase
V = _get_rf_voltage(self._acc)
ov = self.overvoltage
h = self._acc.harmonic_number
etac = self.etac
eaccep2 = V * _math.sin(sph) / (_math.pi*h*abs(etac)*E0)
eaccep2 *= 2 * (_math.sqrt(ov**2 - 1.0) - _math.acos(1.0/ov))
return _math.sqrt(eaccep2)
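    # Since V*sin(syncphase) = U0, the expression above is the standard RF bucket height:
    # (dE/E)_RF = sqrt(2*U0/(pi*h*|eta_c|*E0) * (sqrt(q^2 - 1) - acos(1/q))), with q the overvoltage.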
@property
def sigma_rx(self):
"""."""
emitx = self.emitx
espread0 = self.espread0
return _np.sqrt(emitx*self._twi.betax + (espread0*self._twi.etax)**2)
@property
def sigma_px(self):
"""."""
emitx = self.emitx
espread0 = self.espread0
        gammax = (1 + self._twi.alphax**2) / self._twi.betax  # Twiss gamma function
        return _np.sqrt(emitx*gammax + (espread0*self._twi.etapx)**2)
@property
def sigma_ry(self):
"""."""
emity = self.emity
espread0 = self.espread0
return _np.sqrt(emity*self._twi.betay + (espread0*self._twi.etay)**2)
@property
def sigma_py(self):
"""."""
emity = self.emity
espread0 = self.espread0
        gammay = (1 + self._twi.alphay**2) / self._twi.betay  # Twiss gamma function
        return _np.sqrt(emity*gammay + (espread0*self._twi.etapy)**2)
def as_dict(self):
"""."""
pars = {
'energy_offset', 'twiss',
'I1x', 'I2', 'I3', 'I3a', 'I4x', 'I5x', 'I6x',
'I1y', 'I4y', 'I5y', 'I6y',
'Jx', 'Jy', 'Je',
'alphax', 'alphay', 'alphae',
'taux', 'tauy', 'taue',
'espread0',
'emitx', 'emity', 'emit0',
            'bunlen',
'U0', 'overvoltage', 'syncphase', 'synctune',
'alpha', 'etac', 'rf_acceptance',
}
dic = {par: getattr(self, par) for par in pars}
dic['energy'] = self.accelerator.energy
return dic
def _calc_radiation_integrals(self):
"""Calculate radiation integrals for periodic systems."""
acc = self._acc
twi, m66 = _calc_twiss(
acc, indices='closed', energy_offset=self._energy_offset)
self._twi = twi
self._m66 = m66
self._alpha = _get_mcf(acc, energy_offset=self._energy_offset)
spos = _lattice.find_spos(acc, indices='closed')
etax, etapx, betax, alphax = twi.etax, twi.etapx, twi.betax, twi.alphax
etay, etapy, betay, alphay = twi.etay, twi.etapy, twi.betay, twi.alphay
n = len(acc)
angle, angle_in, angle_out, K = _np.zeros((4, n))
for i in range(n):
angle[i] = acc[i].angle
angle_in[i] = acc[i].angle_in
angle_out[i] = acc[i].angle_out
K[i] = acc[i].K
idx, *_ = _np.nonzero(angle)
leng = spos[idx+1]-spos[idx]
rho = leng/angle[idx]
angle_in = angle_in[idx]
angle_out = angle_out[idx]
K = K[idx]
etax_in, etax_out = etax[idx], etax[idx+1]
etapx_in, etapx_out = etapx[idx], etapx[idx+1]
betax_in, betax_out = betax[idx], betax[idx+1]
alphax_in, alphax_out = alphax[idx], alphax[idx+1]
etay_in, etay_out = etay[idx], etay[idx+1]
etapy_in, etapy_out = etapy[idx], etapy[idx+1]
betay_in, betay_out = betay[idx], betay[idx+1]
alphay_in, alphay_out = alphay[idx], alphay[idx+1]
Hx_in = _get_curlyh(betax_in, alphax_in, etax_in, etapx_in)
        Hx_out = _get_curlyh(betax_out, alphax_out, etax_out, etapx_out)
Hy_in = _get_curlyh(betay_in, alphay_in, etay_in, etapy_in)
        Hy_out = _get_curlyh(betay_out, alphay_out, etay_out, etapy_out)
etax_avg = (etax_in + etax_out) / 2
etay_avg = (etay_in + etay_out) / 2
Hx_avg = (Hx_in + Hx_out) / 2
Hy_avg = (Hy_in + Hy_out) / 2
rho2, rho3 = rho**2, rho**3
rho3abs = _np.abs(rho3)
integralsx = _np.zeros(7)
integralsx[0] = _np.dot(etax_avg/rho, leng)
integralsx[1] = _np.dot(1/rho2, leng)
integralsx[2] = _np.dot(1/rho3abs, leng)
integralsx[3] = _np.dot(1/rho3, leng)
integralsx[4] = _np.dot(etax_avg/rho3 * (1+2*rho2*K), leng)
# for general wedge magnets:
integralsx[4] += sum((etax_in/rho2) * _np.tan(angle_in))
integralsx[4] += sum((etax_out/rho2) * _np.tan(angle_out))
integralsx[5] = _np.dot(Hx_avg / rho3abs, leng)
integralsx[6] = _np.dot((K*etax_avg)**2, leng)
self._integralsx = integralsx
integralsy = _np.zeros(7)
integralsy[0] = _np.dot(etay_avg/rho, leng)
integralsy[1] = integralsx[1]
integralsy[2] = integralsx[2]
integralsy[3] = integralsx[3]
integralsy[4] = _np.dot(etay_avg/rho3 * (1+2*rho2*K), leng)
# for general wedge magnets:
integralsy[4] += sum((etay_in/rho2) * _np.tan(angle_in))
        integralsy[4] += sum((etay_out/rho2) * _np.tan(angle_out))
##Syntax: run dssp_output_analysis.py length_of_protein dssp_output*.txt
import sys
from numpy import genfromtxt
import numpy as np
import os
from shutil import copy
phi_psi_outfile = 'output_phi_phi.txt'
tco_outfile = 'output_tco.txt'
racc_outfile = 'output_racc.txt'
hbond_outfile = 'output_hbond.txt'
hbond_total_outfile = 'output_hbondtotal.txt'
acc_total_outfile = 'output_acc_total.txt'
phi_psi_2his_outfile = 'output_phi_psi_2his.txt'
phi_psi_2his_no_GLY_outfile = 'output_phi_psi_no_GLY_2his.txt'
import_for_length = genfromtxt(sys.argv[1], delimiter='\t', dtype=float)
length = len(import_for_length)
#Creating Keys for computing relative solvent accessible surface area
#Values obtained from Wilke: Tien et al. 2013 http://dx.doi.org/10.1371/journal.pone.0080635
aa_acc_max = { \
'A': 129.0, 'R': 274.0, 'N': 195.0, 'D': 193.0,\
'C': 167.0, 'Q': 225.0, 'E': 223.0, 'G': 104.0,\
'H': 224.0, 'I': 197.0, 'L': 201.0, 'K': 236.0,\
'M': 224.0, 'F': 240.0, 'P': 159.0, 'S': 155.0,\
'T': 172.0, 'W': 285.0, 'Y': 263.0, 'V': 174.0}
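#Example: an ALA residue with a DSSP ACC of 65 A^2 gets relative accessibility 65/129.0 ~ 0.50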
#Creating Key for linking each amino acid to a Phi-Psi matrix
ALA = []
ARG = []
ASN = []
ASP = []
CYS = []
GLN = []
GLU = []
GLY = []
HIS = []
ILE = []
LEU = []
LYS = []
MET = []
PHE = []
PRO = []
SER = []
THR = []
TRP = []
TYR = []
VAL = []
aa_phi_mat = { \
'A': ALA, 'R': ARG, 'N': ASN, 'D': ASP,\
'C': CYS, 'Q': GLN, 'E': GLU, 'G': GLY,\
'H': HIS, 'I': ILE, 'L': LEU, 'K': LYS,\
'M': MET, 'F': PHE, 'P': PRO, 'S': SER,\
'T': THR, 'W': TRP, 'Y': TYR, 'V': VAL}
ALA_2 = []
ARG_2 = []
ASN_2 = []
ASP_2 = []
CYS_2 = []
GLN_2 = []
GLU_2 = []
GLY_2 = []
HIS_2 = []
ILE_2 = []
LEU_2 = []
LYS_2 = []
MET_2 = []
PHE_2 = []
PRO_2 = []
SER_2 = []
THR_2 = []
TRP_2 = []
TYR_2 = []
VAL_2 = []
Full_phi_psi_matrix = [ALA, ALA_2, ARG, ARG_2, ASN, ASN_2, ASP, ASP_2, CYS, CYS_2, GLN, GLN_2, GLU, GLU_2, GLY, GLY_2, HIS, HIS_2, ILE, ILE_2, LEU, LEU_2, LYS, LYS_2, MET, MET_2, PHE, PHE_2, PRO, PRO_2, SER, SER_2, THR, THR_2, TRP, TRP_2, TYR, TYR_2, VAL, VAL_2]
aa_psi_mat = { \
'A': ALA_2, 'R': ARG_2, 'N': ASN_2, 'D': ASP_2,\
'C': CYS_2, 'Q': GLN_2, 'E': GLU_2, 'G': GLY_2,\
'H': HIS_2, 'I': ILE_2, 'L': LEU_2, 'K': LYS_2,\
'M': MET_2, 'F': PHE_2, 'P': PRO_2, 'S': SER_2,\
'T': THR_2, 'W': TRP_2, 'Y': TYR_2, 'V': VAL_2}
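#Each one-letter code maps to a shared list: aa_phi_mat[aa] collects phi values and aa_psi_mat[aa]
#collects psi values across all DSSP files, e.g. aa_phi_mat['A'].append(-57.0) appends a
#(hypothetical) phi angle to the ALA list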
#Building Matrices for Holding/Analyzing Data
num_files = len(sys.argv) - 2 #only sys.argv[2:] are DSSP output files
racc_matrix = np.empty([num_files, int(length)])
tco_matrix = np.empty([num_files, int(length)])
full_hbonding_matrix = np.empty([num_files, 14])
total_acc_matrix = []
total_hbond_matrix = []
percent_data_array = np.zeros([length, 3]) # Helix, Sheet, Loop
for fnu,fna in enumerate(sys.argv[2:]):
lines = open(fna).readlines()
total_acc_matrix.append(float(lines[7][1:8]))
total_hbond_matrix.append(float(lines[8][2:6]))
for idx,item in enumerate(lines[8:22]):
full_hbonding_matrix[fnu][idx] = int(item[2:6])
for idx,item in enumerate(lines[28:]):
res_num = int(item[6:10])
res_aa = item[13]
if res_aa == 'X':
res_aa = 'Y'
max_for_rel = aa_acc_max[res_aa]
res_ss = item[16]
res_acc = float(int(item[35:38]))
res_rel_acc = res_acc/max_for_rel
racc_matrix[fnu][idx] = res_rel_acc
res_tco = float(item[85:92])
#if res_tco > 0.75:
# res_ss = 'H'
#if res_tco < -0.75:
# res_ss = 'E'
if res_ss == 'E' or res_ss == 'B':
percent_data_array[idx][1] += 1
elif res_ss == 'H' or res_ss == 'G' or res_ss == 'I':
percent_data_array[idx][0] += 1
else:
percent_data_array[idx][2] += 1
tco_matrix[fnu][idx] = res_tco
res_phi = float(item[103:109])
aa_phi_mat[res_aa].append(res_phi)
res_psi = float(item[109:115])
aa_psi_mat[res_aa].append(res_psi)
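#Fixed-width DSSP columns sliced above (assumed standard DSSP layout): [6:10] residue number,
#[13] one-letter amino acid, [16] secondary structure code, [35:38] ACC, [85:92] TCO,
#[103:109] PHI, [109:115] PSI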
#Full_phi_psi_matrix_map = map(None, *Full_phi_psi_matrix)
#pp_out = open(phi_psi_outfile, 'w')
#for i in range(len(Full_phi_psi_matrix_map)):
# for j in range(len(Full_phi_psi_matrix_map[0])):
# pp_out.write("%s\t" % Full_phi_psi_matrix_map[i][j])
# pp_out.write("\n")
#pp_out.close()
#Concatenate the per-residue dihedral lists into flat arrays for the 2D histograms
full_phi_list = np.empty(0)
for phi_vals in (ALA, ARG, ASN, ASP, CYS, GLN, GLU, GLY, HIS, ILE, LEU, LYS, MET, PHE, PRO, SER, THR, TRP, TYR, VAL):
    full_phi_list = np.append(full_phi_list, phi_vals)
full_phi_list_no_GLY = np.empty(0)
for phi_vals in (ALA, ARG, ASN, ASP, CYS, GLN, GLU, HIS, ILE, LEU, LYS, MET, PHE, PRO, SER, THR, TRP, TYR, VAL):
    full_phi_list_no_GLY = np.append(full_phi_list_no_GLY, phi_vals)
full_psi_list = np.empty(0)
for psi_vals in (ALA_2, ARG_2, ASN_2, ASP_2, CYS_2, GLN_2, GLU_2, GLY_2, HIS_2, ILE_2, LEU_2, LYS_2, MET_2, PHE_2, PRO_2, SER_2, THR_2, TRP_2, TYR_2, VAL_2):
    full_psi_list = np.append(full_psi_list, psi_vals)
full_psi_list_no_GLY = np.empty(0)
for psi_vals in (ALA_2, ARG_2, ASN_2, ASP_2, CYS_2, GLN_2, GLU_2, HIS_2, ILE_2, LEU_2, LYS_2, MET_2, PHE_2, PRO_2, SER_2, THR_2, TRP_2, TYR_2, VAL_2):
    full_psi_list_no_GLY = np.append(full_psi_list_no_GLY, psi_vals)
phi_psi_2his_1, phi_psi_2his_2, phi_psi_2his_3 = np.histogram2d(full_phi_list, full_psi_list, bins=121, range=[[-180,180], [-180,180]])
phi_psi_2his_no_GLY_1, phi_psi_2his_no_GLY_2, phi_psi_2his_no_GLY_3 = np.histogram2d(full_phi_list_no_GLY, full_psi_list_no_GLY, bins=121, range=[[-180,0], [-180,180]])
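#Sketch (assumption: the 2D histograms are meant to be written to the output files declared above):
#np.savetxt(phi_psi_2his_outfile, phi_psi_2his_1, fmt='%s', delimiter=' ', newline='\n')
#np.savetxt(phi_psi_2his_no_GLY_outfile, phi_psi_2his_no_GLY_1, fmt='%s', delimiter=' ', newline='\n')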
tam_out = open(acc_total_outfile, 'w')
for i in range(len(total_acc_matrix)):
tam_out.write("%s\n" % total_acc_matrix[i])
tam_out.close()
thm_out = open(hbond_total_outfile, 'w')
for i in range(len(total_hbond_matrix)):
thm_out.write("%s\n" % total_hbond_matrix[i])
thm_out.close()
#percent_helix = percent_helix/len(sys.argv[2:])
#percent_sheet = percent_sheet/len(sys.argv[2:])
#percent_loop = percent_loop/len(sys.argv[2:])
#percent_array = [('% Helix --> ', percent_helix), ('% Sheet --> ', percent_sheet), ('% Loop --> ', percent_loop)]
percent_data_array = percent_data_array/len(sys.argv[2:])
np.savetxt('Percent_HEL.txt', percent_data_array, fmt='%s', delimiter=' ', newline='\n')
avg_hbonding_matrix = np.average(full_hbonding_matrix, axis=0)
avg_tco_matrix = np.average(tco_matrix, axis=0)
avg_racc_matrix = np.average(racc_matrix, axis=0)
std_hbonding_matrix = np.std(full_hbonding_matrix, axis=0)
std_tco_matrix = np.std(tco_matrix, axis=0)
std_racc_matrix = np.std(racc_matrix, axis=0)
comb_tco_matrix = np.column_stack((avg_tco_matrix, std_tco_matrix))
comb_racc_matrix = np.column_stack((avg_racc_matrix, std_racc_matrix))
comb_hbonding_matrix = np.column_stack((avg_hbonding_matrix, std_hbonding_matrix))
np.savetxt(tco_outfile, comb_tco_matrix, fmt='%s', delimiter=' ', newline='\n')
from collections import OrderedDict
import numpy as np
from gym.spaces import Box, Dict
from multiworld.envs.env_util import get_stat_in_paths, \
create_stats_ordered_dict, get_asset_full_path
from multiworld.core.multitask_env import MultitaskEnv
from multiworld.envs.mujoco.sawyer_xyz.base import SawyerXYZEnv
from multiworld.envs.mujoco.cameras import sawyer_pick_and_place_camera
from tqdm import *
class SawyerPickAndPlaceEnv(MultitaskEnv, SawyerXYZEnv):
def __init__(
self,
obj_low=None,
obj_high=None,
reward_type='hand_and_obj_distance',
indicator_threshold=0.06,
distance_threshold=0.06,
obj_init_positions=((0, 0.6, 0.02),),
random_init=False,
fix_goal=False,
fixed_goal=(0.15, 0.6, 0.055, -0.15, 0.6),
goal_low=None,
goal_high=None,
reset_free=False,
hide_goal_markers=False,
oracle_reset_prob=0.0,
presampled_goals=None,
num_goals_presampled=1000,
p_obj_in_hand=.75,
**kwargs
):
self.quick_init(locals())
MultitaskEnv.__init__(self)
SawyerXYZEnv.__init__(
self,
model_name=self.model_name,
**kwargs
)
if obj_low is None:
obj_low = self.hand_low
if obj_high is None:
obj_high = self.hand_high
self.obj_low = obj_low
self.obj_high = obj_high
if goal_low is None:
goal_low = np.hstack((self.hand_low, obj_low))
if goal_high is None:
goal_high = np.hstack((self.hand_high, obj_high))
self.reward_type = reward_type
self.random_init = random_init
self.p_obj_in_hand = p_obj_in_hand
self.indicator_threshold = indicator_threshold
self.distance_threshold = distance_threshold
self.obj_init_z = obj_init_positions[0][2]
self.obj_init_positions = np.array(obj_init_positions)
self.last_obj_pos = self.obj_init_positions[0]
self.fix_goal = fix_goal
self.fixed_goal = np.array(fixed_goal)
self._state_goal = None
self.reset_free = reset_free
self.oracle_reset_prob = oracle_reset_prob
self.hide_goal_markers = hide_goal_markers
self.action_space = Box(
np.array([-1, -1, -1, -1]),
np.array([1, 1, 1, 1]),
dtype=np.float32
)
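        # 4-D continuous action, presumably (dx, dy, dz, gripper effort), each bounded to [-1, 1]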
self.hand_and_obj_space = Box(
np.hstack((self.hand_low, obj_low)),
np.hstack((self.hand_high, obj_high)),
dtype=np.float32
)
self.hand_space = Box(
self.hand_low,
self.hand_high,
dtype=np.float32
)
self.gripper_and_hand_and_obj_space = Box(
np.hstack(([0.0], self.hand_low, obj_low)),
            np.hstack(([0.04], self.hand_high, obj_high)),
"""
Vector Autoregression
Author: <NAME>
License: Simplified-BSD
"""
import numpy as np
from _var import _var_quantities, _var_quantities_kron
class VAR(object):
def __init__(self, nobs=None, k_endog=None, endog=None, order=1,
*args, **kwargs):
# Get the endog dimensions
self.order = order
if endog is not None:
endog = np.array(endog, order='F')
self.nobs, self.k_endog = endog.shape
self.endog = endog
else:
if nobs is None or k_endog is None:
raise ValueError('Must either provide endogenous array or'
' endogenous dimensions.')
self.nobs = nobs
self.k_endog = k_endog
# Set additional dimensions
# self.k_endog # M
# self.order # d
self.k_ar = self.k_endog * self.order # k_i
self.k_var = self.k_ar * self.k_endog # k = \sum_i k_i
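        # e.g. with k_endog = 3 and order = 2: k_ar = 3*2 = 6 and k_var = 6*3 = 18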
# Create storage arrays
# Z is M x k, H is M x M
# _ZH actually stores (Z'H)', see _var for details
        self._ZH = np.zeros((self.k_endog, self.k_var), order='F')
from __future__ import print_function, division
import argparse
import os, sys, shutil
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.optim as optim
import torch.utils.data
from torch.autograd import Variable
import torchvision.utils as vutils
import torch.nn.functional as F
import numpy as np
import time
from tensorboardX import SummaryWriter
from datasets import __datasets__
from models import __models__, __loss__
from utils import *
import gc
from datasets.warp_ops import *
import cv2
cudnn.benchmark = True
assert torch.backends.cudnn.enabled, "Amp requires cudnn backend to be enabled."
parser = argparse.ArgumentParser(description='Cascade Stereo Network (CasStereoNet)')
parser.add_argument('--model', default='gwcnet-c', help='select a model structure', choices=__models__.keys())
parser.add_argument('--maxdisp', type=int, default=192, help='maximum disparity')
parser.add_argument('--dataset', required=True, help='dataset name', choices=__datasets__.keys())
parser.add_argument('--datapath', required=True, help='data path')
parser.add_argument('--depthpath', required=True, help='depth path')
parser.add_argument('--test_dataset', required=True, help='dataset name', choices=__datasets__.keys())
parser.add_argument('--test_datapath', required=True, help='data path')
parser.add_argument('--test_sim_datapath', required=True, help='data path')
parser.add_argument('--test_real_datapath', required=True, help='data path')
parser.add_argument('--trainlist', required=True, help='training list')
parser.add_argument('--testlist', required=True, help='testing list')
parser.add_argument('--sim_testlist', required=True, help='testing list')
parser.add_argument('--real_testlist', required=True, help='testing list')
parser.add_argument('--lr', type=float, default=0.001, help='base learning rate')
parser.add_argument('--batch_size', type=int, default=1, help='training batch size')
parser.add_argument('--test_batch_size', type=int, default=1, help='testing batch size')
parser.add_argument('--epochs', type=int, required=True, help='number of epochs to train')
parser.add_argument('--lrepochs', type=str, required=True, help='the epochs to decay lr: the downscale rate')
parser.add_argument('--logdir', required=True, help='the directory to save logs and checkpoints')
parser.add_argument('--loadckpt', help='load the weights from a specific checkpoint')
parser.add_argument('--resume', action='store_true', help='continue training the model')
parser.add_argument('--seed', type=int, default=1, metavar='S', help='random seed (default: 1)')
parser.add_argument('--summary_freq', type=int, default=50, help='the frequency of saving summary')
parser.add_argument('--test_summary_freq', type=int, default=50, help='the frequency of saving summary')
parser.add_argument('--save_freq', type=int, default=1, help='the frequency of saving checkpoint')
parser.add_argument('--log_freq', type=int, default=50, help='log freq')
parser.add_argument('--eval_freq', type=int, default=1, help='eval freq')
parser.add_argument("--local_rank", type=int, default=0)
parser.add_argument('--mode', type=str, default="train", help='train or test mode')
parser.add_argument('--ndisps', type=str, default="48,24", help='ndisps')
parser.add_argument('--disp_inter_r', type=str, default="4,1", help='disp_intervals_ratio')
parser.add_argument('--dlossw', type=str, default="0.5,2.0", help='depth loss weight for different stage')
parser.add_argument('--cr_base_chs', type=str, default="32,32,16", help='cost regularization base channels')
parser.add_argument('--grad_method', type=str, default="detach", choices=["detach", "undetach"], help='predicted disp detach, undetach')
parser.add_argument('--using_ns', action='store_true', help='using neighbor search')
parser.add_argument('--ns_size', type=int, default=3, help='nb_size')
parser.add_argument('--crop_height', type=int, required=True, help="crop height")
parser.add_argument('--crop_width', type=int, required=True, help="crop width")
parser.add_argument('--test_crop_height', type=int, required=True, help="crop height")
parser.add_argument('--test_crop_width', type=int, required=True, help="crop width")
parser.add_argument('--using_apex', action='store_true', help='using apex, need to install apex')
parser.add_argument('--sync_bn', action='store_true',help='enabling apex sync BN.')
parser.add_argument('--opt-level', type=str, default="O0")
parser.add_argument('--keep-batchnorm-fp32', type=str, default=None)
parser.add_argument('--loss-scale', type=str, default=None)
parser.add_argument('--ground', action='store_true', help='include ground pixel')
# parse arguments
args = parser.parse_args()
os.makedirs(args.logdir, exist_ok=True)
#using sync_bn by using nvidia-apex, need to install apex.
if args.sync_bn:
assert args.using_apex, "must set using apex and install nvidia-apex"
if args.using_apex:
try:
from apex.parallel import DistributedDataParallel as DDP
from apex.fp16_utils import *
from apex import amp, optimizers
from apex.multi_tensor_apply import multi_tensor_applier
except ImportError:
raise ImportError("Please install apex from https://www.github.com/nvidia/apex to run this example.")
#dis
num_gpus = int(os.environ["WORLD_SIZE"]) if "WORLD_SIZE" in os.environ else 1
is_distributed = num_gpus > 1
args.is_distributed = is_distributed
if is_distributed:
torch.cuda.set_device(args.local_rank)
torch.distributed.init_process_group(
backend="nccl", init_method="env://"
)
synchronize()
#set seed
set_random_seed(args.seed)
if (not is_distributed) or (dist.get_rank() == 0):
# create summary logger
print("argv:", sys.argv[1:])
print_args(args)
print("creating new summary file")
logger = SummaryWriter(args.logdir)
# model
model = __models__[args.model](
maxdisp=args.maxdisp,
ndisps=[int(nd) for nd in args.ndisps.split(",") if nd],
disp_interval_pixel=[float(d_i) for d_i in args.disp_inter_r.split(",") if d_i],
cr_base_chs=[int(ch) for ch in args.cr_base_chs.split(",") if ch],
grad_method=args.grad_method,
using_ns=args.using_ns,
ns_size=args.ns_size
)
if args.sync_bn:
import apex
print("using apex synced BN")
model = apex.parallel.convert_syncbn_model(model)
model_loss = __loss__[args.model]
model.cuda()
print('Number of model parameters: {}'.format(sum([p.data.nelement() for p in model.parameters()])))
#optimizer
optimizer = optim.Adam(model.parameters(), lr=args.lr, betas=(0.9, 0.999))
# load parameters
start_epoch = 0
if args.resume:
# find all checkpoints file and sort according to epoch id
all_saved_ckpts = [fn for fn in os.listdir(args.logdir) if (fn.endswith(".ckpt") and not fn.endswith("best.ckpt"))]
all_saved_ckpts = sorted(all_saved_ckpts, key=lambda x: int(x.split('_')[-1].split('.')[0]))
# use the latest checkpoint file
loadckpt = os.path.join(args.logdir, all_saved_ckpts[-1])
print("loading the lastest model in logdir: {}".format(loadckpt))
state_dict = torch.load(loadckpt, map_location=torch.device("cpu"))
model.load_state_dict(state_dict['model'])
optimizer.load_state_dict(state_dict['optimizer'])
start_epoch = state_dict['epoch'] + 1
elif args.loadckpt:
# load the checkpoint file specified by args.loadckpt
print("loading model {}".format(args.loadckpt))
state_dict = torch.load(args.loadckpt, map_location=torch.device("cpu"))
model.load_state_dict(state_dict['model'])
print("start at epoch {}".format(start_epoch))
if args.using_apex:
# Initialize Amp
model, optimizer = amp.initialize(model, optimizer,
opt_level=args.opt_level,
keep_batchnorm_fp32=args.keep_batchnorm_fp32,
loss_scale=args.loss_scale
)
#conver model to dist
if is_distributed:
print("Dist Train, Let's use", torch.cuda.device_count(), "GPUs!")
model = torch.nn.parallel.DistributedDataParallel(
model, device_ids=[args.local_rank], output_device=args.local_rank,
# find_unused_parameters=False,
# this should be removed if we update BatchNorm stats
# broadcast_buffers=False,
)
else:
if torch.cuda.is_available():
print("Let's use", torch.cuda.device_count(), "GPUs!")
model = nn.DataParallel(model)
# dataset, dataloader
StereoDataset = __datasets__[args.dataset]
Test_StereoDataset = __datasets__[args.test_dataset]
Test_sim_StereoDataset = __datasets__[args.test_dataset]
Test_real_StereoDataset = __datasets__[args.test_dataset]
train_dataset = StereoDataset(args.datapath, args.depthpath, args.trainlist, True,
crop_height=args.crop_height, crop_width=args.crop_width,
test_crop_height=args.test_crop_height, test_crop_width=args.test_crop_width,
left_img="0128_irL_denoised_half.png", right_img="0128_irR_denoised_half.png", args=args)
test_dataset = Test_StereoDataset(args.test_datapath, args.depthpath, args.testlist, False,
crop_height=args.crop_height, crop_width=args.crop_width,
test_crop_height=args.test_crop_height, test_crop_width=args.test_crop_width,
left_img="0128_irL_denoised_half.png", right_img="0128_irR_denoised_half.png", args=args)
sim_test_dataset = Test_sim_StereoDataset(args.test_sim_datapath, args.depthpath, args.sim_testlist, False,
crop_height=args.crop_height, crop_width=args.crop_width,
test_crop_height=args.test_crop_height, test_crop_width=args.test_crop_width,
left_img="0128_irL_denoised_half.png", right_img="0128_irR_denoised_half.png", args=args)
real_test_dataset = Test_real_StereoDataset(args.test_real_datapath, args.depthpath, args.real_testlist, False,
crop_height=args.crop_height, crop_width=args.crop_width,
test_crop_height=args.test_crop_height, test_crop_width=args.test_crop_width,
left_img="1024_irL_real_1080.png", right_img="1024_irR_real_1080.png", args=args)
if is_distributed:
train_sampler = torch.utils.data.DistributedSampler(train_dataset, num_replicas=dist.get_world_size(),
rank=dist.get_rank())
test_sampler = torch.utils.data.DistributedSampler(test_dataset, num_replicas=dist.get_world_size(),
rank=dist.get_rank())
TrainImgLoader = torch.utils.data.DataLoader(train_dataset, args.batch_size, sampler=train_sampler, num_workers=1,
drop_last=True, pin_memory=True)
TestImgLoader = torch.utils.data.DataLoader(test_dataset, args.test_batch_size, sampler=test_sampler, num_workers=1,
drop_last=False, pin_memory=True)
else:
TrainImgLoader = torch.utils.data.DataLoader(train_dataset, batch_size=args.batch_size,
shuffle=True, num_workers=8, drop_last=True)
TestImgLoader = torch.utils.data.DataLoader(test_dataset, batch_size=args.test_batch_size,
shuffle=False, num_workers=4, drop_last=False)
SimTestImgLoader = torch.utils.data.DataLoader(sim_test_dataset, batch_size=args.test_batch_size,
shuffle=False, num_workers=4, drop_last=False)
RealTestImgLoader = torch.utils.data.DataLoader(real_test_dataset, batch_size=args.test_batch_size,
shuffle=False, num_workers=4, drop_last=False)
num_stage = len([int(nd) for nd in args.ndisps.split(",") if nd])
def train():
avg_test_scalars = None
Cur_D1 = 1
for epoch_idx in range(start_epoch, args.epochs):
adjust_learning_rate(optimizer, epoch_idx, args.lr, args.lrepochs)
# training
for batch_idx, sample in enumerate(TrainImgLoader):
global_step = len(TrainImgLoader) * epoch_idx + batch_idx
start_time = time.time()
do_summary = global_step % args.summary_freq == 0
loss, scalar_outputs, image_outputs = train_sample(sample, compute_metrics=do_summary)
if (not is_distributed) or (dist.get_rank() == 0):
if do_summary:
save_scalars(logger, 'train', scalar_outputs, global_step)
save_images(logger, 'train', image_outputs, global_step)
#save_texts(logger, 'train', text_outputs, global_step)
del scalar_outputs, image_outputs
if batch_idx % args.log_freq == 0:
if isinstance(loss, (list, tuple)):
loss = loss[0]
print('Epoch {}/{}, Iter {}/{}, lr {:.5f}, train loss = {:.3f}, time = {:.3f}'.format(epoch_idx,
args.epochs,
batch_idx,
len(TrainImgLoader),
optimizer.param_groups[0]["lr"],
loss, time.time() - start_time))
# saving checkpoints
if (epoch_idx + 1) % args.save_freq == 0:
if (not is_distributed) or (dist.get_rank() == 0):
checkpoint_data = {'epoch': epoch_idx, 'model': model.module.state_dict(), 'optimizer': optimizer.state_dict()}
save_filename = "{}/checkpoint_{:0>6}.ckpt".format(args.logdir, epoch_idx)
torch.save(checkpoint_data, save_filename)
gc.collect()
if (epoch_idx % args.eval_freq == 0) or (epoch_idx == args.epochs - 1):
# testing
avg_test_scalars = AverageMeterDict()
for batch_idx, sample in enumerate(TestImgLoader):
global_step = len(TestImgLoader) * epoch_idx + batch_idx
start_time = time.time()
do_summary = global_step % args.test_summary_freq == 0
loss, scalar_outputs, image_outputs, _, _ = test_sample(sample, compute_metrics=do_summary)
if (not is_distributed) or (dist.get_rank() == 0):
if do_summary:
save_scalars(logger, 'test', scalar_outputs, global_step)
save_images(logger, 'test', image_outputs, global_step)
avg_test_scalars.update(scalar_outputs)
del scalar_outputs, image_outputs
if batch_idx % args.log_freq == 0:
if isinstance(loss, (list, tuple)):
loss = loss[0]
print('Epoch {}/{}, Iter {}/{}, test loss = {:.3f}, time = {:3f}'.format(epoch_idx, args.epochs,
batch_idx,
len(TestImgLoader), loss,
time.time() - start_time))
if (not is_distributed) or (dist.get_rank() == 0):
avg_test_scalars = avg_test_scalars.mean()
save_scalars(logger, 'fulltest', avg_test_scalars, len(TrainImgLoader) * (epoch_idx + 1))
print("avg_test_scalars", avg_test_scalars)
# saving bset checkpoints
if (not is_distributed) or (dist.get_rank() == 0):
if avg_test_scalars is not None:
New_D1 = avg_test_scalars["D1"][0]
if New_D1 < Cur_D1:
Cur_D1 = New_D1
#save
checkpoint_data = {'epoch': epoch_idx, 'model': model.module.state_dict(),
'optimizer': optimizer.state_dict()}
save_filename = "{}/checkpoint_best.ckpt".format(args.logdir)
torch.save(checkpoint_data, save_filename)
print("Best Checkpoint epoch_idx:{}".format(epoch_idx))
gc.collect()
# train one sample
def train_sample(sample, compute_metrics=False):
model.train()
imgL, imgR, disp_gt = sample['left'], sample['right'], sample['disparity']
imgL = imgL.cuda()
imgR = imgR.cuda()
disp_gt = disp_gt.cuda()
#disp_gt_b = disp_gt
#print(disp_gt.shape)
disp_gt_t = disp_gt.reshape((2,1,256,512))
disparity_L_from_R = apply_disparity_cu(disp_gt_t, disp_gt_t.int())
#disp_gt = disparity_L_from_R.reshape((1,2,256,512))
disp_gt = disparity_L_from_R.reshape((2,256,512))
disp_gt = cv2.medianBlur(disp_gt.cpu().numpy(),3)
disp_gt = torch.from_numpy(disp_gt).cuda()
#print(disp_gt.shape)
#disp_gt_a = disp_gt
optimizer.zero_grad()
outputs = model(imgL, imgR)
mask = (disp_gt < args.maxdisp) & (disp_gt > 0)
loss = model_loss(outputs, disp_gt, mask, dlossw=[float(e) for e in args.dlossw.split(",") if e])
outputs_stage = outputs["stage{}".format(num_stage)]
disp_ests = [outputs_stage["pred1"], outputs_stage["pred2"], outputs_stage["pred3"]]
scalar_outputs = {"loss": loss}
image_outputs = {"disp_est": disp_ests, "disp_gt": disp_gt, "imgL": imgL, "imgR": imgR}
#text_outputs = {}
if compute_metrics:
with torch.no_grad():
image_outputs["errormap"] = [disp_error_image_func.apply(disp_est, disp_gt) for disp_est in disp_ests]
scalar_outputs["EPE"] = [EPE_metric(disp_est, disp_gt, mask) for disp_est in disp_ests]
scalar_outputs["D1"] = [D1_metric(disp_est, disp_gt, mask) for disp_est in disp_ests]
scalar_outputs["Thres1"] = [Thres_metric(disp_est, disp_gt, mask, 1.0) for disp_est in disp_ests]
scalar_outputs["Thres2"] = [Thres_metric(disp_est, disp_gt, mask, 2.0) for disp_est in disp_ests]
scalar_outputs["Thres3"] = [Thres_metric(disp_est, disp_gt, mask, 3.0) for disp_est in disp_ests]
#text_outputs["before_warp"] = [str(disp_gt_b)]
#text_outputs["after_warp"] = [str(disp_gt_a)]
if is_distributed and args.using_apex:
with amp.scale_loss(loss, optimizer) as scaled_loss:
scaled_loss.backward()
else:
loss.backward()
optimizer.step()
if is_distributed:
scalar_outputs = reduce_scalar_outputs(scalar_outputs)
return tensor2float(scalar_outputs["loss"]), tensor2float(scalar_outputs), image_outputs
# test one sample
@make_nograd_func
def test_sample(sample, compute_metrics=True):
if is_distributed:
model_eval = model.module
else:
model_eval = model
model_eval.eval()
imgL, imgR, disp_gt, dep_gt, f, b, label, obj_ids = sample['left'], sample['right'], sample['disparity'], sample['depth'], sample['f'], sample['baseline'], sample['label'], sample['obj_ids']
imgL = imgL.cuda()
imgR = imgR.cuda()
disp_gt = disp_gt.cuda()
#print(disp_gt.shape)
disp_gt_t = disp_gt.reshape((1,1,768,1248))
#disp_gt_rgb = disp_rgb.reshape((1,1,540,960)).cuda()
#label = label.reshape((1,1,540,960)).cuda()
#print(torch.max(disp_gt_t), torch.max(disp_gt_t.int()), torch.min(disp_gt_t), torch.min(disp_gt_t.int()))
#print(torch.max(label), torch.max(disp_gt_rgb.int()), torch.min(label), torch.min(disp_gt_rgb.int()))
disparity_L_from_R = apply_disparity_cu(disp_gt_t, disp_gt_t.int())
#disparity_L_from_R = torch.ones((768,1248))
disp_gt = disparity_L_from_R.reshape((768,1248)).cpu().numpy()
#label_rgb = label_rgb.reshape((1,540,960)).cuda()
disp_gt = cv2.medianBlur(disp_gt,3)
disp_gt = torch.from_numpy(disp_gt).cuda().reshape((1,768,1248))
outputs = model_eval(imgL, imgR)
mask = (disp_gt < args.maxdisp) & (disp_gt > 0)
#print(mask.reshape)
loss = torch.tensor(0, dtype=imgL.dtype, device=imgL.device, requires_grad=False) #model_loss(outputs, disp_gt, mask, dlossw=[float(e) for e in args.dlossw.split(",") if e])
outputs_stage = outputs["stage{}".format(num_stage)]
disp_ests = [outputs_stage["pred"]]
scalar_outputs = {"loss": loss}
#obj_err = np.zeros(17)
obj_ct = np.zeros(17, dtype=int)
#image_outputs = {"disp_est": disp_ests, "disp_gt": disp_gt, "imgL": imgL, "imgR": imgR, "label": label_rgb}
depest = disp_ests[0].cpu().numpy()[0]
depest = depest[228:,:960]
dispgt = disp_gt.cpu().numpy()[0]
dispgt = dispgt[228:,:960]
dep_gt_c = dep_gt.cpu().numpy()[0]
obj_ids = obj_ids.cpu().numpy()[0]
label = label[0].cpu().numpy()[0]
#print("dep_gt_c ", dep_gt_c.shape)
#print("obj_ids ", obj_ids.shape)
#print("label ", label.shape)
obj_ct[obj_ids] = 1
#label_rgb = apply_disparity_cu(label, disp_gt_rgb.int())
#label_rgb = label_rgb.reshape((1,540,960))
#label = warp(label, disp_rgb)
#label = label.cpu().numpy()[0]
#print(label.shape)
#print(dispgt.shape)
if args.ground:
maskest = (dispgt < args.maxdisp) & (dispgt > 0)
else:
maskest = (dispgt < args.maxdisp) & (dispgt > 0) & (dep_gt_c <= 1250) & (dep_gt_c > 0)
#print("mask:", np.sum(maskest))
#print("back:", np.sum(label == 18))
maskest2 = (depest == 0)
depest = np.divide(f*b, depest)
depest[maskest2] = 0
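    # Stereo relation used above: depth = focal_length * baseline / disparity;
    # zero-disparity pixels (maskest2) are reset to depth 0 after the conversion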
obj_avg_err = obj_analysis(label, obj_ids, dep_gt_c, np.asarray(depest))
#print(depest.dtype, dep_gt.dtype, depest.shape, dep_gt.shape)
dep_err_map = np.abs(np.asarray(depest) - np.asarray(dep_gt[0]))
dep_err = np.mean(dep_err_map[maskest])
dep_2 = np.sum(dep_err_map[maskest] > 2)/np.sum(maskest)
dep_4 = np.sum(dep_err_map[maskest] > 4)/np.sum(maskest)
dep_8 = np.sum(dep_err_map[maskest] > 8)/np.sum(maskest)
disp_ests_bad = disp_ests[0].cpu().numpy()[0]
disp_ests_gt_bad = disp_gt.cpu().numpy()[0]
disp_ests_bad = disp_ests_bad[228:,:960]
disp_ests_gt_bad = disp_ests_gt_bad[228:,:960]
#print(disp_ests_bad.shape, disp_ests_gt_bad.shape)
bad1 = np.sum(np.abs(disp_ests_bad - disp_ests_gt_bad)[maskest] > 1)/np.sum(maskest)
bad2 = np.sum(np.abs(disp_ests_bad - disp_ests_gt_bad)[maskest] > 2)/np.sum(maskest)
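    # bad-N metric: fraction of valid pixels whose absolute disparity error exceeds N pixels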
#dep_gt = disp_gt
scalar_outputs["D1"] = [D1_metric(disp_est, disp_gt, mask) for disp_est in disp_ests]
scalar_outputs["EPE"] = [EPE_metric(disp_est[:,228:,:960], disp_gt[:,228:,:960], torch.tensor(maskest).reshape((1,540,960))) for disp_est in disp_ests]
scalar_outputs["Thres1"] = [Thres_metric(disp_est, disp_gt, mask, 1.0) for disp_est in disp_ests]
scalar_outputs["Thres2"] = [Thres_metric(disp_est, disp_gt, mask, 2.0) for disp_est in disp_ests]
scalar_outputs["Thres3"] = [Thres_metric(disp_est, disp_gt, mask, 3.0) for disp_est in disp_ests]
scalar_outputs["bad1.0"] = [bad1]
scalar_outputs["bad2.0"] = [bad2]
scalar_outputs["dep_err"] = [dep_err]
scalar_outputs["dep2"] = [dep_2]
scalar_outputs["dep4"] = [dep_4]
scalar_outputs["dep8"] = [dep_8]
feature_outputs = [outputs["fea"][:,i,:,:] for i in range(32)]
label = torch.tensor(label, dtype=float).reshape((1,540,960))
image_outputs = {"disp_est": disp_ests[0][:,228:,:960], "disp_gt": disp_gt[:,228:,:960], "imgL": imgL[:,:,228:,:960], "imgR": imgR[:,:,228:,:960], "label": label, "feature": feature_outputs}
if compute_metrics:
image_outputs["errormap"] = [(disp_error_image_func.apply(disp_est, disp_gt))[:,:,228:,:960] for disp_est in disp_ests]
if is_distributed:
scalar_outputs = reduce_scalar_outputs(scalar_outputs)
return tensor2float(scalar_outputs["loss"]), tensor2float(scalar_outputs), image_outputs, obj_avg_err, obj_ct
def warp(label, disp):
label = torch.tensor(label).reshape((1,1,540,960)).float().cuda()
disp_gt_rgb = disp.reshape((1,1,540,960)).cuda()
label_rgb = apply_disparity_cu(label, -disp_gt_rgb.int())
label_rgb = label_rgb.reshape((1,540,960))
label = label_rgb.cpu().numpy()[0]
return label
def obj_analysis(label, obj_ids, dep_gt, dep_est):
    obj_avg_err = np.zeros(17, dtype=int)
import numpy as np
import matplotlib.pyplot as plt
import wobble
import tensorflow as tf
from tqdm import tqdm
import h5py
import os
__all__ = ["improve_order_regularization", "improve_parameter", "test_regularization_value", "plot_pars_from_file"]
def get_name_from_tensor(tensor):
# hacky method to get rid of characters TF adds to the variable names
# NOTE - does not handle '_2' type additions!
# also won't work if you put colons in your variable names but why would you do that?
return str.split(tensor.name, ':')[0]
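    # e.g. a tensor named 'L1_template:0' is reported as 'L1_template'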
def improve_order_regularization(r, o, star_filename, tellurics_filename,
training_data, training_results,
validation_data, validation_results,
verbose=True, plot=False, basename='',
K_star=0, K_t=0, L1=True, L2=True,
tellurics_template_fixed=False):
"""
Use a validation scheme to determine the best regularization parameters for
all model components in a given order r.
Update files at star_filename, tellurics_filename with the best parameters.
"""
training_model = wobble.Model(training_data, training_results, r)
training_model.add_star('star', variable_bases=K_star)
if tellurics_template_fixed: # hackity hack hack
results_51peg = wobble.Results(filename='/Users/mbedell/python/wobble/results/results_51peg_Kstar0_Kt0.hdf5')
template_xs = np.copy(results_51peg.tellurics_template_xs[o])
template_ys = np.copy(results_51peg.tellurics_template_ys[o])
training_model.add_telluric('tellurics', rvs_fixed=True, template_fixed=True,
variable_bases=K_t, template_xs=template_xs,
template_ys=template_ys)
else:
training_model.add_telluric('tellurics', rvs_fixed=True, variable_bases=K_t)
training_model.setup()
training_model.optimize(niter=0, verbose=verbose, rv_uncertainties=False)
if plot:
n = 0 # epoch to plot
title = 'Initialization'
filename = '{0}_init'.format(basename)
plot_fit(r, n, training_data, training_results, title=title, basename=filename)
validation_model = wobble.Model(validation_data, validation_results, r)
validation_model.add_star('star', variable_bases=K_star,
template_xs=training_results.star_template_xs[r]) # ensure templates are same size
if tellurics_template_fixed: # hackity hack hack
validation_model.add_telluric('tellurics', rvs_fixed=True, template_fixed=True,
variable_bases=K_t, template_xs=training_results.tellurics_template_xs[r],
template_ys=training_results.tellurics_template_ys[r])
else:
validation_model.add_telluric('tellurics', rvs_fixed=True, variable_bases=K_t,
template_xs=training_results.tellurics_template_xs[r])
validation_model.setup()
# the order in which these are defined will determine the order in which they are optimized:
tensors_to_tune = [training_model.components[1].L2_template_tensor, training_model.components[0].L2_template_tensor,
training_model.components[1].L1_template_tensor, training_model.components[0].L1_template_tensor]
tensor_names = ['L2_template', 'L2_template', 'L1_template',
                    'L1_template'] # this is only needed because TF appends garbage to the end of the tensor name
tensor_components = ['tellurics', 'star', 'tellurics', 'star'] # ^ same
if K_star > 0:
tensors_to_tune = np.append(tensors_to_tune, [training_model.components[0].L2_basis_vectors_tensor,
training_model.components[0].L1_basis_vectors_tensor])
tensor_names = np.append(tensor_names, ['L2_basis_vectors', 'L1_basis_vectors'])
tensor_components = np.append(tensor_components, ['star', 'star'])
if K_t > 0:
tensors_to_tune = np.append(tensors_to_tune, [training_model.components[1].L2_basis_vectors_tensor,
training_model.components[1].L1_basis_vectors_tensor])
tensor_names = np.append(tensor_names, ['L2_basis_vectors', 'L1_basis_vectors'])
tensor_components = np.append(tensor_components, ['tellurics', 'tellurics'])
regularization_dict = {}
#o_init = max(0, o-1) # initialize from previous order, or if o=0 use defaults
o_init = o # always initialize from starting guess (TODO: decide which init is better)
for i,tensor in enumerate(tensors_to_tune):
if tensor_components[i] == 'star':
filename = star_filename
elif tensor_components[i] == 'tellurics':
filename = tellurics_filename
else:
print("something has gone wrong.")
assert False
with h5py.File(filename, 'r') as f:
regularization_dict[tensor] = np.copy(f[tensor_names[i]][o_init])
i = 0 # track order in which parameters are improved
for component,(tensor,name) in zip(tensor_components, zip(tensors_to_tune, tensor_names)):
if (name[0:2] == "L1" and L1) or (name[0:2] == "L2" and L2):
i += 1
regularization_dict[tensor] = improve_parameter(tensor, training_model, validation_model,
regularization_dict, validation_data, validation_results,
verbose=verbose,
plot=plot, basename=basename+'_par{0}'.format(i))
if component == 'star':
filename = star_filename
elif component == 'tellurics':
filename = tellurics_filename
else:
print("something has gone wrong.")
assert False
with h5py.File(filename, 'r+') as f:
f[name][o] = np.copy(regularization_dict[tensor])
if plot:
test_regularization_value(tensor, regularization_dict[tensor],
training_model, validation_model, regularization_dict,
validation_data, validation_results, plot=False, verbose=False) # hack to update results
title = 'Final'
filename = '{0}_final'.format(basename)
plot_fit(r, n, validation_data, validation_results, title=title, basename=filename)
fig = plt.figure()
ax = fig.add_subplot(111)
val_rvs = validation_results.star_rvs[r] + validation_results.bervs
train_rvs = training_results.star_rvs[r] + training_results.bervs
ax.plot(validation_results.dates, val_rvs - np.mean(val_rvs), 'r.')
ax.plot(training_results.dates, train_rvs - np.mean(train_rvs), 'k.', alpha=0.5)
ax.set_ylabel('RV (m/s)')
ax.set_xlabel('JD')
fig.tight_layout()
plt.savefig(basename+'_final_rvs.png')
plt.close(fig)
def improve_parameter(par, training_model, validation_model, regularization_dict,
validation_data, validation_results,
plot=False, verbose=True, basename=''):
"""
Perform a grid search to set the value of regularization parameter `par`.
Requires training data and validation data to evaluate goodness-of-fit for each parameter value.
Returns optimal parameter value.
"""
current_value = np.copy(regularization_dict[par])
if current_value == 0: # can't be scaled
return 0
name = str.split(par.name, ':')[0] # chop off TF's ID #
grid = np.logspace(-1.0, 1.0, num=3) * current_value
nll_grid = np.zeros_like(grid)
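    # e.g. a current value of 1e-3 (hypothetical) gives the initial search grid {1e-4, 1e-3, 1e-2}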
for i,val in enumerate(grid):
nll_grid[i] = test_regularization_value(par, val, training_model,
validation_model, regularization_dict,
validation_data, validation_results,
plot=plot, verbose=verbose, basename=basename)
# ensure that the minimum isn't on a grid edge:
best_ind = np.argmin(nll_grid)
while (best_ind == 0 and val >= 1.e-2): # prevent runaway minimization
val = grid[0]/10.
new_nll = test_regularization_value(par, val, training_model,
validation_model, regularization_dict,
validation_data, validation_results,
plot=plot, verbose=verbose, basename=basename)
grid = np.append(val, grid)
nll_grid = np.append(new_nll, nll_grid)
        best_ind = np.argmin(nll_grid)
""" This module provides the basic classes for the pulse retrieval algorithms.
"""
import numpy as np
from types import SimpleNamespace
from .. import io
from ..mesh_data import MeshData
from ..pulse_error import pulse_error
from .. import lib
from ..pnps import BasePNPS
from math import isclose
# global dictionary that contains all PNPS classes
_RETRIEVER_CLASSES = {}
# =============================================================================
# Metaclass and factory
# =============================================================================
class MetaRetriever(type):
""" Metaclass that registers Retriever classes in a global dictionary.
"""
def __new__(cls, clsmethod, bases, attrs):
global _RETRIEVER_CLASSES
newclass = super().__new__(cls, clsmethod, bases, attrs)
method = newclass.method
if method is None:
return newclass
# register the Retriever method, e.g. 'copra'
if method in _RETRIEVER_CLASSES:
raise ValueError("Two retriever classes implement retriever '%s'."
% method)
_RETRIEVER_CLASSES[method] = newclass
return newclass
class MetaIORetriever(io.MetaIO, MetaRetriever):
# to fix metaclass conflicts
pass
# =============================================================================
# Retriever Base class
# =============================================================================
class BaseRetriever(io.IO, metaclass=MetaIORetriever):
""" The abstract base class for pulse retrieval.
This class implements common functionality for different retrieval
algorithms.
"""
method = None
supported_schemes = None
_io_store = ['pnps', 'options', 'logging', 'log',
'_retrieval_state', '_result']
def __init__(self, pnps, logging=False, verbose=False, **kwargs):
self.pnps = pnps
self.ft = self.pnps.ft
self.options = SimpleNamespace(**kwargs)
self._result = None
self.logging = logging
self.verbose = verbose
self.log = None
rs = self._retrieval_state = SimpleNamespace()
rs.running = False
if (self.supported_schemes is not None and
pnps.scheme not in self.supported_schemes):
raise ValueError("Retriever '%s' does not support scheme '%s'. "
"It only supports %s." %
(self.method, pnps.scheme, self.supported_schemes)
)
def retrieve(self, measurement, initial_guess, weights=None,
**kwargs):
""" Retrieve pulse from ``measurement`` starting at ``initial_guess``.
Parameters
----------
measurement : MeshData
A MeshData instance that contains the PNPS measurement. The first
axis has to correspond to the PNPS parameter, the second to the
frequency. The data has to be the measured _intensity_ over the
frequency (not wavelength!). The second axis has to match exactly
the frequency axis of the underlying PNPS instance. No
interpolation is done.
initial_guess : 1d-array
The spectrum of the pulse that is used as initial guess in the
iterative retrieval.
weights : 1d-array
Weights that are attributed to the measurement for retrieval.
In the case of (assumed) Gaussian uncertainties with standard
deviation sigma they should correspond to 1/sigma.
Not all algorithms support using the weights.
kwargs : dict
Can override retrieval options specified in :func:`__init__`.
Notes
-----
This function provides no interpolation or data processing. You have
to write a retriever wrapper for that purpose.
"""
self.options.__dict__.update(**kwargs)
if not isinstance(measurement, MeshData):
raise ValueError("measurement has to be a MeshData instance!")
self._retrieve_begin(measurement, initial_guess, weights)
self._retrieve()
self._retrieve_end()
def _retrieve_begin(self, measurement, initial_guess, weights):
pnps = self.pnps
if not np.allclose(pnps.process_w, measurement.axes[1], rtol=1e-6):
raise ValueError("Measurement has to lie on simulation grid!")
# Store measurement
self.measurement = measurement
self.parameter = measurement.axes[0]
self.Tmn_meas = measurement.data
self.initial_guess = initial_guess
# set the size
self.M, self.N = self.Tmn_meas.shape
# Setup the weights
if weights is None:
self._weights = np.ones((self.M, self.N))
else:
self._weights = weights.copy()
# Retrieval state
rs = self._retrieval_state
rs.approximate_error = False
rs.running = True
rs.steps_since_improvement = 0
# Initialize result
res = self._result = SimpleNamespace()
res.trace_error = self.trace_error(self.initial_guess)
res.approximate_error = False
res.spectrum = self.initial_guess.copy()
# Setup the logger
if self.logging:
log = self.log = SimpleNamespace()
log.trace_error = []
log.initial_guess = self.initial_guess.copy()
else:
self.log = None
if self.verbose:
print("Started retriever '%s'" % self.method)
print("Options:")
print(self.options)
print("Initial trace error R = {:.10e}".format(res.trace_error))
print("Starting retrieval...")
print()
def _retrieve_end(self):
rs = self._retrieval_state
rs.running = False
res = self._result
if res.approximate_error:
res.trace_error = self.trace_error(res.spectrum)
res.approximate_error = False
def _project(self, measured, Smk):
""" Performs the projection on the measured intensity.
"""
# in frequency domain
Smn = self.ft.forward(Smk)
# project and specially handle values with zero amplitude
absSmn = np.abs(Smn)
f = (absSmn > 0.0)
Smn[~f] = np.sqrt(measured[~f] + 0.0j)
        Smn[f] = Smn[f] / absSmn[f] * np.sqrt(measured[f] + 0.0j)
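        # i.e. where S_mn is nonzero, keep its phase but replace its amplitude by
        # sqrt(T_mn^meas), the amplitude implied by the measured intensity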
import numpy as np
import pandas as pd
class Planet():
"""
The class called Planet is initialised with constants appropriate
for the given target planet, including the atmospheric density profile
and other constants
"""
def __init__(self, atmos_func='exponential', atmos_filename=None,
Cd=1., Ch=0.1, Q=1e7, Cl=1e-3, alpha=0.3, Rp=6371e3,
g=9.81, H=8000., rho0=1.2):
"""
Set up the initial parameters and constants for the target planet
Parameters
----------
atmos_func : string, optional
Function which computes atmospheric density, rho, at altitude, z.
Default is the exponential function ``rho = rho0 exp(-z/H)``.
Options are ``exponential``, ``tabular``, ``constant`` and ``mars``
atmos_filename : string, optional
If ``atmos_func`` = ``'tabular'``, then set the filename of the table
to be read in here.
Cd : float, optional
The drag coefficient
Ch : float, optional
The heat transfer coefficient
Q : float, optional
The heat of ablation (J/kg)
Cl : float, optional
Lift coefficient
alpha : float, optional
Dispersion coefficient
Rp : float, optional
Planet radius (m)
rho0 : float, optional
Air density at zero altitude (kg/m^3)
g : float, optional
Surface gravity (m/s^2)
H : float, optional
Atmospheric scale height (m)
Returns
-------
None
"""
# Input constants
self.Cd = Cd
self.Ch = Ch
self.Q = Q
self.Cl = Cl
self.alpha = alpha
self.Rp = Rp
self.g = g
self.H = H
self.rho0 = rho0
self.tabular_dict = {}
if atmos_func == 'exponential':
def rhoa(z):
return self.rho0*np.exp(-z/self.H)
self.rhoa = rhoa
elif atmos_func == 'tabular':
adt_df = pd.read_csv(atmos_filename, sep=' ', skiprows=6, names=['z_i', 'rho_i', 'h_i'])
tabular_dict = dict(zip(adt_df.z_i,zip(adt_df.rho_i,adt_df.h_i)))
def rhoa(z):
index = float(int(z/10)*10)
return tabular_dict[index][0]*np.exp((index - z) \
/tabular_dict[index][1])
self.rhoa = rhoa
elif atmos_func == 'mars':
def rhoa(z):
p = 0.699 * np.exp(-0.00009 * z)
if z >= 7000:
T = 249.7 - 0.00222 * z
else:
T = 242.1 - 0.000998 * z
return p / (0.1921 * T)
self.rhoa = rhoa
elif atmos_func == 'constant':
self.rhoa = lambda x: rho0
else:
raise NotImplementedError
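    # Hedged worked example for the default profile (illustrative numbers only):
    # with rho0 = 1.2 kg/m^3 and H = 8000 m, self.rhoa(8000.) = 1.2 * exp(-1) ~ 0.44 kg/m^3.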
def impact(self, radius, velocity, density, strength, angle,
init_altitude=100e3, dt=0.05, radians=False):
"""
Solve the system of differential equations for a given impact event.
Also calculates the kinetic energy lost per unit altitude and
analyses the result to determine the outcome of the impact.
Parameters
----------
radius : float
The radius of the asteroid in meters
velocity : float
            The entry speed of the asteroid in meters/second
density : float
The density of the asteroid in kg/m^3
strength : float
The strength of the asteroid (i.e., the ram pressure above which
fragmentation and spreading occurs) in N/m^2 (Pa)
angle : float
The initial trajectory angle of the asteroid to the horizontal
By default, input is in degrees. If 'radians' is set to True, the
input should be in radians
init_altitude : float, optional
Initial altitude in m
dt : float, optional
The output timestep, in s
radians : logical, optional
Whether angles should be given in degrees or radians. Default=False
Angles returned in the DataFrame will have the same units as the
input
Returns
-------
Result : DataFrame
A pandas DataFrame containing the solution to the system.
Includes the following columns:
``velocity``, ``mass``, ``angle``, ``altitude``,
``distance``, ``radius``, ``time``, ``dedz``
outcome : Dict
dictionary with details of airburst and/or cratering event.
For an airburst, this will contain the following keys:
``burst_peak_dedz``, ``burst_altitude``, ``burst_total_ke_lost``.
For a cratering event, this will contain the following keys:
``impact_time``, ``impact_mass``, ``impact_speed``.
All events should also contain an entry with the key ``outcome``,
which should contain one of the following strings:
``Airburst``, ``Cratering`` or ``Airburst and cratering``
"""
result = self.solve_atmospheric_entry(
radius=radius, angle=angle, strength=strength, velocity=velocity, density=density, init_altitude=init_altitude, dt=dt, radians=radians)
result = self.calculate_energy(result)
outcome = self.analyse_outcome(result)
return result, outcome
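    # Hedged usage sketch (the argument values below are illustrative, not taken from this file):
    #   planet = Planet()
    #   result, outcome = planet.impact(radius=10., velocity=20e3, density=3000.,
    #                                   strength=1e5, angle=45.)
    #   outcome['outcome']  # one of 'Airburst', 'Cratering' or 'Airburst and cratering'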
def solve_atmospheric_entry(
self, radius, velocity, density, strength, angle,
init_altitude=100e3, dt=0.05, radians=False):
"""
Solve the system of differential equations for a given impact scenario
Parameters
----------
radius : float
The radius of the asteroid in meters
velocity : float
            The entry speed of the asteroid in meters/second
density : float
The density of the asteroid in kg/m^3
strength : float
The strength of the asteroid (i.e., the ram pressure above which
fragmentation and spreading occurs) in N/m^2 (Pa)
angle : float
The initial trajectory angle of the asteroid to the horizontal
By default, input is in degrees. If 'radians' is set to True, the
input should be in radians
init_altitude : float, optional
Initial altitude in m
dt : float, optional
The output timestep, in s
radians : logical, optional
Whether angles should be given in degrees or radians. Default=False
Angles returned in the DataFrame will have the same units as the
input
Returns
-------
Result : DataFrame
A pandas DataFrame containing the solution to the system.
Includes the following columns:
``velocity``, ``mass``, ``angle``, ``altitude``,
``distance``, ``radius``, ``time``
"""
# Enter your code here to solve the differential equations
tmax = 500 #10000
if radians == False:
angle = self.deg_to_rad(angle)
        u0 = np.array([velocity, density*4/3*np.pi*radius**3, angle, init_altitude, 0, radius])
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.metrics import classification_report,accuracy_score
from sklearn.metrics import roc_curve, auc
from sklearn import metrics
def plot_roc(y_test,y_score):
fpr, tpr, _ = roc_curve(y_test, y_score)
roc_auc = auc(fpr, tpr)
lw = 2
plt.plot(fpr, tpr, color='darkorange',
lw=lw, label='ROC curve (area = %0.2f)' % roc_auc)
plt.plot([0, 1], [0, 1], color='navy', lw=lw, linestyle='--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver operating characteristic example')
plt.legend(loc="lower right")
#plt.show()
def sbp_roc(sbp_dat):
plt.figure(figsize=(6,6))
#plt.subplot(2,2,1,colspan=2)
plt.subplot2grid((3,2), (0,0), colspan=2,rowspan=2)
plot_roc(sbp_dat[:,0],sbp_dat[:,1])
plt.subplot2grid((3,2), (2,0))
plot_roc(sbp_dat[sbp_dat[:,2]<=0,0],sbp_dat[sbp_dat[:,2]<=0,1])
plt.subplot2grid((3,2), (2,1))
plot_roc(sbp_dat[sbp_dat[:,2]>0,0],sbp_dat[sbp_dat[:,2]>0,1])
plt.tight_layout()
def get_hm(clf,x,y):
return np.array(clf.predict(x) == y,dtype=int)
def get_hm_labelonly(clf,x,y,label=1):
# special HM calculation base only on a sub-group (denoted by label)
hm_ = (y == label) & (clf.predict(x) == y)
    return np.array(hm_,dtype=int)
import logging
from abc import ABC
import numpy as np
from scipy.integrate import trapz
from scipy.interpolate import interp1d, splev, splrep
class PowerToCorrelation(ABC):
""" Generic class for converting power spectra to correlation functions
Using a class based method as there might be multiple implementations and
some of the implementations have state.
"""
def __call__(self, ks, pk, ss):
""" Generates the correlation function
Parameters
----------
ks : np.ndarray
The k values for the power spectrum data. *Assumed to be in log space*
pk : np.ndarray
The P(k) values
        ss : np.ndarray
The distances to calculate xi(s) at.
Returns
-------
xi : np.ndarray
The correlation function at the specified distances
"""
raise NotImplementedError()
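    # For reference (standard result, not specific to this package): concrete subclasses
    # approximate the spherical Hankel transform
    #   xi(s) = 1/(2 pi^2) * integral dk k^2 P(k) j0(k s),   with j0(x) = sin(x)/x,
    # usually with some high-k damping to keep the oscillatory integrand well behaved.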
class PowerToCorrelationGauss(PowerToCorrelation):
""" A pk2xi implementation using manual numeric integration with Gaussian dampening factor
"""
def __init__(self, ks, interpolateDetail=2, a=0.25):
super().__init__()
self.ks = ks
        self.ks2 = np.logspace(np.log(np.min(ks)), np.log(np.max(ks)), interpolateDetail * ks.size, base=np.e)  # denser log-spaced grid between min(ks) and max(ks)
from copy import deepcopy
import pytest
import tempfile
import numpy as np
from tests.utils import resample_data, marginalize_data, complete_binary_data, binary_data_ids, compute_mpe_ids
from tests.utils import random_marginalize_data, complete_posterior_binary_data, complete_marginalized_binary_data
from sklearn.datasets import load_diabetes
from deeprob.spn.utils.statistics import compute_statistics
from deeprob.spn.utils.filter import filter_nodes_by_type
from deeprob.spn.utils.validity import check_spn
from deeprob.spn.structure.node import Sum, Product
from deeprob.spn.structure.node import bfs, dfs_post_order, topological_order, topological_order_layered
from deeprob.spn.structure.cltree import BinaryCLT
from deeprob.spn.structure.leaf import Bernoulli, Gaussian
from deeprob.spn.structure.io import save_spn_json, load_spn_json
from deeprob.spn.learning.learnspn import learn_spn
from deeprob.spn.learning.wrappers import learn_estimator, learn_classifier
from deeprob.spn.algorithms.structure import prune, marginalize
from deeprob.spn.algorithms.inference import likelihood, log_likelihood, mpe
from deeprob.spn.algorithms.moments import expectation, variance, skewness, kurtosis, moment
@pytest.fixture
def data():
data, _, = load_diabetes(return_X_y=True)
return (data < np.median(data, axis=0)).astype(np.float32)
@pytest.fixture
def evi_data(data):
return resample_data(data, 1000)
@pytest.fixture
def mar_data(evi_data):
return random_marginalize_data(evi_data, 0.2)
@pytest.fixture
def clf_data(evi_data):
return marginalize_data(evi_data, [2])
@pytest.fixture
def scope_mar_data(evi_data):
scope = [5, 9, 8]
mar_scope = [s for s in range(10) if s not in scope]
return marginalize_data(evi_data, mar_scope)
@pytest.fixture
def binary_square_data():
return np.stack([
        np.random.binomial(1, 0.3, size=1000),
        np.random.binomial(1, 0.7, size=1000),
    ], axis=1)
import os
import numpy as np
from PIL import Image
from treasure_island import model
from settings import ROOT_DIR
def export_binary_image(arr: np.ndarray, color0: tuple = (255, 255, 255, 255),
color1: tuple = (18, 146, 175, 255), colornan: tuple = (0, 0, 0, 0),
fname: str = 'island_outline.png'):
img_rgba = np.zeros((arr.shape[0], arr.shape[1], 4), dtype=np.uint8)
img_rgba[arr == 0] = color0
img_rgba[arr == 1] = color1
    img_rgba[np.isnan(arr)] = colornan
from copy import copy
from typing import List, Tuple, Union, Iterator, Iterable
import numpy as np
class Size:
def __init__(self, size: Union[int, None]):
"""
Create a Size object with the given size. If the size passed
in is None, then it is treated as infinite
:param size: the size
"""
self.size = size
def is_infinite(self) -> bool:
"""
Return True if this Size is infinite, and False otherwise
:return: whether or not this Size is infinite
"""
return self.size is None
class Rule:
def __init__(self, window: np.ndarray, center: Union[int, Tuple[int, ...]], becomes: int):
"""
Create a single evolution rule
:param window: matrix of dead (0) and alive (1) cells
:param center: index of the target cell in the window
:param becomes: what the target cell becomes in the next generation
"""
if isinstance(center, int):
center = (center,)
if len(center) != len(window.shape):
raise ValueError("Center must have the same dimensions as the window")
self.window = window
self.center = center
self.becomes = becomes
class RuleList:
def __init__(self, rules: Union[List[Rule], None] = None):
"""
Create a complete set of evolution rules for the universe
"""
self._rules = rules if rules is not None else []
def add_rule(self, rule: Rule) -> None:
"""
Add a Rule to the RuleList
:param rule: the Rule
:return: None
"""
self._rules.append(rule)
def __iter__(self) -> Iterator:
"""
Get an iterator over all Rules in the RuleList
:return: the Rule iterator
"""
return iter(self._rules)
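# Hedged example (illustrative only; the rule below is made up for demonstration):
# a 1D rule saying that a live cell whose 3-cell neighbourhood is entirely alive stays alive.
def _example_rule_list() -> RuleList:
    stay_alive = Rule(window=np.array([1, 1, 1]), center=1, becomes=1)
    return RuleList([stay_alive])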
class Universe:
def __init__(self, dimensions: int, size: Union[Size, None] = None, initial: Union[np.ndarray, None] = None):
"""
Create a (initially static) universe with cells that can be either dead (0) or alive (1)
:param dimensions: the dimensionality of the universe; can currently only
be 1 or 2
:param size: the size of the universe; currently cannot be infinite, and does not
have to be provided if initial state is provided
:param initial: the initial state of the universe; if not provided, then all
cells start off dead
"""
Universe.__check_dimensions(dimensions)
self._dimensions = dimensions
if (size is None and initial is None) or (size is not None and initial is not None):
raise ValueError("Exactly one of size or initial must be provided")
if size is not None:
if size.is_infinite():
raise ValueError("Infinite universes not yet supported")
self._size = size
if dimensions == 1:
self._universe = np.array([0] * size.size)
elif dimensions == 2:
self._universe = np.array([[0] * size.size for _ in range(size.size)])
else:
Universe.__check_initial(initial, dimensions)
self._size = initial.shape[0]
self._universe = np.copy(initial)
@staticmethod
def __check_dimensions(dimensions: int) -> None:
if dimensions <= 0:
raise ValueError("Universes must have at least one dimension")
elif dimensions > 2:
raise ValueError("Higher than two-dimensional universe not supported")
@staticmethod
def __check_initial(initial: np.ndarray, dimensions: int) -> None:
if len(initial.shape) != dimensions:
raise ValueError("Initial universe must have the same number of dimensions provided")
size = initial.shape[0]
        if not np.all(np.array(initial.shape) == size):
            raise ValueError("Initial universe must be the same size in every dimension")
import cv2
import cv2 as cv
import numpy as np
import matplotlib.pyplot as plt
from numpy.linalg import inv
import math
def draw_holes(a, b, color=(0, 0, 255)):
coords = zip(a, b)
for center in coords:
x, y = map(int, center)
cv.circle(img, (x, y), 7, color, 2)
theta = 0
theta = theta*np.pi/180
plt.figure(num=1, figsize=[6,6])
# Drawing the base circles
img = 255 * np.ones(shape=[720, 960, 3], dtype=np.uint8)
plate_center = (480, 360)
o1, o2 = plate_center
plate_radius = 300
cv2.circle(img, center=plate_center, radius=plate_radius, color=(200,100,100), thickness=5)
cv2.circle(img, center=plate_center, radius=32, color=(200,100,100), thickness=5)
t = np.linspace((2*np.pi/12)+theta, (2*np.pi/12)+2*np.pi+theta, 7)
for i in t:
outer_circle_offset = 42
a = outer_circle_offset*np.cos(i)+o1
b = outer_circle_offset*np.sin(i)+o2
cv2.circle(img, center=(int(a),int(b)), radius=5, color=(200,100,100), thickness=2)
# Drawing the three small circles
t1 = [theta, theta-np.pi, theta-(3*np.pi/2)]
small_circle_offset = 225
a1 = small_circle_offset*np.cos(t1)+o1
b1 = small_circle_offset*np.sin(t1)+o2
plt.imshow(img),plt.scatter(a1, b1, s=32)
draw_holes(a1, b1, color=(0, 0, 0))
print(list(zip(a1, b1)))
# Drawing the 22 outer circles
t2 = np.linspace((2*np.pi/44)+theta, (2*np.pi/44)+2*np.pi+theta, 23)
outer_circle_offset = 275
a2 = outer_circle_offset*np.cos(t2)+o1
b2 = outer_circle_offset*np.sin(t2)+o2
plt.imshow(img),plt.scatter(a2, b2, s=90, c ="white", edgecolor ="red")
draw_holes(a2, b2)
# Drawing the inner circles
t3 = np.linspace((np.pi/2)+theta, (np.pi/2)+2*np.pi+theta, 3)
outer_circle_offset = 160
a3 = outer_circle_offset*np.cos(t3)+o1
b3 = outer_circle_offset*np.sin(t3)+o2
plt.imshow(img),plt.scatter(a3, b3, s=90, c ="white", edgecolor ="red")
draw_holes(a3, b3)
t4 = [np.pi*73.3/180+theta, np.pi*106.7/180+theta, np.pi*253.3/180+theta, np.pi*286.7/180+theta]
outer_circle_offset = 104.4
a4 = outer_circle_offset*np.cos(t4)+o1
b4 = outer_circle_offset*np.sin(t4)+o2
plt.imshow(img),plt.scatter(a4, b4, s=90, c ="white", edgecolor ="red")
draw_holes(a4, b4)
t5 = [np.pi*69.44/180+theta, np.pi*110.56/180+theta, np.pi*249.44/180+theta, np.pi*290.56/180+theta]
outer_circle_offset = 170.88
a5 = outer_circle_offset*np.cos(t5)+o1
b5 = outer_circle_offset*np.sin(t5)+o2
plt.imshow(img),plt.scatter(a5, b5, s=90, c ="white", edgecolor ="red")
draw_holes(a5, b5)
t6 = [np.pi*48.1/180+theta, np.pi*131.99/180+theta, np.pi*228.01/180+theta, np.pi*311.99/180+theta]
outer_circle_offset = 134.54
a6 = outer_circle_offset*np.cos(t6)+o1
b6 = outer_circle_offset*np.sin(t6)+o2
plt.imshow(img),plt.scatter(a6, b6, s=90, c ="white", edgecolor ="red")
draw_holes(a6, b6)
t7 = [np.pi*53.13/180+theta, np.pi*126.87/180+theta, np.pi*233.13/180+theta, np.pi*306.87/180+theta]
outer_circle_offset = 200
a7 = outer_circle_offset*np.cos(t7)+o1
"""
Source code please refer to the following:
http://web.stanford.edu/~hrhakim/NMF/code.html
Description:
This file provides the functions used in implementing the proposed method
for Non-negative matrix factorization in the paper,
"Non-negative Matrix Factorization via Archetypal Analysis".
Link = https://arxiv.org/abs/1705.02994
Re-implemented into class-based code by:
<NAME> (<EMAIL>)
"""
import numpy as np
import matplotlib.pyplot as plt
from scipy.optimize import nnls
from scipy.optimize import linprog
from hw3.libs.common.blend_dataset import BlendImgDataset
class NMF(BlendImgDataset):
def __init__(self, n_comp, o_img_size, shape, N, p):
self.n_comp = n_comp
super().__init__(o_img_size, df_dataset=True, shape=shape, N=N, p=p, all=True)
"""
Please go to the paper for the detail of the algorithm.
"""
def run(self, maxiter, delta, threshold, c1, c2, verbose, oracle):
self.W, self.H, self.L, self.Err = self.acc_palm_nmf(self.img_data.values, r=self.n_comp, maxiter=maxiter, delta=delta, threshold=threshold,
c1=c1, c2=c2, verbose=verbose, oracle=oracle)
def plot_result(self):
plt.figure()
plt.suptitle("Illustration of NMF features =%s from Zw (DR of X)" % self.n_comp)
for i in range(0, self.n_comp):
plt.subplot(1, 4, i + 1)
Vt_row = self.H[i, :].reshape(self.shape) # Reconstruct row into image for checkout
plt.title("H{}".format(i), size=8)
plt.imshow(Vt_row, cmap='gray') ## Display the image
plt.axis('off')
plt.tight_layout()
plt.show()
def D_distance(self, H1, H2):
# This function computes the 'L'-distance between the two set of vectors collected in the rows of H1 and H2. In our paper notation, this is $\mathscr{L}(H_1, H_2)$.
n1 = H1.shape[0]
n2 = H2.shape[0]
D = 0
for i in range(0, n1):
d = (np.linalg.norm(H1[i, :] - H2[0, :])) ** 2
for j in range(1, n2):
d = min(d, (np.linalg.norm(H1[i, :] - H2[j, :]) ** 2))
D = D + d
return D
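    # Restated as a formula (same content as the docstring): D(H1, H2) = sum_i min_j ||H1[i] - H2[j]||^2.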
# not used yet, in this implementation
def generate_weights(self, n, r, alpha, n_f, deg_prob):
        # This function generates 'n' weight vectors in r-dimensions, distributed as Dirichlet(alpha, alpha, ..., alpha). 'n_f' is the number of weight vectors which have zero components (induce points that lie on the faces) and 'deg_prob' is the distribution of the support size of these weight vectors. Namely, these weight vectors are distributed as Dirichlet over the set of nonzero entries which is a uniformly distributed set with a size randomly generated according to 'deg_prob'.
W = np.zeros((n, r))
for i in range(0, n_f):
deg_cdf = np.cumsum(deg_prob)
t = np.random.uniform(0, 1)
ind = np.nonzero(deg_cdf > t)
deg = np.min(ind) + 1
dirich_param = alpha * np.ones(deg)
w = np.random.dirichlet(dirich_param)
vertices = np.random.permutation(r)
vertices = vertices[0:deg]
W[i, vertices] = np.random.dirichlet(dirich_param)
for i in range(n_f, n):
dirich_param = alpha * np.ones(r)
W[i, :] = np.random.dirichlet(dirich_param)
return W
def l2distance(self, x, U, x0):
# This function computes <x-x0, (U^T*U)*(x-x0)>.
lx = np.linalg.norm(x - x0) ** 2
lpx = np.linalg.norm(np.dot(U, x - x0)) ** 2
return (lx - lpx)
def plot_H(self, H, col, type):
# This function plots the 'archetypes', (rows of 'H', when they are 2-dimensional) in 'col' color using 'type' as plot options.
v0 = H[:, 0]
v0 = np.append(v0, H[0, 0])
v1 = H[:, 1]
v1 = np.append(v1, H[0, 1])
hplt, = plt.plot(v0, v1, type, color=col, markersize=8, linewidth=3)
return hplt
def plot_data(self, X, col):
# This function plots the 'data points', (rows of 'X', when they are 2-dimensional) in 'col' color.
plt.plot(X[:, 0], X[:, 1], 'o', color=col, markersize=5)
def initH(self, X, r):
# This function computes 'r' initial archetypes given rows of 'X' as the data points. The method used here is the successive projections method explained in the paper.
n = X.shape[0]
d = X.shape[1]
H = np.zeros((r, d))
maxd = np.linalg.norm(X[0, :])
imax = 0
for i in range(1, n):
newd = np.linalg.norm(X[i, :])
if (newd > maxd):
imax = i
maxd = newd
H[0, :] = X[imax, :]
maxd = np.linalg.norm(X[0, :] - H[0, :])
imax = 0
for i in range(1, n):
newd = np.linalg.norm(X[i, :] - H[0, :])
if (newd > maxd):
imax = i
maxd = newd
H[1, :] = X[imax, :]
for k in range(2, r):
M = H[1:k, :] - np.outer(np.ones(k - 1), H[0, :])
            [U, s, V] = np.linalg.svd(M, full_matrices=False)
#!/usr/bin/env python
# Copyright 2014-2018 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
#
'''
kpoint-adapted and spin-adapted MP2
t2[i,j,a,b] = <ij|ab> / D_ij^ab
t2 and eris are never stored in full, only a partial
eri of size (nkpts,nocc,nocc,nvir,nvir)
'''
import time
import numpy as np
from pyscf import lib
from pyscf.lib import logger
from pyscf.mp import mp2
from pyscf.pbc.lib import kpts_helper
def kernel(mp, mo_energy, mo_coeff, verbose=logger.NOTE):
nocc = mp.nocc
nvir = mp.nmo - nocc
nkpts = mp.nkpts
eia = np.zeros((nocc,nvir))
eijab = np.zeros((nocc,nocc,nvir,nvir))
fao2mo = mp._scf.with_df.ao2mo
kconserv = mp.khelper.kconserv
emp2 = 0.
oovv_ij = np.zeros((nkpts,nocc,nocc,nvir,nvir), dtype=mo_coeff[0].dtype)
for ki in range(nkpts):
for kj in range(nkpts):
for ka in range(nkpts):
kb = kconserv[ki,ka,kj]
orbo_i = mo_coeff[ki][:,:nocc]
orbo_j = mo_coeff[kj][:,:nocc]
orbv_a = mo_coeff[ka][:,nocc:]
orbv_b = mo_coeff[kb][:,nocc:]
oovv_ij[ka] = fao2mo((orbo_i,orbv_a,orbo_j,orbv_b),
(mp.kpts[ki],mp.kpts[ka],mp.kpts[kj],mp.kpts[kb]),
compact=False).reshape(nocc,nvir,nocc,nvir).transpose(0,2,1,3) / nkpts
for ka in range(nkpts):
kb = kconserv[ki,ka,kj]
eia = mo_energy[ki][:nocc].reshape(-1,1) - mo_energy[ka][nocc:]
ejb = mo_energy[kj][:nocc].reshape(-1,1) - mo_energy[kb][nocc:]
eijab = lib.direct_sum('ia,jb->ijab',eia,ejb)
t2_ijab = np.conj(oovv_ij[ka]/eijab)
woovv = 2*oovv_ij[ka] - oovv_ij[kb].transpose(0,1,3,2)
emp2 += np.einsum('ijab,ijab', t2_ijab, woovv).real
emp2 /= nkpts
return emp2, None
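# For reference (standard closed-shell MP2 expression, restated rather than derived here):
# the loops above accumulate
#   E_MP2 = (1/Nk) * sum_k sum_ijab  conj(t_ijab) * (2 <ij|ab> - <ij|ba>),
# with t_ijab = <ij|ab> / (e_i + e_j - e_a - e_b), matching the t2 definition in the module docstring.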
def _frozen_sanity_check(frozen, mo_occ, kpt_idx):
'''Performs a few sanity checks on the frozen array and mo_occ.
Specific tests include checking for duplicates within the frozen array
and making sure we didn't freeze either all the occupied orbitals or all
the unoccupied orbitals.
Args:
frozen (array_like of int): The orbital indices that will be frozen.
mo_occ (:obj:`ndarray` of int): The occupuation number for each orbital
resulting from a mean-field-like calculation.
kpt_idx (int): The k-point that `mo_occ` and `frozen` belong to.
'''
frozen = np.array(frozen)
nocc = np.count_nonzero(mo_occ > 0)
nvir = len(mo_occ) - nocc
assert nocc, 'No occupied orbitals?\n\nnocc = %s\nmo_occ = %s' % (nocc, mo_occ)
all_frozen_unique = (len(frozen) - len(np.unique(frozen))) == 0
if not all_frozen_unique:
raise RuntimeError('Frozen orbital list contains duplicates!\n\nkpt_idx %s\n'
'frozen %s' % (kpt_idx, frozen))
if len(frozen) > 0 and np.max(frozen) > len(mo_occ) - 1:
raise RuntimeError('Freezing orbital not in MO list!\n\nkpt_idx %s\n'
'frozen %s\nmax orbital idx %s' % (kpt_idx, frozen, len(mo_occ) - 1))
occ_idx = np.where(mo_occ > 0)
max_occ_idx = np.max(occ_idx)
frozen_nocc = len(frozen[frozen <= max_occ_idx])
if frozen_nocc >= nocc:
raise RuntimeError('Cannot freeze all occupied orbitals!:\n\n'
'kpt_idx %s\nfrozen %s\nmo_occ %s' % (kpt_idx, frozen, mo_occ))
frozen_nvir = len(frozen[frozen > max_occ_idx])
if frozen_nvir >= nvir:
raise RuntimeError('Cannot freeze all virtual orbitals!:\n\n'
'kpt_idx %s\nfrozen %s\nmo_occ %s' % (kpt_idx, frozen, mo_occ))
def get_nocc(mp, per_kpoint=False):
'''Number of occupied orbitals for k-point calculations.
Number of occupied orbitals for use in a calculation with k-points, taking into
account frozen orbitals.
Args:
mp (:class:`MP2`): An instantiation of an MP2, SCF, or other mean-field object.
per_kpoint (bool, optional): True returns the number of occupied
orbitals at each k-point. False gives the max of this list.
Returns:
nocc (int, list of int): Number of occupied orbitals. For return type, see description of arg
`per_kpoint`.
'''
if mp._nocc is not None:
return mp._nocc
if isinstance(mp.frozen, (int, np.integer)):
nocc = [(np.count_nonzero(mp.mo_occ[ikpt]) - mp.frozen) for ikpt in range(mp.nkpts)]
elif isinstance(mp.frozen[0], (int, np.integer)):
[_frozen_sanity_check(mp.frozen, mp.mo_occ[ikpt], ikpt) for ikpt in range(mp.nkpts)]
nocc = []
for ikpt in range(mp.nkpts):
max_occ_idx = np.max(np.where(mp.mo_occ[ikpt] > 0))
frozen_nocc = np.sum(np.array(mp.frozen) <= max_occ_idx)
nocc.append(np.count_nonzero(mp.mo_occ[ikpt]) - frozen_nocc)
elif isinstance(mp.frozen[0], (list, np.ndarray)):
nkpts = len(mp.frozen)
if nkpts != mp.nkpts:
raise RuntimeError('Frozen list has a different number of k-points (length) than passed in mean-field/'
'correlated calculation. \n\nCalculation nkpts = %d, frozen list = %s '
'(length = %d)' % (mp.nkpts, mp.frozen, nkpts))
[_frozen_sanity_check(frozen, mo_occ, ikpt) for ikpt, frozen, mo_occ in zip(range(nkpts), mp.frozen, mp.mo_occ)]
nocc = []
for ikpt, frozen in enumerate(mp.frozen):
max_occ_idx = np.max(np.where(mp.mo_occ[ikpt] > 0))
frozen_nocc = np.sum(np.array(frozen) <= max_occ_idx)
            nocc.append(np.count_nonzero(mp.mo_occ[ikpt]) - frozen_nocc)
import numpy as np
import random
import os
import glob
import matplotlib.pyplot as pp
from matplotlib import patches
#os.environ["CUDA_VISIBLE_DEVICES"]="-1" #disable Tensorflow GPU usage, these simple graphs run faster on CPU
import tensorflow as tf
import DRMM as DRMM
from matplotlib import rc
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--windowless', default=False,action='store_true')
parser.add_argument('--train', default=False,action='store_true')
parser.add_argument('--nIter', type=int, default=500000)
parser.add_argument('--nSamples', type=int, default=10)
parser.add_argument('--nData', type=int, default=1000000)
args = parser.parse_args()
nIter=args.nIter
#Tex font stuff commented out
#rc('text', usetex=True)
#rc('font',**{'family':'serif','serif':['Times']})
fontsize=16
#Globals
nLayers=10
nComponentsPerLayer=256
nData=args.nData
onlineDataGeneration=False #True is slower, but produces infinite minibatch data variety
rigType="fullbody"
useFloorIEQ=False
nBatch=256 #training minibatch size
nSamplingBatch = 64 #inference minibatch size
modelFileName="./IKTest/{}_model_{}x{}".format(rigType,nComponentsPerLayer,nLayers)
train=args.train or (not os.path.isfile(modelFileName+".index"))
#plot limits
ylim=[-0.1,2.0]
xlim=[-1,1]
#helper
def threshold(x,val):
return tf.clip_by_value(tf.sign(x-val),0.0,1.0)
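# (The sign/clip composition above is just a graph-friendly way of writing 1.0 if x > val else 0.0.)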
#A bone class for constructing 2D articulated characters such as a robot arm.
#geometry is of shape [nPoints,2]
#pos is in parent local coordinates, denoting the point around which the geometry rotates
#it is assumed that the initial geometry corresponds to local angle 0
class Bone:
def __init__(self,pos,geometry,minAngle,maxAngle):
self.localPos=pos.copy()
if geometry is not None:
self.localGeometry=geometry.copy()
else:
self.localGeometry=None
self.geometry=None
self.minAngle=minAngle
self.maxAngle=maxAngle
self.children=[]
self.parent=None
self.localAngle=0
#transform point from local to global
def l2g(self,p):
returnAsVector=False
if len(p.shape)==1:
p=np.reshape(p,[1,-1])
returnAsVector=True
result=np.matmul(p,self.R)
result+=self.pos
#if self.parent is not None:
# result+=np.reshape(self.parent.pos,[1,-1])
if returnAsVector:
result=np.reshape(result,[2])
return result
def addChild(self,child):
self.children.append(child)
child.parent=self
def updateGlobal(self):
#update angle
self.angle=self.localAngle
if self.parent is not None:
self.angle+=self.parent.angle
#rotation matrix
self.R=np.array([[np.cos(self.angle),-np.sin(self.angle)],
                         [np.sin(self.angle), np.cos(self.angle)]])
import sys
import os
import traceback
import boto3
import numpy as np
import io
import argparse
from PIL import Image
import math
import shortuuid as su
import json
import scipy.ndimage as nd
from skimage import io
from skimage.filters import gaussian
from skimage.exposure import histogram
from skimage.filters import threshold_otsu
from skimage.morphology import binary_opening
from skimage.morphology import label
from skimage.exposure import histogram
import bioimageimage as bi
import bioimagepath as bp
import bioims
print("Args=")
print(sys.argv[1:])
print("==")
# """
# This script takes as input a multi-channel image and computes ROIs within that image
# using an approach designed for isolation biological cells. To find cell centers, it uses a single channel
# assumed to have good single/noise for cell nuclei. Having identified the center
# of each cell, it crops across all channels at that location and creates a mulit-channel
# numpy array of the cropped data.
# Basic steps for this process include:
# 1. Use of a flat-field background image to normalize the source image
# 2. Use of a logorithmic normalization function to better distribute variance across the intensity range
# 3. Use of traditional methods for identifying cell centers (e.g., Otsu's method and dialtion/erosion)
# Edge-case handling for cell centers is handled as follows:
# The size for the ROI is specified, which is uniformly applied to each dimension. Edge handling can be considered in three different cases.
# 'r' is the roisize, and r/2 is half of roisize.
# Case 1: the dimension is much larger than ROI size:
# If the entire ROI does not fit (i.e., if the center is closer than r/2 to the edge, then the ROI is discarded).
# Case 2: the dimension is smaller than the ROI size:
# Then the generated ROI object is simply limited in this dimension to the maximum size, and includes the full range, regardless of where the
# center is in that dimension.
# Case 3: the dimension is slightly larger than the ROI size:
# Then the generated ROI object is full-size, but the effective center is shifted to accomplish this. The "maximum shift" will be computed
# as follows: min( (r - DimSize/4), 0)
# The input is an imageId, from which any needed metadata can be obtained via the bioims tool.
# The output has two components:
# 1) a numpy array with all ROIs for a given image, each of which in turn is a multi-channel data structure
# 2) a json file listing the ROI coordinates for each ROI image wrt its source image
# The above output is generated for each image in the input manifest.
# Output ROI coordinate example:
# {
# roisize: { // channel, z, y, x
# z: xxx,
# y: xxx,
# x: xxx
# },
# roi: [
# {
# z: xxx,
# y: xxx,
# x: xxx
# }
# ]
# }
# """
# Example imageInfo:
# {'Item':
# {'trainCategory': 'moa',
# 'imageId': '115kKdtCLvJ9HodXCQDrU6',
# 'plateId': 'bWb5wnbxsPPUyTVhfjV8Wh',
# 'trainId': 'origin',
# 'depth': '1',
# 'plateSourceId': 'Week1_22401',
# 'bucket': 'bioimagesearchbbbc021stack-bbbc021bucket544c3e64-10ecnwo51127',
# 'experiment': 'BBBC021_v1',
# 'channelKeys': [
# {'name': 'dapi', 'keysuffix': 'Week1_22401/Week1_150607_G09_s3_w1F733F5D1-A112-40AD-B324-46E9D649D7B6.tif'},
# {'name': 'tubulin', 'keysuffix': 'Week1_22401/Week1_150607_G09_s3_w2C3850DE6-AF1C-4E87-B434-F67337C6BF5C.tif'},
# {'name': 'actin', 'keysuffix': 'Week1_22401/Week1_150607_G09_s3_w4413C1A1A-408E-4C10-8B4E-75BA019F4815.tif'}],
# 'wellId': 'oMKATjf4z3b6kxnsHtaajV',
# 'imageSourceId': 'Week1_150607_G09_s3_w1F733F5D1-A112-40AD-B324-46E9D649D7B6',
# 'messageId': 'a725deab-2417-4b7a-958a-fc215aa49292',
# 'searchReady': 'VALIDATED',
# 'height': '1024',
# 'width': '1280',
# 'wellSourceId': 'G09',
# 'channels': '3',
# 'key': '',
# 'createTimestamp': '1608160666210'
# }
# }
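# Hedged sketch (the helper below is an illustration written for this description, not part of
# the original pipeline; its name and the exact discard threshold are assumptions): one way to
# turn the per-dimension edge-case rules described in the comments above into ROI bounds.
def _roi_bounds_sketch(center, dim_size, roisize):
    half = roisize // 2
    if dim_size <= roisize:
        # Case 2: dimension smaller than the ROI - keep the full available range
        return 0, dim_size
    start = int(center) - half
    if 0 <= start and start + roisize <= dim_size:
        # ROI fits as-is
        return start, start + roisize
    # Near an edge: either shift the window back inside (Case 3) or discard it (Case 1)
    if dim_size > 2 * roisize:
        return None  # Case 1: plenty of room overall, so a center this close to the edge is dropped
    shifted = min(max(start, 0), dim_size - roisize)
    return shifted, shifted + roisize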
CONFIG_ROI_SIZE = "image-preprocessing-roi-size"
CONFIG_MIN_VOXELS = "image-preprocessing-min-voxels"
parser = argparse.ArgumentParser()
parser.add_argument('--region', type=str, help='AWS region')
parser.add_argument('--bucket', type=str, help='artifact bucket')
parser.add_argument('--imageId', type=str, help='imageId to process')
parser.add_argument('--embeddingName', type=str, help='Embedding name')
parser.add_argument('--describeStacks', type=str, help='Describe Stacks JSON', default='')
args = parser.parse_args()
print("region={} bucket={} imageId={} embeddingName={}".format(args.region, args.bucket, args.imageId, args.embeddingName))
hasDescribeStacks=False
if len(args.describeStacks) > 0:
hasDescribeStacks=True
print("describeStacks={}".format(args.describeStacks))
os.environ['AWS_DEFAULT_REGION'] = args.region
s3c = boto3.client('s3')
if hasDescribeStacks:
params = {
"bucket" : args.bucket,
"key" : args.describeStacks
}
imageManagementClient = bioims.client('image-management', params)
configurationClient = bioims.client('configuration', params)
labelClient = bioims.client('label', params)
else:
imageManagementClient = bioims.client('image-management')
configurationClient = bioims.client('configuration')
labelClient = bioims.client('label')
imageInfo1 = imageManagementClient.getImageInfo(args.imageId, "origin")
imageInfo = imageInfo1['Item']
isLabeled = False
if 'trainLabel' in imageInfo:
isLabeled = True
roisize = int(configurationClient.getParameter(CONFIG_ROI_SIZE))
minvoxels = int(configurationClient.getParameter(CONFIG_MIN_VOXELS))
segmentationChannelName = 'dapi'
# trainKey = "artifact/train/" + args.embeddingName + "/plate/" + imageInfo['plateId'] + "/image-" + args.imageId + "-train.npy"
# labelKey = "artifact/train/" + args.embeddingName + "/plate/" + imageInfo['plateId'] + "/image-" + args.imageId + "-label.npy"
# noLabelKey = "artifact/train/" + args.embeddingName + "/plate/" + imageInfo['plateId'] + "/image-" + args.imageId + "-label.NONE"
# roiKey = "artifact/train/" + args.embeddingName + "/plate/" + imageInfo['plateId'] + "/image-" + args.imageId + "-roi.json"
trainKey = bp.getTrainKey(args.embeddingName, imageInfo['plateId'], args.imageId)
labelKey = bp.getLabelKey(args.embeddingName, imageInfo['plateId'], args.imageId)
noLabelKey = bp.getNoLabelKey(args.embeddingName, imageInfo['plateId'], args.imageId)
subclassKey = bp.getSubclassKey(args.embeddingName, imageInfo['plateId'], args.imageId)
roiKey = bp.getRoiKey(args.embeddingName, imageInfo['plateId'], args.imageId)
if (bi.s3ObjectExists(args.bucket, trainKey) and
bi.s3ObjectExists(args.bucket, roiKey) and
bi.s3ObjectExists(args.bucket, subclassKey) and
(bi.s3ObjectExists(args.bucket, labelKey) or bi.s3ObjectExists(args.bucket, noLabelKey))):
print("All files exist - skipping")
sys.exit(0)
width = int(imageInfo['width'])
height = int(imageInfo['height'])
depth = int(imageInfo['depth'])
labelIndex = -1
subclassIndex = -1
if isLabeled:
labelDict = {}
labelList = labelClient.listLabels(imageInfo['trainCategory'])
for lc in labelList:
labelDict[lc[0]]=lc[1]
labelIndex = int(labelDict[imageInfo['trainLabel']])
subclassDict = {}
subclassList = labelClient.listLabels(imageInfo['trainSubclassType'])
for ls in subclassList:
subclassDict[ls[0]]=ls[1]
subclassValue = imageInfo['trainSubclass']
cnws ="".join(subclassValue.split())
c2 = cnws.replace('/','-')
subclassIndex = int(subclassDict[c2])
def getFlatFieldKeyForChannel(channelName):
return bp.getFlatFieldKeyForChannel(imageInfo['plateId'], args.embeddingName, channelName)
def findCentersFromLabels(labels):
centers=[]
maxLabel=labels.max()
    labelCounts=np.zeros(shape=(maxLabel+1), dtype=int)
labelPositions=[]
for d in range(len(labels.shape)):
        labelPositions.append(np.zeros(shape=(maxLabel+1), dtype=int))
for idx, v in np.ndenumerate(labels):
labelCounts[v] += 1
ix=0
for iv in idx:
labelPositions[ix][v] += iv
ix += 1
for lp in range(maxLabel+1):
# Skip background
if (lp!=0):
if (labelCounts[lp]>=minvoxels):
ca = []
ca.append(lp)
ca.append(labelCounts[lp])
for d in range(len(labels.shape)):
na = labelPositions[d]
ca.append(na[lp]/labelCounts[lp])
centers.append(ca)
return centers
def computeCellCenters(pixels):
if pixels.min()==pixels.max():
return []
otsuThreshold = threshold_otsu(pixels)
binaryPixels = pixels >= otsuThreshold
ed1=binary_opening(binaryPixels)
ed2=binary_opening(ed1)
    ed2int = ed2.astype(int)
ed2Labels, labelCount = nd.label(ed2int)
centers=findCentersFromLabels(ed2Labels)
return centers
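# Hedged illustration (synthetic input, not part of the pipeline below): the segmentation above
# is Otsu threshold -> two rounds of binary opening -> connected-component labelling, keeping
# components with at least `minvoxels` voxels and returning [label, voxel_count, centroid...]:
#   blob = np.zeros((64, 64)); blob[20:30, 20:30] = 1.0
#   computeCellCenters(blob)   # -> roughly [[1, ~100, 24.5, 24.5]] provided minvoxels is small enough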
inputChannels = imageInfo['channelKeys']
# Initialize vars
normedImages = {}
pix_shape = []
input_arr = []
segment_index = 0
i=0
# For each channel, do an initial linear normalizations, and find the segmentation channel index
inputBucket = imageInfo['bucket']
flatFieldData = []
for inputChannel in inputChannels:
channelFullKey = imageInfo['key'] + inputChannel['keysuffix']
normedImage = bi.computeNormedImage(inputBucket, channelFullKey)
pix_shape.append(normedImage.shape)
input_arr.append(normedImage)
if inputChannel['name']==segmentationChannelName:
segment_index=i
flatFieldKey = getFlatFieldKeyForChannel(inputChannel['name'])
flatFieldData1 = bi.getNumpyArrayFromS3(args.bucket, flatFieldKey)
if len(flatFieldData1.shape)==2:
flatFieldData1 = np.expand_dims(flatFieldData1, axis=0)
fmin = flatFieldData1.min()
fmax = flatFieldData1.max()
flatFieldData.append(flatFieldData1)
i+=1
if not bi.checkPixShape(pix_shape):
sys.exit("Error: image shapes of channels do not match")
# For each channel, independently do logarithmic normalization using its flatfield image
input_data = np.array(input_arr)
import os
from . import utils
import numpy as np
from scipy.stats import scoreatpercentile
from scipy.optimize import curve_fit
from scipy import exp
import operator
from copy import copy, deepcopy
from collections import defaultdict, Counter
import re
from pyteomics import parser, mass, fasta, auxiliary as aux, achrom
try:
from pyteomics import cmass
except ImportError:
cmass = mass
import subprocess
from sklearn import linear_model
import tempfile
import pandas as pd
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from multiprocessing import Queue, Process, cpu_count
from itertools import chain
try:
import seaborn
seaborn.set(rc={'axes.facecolor':'#ffffff'})
seaborn.set_style('whitegrid')
except:
pass
from .utils import calc_sf_all, recalc_spc
import lightgbm as lgb
import pandas as pd
from sklearn.model_selection import train_test_split
from scipy.stats import zscore, spearmanr
import pandas as pd
from pyteomics import pepxml, achrom, auxiliary as aux, mass, fasta, mzid, parser
from pyteomics import electrochem
import numpy as np
import random
SEED = 42
from sklearn.model_selection import train_test_split
from os import path, mkdir
from collections import Counter, defaultdict
import warnings
import pylab as plt
warnings.formatwarning = lambda msg, *args, **kw: str(msg) + '\n'
import pandas as pd
from sklearn.model_selection import train_test_split, KFold
import os
from collections import Counter, defaultdict
from scipy.stats import scoreatpercentile
from sklearn.isotonic import IsotonicRegression
import warnings
import numpy as np
import matplotlib
import numpy
import pandas
import random
import sklearn
import matplotlib.pyplot as plt
from sklearn import (
feature_extraction, feature_selection, decomposition, linear_model,
model_selection, metrics, svm
)
import scipy
from scipy.stats import rankdata
from copy import deepcopy
import csv
from scipy.stats import rankdata
import lightgbm as lgb
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from itertools import chain
import time as timemodule
import ast
from sklearn import metrics
SEED = 50
def worker_RT(qin, qout, shift, step, RC=False, elude_path=False, ns=False, nr=False, win_sys=False):
pepdict = dict()
if elude_path:
outtrain_name = os.path.join(tempfile.gettempdir(), os.urandom(24).hex())
outtrain = open(outtrain_name, 'w')
outres_name = os.path.join(tempfile.gettempdir(), os.urandom(24).hex())
for seq, RT in zip(ns, nr):
outtrain.write(seq + '\t' + str(RT) + '\n')
outtrain.close()
outtest_name = os.path.join(tempfile.gettempdir(), os.urandom(24).hex())
outtest = open(outtest_name, 'w')
maxval = len(qin)
start = 0
while start + shift < maxval:
item = qin[start+shift]
outtest.write(item + '\n')
start += step
outtest.close()
subprocess.call([elude_path, '-t', outtrain_name, '-e', outtest_name, '-a', '-o', outres_name])
for x in open(outres_name).readlines()[3:]:
seq, RT = x.strip().split('\t')
pepdict[seq] = float(RT)
else:
maxval = len(qin)
start = 0
while start + shift < maxval:
item = qin[start+shift]
pepdict[item] = achrom.calculate_RT(item, RC)
start += step
if win_sys:
return pepdict
else:
qout.put(pepdict)
qout.put(None)
def final_iteration(resdict, mass_diff, rt_diff, pept_prot, protsN, base_out_name, prefix, isdecoy, isdecoy_key, escore, fdr, nproc, fname=False):
n = nproc
prots_spc_basic = dict()
p1 = set(resdict['seqs'])
pep_pid = defaultdict(set)
pid_pep = defaultdict(set)
banned_dict = dict()
for pep, pid in zip(resdict['seqs'], resdict['ids']):
pep_pid[pep].add(pid)
pid_pep[pid].add(pep)
if pep in banned_dict:
banned_dict[pep] += 1
else:
banned_dict[pep] = 1
if len(p1):
prots_spc_final = dict()
prots_spc_copy = False
prots_spc2 = False
unstable_prots = set()
p0 = False
names_arr = False
tmp_spc_new = False
decoy_set = False
while 1:
if not prots_spc2:
best_match_dict = dict()
n_map_dict = defaultdict(list)
for k, v in protsN.items():
n_map_dict[v].append(k)
decoy_set = set()
for k in protsN:
if isdecoy_key(k):
decoy_set.add(k)
decoy_set = list(decoy_set)
prots_spc2 = defaultdict(set)
for pep, proteins in pept_prot.items():
if pep in p1:
for protein in proteins:
prots_spc2[protein].add(pep)
for k in protsN:
if k not in prots_spc2:
prots_spc2[k] = set([])
prots_spc2 = dict(prots_spc2)
unstable_prots = set(prots_spc2.keys())
top100decoy_N = sum([val for key, val in protsN.items() if isdecoy_key(key)])
names_arr = np.array(list(prots_spc2.keys()))
n_arr = np.array([protsN[k] for k in names_arr])
tmp_spc_new = dict((k, len(v)) for k, v in prots_spc2.items())
top100decoy_score_tmp = [tmp_spc_new.get(dprot, 0) for dprot in decoy_set]
top100decoy_score_tmp_sum = float(sum(top100decoy_score_tmp))
tmp_spc = tmp_spc_new
prots_spc = tmp_spc_new
if not prots_spc_copy:
prots_spc_copy = deepcopy(prots_spc)
for idx, v in enumerate(decoy_set):
if v in unstable_prots:
top100decoy_score_tmp_sum -= top100decoy_score_tmp[idx]
top100decoy_score_tmp[idx] = prots_spc.get(v, 0)
top100decoy_score_tmp_sum += top100decoy_score_tmp[idx]
p = float(sum(top100decoy_score_tmp)) / top100decoy_N
p = top100decoy_score_tmp_sum / top100decoy_N
n_change = set(protsN[k] for k in unstable_prots)
for n_val in n_change:
for k in n_map_dict[n_val]:
v = prots_spc[k]
if n_val not in best_match_dict or v > prots_spc[best_match_dict[n_val]]:
best_match_dict[n_val] = k
n_arr_small = []
names_arr_small = []
v_arr_small = []
for k, v in best_match_dict.items():
n_arr_small.append(k)
names_arr_small.append(v)
v_arr_small.append(prots_spc[v])
prots_spc_basic = dict()
all_pvals = calc_sf_all(np.array(v_arr_small), n_arr_small, p)
for idx, k in enumerate(names_arr_small):
prots_spc_basic[k] = all_pvals[idx]
if not p0:
p0 = float(p)
prots_spc_tmp = dict()
v_arr = np.array([prots_spc[k] for k in names_arr])
all_pvals = calc_sf_all(v_arr, n_arr, p)
for idx, k in enumerate(names_arr):
prots_spc_tmp[k] = all_pvals[idx]
sortedlist_spc = sorted(prots_spc_tmp.items(), key=operator.itemgetter(1))[::-1]
with open(base_out_name + '_proteins_full_noexclusion.tsv', 'w') as output:
output.write('dbname\tscore\tmatched peptides\ttheoretical peptides\n')
for x in sortedlist_spc:
output.write('\t'.join((x[0], str(x[1]), str(prots_spc_copy[x[0]]), str(protsN[x[0]]))) + '\n')
best_prot = utils.keywithmaxval(prots_spc_basic)
best_score = prots_spc_basic[best_prot]
unstable_prots = set()
if best_prot not in prots_spc_final:
prots_spc_final[best_prot] = best_score
banned_pids = set()
for pep in prots_spc2[best_prot]:
for pid in pep_pid[pep]:
banned_pids.add(pid)
for pid in banned_pids:
for pep in pid_pep[pid]:
banned_dict[pep] -= 1
if banned_dict[pep] == 0:
for bprot in pept_prot[pep]:
tmp_spc_new[bprot] -= 1
unstable_prots.add(bprot)
else:
v_arr = np.array([prots_spc[k] for k in names_arr])
all_pvals = calc_sf_all(v_arr, n_arr, p)
for idx, k in enumerate(names_arr):
prots_spc_basic[k] = all_pvals[idx]
for k, v in prots_spc_basic.items():
if k not in prots_spc_final:
prots_spc_final[k] = v
break
prot_fdr = aux.fdr(prots_spc_final.items(), is_decoy=isdecoy)
if prot_fdr >= 12.5 * fdr:
                v_arr = np.array([prots_spc[k] for k in names_arr])
"""
"""
from numbers import Real
from typing import Union, Optional, Any, List, Tuple, Sequence
import numpy as np
from easydict import EasyDict as ED
from utils import dict_to_str
from cfg import BaseCfg
__all__ = [
"CPSC2020_loss",
"CPSC2020_score",
"eval_score",
]
def CPSC2020_loss(y_true:np.ndarray, y_pred:np.ndarray, y_indices:np.ndarray, dtype:type=str, verbose:int=0) -> int:
""" finished, updated with the latest (updated on 2020.8.31) official function
Parameters:
-----------
y_true: ndarray,
array of ground truth of beat types
    y_pred: ndarray,
array of predictions of beat types
y_indices: ndarray,
indices of beat (rpeak) in the original ecg signal
dtype: type, default str,
dtype of `y_true` and `y_pred`
Returns:
--------
total_loss: int,
the total loss of all ectopic beat types (SPB, PVC)
"""
classes = ['S', 'V']
truth_arr = {}
pred_arr = {}
if dtype == str:
for c in classes:
truth_arr[c] = y_indices[np.where(y_true==c)[0]]
pred_arr[c] = y_indices[np.where(y_pred==c)[0]]
elif dtype == int:
for c in classes:
truth_arr[c] = y_indices[np.where(y_true==BaseCfg.class_map[c])[0]]
pred_arr[c] = y_indices[np.where(y_pred==BaseCfg.class_map[c])[0]]
true_positive = {c: 0 for c in classes}
for c in classes:
for tc in truth_arr[c]:
pc = np.where(abs(pred_arr[c]-tc) <= BaseCfg.bias_thr)[0]
if pc.size > 0:
true_positive[c] += 1
false_positive = {
c: len(pred_arr[c]) - true_positive[c] for c in classes
}
false_negative = {
c: len(truth_arr[c]) - true_positive[c] for c in classes
}
false_positive_loss = {c: 1 for c in classes}
false_negative_loss = {c: 5 for c in classes}
if verbose >= 1:
print(f"true_positive = {dict_to_str(true_positive)}")
print(f"false_positive = {dict_to_str(false_positive)}")
print(f"false_negative = {dict_to_str(false_negative)}")
total_loss = sum([
false_positive[c] * false_positive_loss[c] + false_negative[c] * false_negative_loss[c] \
for c in classes
])
return total_loss
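# Hedged worked example (illustrative, and assuming the r-peak indices used are farther apart
# than BaseCfg.bias_thr): with one correctly found SPB, one spurious SPB prediction
# (false positive, cost 1) and one missed PVC (false negative, cost 5) the loss is 1 + 5 = 6:
#   CPSC2020_loss(np.array(['S', 'N', 'V']), np.array(['S', 'S', 'N']),
#                 np.array([1000, 5000, 9000]), dtype=str)   # -> 6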
def CPSC2020_score(spb_true:List[np.ndarray], pvc_true:List[np.ndarray], spb_pred:List[np.ndarray], pvc_pred:List[np.ndarray], verbose:int=0) -> Union[Tuple[int],dict]:
""" finished, checked,
Score Function for all (test) records
Parameters:
-----------
spb_true, pvc_true, spb_pred, pvc_pred: list of ndarray,
verbose: int
Returns:
--------
retval: tuple or dict,
tuple of (negative) scores for each ectopic beat type (SPB, PVC), or
dict of more scoring details, including
- total_loss: sum of loss of each ectopic beat type (PVC and SPB)
- true_positive: number of true positives of each ectopic beat type
- false_positive: number of false positives of each ectopic beat type
- false_negative: number of false negatives of each ectopic beat type
"""
s_score = np.zeros([len(spb_true), ], dtype=int)
v_score = np.zeros([len(spb_true), ], dtype=int)
true_positive = ED({'S':0, 'V':0})
false_positive = ED({'S':0, 'V':0})
false_negative = ED({'S':0, 'V':0})
## Scoring ##
for i, (s_ref, v_ref, s_pos, v_pos) in enumerate(zip(spb_true, pvc_true, spb_pred, pvc_pred)):
s_tp = 0
s_fp = 0
s_fn = 0
v_tp = 0
v_fp = 0
v_fn = 0
# SPB
if s_ref.size == 0:
s_fp = len(s_pos)
else:
for m, ans in enumerate(s_ref):
s_pos_cand = np.where(abs(s_pos-ans) <= BaseCfg.bias_thr)[0]
if s_pos_cand.size == 0:
s_fn += 1
else:
s_tp += 1
s_fp += len(s_pos_cand) - 1
# PVC
if v_ref.size == 0:
v_fp = len(v_pos)
else:
for m, ans in enumerate(v_ref):
v_pos_cand = np.where(abs(v_pos-ans) <= BaseCfg.bias_thr)[0]
if v_pos_cand.size == 0:
v_fn += 1
else:
v_tp += 1
v_fp += len(v_pos_cand) - 1
# calculate the score
s_score[i] = s_fp * (-1) + s_fn * (-5)
v_score[i] = v_fp * (-1) + v_fn * (-5)
if verbose >= 3:
print(f"for the {i}-th record")
print(f"s_tp = {s_tp}, s_fp = {s_fp}, s_fn = {s_fn}")
print(f"v_tp = {v_tp}, v_fp = {v_fp}, v_fn = {v_fn}")
print(f"s_score[{i}] = {s_score[i]}, v_score[{i}] = {v_score[i]}")
true_positive.S += s_tp
true_positive.V += v_tp
false_positive.S += s_fp
false_positive.V += v_fp
false_negative.S += s_fn
false_negative.V += v_fn
Score1 = np.sum(s_score)
Score2 = np.sum(v_score)
if verbose >= 1:
retval = ED(
total_loss=-(Score1+Score2),
class_loss={'S':-Score1, 'V':-Score2},
true_positive=true_positive,
false_positive=false_positive,
false_negative=false_negative,
)
else:
retval = Score1, Score2
return retval
# -------------------------------------------------------
# the following are borrowed from CINC2020
# for classification of segments of ECGs using ECG_CRNN
def eval_score(classes:List[str], truth:Sequence, binary_pred:Sequence, scalar_pred:Sequence) -> Tuple[float]:
""" finished, checked,
for classification of segments of ECGs
Parameters:
-----------
classes: list of str,
list of all the classes, in the format of abbrevations
truth: sequence,
ground truth array, of shape (n_records, n_classes), with values 0 or 1
binary_pred: sequence,
binary predictions, of shape (n_records, n_classes), with values 0 or 1
scalar_pred: sequence,
probability predictions, of shape (n_records, n_classes), with values within [0,1]
Returns:
--------
auroc: float,
auprc: float,
accuracy: float,
f_measure: float,
f_beta_measure: float,
g_beta_measure: float,
"""
_truth = np.array(truth)
_binary_pred = np.array(binary_pred)
_scalar_pred = np.array(scalar_pred)
print('- AUROC and AUPRC...')
auroc, auprc = compute_auc(_truth, _scalar_pred)
print('- Accuracy...')
accuracy = compute_accuracy(_truth, _binary_pred)
print('- F-measure...')
f_measure = compute_f_measure(_truth, _binary_pred)
print('- F-beta and G-beta measures...')
f_beta_measure, g_beta_measure = compute_beta_measures(_truth, _binary_pred, beta=2)
print('Done.')
# Return the results.
return auroc, auprc, accuracy, f_measure, f_beta_measure, g_beta_measure
# Compute recording-wise accuracy.
def compute_accuracy(labels:np.ndarray, outputs:np.ndarray) -> float:
""" checked,
"""
num_recordings, num_classes = np.shape(labels)
num_correct_recordings = 0
for i in range(num_recordings):
if np.all(labels[i, :]==outputs[i, :]):
num_correct_recordings += 1
return float(num_correct_recordings) / float(num_recordings)
# Compute confusion matrices.
def compute_confusion_matrices(labels:np.ndarray, outputs:np.ndarray, normalize:bool=False) -> np.ndarray:
""" checked,
"""
# Compute a binary confusion matrix for each class k:
#
# [TN_k FN_k]
# [FP_k TP_k]
#
# If the normalize variable is set to true, then normalize the contributions
# to the confusion matrix by the number of labels per recording.
num_recordings, num_classes = np.shape(labels)
if not normalize:
A = np.zeros((num_classes, 2, 2))
for i in range(num_recordings):
for j in range(num_classes):
if labels[i, j]==1 and outputs[i, j]==1: # TP
A[j, 1, 1] += 1
elif labels[i, j]==0 and outputs[i, j]==1: # FP
A[j, 1, 0] += 1
elif labels[i, j]==1 and outputs[i, j]==0: # FN
A[j, 0, 1] += 1
elif labels[i, j]==0 and outputs[i, j]==0: # TN
A[j, 0, 0] += 1
else: # This condition should not happen.
raise ValueError('Error in computing the confusion matrix.')
else:
A = np.zeros((num_classes, 2, 2))
for i in range(num_recordings):
normalization = float(max(np.sum(labels[i, :]), 1))
for j in range(num_classes):
if labels[i, j]==1 and outputs[i, j]==1: # TP
A[j, 1, 1] += 1.0/normalization
elif labels[i, j]==0 and outputs[i, j]==1: # FP
A[j, 1, 0] += 1.0/normalization
elif labels[i, j]==1 and outputs[i, j]==0: # FN
A[j, 0, 1] += 1.0/normalization
elif labels[i, j]==0 and outputs[i, j]==0: # TN
A[j, 0, 0] += 1.0/normalization
else: # This condition should not happen.
raise ValueError('Error in computing the confusion matrix.')
return A
# Compute macro F-measure.
def compute_f_measure(labels:np.ndarray, outputs:np.ndarray) -> float:
""" checked,
"""
num_recordings, num_classes = np.shape(labels)
A = compute_confusion_matrices(labels, outputs)
f_measure = np.zeros(num_classes)
for k in range(num_classes):
tp, fp, fn, tn = A[k, 1, 1], A[k, 1, 0], A[k, 0, 1], A[k, 0, 0]
if 2 * tp + fp + fn:
f_measure[k] = float(2 * tp) / float(2 * tp + fp + fn)
else:
f_measure[k] = float('nan')
macro_f_measure = np.nanmean(f_measure)
return macro_f_measure
# Compute F-beta and G-beta measures from the unofficial phase of the Challenge.
def compute_beta_measures(labels:np.ndarray, outputs:np.ndarray, beta:Real) -> Tuple[float, float]:
""" checked,
"""
num_recordings, num_classes = np.shape(labels)
A = compute_confusion_matrices(labels, outputs, normalize=True)
f_beta_measure = np.zeros(num_classes)
g_beta_measure = np.zeros(num_classes)
for k in range(num_classes):
tp, fp, fn, tn = A[k, 1, 1], A[k, 1, 0], A[k, 0, 1], A[k, 0, 0]
if (1+beta**2)*tp + fp + beta**2*fn:
f_beta_measure[k] = float((1+beta**2)*tp) / float((1+beta**2)*tp + fp + beta**2*fn)
else:
f_beta_measure[k] = float('nan')
if tp + fp + beta*fn:
g_beta_measure[k] = float(tp) / float(tp + fp + beta*fn)
else:
g_beta_measure[k] = float('nan')
macro_f_beta_measure = np.nanmean(f_beta_measure)
macro_g_beta_measure = np.nanmean(g_beta_measure)
return macro_f_beta_measure, macro_g_beta_measure
# Compute macro AUROC and macro AUPRC.
def compute_auc(labels:np.ndarray, outputs:np.ndarray) -> Tuple[float, float]:
""" checked,
"""
num_recordings, num_classes = np.shape(labels)
# Compute and summarize the confusion matrices for each class across at distinct output values.
    auroc = np.zeros(num_classes)
#!/usr/bin/env python
import rospy
from twisted.internet import defer
from nav_msgs.msg import Odometry
from geographic_msgs.msg import GeoPath
from navigator_msgs.srv import ChooseAnimal, ChooseAnimalResponse
from mil_tools import rosmsg_to_numpy
from robot_localization.srv import FromLL
import numpy as np
import math
import tf
#Defines a service that returns the position of the acoustic beacon from odom and the range_bearing topic
class CircleAnimal():
def __init__(self):
rospy.init_node("circle_animal")
self.odom = rospy.Subscriber('/odom', Odometry, self.odometrySubscriber)
self.animals = rospy.Subscriber('/vrx/wildlife/animals/poses', GeoPath, self.animalSubscriber)
self.pub = rospy.Publisher('/trajectory_long/cmd', Odometry, queue_size=10)
self.serv = rospy.Service('/choose_animal', ChooseAnimal, self.handler)
self.from_lla = rospy.ServiceProxy("/fromLL", FromLL)
self.boat_pos = np.array([])
self.boat_ori = np.array([])
self.target_animal = ""
self.new_animal_pose = False
self.animal_pos = np.array([])
self.animal_ori = np.array([])
rospy.spin()
#Requires both odom and range_bearing to be publishing data
def odometrySubscriber(self, msg):
pos = msg.pose.pose.position
ori = msg.pose.pose.orientation
self.boat_pos = rosmsg_to_numpy(pos)
self.boat_ori = rosmsg_to_numpy(ori)
def animalSubscriber(self, msg):
#if target animal hasn't been selected, do not provide more data
if(self.target_animal == "" or self.new_animal_pose == True):
return
for animal in msg.poses:
if self.target_animal == animal.header.frame_id:
self.animal_pos, self.animal_ori = self.geo_pose_to_enu_pose(animal.pose)
self.new_animal_pose = True
def handler(self, req):
#define messages
self.target_animal = req.target_animal
res = ChooseAnimalResponse()
traj = Odometry()
rate = rospy.Rate(5)
self.new_animal_pose = False
print("Starting Service")
while (self.boat_pos.size == 0) or (self.animal_pos.size == 0):
rate.sleep()
print("Odom and Animal has been found")
#create goal pose and vector used for creating circle poses
radius = 6
granularity = 8
if self.target_animal != "crocodile":
start_circle_pos = self.closest_point_on_radius(self.boat_pos, self.animal_pos, radius)
if self.target_animal == "crocodile":
start_circle_pos = self.closest_point_on_radius(self.boat_pos, self.animal_pos, 15)
start_circle_ori = self.point_at_goal(start_circle_pos, self.animal_pos)
vect = np.array([ start_circle_pos[0] - self.animal_pos[0], start_circle_pos[1] - self.animal_pos[1]])
#go to animal
while ( (abs(self.boat_pos[0] - start_circle_pos[0]) > 0.5) ) or ( (abs(self.boat_pos[1] - start_circle_pos[1]) > 0.5) ):
self.new_animal_pose = False
#create boat trajectory
traj.header.frame_id = "enu"
traj.child_frame_id = "wamv/base_link"
traj.pose.pose.position.x = start_circle_pos[0]
traj.pose.pose.position.y = start_circle_pos[1]
traj.pose.pose.orientation.x = start_circle_ori[0]
traj.pose.pose.orientation.y = start_circle_ori[1]
traj.pose.pose.orientation.z = start_circle_ori[2]
traj.pose.pose.orientation.w = start_circle_ori[3]
self.pub.publish(traj)
while(not self.new_animal_pose):
rate.sleep()
#update trajectory point
if self.target_animal != "crocodile":
start_circle_pos = self.closest_point_on_radius(self.boat_pos, self.animal_pos, radius)
if self.target_animal == "crocodile":
start_circle_pos = self.closest_point_on_radius(self.boat_pos, self.animal_pos, 15)
start_circle_ori = self.point_at_goal(start_circle_pos, self.animal_pos)
if self.target_animal == "crocodile":
            steps = granularity // 4
else:
steps = granularity
for i in range(steps+1):
print(i)
#calculate new position by rotating vector
if req.circle_direction == "cw":
vect = self.rotate_vector(vect, math.radians(-360/granularity) )
elif req.circle_direction == "ccw":
vect = self.rotate_vector(vect, math.radians(360/granularity) )
new_pos = self.animal_pos[0:2] + vect
circle_ori = self.point_at_goal(new_pos, self.animal_pos)
while ( (abs(self.boat_pos[0] - new_pos[0]) > 0.5) ) or ( (abs(self.boat_pos[1] - new_pos[1]) > 0.5) ):
self.new_animal_pose = False
#create boat trajectory
traj.header.frame_id = "enu"
traj.child_frame_id = "wamv/base_link"
traj.pose.pose.position.x = new_pos[0]
traj.pose.pose.position.y = new_pos[1]
traj.pose.pose.orientation.x = circle_ori[0]
traj.pose.pose.orientation.y = circle_ori[1]
traj.pose.pose.orientation.z = circle_ori[2]
traj.pose.pose.orientation.w = circle_ori[3]
self.pub.publish(traj)
while(not self.new_animal_pose):
rate.sleep()
new_pos = self.animal_pos[0:2] + vect
circle_ori = self.point_at_goal(new_pos, self.animal_pos)
print("arrived")
self.target_animal = ""
self.boat_pos = np.array([])
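# The handler above relies on several geometry helpers (rotate_vector,
# closest_point_on_radius, point_at_goal, geo_pose_to_enu_pose) that are not part of
# this excerpt. Two plausible 2-D sketches are given below for orientation only; the
# node's real implementations may differ (point_at_goal, for instance, has to return
# a quaternion and is not sketched here).
def _rotate_vector_sketch(vect, angle):
    # Rotate a 2-D vector counter-clockwise by `angle` radians.
    c, s = np.cos(angle), np.sin(angle)
    return np.dot(np.array([[c, -s], [s, c]]), vect)

def _closest_point_on_radius_sketch(boat_pos, animal_pos, radius):
    # Point on the circle of `radius` centred on the animal that is closest to the boat.
    direction = boat_pos[:2] - animal_pos[:2]
    return animal_pos[:2] + radius * direction / np.linalg.norm(direction)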
import numpy as np
from ..camera import Camera
from ..photoelectrons import Photoelectrons
from .cherenkov import get_cherenkov_shower_image
__all__ = ["PhotoelectronSource"]
class PhotoelectronSource:
def __init__(self, camera, seed=None):
"""
Collection of methods which simulate illumination sources and the
detection of the photons by the photosensors.
Each method returns a :class:`Photoelectrons` object, which is a
container of 1D arrays describing the pixel, arrival time, and
reported charge of each photoelectron.
Parameters
----------
camera : Camera
Description of the camera
seed : int or tuple
Seed for the numpy random number generator.
Ensures the reproducibility of an event if you know its seed
"""
self.camera = camera
self.seed = seed
def get_nsb(self, rate):
"""
Obtain the photoelectron arrays for random Night-Sky Background light
Parameters
----------
rate : float
NSB rate in MHz (number of photoelectrons per microsecond)
This is the rate after already accounting for Photon Detection Efficiency
Returns
-------
Photoelectrons
Container for the NSB photoelectron arrays
"""
rng = np.random.default_rng(seed=self.seed)
# Number of NSB photoelectrons per pixel in this event
duration = self.camera.continuous_readout_duration
n_pixels = self.camera.mapping.n_pixels
avg_photons_per_waveform = rate * 1e6 * duration * 1e-9
n_nsb_per_pixel = rng.poisson(avg_photons_per_waveform, n_pixels)
# Pixel containing each photoelectron
pixel = np.repeat(np.arange(n_pixels), n_nsb_per_pixel)
# Uniformly distribute NSB photoelectrons in time across waveform
n_photoelectrons = pixel.size
time = rng.uniform(0, duration, size=n_photoelectrons)
# Create initial photoelectrons
charge = np.ones(n_photoelectrons)
initial_pe = Photoelectrons(pixel=pixel, time=time, charge=charge)
# Process the photoelectrons through the SPE spectrum
pe = self.camera.photoelectron_spectrum.apply(initial_pe, rng)
return pe
def get_uniform_illumination(self, time, illumination, laser_pulse_width=0):
"""
Simulate the camera being illuminated by a uniform light (which already
accounts for the focal plane curvature).
Parameters
----------
time : float
Arrival time of the light at the focal plane
illumination : float
Average illumination in number of photoelectrons
laser_pulse_width : float
Width of the pulse from the illumination source
Returns
-------
Photoelectrons
Container for the photoelectron arrays
"""
rng = np.random.default_rng(seed=self.seed)
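# Standalone sketch of the sampling performed in get_nsb() above: Poisson photoelectron
# counts per pixel from rate (MHz) x readout duration (ns), then uniform arrival times.
# The numbers (10 MHz, 1000 ns, 8 pixels) are illustrative and not tied to any camera.
import numpy as np

_rng = np.random.default_rng(seed=1)
_rate_mhz, _duration_ns, _n_pixels = 10.0, 1000.0, 8
_avg_per_pixel = _rate_mhz * 1e6 * _duration_ns * 1e-9      # = 10 expected p.e. per pixel
_counts = _rng.poisson(_avg_per_pixel, _n_pixels)           # p.e. per pixel
_pixel = np.repeat(np.arange(_n_pixels), _counts)           # pixel id of each p.e.
_time = _rng.uniform(0, _duration_ns, size=_pixel.size)     # uniform arrival times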
import pickle
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestClassifier
from sklearn import preprocessing
from datetime import datetime
from PIL import Image as im
def to_pandas(data, normalize=True, use_normalization=[]):
"""
:param data:
:param normalize:
:param use_normalization: pre-calculated normalization scaler. If this parameter is given, the function uses it to normalize.
:return:
"""
cols = [
'center1x','center1y','center2x','center2y','center3x','center3y',
'angle1','angle2','angle3',
'width1','width2','width3',
'height1','height2','height3',
'mode0','mode1',
'width_in_hand',
'height_in_hand'
]
np_data = np.zeros((len(data), len(cols)))
count = 0
for d in data:
center = np.array(d['center']).flatten()
angle = np.array(d['angle'])
width = np.array(d['width'])
height = np.array(d['height'])
# Turn two modes into one hot encoding
mode = np.array([1, 0]) if d['mode'] == 0 else np.array([0, 1])
width_in_hand = np.array(d['width_in_hand'])
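# A hedged sketch of how a single record is expected to flatten into the 19 columns
# declared above (6 centre coordinates, 3 angles, 3 widths, 3 heights, 2 one-hot mode
# flags, width_in_hand, height_in_hand); the sample values are made up.
_d_example = {'center': [[0.1, 0.2], [0.3, 0.4], [0.5, 0.6]],
              'angle': [10, 20, 30], 'width': [1, 2, 3], 'height': [4, 5, 6],
              'mode': 1, 'width_in_hand': 0.7, 'height_in_hand': 0.8}
_row = np.concatenate([np.array(_d_example['center']).flatten(),
                       _d_example['angle'], _d_example['width'], _d_example['height'],
                       [0, 1] if _d_example['mode'] == 1 else [1, 0],
                       [_d_example['width_in_hand'], _d_example['height_in_hand']]])
assert _row.size == 19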
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Imaging
improve:
reinit, uncert,
rand_norm, rand_splitnorm, rand_pointing,
slice, slice_inv_sq, crop, rebin, groupixel
smooth, artifact, mask
Jy_per_pix_to_MJy_per_sr(improve):
header, image, wave
iuncert(improve):
unc
islice(improve):
image, wave, filenames, clean
icrop(improve):
header, image, wave
irebin(improve):
header, image, wave
igroupixel(improve):
header, image, wave
ismooth(improve):
header, image, wave
imontage(improve):
reproject, reproject_mc, coadd, clean
iswarp(improve):
footprint, combine, combine_mc, clean
iconvolve(improve):
spitzer_irs, choker, do_conv, image, wave,
filenames, clean
cupid(improve):
spec_build, sav_build,
header, image, wave
wmask, wclean, interfill, hextract, hswarp,
concatenate
"""
from tqdm import tqdm, trange
import os
import math
import numpy as np
from scipy.io import readsav
from scipy.interpolate import interp1d
from astropy import wcs
from astropy.io import ascii
from astropy.table import Table
from reproject import reproject_interp, reproject_exact, reproject_adaptive
from reproject.mosaicking import reproject_and_coadd
import subprocess as SP
import warnings
# warnings.filterwarnings("ignore", category=RuntimeWarning)
# warnings.filterwarnings("ignore", message="Skipping SYSTEM_VARIABLE record")
## Local
from utilities import InputError
from inout import (fitsext, csvext, ascext, fclean,
read_fits, write_fits, savext, write_hdf5,
# read_csv, write_csv, read_ascii,
)
from arrays import listize, closest, pix2sup, sup2pix
from maths import nanavg, bsplinterpol
from astrom import fixwcs, get_pc, pix2sr
##-----------------------------------------------
##
## <improve> based tools
##
##-----------------------------------------------
class improve:
'''
IMage PROcessing VEssel
'''
def __init__(self, filIN=None, header=None, image=None, wave=None,
wmod=0, verbose=False):
'''
self: filIN, wmod, hdr, w, cdelt, pc, cd, Ndim, Nx, Ny, Nw, im, wvl
'''
## INPUTS
self.filIN = filIN
self.wmod = wmod
self.verbose = verbose
## Read image/cube
if filIN is not None:
ds = read_fits(filIN)
self.hdr = ds.header
self.im = ds.data
self.wvl = ds.wave
else:
self.hdr = header
self.im = image
self.wvl = wave
if self.im is not None:
self.Ndim = self.im.ndim
if self.Ndim==3:
self.Nw, self.Ny, self.Nx = self.im.shape
## Nw=1 patch
if self.im.shape[0]==1:
self.Ndim = 2
elif self.Ndim==2:
self.Ny, self.Nx = self.im.shape
self.Nw = None
if self.hdr is not None:
hdr = self.hdr.copy()
ws = fixwcs(header=hdr, mode='red_dim')
self.hdred = ws.header # reduced header
self.w = ws.wcs
pcdelt = get_pc(wcs=ws.wcs)
self.cdelt = pcdelt.cdelt
self.pc = pcdelt.pc
self.cd = pcdelt.cd
if verbose==True:
print('<improve> file: ', filIN)
print('Raw size (pix): {} * {}'.format(self.Nx, self.Ny))
def reinit(self, filIN=None, header=None, image=None, wave=None,
wmod=0, verbose=False):
'''
Update init variables
'''
## INPUTS
self.filIN = filIN
self.wmod = wmod
self.verbose = verbose
## Read image/cube
if filIN is not None:
ds = read_fits(filIN)
self.hdr = ds.header
self.im = ds.data
self.wvl = ds.wave
else:
self.hdr = header
self.im = image
self.wvl = wave
self.Ndim = self.im.ndim
self.hdr['NAXIS'] = self.Ndim
if self.Ndim==3:
self.Nw, self.Ny, self.Nx = self.im.shape
## Nw=1 patch
if self.im.shape[0]==1:
self.Ndim = 2
del self.hdr['NAXIS3']
else:
self.hdr['NAXIS3'] = self.Nw
elif self.Ndim==2:
self.Ny, self.Nx = self.im.shape
self.Nw = None
self.hdr['NAXIS2'] = self.Ny
self.hdr['NAXIS1'] = self.Nx
hdr = self.hdr.copy()
ws = fixwcs(header=hdr, mode='red_dim')
self.hdred = ws.header # reduced header
self.w = ws.wcs
pcdelt = get_pc(wcs=ws.wcs)
self.cdelt = pcdelt.cdelt
self.pc = pcdelt.pc
self.cd = pcdelt.cd
if verbose==True:
print('<improve> file: ', filIN)
print('Image size (pix): {} * {}'.format(self.Nx, self.Ny))
def uncert(self, filOUT=None, filUNC=None, filWGT=None, wfac=1.,
BG_image=None, BG_weight=None, zerovalue=np.nan):
'''
Estimate uncertainties from the background map
So made error map is uniform/weighted
------ INPUT ------
filOUT output uncertainty map (FITS)
filUNC input uncertainty map (FITS)
filWGT input weight map (FITS)
wfac multiplication factor for filWGT (Default: 1)
BG_image background image array used to generate unc map
BG_weight background weight array
zerovalue value used to replace zero value (Default: NaN)
------ OUTPUT ------
unc estimated unc map
'''
if filUNC is not None:
unc = read_fits(filUNC).data
else:
if BG_image is not None:
im = BG_image
Ny, Nx = BG_image.shape
else:
im = self.im
Ny = self.Ny
Nx = self.Nx
Nw = self.Nw
## sigma: std dev of (weighted) flux distribution of bg region
if BG_weight is not None:
if self.Ndim==3:
sigma = np.nanstd(im * BG_weight, axis=(1,2))
elif self.Ndim==2:
sigma = np.nanstd(im * BG_weight)
else:
if self.Ndim==3:
sigma = np.nanstd(im, axis=(1,2))
elif self.Ndim==2:
sigma = np.nanstd(im)
## wgt: weight map
if filWGT is not None:
wgt = read_fits(filWGT).data * wfac
else:
wgt = np.ones(self.im.shape) * wfac
## unc: weighted rms = root of var/wgt
if self.Ndim==3:
unc = []
for w in range(Nw):
unc.append(np.sqrt(1./wgt[w,:,:]) * sigma[w])
unc = np.array(unc)
elif self.Ndim==2:
unc = np.sqrt(1./wgt) * sigma
## Replace zero values
unc[unc==0] = zerovalue
self.unc = unc
if filOUT is not None:
write_fits(filOUT, self.hdr, unc, self.wvl, self.wmod)
return unc
def rand_norm(self, filUNC=None, unc=None, sigma=1., mu=0.):
'''
Add random N(0,1) noise
'''
if filUNC is not None:
unc = read_fits(filUNC).data
if unc is not None:
## unc should have the same dimension with im
theta = np.random.normal(mu, sigma, self.im.shape)
self.im += theta * unc
return self.im
def rand_splitnorm(self, filUNC=None, unc=None, sigma=1., mu=0.):
'''
Add random SN(0,lam,lam*tau) noise
------ INPUT ------
filUNC 2 FITS files for unc of left & right sides
unc 2 uncertainty ndarrays
------ OUTPUT ------
'''
if filUNC is not None:
unc = []
for f in filUNC:
unc.append(read_fits(f).data)
if unc is not None:
## unc[i] should have the same dimension with self.im
tau = unc[1]/unc[0]
peak = 1/(1+tau)
theta = np.random.normal(mu, sigma, self.im.shape) # ~N(0,1)
flag = np.random.random(self.im.shape) # ~U(0,1)
if self.Ndim==2:
for x in range(self.Nx):
for y in range(self.Ny):
if flag[y,x]<peak[y,x]:
self.im[y,x] += -abs(theta[y,x]) * unc[0][y,x]
else:
self.im[y,x] += abs(theta[y,x]) * unc[1][y,x]
elif self.Ndim==3:
for x in range(self.Nx):
for y in range(self.Ny):
for k in range(self.Nw):
if flag[k,y,x]<peak[k,y,x]:
self.im[k,y,x] += -abs(
theta[k,y,x]) * unc[0][k,y,x]
else:
self.im[k,y,x] += abs(
theta[k,y,x]) * unc[1][k,y,x]
return self.im
def rand_pointing(self, sigma=0, header=None, fill='med',
xscale=1, yscale=1, swarp=False, tmpdir=None):
'''
Add pointing uncertainty to WCS
------ INPUT ------
sigma pointing accuracy (arcsec)
header baseline
fill fill value of no data regions after shift
'med': axis median (default)
'avg': axis average
'near': nearest non-NaN value on the same axis
float: constant
xscale,yscale regrouped super pixel size
swarp use SWarp to perform position shifts
Default: False (not support supix)
------ OUTPUT ------
'''
if sigma>=0:
sigma /= 3600.
d_ro = abs(np.random.normal(0., sigma)) # N(0,sigma)
d_phi = np.random.random() *2. * np.pi # U(0,2*pi)
# d_ro, d_phi = 0.0002, 4.5
# print('d_ro,d_phi = ', d_ro,d_phi)
## New header/WCS
if header is None:
header = self.hdr
wcs = fixwcs(header=header, mode='red_dim').wcs
Nx = header['NAXIS1']
Ny = header['NAXIS2']
newheader = header.copy()
newheader['CRVAL1'] += d_ro * np.cos(d_phi)
newheader['CRVAL2'] += d_ro * np.sin(d_phi)
newcs = fixwcs(header=newheader, mode='red_dim').wcs
## Convert world increment to pix increment
pix = wcs.all_world2pix(newheader['CRVAL1'], newheader['CRVAL2'], 1)
d_x = pix[0] - header['CRPIX1']
d_y = pix[1] - header['CRPIX2']
# print('Near CRPIXn increments: ', d_x, d_y)
# val1 = np.array(newcs.all_pix2world(0.5, 0.5, 1))
# d_x, d_y = wcs.all_world2pix(val1[np.newaxis,:], 1)[0] - 0.5
# print('Near (1,1) increments: ', d_x, d_y)
oldimage = self.im
## Resampling
if swarp:
## Set path of tmp files (SWarp use only)
if tmpdir is None:
path_tmp = os.getcwd()+'/tmp_swp/'
else:
path_tmp = tmpdir
if not os.path.exists(path_tmp):
os.makedirs(path_tmp)
## Works but can be risky since iswarp.combine included rand_pointing...
write_fits(path_tmp+'tmp_rand_shift',
newheader, self.im, self.wvl)
swp = iswarp(refheader=self.hdr, tmpdir=path_tmp)
rep = swp.combine(path_tmp+'tmp_rand_shift',
combtype='avg', keepedge=True)
self.im = rep.data
else:
if self.Ndim==3:
Nxs = math.ceil(Nx/xscale)
cube_supx = np.zeros((self.Nw,Ny,Nxs))
frac2 = d_x / xscale
f2 = math.floor(frac2)
frac1 = 1 - frac2
for xs in range(Nxs):
if frac2>=0:
x0 = sup2pix(0, xscale, Npix=Nx, origin=0)
else:
x0 = sup2pix(Nxs-1, xscale, Npix=Nx, origin=0)
if fill=='med':
fill_value = np.nanmedian(self.im,axis=2)
elif fill=='avg':
fill_value = np.nanmean(self.im,axis=2)
elif fill=='near':
fill_value = np.nanmean(self.im[:,:,x0[0]:x0[-1]+1],axis=2)
else:
fill_value = fill
if frac2>=0:
if xs>=f2:
x1 = sup2pix(xs-f2, xscale, Npix=Nx, origin=0)
cube_supx[:,:,xs] += (f2+frac1) * np.nanmean(self.im[:,:,x1[0]:x1[-1]+1],axis=2)
if xs>f2:
x2 = sup2pix(xs-f2-1, xscale, Npix=Nx, origin=0)
cube_supx[:,:,xs] += (frac2-f2) * np.nanmean(self.im[:,:,x2[0]:x2[-1]+1],axis=2)
else:
cube_supx[:,:,xs] += (frac2-f2) * fill_value
else:
cube_supx[:,:,xs] += fill_value
# if self.verbose:
# warnings.warn('Zero appears at super x = {}'.format(xs))
else:
if xs<=Nxs+f2:
x2 = sup2pix(xs-f2-1, xscale, Npix=Nx, origin=0)
cube_supx[:,:,xs] += (frac2-f2) * np.nanmean(self.im[:,:,x2[0]:x2[-1]+1],axis=2)
if xs<Nxs+f2:
x1 = sup2pix(xs-f2, xscale, Npix=Nx, origin=0)
cube_supx[:,:,xs] += (f2+frac1) * np.nanmean(self.im[:,:,x1[0]:x1[-1]+1],axis=2)
else:
cube_supx[:,:,xs] += (f2+frac1) * fill_value
else:
cube_supx[:,:,xs] += fill_value
# if self.verbose:
# warnings.warn('Zero appears at super x = {}'.format(xs))
Nys = math.ceil(Ny/yscale)
supcube = np.zeros((self.Nw,Nys,Nxs))
frac2 = d_y / yscale
f2 = math.floor(frac2)
frac1 = 1 - frac2
for ys in range(Nys):
if frac2>=0:
y0 = sup2pix(0, yscale, Npix=Ny, origin=0)
else:
y0 = sup2pix(Nys-1, yscale, Npix=Ny, origin=0)
if fill=='med':
fill_value = np.nanmedian(cube_supx,axis=1)
elif fill=='avg':
fill_value = np.nanmean(cube_supx,axis=1)
elif fill=='near':
fill_value = np.nanmean(cube_supx[:,y0[0]:y0[-1]+1,:],axis=1)
else:
fill_value = fill
if frac2>=0:
if ys>=f2:
y1 = sup2pix(ys-f2, yscale, Npix=Ny, origin=0)
supcube[:,ys,:] += (f2+frac1) * np.nanmean(cube_supx[:,y1[0]:y1[-1]+1,:],axis=1)
if ys>f2:
y2 = sup2pix(ys-f2-1, yscale, Npix=Ny, origin=0)
supcube[:,ys,:] += (frac2-f2) * np.nanmean(cube_supx[:,y2[0]:y2[-1]+1,:],axis=1)
else:
supcube[:,ys,:] += (frac2-f2) * fill_value
else:
supcube[:,ys,:] += fill_value
# if self.verbose:
# warnings.warn('Zero appears at super y = {}'.format(ys))
else:
if ys<=Nys+f2:
y2 = sup2pix(ys-f2-1, yscale, Npix=Ny, origin=0)
supcube[:,ys,:] += (frac2-f2) * np.nanmean(cube_supx[:,y2[0]:y2[-1]+1,:],axis=1)
if ys<Nys+f2:
y1 = sup2pix(ys-f2, yscale, Npix=Ny, origin=0)
supcube[:,ys,:] += (f2+frac1) * np.nanmean(cube_supx[:,y1[0]:y1[-1]+1,:],axis=1)
else:
supcube[:,ys,:] += (f2+frac1) * fill_value
else:
supcube[:,ys,:] += fill_value
# if self.verbose:
# warnings.warn('Zero appears at super y = {}'.format(ys))
for x in range(Nx):
for y in range(Ny):
xs = pix2sup(x, xscale, origin=0)
ys = pix2sup(y, yscale, origin=0)
self.im[:,y,x] = supcube[:,ys,xs]
elif self.Ndim==2:
Nxs = math.ceil(Nx/xscale)
cube_supx = np.zeros((Ny,Nxs))
frac2 = d_x / xscale
f2 = math.floor(frac2)
frac1 = 1 - frac2
for xs in range(Nxs):
if frac2>=0:
x0 = sup2pix(0, xscale, Npix=Nx, origin=0)
else:
x0 = sup2pix(Nxs-1, xscale, Npix=Nx, origin=0)
if fill=='med':
fill_value = np.nanmedian(self.im,axis=1)
elif fill=='avg':
fill_value = np.nanmean(self.im,axis=1)
elif fill=='near':
fill_value = np.nanmean(self.im[:,x0[0]:x0[-1]+1],axis=1)
else:
fill_value = fill
if frac2>=0:
if xs>=f2:
x1 = sup2pix(xs-f2, xscale, Npix=Nx, origin=0)
cube_supx[:,xs] += (f2+frac1) * np.nanmean(self.im[:,x1[0]:x1[-1]+1],axis=1)
if xs>f2:
x2 = sup2pix(xs-f2-1, xscale, Npix=Nx, origin=0)
cube_supx[:,xs] += (frac2-f2) * np.nanmean(self.im[:,x2[0]:x2[-1]+1],axis=1)
else:
cube_supx[:,xs] += (frac2-f2) * fill_value
else:
cube_supx[:,xs] += fill_value
# if self.verbose:
# warnings.warn('Zero appears at super x = {}'.format(xs))
else:
if xs<=Nxs+f2:
x2 = sup2pix(xs-f2-1, xscale, Npix=Nx, origin=0)
cube_supx[:,xs] += (frac2-f2) * np.nanmean(self.im[:,x2[0]:x2[-1]+1],axis=1)
if xs<Nxs+f2:
x1 = sup2pix(xs-f2, xscale, Npix=Nx, origin=0)
cube_supx[:,xs] += (f2+frac1) * np.nanmean(self.im[:,x1[0]:x1[-1]+1],axis=1)
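# One-dimensional illustration of the sub-pixel shift weighting used by rand_pointing()
# above (for xscale=1): the shift d is split into its integer part f2 and fractional
# part, and each output pixel is a weighted mean of the two source pixels it straddles,
# with the 'med' fill used where no source pixel exists. Toy data, not from any FITS file.
import math
import numpy as np

_signal = np.arange(10, dtype=float)
_d = 1.3                                        # shift in pixels
_f2 = math.floor(_d)
_w_near, _w_far = 1 - (_d - _f2), (_d - _f2)    # (f2+frac1) and (frac2-f2) for xscale=1
_fill = np.nanmedian(_signal)
_shifted = np.full_like(_signal, _fill)
for _xs in range(len(_signal)):
    if _xs - _f2 >= 0:
        _shifted[_xs] = _w_near * _signal[_xs - _f2]
        _shifted[_xs] += _w_far * (_signal[_xs - _f2 - 1] if _xs - _f2 - 1 >= 0 else _fill)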
import os
import json
import torch
import random
import numpy as np
from torch.utils.data import Dataset
class VideoDataset(Dataset):
def get_cms_vocab_size(self):
return len(self.get_cms_vocab())
def get_cap_vocab_size(self):
return len(self.get_cap_vocab())
def get_cms_vocab(self):
return self.cms_ix_to_word
def get_cap_vocab(self):
return self.cap_ix_to_word
def get_seq_length(self):
return self.seq_length
def __init__(self, opt, mode='train'):
super(VideoDataset, self).__init__()
self.mode = mode
self.captions = json.load(open(opt['caption_json']))
cms_info = json.load(open(opt['info_json']))
self.cms_ix_to_word = cms_info['ix_to_word']
self.cms_word_to_ix = cms_info['word_to_ix']
self.splits = cms_info['videos']
# Load caption dictionary
cap_info = json.load(open(opt['cap_info_json']))
self.cap_ix_to_word = cap_info['ix_to_word']
self.cap_word_to_ix = cap_info['word_to_ix']
print('Caption vocab size is ', len(self.cap_ix_to_word))
print('CMS vocab size is ', len(self.cms_ix_to_word))
print('number of train videos: ', len(self.splits['train']))
print('number of test videos: ', len(self.splits['test']))
print('number of val videos: ', len(self.splits['val']))
self.feats_dir = opt['feats_dir']
print('load feats from %s' % self.feats_dir)
self.cap_max_len = opt['cap_max_len']
self.int_max_len = opt['int_max_len']
self.eff_max_len = opt['eff_max_len']
self.att_max_len = opt['att_max_len']
print('max sequence length of caption is', self.cap_max_len)
print('max sequence length of intention is', self.int_max_len)
print('max sequence length of effect is', self.eff_max_len)
print('max sequence length of attribute is', self.att_max_len)
def __getitem__(self, ix):
if self.mode == 'train':
ix = random.choice(list(self.captions.keys()))  # sample a random video id for training; .keys() alone cannot index a feature file
elif self.mode == 'test':
ix = list(self.captions.keys())[ix]
fc_feat = []
for dir in self.feats_dir:
fc_feat.append(np.load(os.path.join(dir, ix+'.npy')))
fc_feat = np.concatenate(fc_feat, axis=1)
cap_mask = np.zeros(self.cap_max_len)
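# __getitem__ above is cut off right after allocating cap_mask. A plausible continuation,
# based on how caption datasets are commonly padded, would fill a fixed-length index
# array and a matching 0/1 mask; the token values and variable names below are
# illustrative assumptions, not taken from the original file.
_cap_max_len = 28
_tokens = [3, 17, 42, 9]                  # hypothetical word indices for one caption
_cap_gts = np.zeros(_cap_max_len, dtype=np.int64)
_cap_mask = np.zeros(_cap_max_len)
_cap_gts[:len(_tokens)] = _tokens
_cap_mask[:len(_tokens)] = 1              # 1 marks real tokens, 0 marks padding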
import numpy as np
from zest import zest
from plaster.run.train_rf import train_rf_worker as worker
from plaster.tools.log.log import debug
def zest_train_rf():
def it_subsamples():
"""
It should be able to extract n_subsample rows (2)
from each peptide, where each peptide is originally
sampled with n_samples (8) rows.
"""
n_samples = 8
n_peptides = 5
n_cycles = 3
y0 = np.repeat(np.arange(n_peptides), (n_samples,))
X0 = np.random.uniform(size=(n_samples * n_peptides, n_cycles))
n_subsample = 2
X1, y1 = worker._subsample(n_subsample, X0, y0)
assert np.all(y1 == [0, 0, 1, 1, 2, 2, 3, 3, 4, 4])
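# The test above only pins down the behaviour of worker._subsample (keep n_subsample
# rows per peptide label). A minimal sketch with that behaviour is given below for
# reference; the real implementation in train_rf_worker may differ (e.g. it could
# sample randomly rather than take the first rows).
def _subsample_sketch(n_subsample, X, y):
    keep = np.concatenate([np.where(y == label)[0][:n_subsample]
                           for label in np.unique(y)])
    return X[keep], y[keep]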
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
A = pd.read_csv('Hg.xlsx - Hoja1.csv')
Data = [A['Voltage U1'], A['Corriente IA'], A['Corriente IA.1'], A['Corriente IA.2'], A['Corriente IA.3'], A['Corriente IA.4'], A['Corriente IA.5'], A['Corriente IA.6'], A['Corriente IA.7'], A['Corriente IA.8'], A['Corriente IA.9'], A['Corriente IA.10'], A['Corriente IA.11']]
def MaximosHg(n, Data, numberData):
Imax = []
V = []
IA = np.zeros((8, n))
for j in range(8):
for i in range(n):
IA[j][i] = float(Data[numberData][i + 800 + 200*j])
Imax.append(max(IA[j]))
V.append(float(Data[0][list(Data[numberData]).index(str(max(IA[j])))]))
return V, Imax
I = np.zeros((len(Data) - 1, 8))
V = np.zeros((len(Data) - 1, 8))
for i in range(len(Data) - 1):
vmax, imax = MaximosHg(200, Data, i + 1)
I[i] = imax
V[i] = vmax
for j in range(len(Data)):
for i in range(3):
Data[j].pop(i)
Letras = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L']
Vdif = np.zeros((len(Data) - 1, 7))
for i in range(len(Data) - 1):
for j in range(len(V[1]) - 1):
Vdif[i][j] = np.around(np.subtract(V[i][j + 1], V[i][j]))
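# This looks like a Franck-Hertz style analysis: the spacings between successive current
# maxima estimate the excitation energy (for mercury the spacing should come out near
# 4.9 V). A natural follow-up, assuming Vdif has been filled as above:
mean_spacing_per_run = np.nanmean(Vdif, axis=1)
overall_spacing = np.nanmean(Vdif)
print('mean peak spacing per run (V):', mean_spacing_per_run)
print('overall mean peak spacing (V):', overall_spacing)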
import cv2
import numpy as np
capture = cv2.VideoCapture(0)
background_subtractor = cv2.BackgroundSubtractorMOG()
intruder_classifier = cv2.CascadeClassifier("haarcascade_fullbody.xml")
minimum_area = 500
cv2.namedWindow('Intruder Detection Methods', cv2.WINDOW_AUTOSIZE)
def group_rectangles(rectangles):
x_min = 100000
x_max = 0
y_min = 100000
y_max = 0
for (x, y, w, h) in rectangles:
if x < x_min:
x_min = x
if y < y_min:
y_min = y
if (x + w) > x_max:
x_max = (x + w)
if (y + h) > y_max:
y_max = (y + h)
x = x_min
y = y_min
w = x_max - x_min
h = y_max - y_min
return x, y, w, h
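# Quick sanity check of group_rectangles(): it returns the single (x, y, w, h) box
# that encloses all of the input rectangles. Toy values only.
assert group_rectangles([(10, 10, 20, 20), (50, 40, 10, 10)]) == (10, 10, 50, 40)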
while True:
ret, frame = capture.read()
height, width = frame.shape[:2]
frame = cv2.resize(frame, (width / 3, height / 3), interpolation=cv2.INTER_AREA)
combined = frame.copy()
cascade_image = frame.copy()
foreground_mask = background_subtractor.apply(frame)
(contours, _) = cv2.findContours(foreground_mask.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
large_contours = []
for contour in contours:
if cv2.contourArea(contour) > minimum_area:
large_contours.append(cv2.boundingRect(contour))
foreground_mask_rgb = cv2.cvtColor(foreground_mask, cv2.COLOR_GRAY2RGB)
if len(large_contours) > 0:
(x, y, w, h) = group_rectangles(large_contours)
cv2.rectangle(foreground_mask_rgb, (x, y), (x + w, y + h), (0, 255, 0), 2)
cv2.rectangle(combined, (x, y), (x + w, y + h), (0, 255, 0), 2)
intruders = intruder_classifier.detectMultiScale(cascade_image)
for (x, y, w, h) in intruders:
cv2.rectangle(cascade_image, (x, y), (x + w, y + h), (0, 0, 255), 2)
cv2.rectangle(combined, (x, y), (x + w, y + h), (0, 0, 255), 2)
font = cv2.FONT_HERSHEY_PLAIN
cv2.putText(frame, 'Input', (10, 30), font, 2, (0, 255, 0), 1)
cv2.putText(foreground_mask_rgb, 'Background Subtraction', (10, 30), font, 2, (0, 255, 0), 1)
cv2.putText(cascade_image, 'Haar Cascade Classifier', (10, 30), font, 2, (0, 255, 0), 1)
cv2.putText(combined, 'Output', (10, 30), font, 2, (0, 255, 0), 1)
top_row = np.concatenate((frame, foreground_mask_rgb), axis=1)
bottom_row = np.concatenate((cascade_image, combined), axis=1)
# pylint: disable=redefined-outer-name
from copy import deepcopy
import numpy as np
from numpy.testing import assert_almost_equal, assert_array_almost_equal
import pytest
from scipy.stats import linregress
from xarray import Dataset, DataArray
from ..data import load_arviz_data, from_dict, convert_to_inference_data, concat
from ..stats import compare, hpd, loo, r2_score, waic, psislw, summary
from ..stats.stats import _gpinv
@pytest.fixture(scope="session")
def centered_eight():
centered_eight = load_arviz_data("centered_eight")
return centered_eight
@pytest.fixture(scope="session")
def non_centered_eight():
non_centered_eight = load_arviz_data("non_centered_eight")
return non_centered_eight
def test_hpd():
normal_sample = np.random.randn(5000000)
interval = hpd(normal_sample)
assert_array_almost_equal(interval, [-1.88, 1.88], 2)
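def test_hpd_expected_width_sanity():
    # Added sanity note (not part of the original suite): the +/-1.88 expected in
    # test_hpd matches a 94% credible interval, which appears to be arviz's default;
    # the central 94% of a standard normal runs from the 3rd to the 97th percentile.
    from scipy.stats import norm
    assert_almost_equal(norm.ppf([0.03, 0.97]), [-1.881, 1.881], 3)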
def test_hpd_bad_ci():
normal_sample = np.random.randn(10)
with pytest.raises(ValueError):
hpd(normal_sample, credible_interval=2)
def test_r2_score():
x = np.linspace(0, 1, 100)
y = np.random.normal(x, 1)
res = linregress(x, y)
assert_almost_equal(res.rvalue ** 2, r2_score(y, res.intercept + res.slope * x).r2, 2)
def test_r2_score_multivariate():
x = np.linspace(0, 1, 100)
y = np.random.normal(x, 1)
res = linregress(x, y)
y_multivariate = np.c_[y, y]
y_multivariate_pred = np.c_[res.intercept + res.slope * x, res.intercept + res.slope * x]
assert not np.isnan(r2_score(y_multivariate, y_multivariate_pred).r2)
@pytest.mark.parametrize("method", ["stacking", "BB-pseudo-BMA", "pseudo-BMA"])
def test_compare_same(centered_eight, method):
data_dict = {"first": centered_eight, "second": centered_eight}
weight = compare(data_dict, method=method)["weight"]
assert_almost_equal(weight[0], weight[1])
assert_almost_equal(np.sum(weight), 1.0)
def test_compare_unknown_ic_and_method(centered_eight, non_centered_eight):
model_dict = {"centered": centered_eight, "non_centered": non_centered_eight}
with pytest.raises(NotImplementedError):
compare(model_dict, ic="Unknown", method="stacking")
with pytest.raises(ValueError):
compare(model_dict, ic="loo", method="Unknown")
@pytest.mark.parametrize("ic", ["waic", "loo"])
@pytest.mark.parametrize("method", ["stacking", "BB-pseudo-BMA", "pseudo-BMA"])
@pytest.mark.parametrize("scale", ["deviance", "log", "negative_log"])
def test_compare_different(centered_eight, non_centered_eight, ic, method, scale):
model_dict = {"centered": centered_eight, "non_centered": non_centered_eight}
weight = compare(model_dict, ic=ic, method=method, scale=scale)["weight"]
assert weight["non_centered"] >= weight["centered"]
assert_almost_equal(np.sum(weight), 1.0)
def test_compare_different_size(centered_eight, non_centered_eight):
centered_eight = deepcopy(centered_eight)
centered_eight.posterior = centered_eight.posterior.drop("Choate", "school")
centered_eight.sample_stats = centered_eight.sample_stats.drop("Choate", "school")
centered_eight.posterior_predictive = centered_eight.posterior_predictive.drop(
"Choate", "school"
)
centered_eight.prior = centered_eight.prior.drop("Choate", "school")
centered_eight.observed_data = centered_eight.observed_data.drop("Choate", "school")
model_dict = {"centered": centered_eight, "non_centered": non_centered_eight}
with pytest.raises(ValueError):
compare(model_dict, ic="waic", method="stacking")
@pytest.mark.parametrize("var_names_expected", ((None, 10), ("mu", 1), (["mu", "tau"], 2)))
def test_summary_var_names(var_names_expected):
var_names, expected = var_names_expected
centered = load_arviz_data("centered_eight")
summary_df = summary(centered, var_names=var_names)
assert len(summary_df.index) == expected
@pytest.mark.parametrize("include_circ", [True, False])
def test_summary_include_circ(centered_eight, include_circ):
assert summary(centered_eight, include_circ=include_circ) is not None
@pytest.mark.parametrize("fmt", ["wide", "long", "xarray"])
def test_summary_fmt(centered_eight, fmt):
assert summary(centered_eight, fmt=fmt) is not None
@pytest.mark.parametrize("order", ["C", "F"])
def test_summary_unpack_order(order):
data = from_dict({"a": np.random.randn(4, 100, 4, 5, 3)})
az_summary = summary(data, order=order, fmt="wide")
assert az_summary is not None
if order != "F":
first_index = 4
second_index = 5
third_index = 3
else:
first_index = 3
second_index = 5
third_index = 4
column_order = []
for idx1 in range(first_index):
for idx2 in range(second_index):
for idx3 in range(third_index):
if order != "F":
column_order.append("a[{},{},{}]".format(idx1, idx2, idx3))
else:
column_order.append("a[{},{},{}]".format(idx3, idx2, idx1))
for col1, col2 in zip(list(az_summary.index), column_order):
assert col1 == col2
@pytest.mark.parametrize("origin", [0, 1, 2, 3])
def test_summary_index_origin(origin):
data = from_dict({"a": np.random.randn(2, 50, 10)})
az_summary = summary(data, index_origin=origin, fmt="wide")
assert az_summary is not None
for i, col in enumerate(list(az_summary.index)):
assert col == "a[{}]".format(i + origin)
@pytest.mark.parametrize(
"stat_funcs", [[np.var], {"var": np.var, "var2": lambda x: np.var(x) ** 2}]
)
def test_summary_stat_func(centered_eight, stat_funcs):
arviz_summary = summary(centered_eight, stat_funcs=stat_funcs)
assert arviz_summary is not None
assert hasattr(arviz_summary, "var")
def test_summary_nan(centered_eight):
centered_eight = deepcopy(centered_eight)
centered_eight.posterior.theta[:, :, 0] = np.nan
summary_xarray = summary(centered_eight)
assert summary_xarray is not None
assert summary_xarray.loc["theta[0]"].isnull().all()
assert (
summary_xarray.loc[[ix for ix in summary_xarray.index if ix != "theta[0]"]]
.notnull()
.all()
.all()
)
@pytest.mark.parametrize("fmt", [1, "bad_fmt"])
def test_summary_bad_fmt(centered_eight, fmt):
with pytest.raises(TypeError):
summary(centered_eight, fmt=fmt)
@pytest.mark.parametrize("order", [1, "bad_order"])
def test_summary_bad_unpack_order(centered_eight, order):
with pytest.raises(TypeError):
summary(centered_eight, order=order)
@pytest.mark.parametrize("scale", ["deviance", "log", "negative_log"])
def test_waic(centered_eight, scale):
"""Test widely available information criterion calculation"""
assert waic(centered_eight, scale=scale) is not None
assert waic(centered_eight, pointwise=True, scale=scale) is not None
def test_waic_bad(centered_eight):
"""Test widely available information criterion calculation"""
centered_eight = deepcopy(centered_eight)
del centered_eight.sample_stats["log_likelihood"]
with pytest.raises(TypeError):
waic(centered_eight)
del centered_eight.sample_stats
with pytest.raises(TypeError):
waic(centered_eight)
def test_waic_bad_scale(centered_eight):
"""Test widely available information criterion calculation with bad scale."""
with pytest.raises(TypeError):
waic(centered_eight, scale="bad_value")
def test_waic_warning(centered_eight):
centered_eight = deepcopy(centered_eight)
centered_eight.sample_stats["log_likelihood"][:, :250, 1] = 10
with pytest.warns(UserWarning):
assert waic(centered_eight, pointwise=True) is not None
# this should throw a warning, but due to numerical issues it fails
centered_eight.sample_stats["log_likelihood"][:, :, :] = 0
with pytest.warns(UserWarning):
assert waic(centered_eight, pointwise=True) is not None
def test_loo(centered_eight):
assert loo(centered_eight) is not None
def test_loo_one_chain(centered_eight):
centered_eight = deepcopy(centered_eight)
centered_eight.posterior = centered_eight.posterior.drop([1, 2, 3], "chain")
centered_eight.sample_stats = centered_eight.sample_stats.drop([1, 2, 3], "chain")
assert loo(centered_eight) is not None
@pytest.mark.parametrize("scale", ["deviance", "log", "negative_log"])
def test_loo_pointwise(centered_eight, scale):
"""Test pointwise loo with different scales."""
loo_results = loo(centered_eight, scale=scale, pointwise=True)
assert loo_results is not None
assert hasattr(loo_results, "loo_scale")
assert hasattr(loo_results, "pareto_k")
assert hasattr(loo_results, "loo_i")
def test_loo_bad(centered_eight):
with pytest.raises(TypeError):
loo(np.random.randn(2, 10))
centered_eight = deepcopy(centered_eight)
del centered_eight.sample_stats["log_likelihood"]
with pytest.raises(TypeError):
loo(centered_eight)
def test_loo_bad_scale(centered_eight):
"""Test loo with bad scale value."""
with pytest.raises(TypeError):
loo(centered_eight, scale="bad_scale")
def test_loo_warning(centered_eight):
centered_eight = deepcopy(centered_eight)
# make one of the khats infinity
centered_eight.sample_stats["log_likelihood"][:, :, 1] = 10
with pytest.warns(UserWarning):
assert loo(centered_eight, pointwise=True) is not None
# make all of the khats infinity
centered_eight.sample_stats["log_likelihood"][:, :, :] = 0
with pytest.warns(UserWarning):
assert loo(centered_eight, pointwise=True) is not None
def test_psislw():
data = load_arviz_data("centered_eight")
pareto_k = loo(data, pointwise=True, reff=0.7)["pareto_k"]
log_likelihood = data.sample_stats.log_likelihood # pylint: disable=no-member
n_samples = log_likelihood.chain.size * log_likelihood.draw.size
new_shape = (n_samples,) + log_likelihood.shape[2:]
log_likelihood = log_likelihood.values.reshape(*new_shape)
assert_almost_equal(pareto_k, psislw(-log_likelihood, 0.7)[1])
@pytest.mark.parametrize("probs", [True, False])
@pytest.mark.parametrize("kappa", [-1, -0.5, 1e-30, 0.5, 1])
@pytest.mark.parametrize("sigma", [0, 2])
def test_gpinv(probs, kappa, sigma):
if probs:
probs = np.array([0.1, 0.1, 0.1, 0.2, 0.3])
else:
probs = np.array([-0.1, 0.1, 0.1, 0.2, 0.3])
assert len(_gpinv(probs, kappa, sigma)) == len(probs)
@pytest.mark.parametrize("func", [loo, waic])
def test_multidimensional_log_likelihood(func):
np.random.seed(17)
llm = np.random.rand(4, 23, 15, 2)
ll1 = llm.reshape(4, 23, 15 * 2)
statsm = Dataset(dict(log_likelihood=DataArray(llm, dims=["chain", "draw", "a", "b"])))
stats1 = Dataset(dict(log_likelihood=DataArray(ll1, dims=["chain", "draw", "v"])))
post = Dataset(dict(mu=DataArray(np.random.rand(4, 23, 2), dims=["chain", "draw", "v"])))
dsm = convert_to_inference_data(statsm, group="sample_stats")
ds1 = convert_to_inference_data(stats1, group="sample_stats")
dsp = convert_to_inference_data(post, group="posterior")
dsm = concat(dsp, dsm)
ds1 = concat(dsp, ds1)
frm = func(dsm)
fr1 = func(ds1)
assert (fr1 == frm).all()
assert_array_almost_equal(frm[:4], fr1[:4])
# -*- coding: utf-8 -*-
"""
Created on Tue Feb 28 14:54:18 2017
@author: 19514733
"""
from __future__ import absolute_import, division, print_function
import numpy as np
from cvxopt import matrix as cvxmat, solvers
from sklearn.utils.multiclass import check_classification_targets
from sklearn.utils import column_or_1d
import monoboost
#__all__ = [
# "Scale",
# "MonoComparator",
# "MonoLearner",
# "MonoBoost",
# "MonoBoostEnsemble",
# "apply_rules_c"]
TOL = 0 # 1e-55
class Scale():
"""Performs scaling of linear variables according to Friedman et al. 2005
Sec 5
Each variable is first Winsorized l->l*, then standardised as 0.4 x l* /
std(l*).
Warning: this class should not be used directly.
"""
def __init__(self, trim_quantile=0.0):
self.trim_quantile = trim_quantile
self.scale_multipliers = None
self.winsor_lims = None
def train(self, X):
# get winsor limits
self.winsor_lims = np.ones([2, X.shape[1]]) * np.inf
self.winsor_lims[0, :] = -np.inf
if self.trim_quantile > 0:
for i_col in np.arange(X.shape[1]):
lower = np.percentile(X[:, i_col], self.trim_quantile * 100)
upper = np.percentile(
X[:, i_col], 100 - self.trim_quantile * 100)
self.winsor_lims[:, i_col] = [lower, upper]
# get multipliers
scale_multipliers = np.ones(X.shape[1])
for i_col in np.arange(X.shape[1]):
num_uniq_vals = len(np.unique(X[:, i_col]))
# don't scale binary variables which are effectively already rules:
if num_uniq_vals > 2:
X_col_winsorised = X[:, i_col].copy()
X_col_winsorised[X_col_winsorised <
self.winsor_lims[0, i_col]
] = self.winsor_lims[0, i_col]
X_col_winsorised[X_col_winsorised >
self.winsor_lims[1, i_col]
] = self.winsor_lims[1, i_col]
scale_multipliers[i_col] = 1.0 / np.std(X_col_winsorised)
self.scale_multipliers = scale_multipliers
def scale(self, X):
return X * self.scale_multipliers
def unscale(self, X):
return X / self.scale_multipliers
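def _scale_roundtrip_example():
    # Added usage sketch (not part of the original module): train Scale on a toy
    # matrix and check that scale()/unscale() round-trips to the original values.
    X_demo = np.random.normal(size=(100, 3))
    sc = Scale(trim_quantile=0.1)
    sc.train(X_demo)
    assert np.allclose(sc.unscale(sc.scale(X_demo)), X_demo)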
class MonoComparator():
def __init__(self, n_feats, incr_feats, decr_feats, nmt_hyperplane=None):
self.incr_feats = np.asarray(incr_feats)
self.decr_feats = np.asarray(decr_feats)
self.nmt_hyperplane = nmt_hyperplane
self.mt_feats = np.asarray(list(incr_feats) + list(decr_feats))
self.nmt_feats = np.asarray(
[j for j in np.arange(n_feats) + 1 if j not in self.mt_feats])
self.n_feats = n_feats
def compare(self, x1_in, x2_in, check_nmt_feats=True, strict=False):
# returns: -1 if decreasing, 0 if identical, +1 if increasing, -99 if
# incomparable
if len(self.mt_feats) == 0:
return -99
elif len(x1_in.shape) > 1:
x1 = np.ravel(x1_in)
x2 = np.ravel(x2_in)
else:
x1 = x1_in.copy()
x2 = x2_in.copy()
# check for identical
if np.array_equal(x1, x2):
return 0
# reverse polarity of decreasing features
for dec_feat in self.decr_feats:
x1[dec_feat - 1] = -1 * x1[dec_feat - 1]
x2[dec_feat - 1] = -1 * x2[dec_feat - 1]
# check mt feats all increasing (or decreasing)
mt_feats_difference = np.zeros(self.n_feats)
if len(self.mt_feats) > 0:
feats_indx = self.mt_feats - 1
mt_feats_difference[feats_indx] = x2[feats_indx] - x1[feats_indx]
mt_feats_same = np.sum(mt_feats_difference[self.mt_feats - 1] == 0)
if strict:
mt_feats_incr = np.sum(mt_feats_difference[self.mt_feats - 1] > 0)
mt_feats_decr = np.sum(mt_feats_difference[self.mt_feats - 1] < 0)
else:
mt_feats_incr = np.sum(mt_feats_difference[self.mt_feats - 1] >= 0)
mt_feats_decr = np.sum(mt_feats_difference[self.mt_feats - 1] <= 0)
if mt_feats_same == len(self.mt_feats):
comp = 0
elif mt_feats_incr == len(self.mt_feats): # increasing
comp = +1
elif mt_feats_decr == len(self.mt_feats): # decreasing
comp = -1
else: # incomparable
comp = -99
# short exit if available
if comp == -99 or comp == 0:
return -99
# if still going, check the nmt feats against the weakened hyperplane
if len(
self.nmt_feats) == 0 or not check_nmt_feats or (
self.nmt_hyperplane is None):
nmt_feat_compliance = True
else:
x_diff = np.abs(x2 - x1)
dot_prod = np.dot(self.nmt_hyperplane, x_diff)
nmt_feat_compliance = dot_prod >= -TOL
# return result
if nmt_feat_compliance:
return comp
else: # incomparable due to nmt features
return -99
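def _comparator_example():
    # Added illustration (not part of the original module) of MonoComparator.compare()
    # with two increasing features and no non-monotone features, so no hyperplane check
    # is involved: +1 means x2 dominates x1, -1 the reverse, 0 identical, -99 incomparable.
    cmp2d = MonoComparator(n_feats=2, incr_feats=[1, 2], decr_feats=[])
    assert cmp2d.compare(np.array([0., 0.]), np.array([1., 1.])) == 1
    assert cmp2d.compare(np.array([0., 0.]), np.array([1., -1.])) == -99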
class MonoLearner():
def __init__(
self,
n_feats,
incr_feats,
decr_feats,
coefs=None,
dirn=None,
x_base=None,
nmt_hyperplane=None,
learner_type='two-sided',
loss='rmse'):
self.incr_feats = np.asarray(incr_feats)
self.decr_feats = np.asarray(decr_feats)
self.coefs = coefs
self.dirn = dirn
self.x_base = x_base
self.intercept_=0.
self.mt_feats = np.asarray(list(incr_feats) + list(decr_feats))
self.nmt_feats = np.asarray(
[j for j in np.arange(n_feats) + 1 if j not in self.mt_feats])
self.mt_feat_types=np.zeros(n_feats,dtype=np.float64)
if len(self.incr_feats)>0:
self.mt_feat_types[self.incr_feats-1]=+1.
if len(self.decr_feats)>0:
self.mt_feat_types[self.decr_feats-1]=-1.
self.comparator = MonoComparator(
n_feats, incr_feats, decr_feats, nmt_hyperplane)
self.nmt_hyperplane = nmt_hyperplane
self.learner_type_code = 0 if learner_type == 'two-sided' else 1
# note loss only affects the calculation of the coefficients - all
# splits are done RMSE
self.loss = loss
@property
def nmt_hyperplane(self):
"""Hyperplane over the non-monotone features, delegated to the comparator."""
return self.comparator.nmt_hyperplane
@nmt_hyperplane.setter
def nmt_hyperplane(self, value):
self.comparator.nmt_hyperplane = value
def get_comparable_points(self,X):
intercepts=np.asarray([self.intercept_],dtype=np.float64)
if len(X.shape)<2:
X_=np.asarray(X.reshape([1,-1]),dtype=np.float64)
else:
X_=np.asarray(X,dtype=np.float64)
X_base_pts_=np.asarray(self.x_base.reshape([1,-1]),dtype=np.float64)
nmt_hps_=np.asarray(self.nmt_hyperplane.reshape([1,-1]),dtype=np.float64)
X_comp_pts=np.zeros([X_.shape[0],X_base_pts_.shape[0]],dtype=np.int32)
monoboost.apply_rules_c(X_,
X_base_pts_,
nmt_hps_,
intercepts,
self.mt_feat_types,
np.float64(self.dirn),
0,
X_comp_pts)
return X_comp_pts[:,0]==1
def decision_function(self, X_pred):
if len(X_pred.shape)==1:
X_pred_=np.asarray(X_pred.reshape([1,-1]),dtype=np.float64)
else:
X_pred_=np.asarray(X_pred,dtype=np.float64)
dirn=self.dirn
X_rule_transform_=np.zeros([X_pred.shape[0],1],dtype=np.int32)
monoboost.apply_rules_c(X_pred_,
np.asarray(self.x_base.reshape([1,-1]),dtype=np.float64),
np.asarray(self.nmt_hyperplane.reshape([1,-1]),dtype=np.float64),
np.asarray([self.intercept_],dtype=np.float64),
self.mt_feat_types,
np.float64(dirn),
0,
X_rule_transform_)
X_rule_transform_=X_rule_transform_.ravel()
is_comp=X_rule_transform_
y_pred = np.zeros(X_pred_.shape[0])
y_pred[X_rule_transform_==1]=self.coefs[1]
y_pred[X_rule_transform_==0]=self.coefs[0]
return [y_pred,is_comp]
# def predict_proba(self, X_pred):
# if len(X_pred.shape) == 1:
# X_pred_ = np.zeros([1, len(X_pred)])
# X_pred_[0, :] = X_pred
# else:
# X_pred_ = X_pred
#
# y_pred = np.zeros(X_pred_.shape[0])
# is_comp = np.zeros(X_pred_.shape[0])
# for i in np.arange(len(y_pred)):
# comp = self.comparator.compare(self.x_base, X_pred_[i, :])
# is_comp[i] = 1 if comp == 0 or comp == self.dirn else 0
# y_pred[i] = self.coefs[1] if (
# comp == 0 or comp == self.dirn) else self.coefs[0]
# return [y_pred, is_comp]
def fit_from_cache(
self,
cached_local_hp_data,
X,
y,
res_train,
curr_totals,
hp_reg=None,
hp_reg_c=None):
best = [1e99, -1, -99, -1, [-1, -1]] # err, base, dirn, hp, coefs
for i in np.arange(X.shape[0]):
data_i = cached_local_hp_data[i]
for dirn in [-1, +1]:
data_dirn = data_i[dirn]
vs = data_dirn['vs']
hps = data_dirn['hps']
comp_idxs = data_dirn['comp_idxs']
for i_v in np.arange(len(vs)):
comp_pts = comp_idxs[i_v]
incomp_pts = np.asarray(np.setdiff1d(
np.arange(X.shape[0]), comp_pts))
hp = hps[i_v, :]
res_comp_pts = res_train[comp_pts]
res_incomp_pts = res_train[incomp_pts]
mean_res_in = np.mean(res_comp_pts)
mean_res_out = np.mean(res_incomp_pts)
sse = np.sum((res_train[comp_pts] - mean_res_in)**2) + \
np.sum((res_train[incomp_pts] - mean_res_out)**2)
if hp_reg is not None and len(self.nmt_feats) > 0:
if hp_reg == 'L1_nmt' or hp_reg == 'L2_nmt':
sse = sse + hp_reg_c * \
np.linalg.norm(hp[self.nmt_feats - 1], ord=1
if hp_reg == 'L1_nmt' else
2)**(1 if hp_reg == 'L1_nmt'
else 2)
elif hp_reg == 'L1' or hp_reg == 'L2':
sse = sse + hp_reg_c * \
np.linalg.norm(hp, ord=1 if hp_reg == 'L1' else
2)**(1 if hp_reg == 'L1' else 2)
if sse <= best[0] and len(
comp_pts) > 0:
if self.loss == 'deviance':
sum_res_comp = np.sum(np.abs(res_comp_pts) * (
1 - np.abs(res_comp_pts)))
sum_res_incomp = np.sum(np.abs(res_incomp_pts) * (
1 - np.abs(res_incomp_pts)))
signed_sum_res_comp = np.sum(res_comp_pts)
signed_sum_res_incomp = np.sum(res_incomp_pts)
if (sum_res_comp > 1e-9 and
sum_res_incomp > 1e-9 and
np.abs(signed_sum_res_comp) > 1e-9 and
np.abs(signed_sum_res_incomp) > 1e-9):
coef_in = 0.5 * signed_sum_res_comp / \
(sum_res_comp)
if self.learner_type_code == 0: # two sided
coef_out = 0.5 * signed_sum_res_incomp / \
(sum_res_incomp)
ratio = np.max(
[np.abs(coef_in / coef_out),
np.abs(coef_out / coef_in)])
elif self.learner_type_code == 1: # one-sided
[coef_out, ratio] = [0., 0.5]
else:
coef_in = 0
coef_out = 0
ratio = 0.
elif self.loss == 'rmse':
#use_M-regression (huber loss)
use_huber=True
if use_huber:
q_alpha=0.5
q_in=np.percentile(np.abs(y[comp_pts] - curr_totals[comp_pts]),q_alpha)
res_in=y[comp_pts] - curr_totals[comp_pts]
median_in=np.median(res_in)
coef_in = median_in + (
1/len(comp_pts)*(np.sum(np.sign(res_in-
median_in)*np.min(np.hstack([q_in*np.ones(len(res_in)).reshape([-1,1]),np.abs(res_in-
median_in).reshape([-1,1])]),axis=1))))
if self.learner_type_code == 1:
coef_out=0
else:
q_out=np.percentile(np.abs(y[incomp_pts] - curr_totals[incomp_pts]),q_alpha)
res_out=y[incomp_pts] - curr_totals[incomp_pts]
median_out=np.median(res_out)
coef_out = median_out + (
1/len(incomp_pts)*(np.sum(np.sign(res_out-
median_out)*np.min(np.hstack([q_out*np.ones(len(res_out)).reshape([-1,1]),np.abs(res_out-
median_out).reshape([-1,1])]),axis=1))))
else:
coef_in = np.median(
y[comp_pts] - curr_totals[comp_pts])
coef_out = (0 if self.learner_type_code == 1 else
np.median(y[incomp_pts] -
curr_totals[incomp_pts]))
ratio = 0.
#if np.sign(coef_in) == dirn and np.sign(coef_out) == -dirn (
if coef_in*dirn > coef_out * dirn and (
coef_in != np.inf and coef_out != np.inf and
ratio < 1e9):
best = [
sse, i, dirn, hp, [
coef_out, coef_in]]
self.x_base = X[best[1], :]
self.coefs = best[4]
self.dirn = best[2]
self.nmt_hyperplane = best[3]
return self
def transform(self, X_pred_):
"""Transform dataset.
Parameters
----------
X: array-like matrix
Returns
-------
X_transformed: array-like matrix, shape=(n_samples, 1)
"""
res = np.asarray([1 if self.comparator.compare(self.x_base, X_pred_[
i, :]) * self.dirn in [0, 1] else 0 for i in
np.arange(X_pred_.shape[0])])
return res
class MonoBoost():
""" Partially Monotone Boosting classifier
var
Attributes
----------
eg_attr: list of DecisionTreeClassifier
The collection of fitted sub-estimators.
References
----------
XXX
See also
--------
YYY
"""
# NB: fit_algo is irrelevant for fit_type='quadratic'
def __init__(self,
n_feats,
incr_feats,
decr_feats,
num_estimators=10,
fit_algo='L2-one-class',
eta=1.,
vs=[0.001,
0.1,
0.25,
0.5,
1],
verbose=False,
hp_reg=None,
hp_reg_c=None,
incomp_pred_type='default',
learner_type='one-sided',
random_state=None,
standardise=True,
classes=None,
loss='auto'):
self.X = None
self.y = None
self.classes=classes
self.n_feats = n_feats
self.incr_feats = np.asarray(incr_feats)
self.decr_feats = np.asarray(decr_feats)
self.mt_feats = np.asarray(list(incr_feats) + list(decr_feats))
self.nmt_feats = np.asarray(
[j for j in np.arange(n_feats) + 1 if j not in self.mt_feats])
self.mt_feat_types=np.zeros(n_feats,dtype=np.float64)
if len(self.incr_feats)>0:
self.mt_feat_types[self.incr_feats-1]=+1.
if len(self.decr_feats)>0:
self.mt_feat_types[self.decr_feats-1]=-1.
self.fitted = False
self.standardise = standardise
self.fit_algo = fit_algo
self.eta = eta
self.num_estimators = num_estimators
self.vs = vs
self.mt_comparator = MonoComparator(
n_feats, incr_feats, decr_feats, nmt_hyperplane=None)
self.verbose = verbose
self.hp_reg = hp_reg
self.hp_reg_c = hp_reg_c
self.y_pred_num_comp_ = None
self.incomp_pred_type = incomp_pred_type
self.learner_type = learner_type
self.random_state = np.random.randint(
1e6) if random_state is None else random_state
np.random.seed(self.random_state)
self.loss = loss#'auto'
self.__estimators_base_pts__=None
self.__estimators_dirns__=None
self.__estimators_intercepts__=None
self.__estimators_hyperplanes__=None
def get_estimator_matrices(self):
if self.__estimators_base_pts__ is None:
self.__estimators_base_pts__={}
self.__estimators_dirns__={}
self.__estimators_hyperplanes__={}
self.__estimators_intercepts__={}
self.__estimators_coefs__={}
for k in self.ks:
self.__estimators_base_pts__[k]=np.asarray([est.x_base for est in self.estimators[k]],dtype=np.float64)
self.__estimators_dirns__[k]=np.asarray([est.dirn for est in self.estimators[k]],dtype=np.float64)
self.__estimators_hyperplanes__[k]=np.asarray([est.nmt_hyperplane for est in self.estimators[k]],dtype=np.float64)
self.__estimators_intercepts__[k]=np.asarray([est.intercept_ for est in self.estimators[k]],dtype=np.float64)
self.__estimators_coefs__[k]=np.asarray([est.coefs for est in self.estimators[k]],dtype=np.float64)
return [self.__estimators_base_pts__,
self.__estimators_dirns__,
self.__estimators_intercepts__,
self.__estimators_hyperplanes__,
self.__estimators_coefs__]
@property
def y_maj_class_calc(self):
"""Majority class (-1 or +1) of the training labels."""
return -1 if np.sum(self.y == -1) / len(self.y) >= 0.5 else +1
@property
def y_pred_num_comp(self):
"""Per-sample comparability counts from predict_proba on the training data, cached."""
if not hasattr(self, 'y_pred_num_comp_'):
self.y_pred_num_comp_ = None
if self.y_pred_num_comp_ is None:
[ypred, num_comp] = self.predict_proba(self.X)
self.y_pred_num_comp_ = num_comp
return self.y_pred_num_comp_
def solve_hp(self, incr_feats, decr_feats, delta_X, v, weights=None):
N = delta_X.shape[0]
p = delta_X.shape[1]
num_feats = p
mt_feats = np.asarray(list(incr_feats) + list(decr_feats))
nmt_feats = np.asarray(
[j for j in np.arange(num_feats) + 1 if j not in mt_feats])
solvers.options['show_progress'] = False
if N == 0:
return [-99]
else:
# Build QP matrices
# Minimize 1/2 x^T P x + q^T x
# Subject to G x <= h
# A x = b
if weights is None:
weights = np.ones(N)
P = np.zeros([p + N, p + N])
for ip in nmt_feats - 1:
P[ip, ip] = 1
q = 1 / (N * v) * np.ones((N + p, 1))
q[0:p, 0] = 0
q[p:, 0] = q[p:, 0] * weights
G1a = np.zeros([p, p])
for ip in np.arange(p):
G1a[ip, ip] = -1 if ip in mt_feats - 1 else 1
G1 = np.hstack([G1a, np.zeros([p, N])])
G2 = np.hstack([np.zeros([N, p]), -np.eye(N)])
G3 = np.hstack([delta_X, -np.eye(N)])
G = np.vstack([G1, G2, G3])
h = np.zeros([p + 2 * N])
A = np.zeros([1, p + N])
for ip in np.arange(p):
A[0, ip] = 1 if ip in mt_feats - 1 else -1
b = np.asarray([1.])
P = cvxmat(P)
q = cvxmat(q)
A = cvxmat(A)
b = cvxmat(b)
# options['abstol']=1e-20 #(default: 1e-7).
# options['reltol']=1e-11 #(default: 1e-6)
sol = solvers.qp(P, q, cvxmat(G), cvxmat(h), A, b)
if sol['status'] != 'optimal':
print(
'****** NOT OPTIMAL ' +
sol['status'] +
' ******* [N=' +
str(N) +
', p=' +
str(p) +
']')
return [-99]
else:
soln = sol['x']
w = np.ravel(soln[0:p, :])
# err = np.asarray(soln[-N:, :])
return w
def get_deltas(self, X_base_pt, X, y):
dirns = [-1, 1]
deltas = [np.zeros([X.shape[0], X.shape[1]]),
np.zeros([X.shape[0], X.shape[1]])]
comp_indxs = []
for dirn in dirns:
idirn = 0 if dirn == -1 else 1
i_j = 0
comp_indxs_ = []
for j in np.arange(X.shape[0]):
if y[j] == -dirn:
comp = self.mt_comparator.compare(X_base_pt, X[j, :])
if comp == dirn or comp == 0:
comp_indxs_ = comp_indxs_ + [j]
d_ = X[j, :] - X_base_pt
# if not np.all(d_==0.):
deltas[idirn][i_j, :] = np.abs(d_)
i_j = i_j + 1
deltas[idirn] = deltas[idirn][0:i_j, :]
comp_indxs = comp_indxs + [np.asarray(comp_indxs_)]
return [comp_indxs, deltas]
def get_comparable_points(self,X,X_base_pts,nmt_hps,intercepts,dirn):
if np.isscalar(intercepts):
intercepts=np.asarray([intercepts],dtype=np.float64)
if len(X.shape)<2:
X_=np.asarray(X.reshape([1,-1]),dtype=np.float64)
else:
X_=np.asarray(X,dtype=np.float64)
if len(X_base_pts.shape)<2:
X_base_pts_=np.asarray(X_base_pts.reshape([1,-1]),dtype=np.float64)
else:
X_base_pts_=np.asarray(X_base_pts,dtype=np.float64)
if len(nmt_hps.shape)<2:
nmt_hps_=np.asarray(nmt_hps.reshape([1,-1]),dtype=np.float64)
else:
nmt_hps_=np.asarray(nmt_hps,dtype=np.float64)
X_comp_pts=np.zeros([X_.shape[0],X_base_pts_.shape[0]],dtype=np.int32)
monoboost.apply_rules_c(X_,
X_base_pts_,
nmt_hps_,
intercepts,
self.mt_feat_types,
np.float64(dirn),
0,
X_comp_pts)
return X_comp_pts
def get_base_comparable_pairs(self,X):
if len(X.shape)==1:
X_=X.reshape([1,-1])
else:
X_=X
# set nmt to zero because we are not considering nmt feats
__hyperplanes__=np.zeros(X_.shape,dtype=np.float64)
__intercepts__=np.zeros(X_.shape[0],dtype=np.float64)
X_pts_above=np.zeros([X_.shape[0],X_.shape[0]],dtype=np.int32)
monoboost.apply_rules_c(X_,
X_,
__hyperplanes__,
__intercepts__,
self.mt_feat_types,
np.float64(-1),
0,
X_pts_above)
X_pts_below=np.zeros([X_.shape[0],X_.shape[0]],dtype=np.int32)
monoboost.apply_rules_c(X_,
X_,
__hyperplanes__,
__intercepts__,
self.mt_feat_types,
np.float64(+1),
0,
X_pts_below)
# remove self-comparisons
# for i in np.arange(X_.shape[0]):
# X_pts_above[i,i]=0
# X_pts_below[i,i]=0
return [X_pts_below==1,X_pts_above==1]
def fit_cache(self, X, y, svm_vs):
X_rev_dec = X.copy()
for dec_feat in self.decr_feats:
X_rev_dec[:, dec_feat - 1] = -1 * X_rev_dec[:, dec_feat - 1]
hp_data = dict()
calc_hyperplanes = self.learner_type != 'ignore_nmt_feats'
# get comparable pts
[X_pts_below,X_pts_above]=self.get_base_comparable_pairs(X)
for i in np.arange(X.shape[0]):
dirn_ = dict()
dirn_pos = dict()
dirn_neg = dict()
dirn_[-1] = dirn_neg
dirn_[1] = dirn_pos
hp_data[i] = dirn_
x_i = X[i, :]
# get base comparable pts in given direction
# pts_above = []
# pts_below = []
# for j in np.arange(X.shape[0]):
# # if i!=j:
# comp = self.mt_comparator.compare(X[i, :], X[j, :])
# if comp == +1 or comp == 0:
# pts_above = pts_above + [j]
# if comp == -1 or comp == 0:
# pts_below = pts_below + [j]
pts_above=np.arange(X.shape[0])[X_pts_above[i,:]]
pts_below=np.arange(X.shape[0])[X_pts_below[i,:]]
# calculate hyperplanes
for dirn in [-1, +1]:
base_comp_idxs = np.asarray(
pts_above if dirn == 1 else pts_below)
hps = np.zeros([0, X.shape[1]])
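def _cvxopt_qp_form_example():
    # Added illustration (not part of the original module): solve_hp() above assembles
    # a QP in the standard cvxopt form described in its comments,
    #     minimize 1/2 x^T P x + q^T x   subject to  G x <= h,  A x = b.
    # The tiny problem below uses that same form but is unrelated to the hyperplane fit.
    solvers.options['show_progress'] = False
    P = cvxmat(np.eye(2))                 # minimise 1/2*(x0^2 + x1^2)
    q = cvxmat(np.zeros((2, 1)))
    G = cvxmat(-np.eye(2))                # -x <= 0, i.e. x >= 0
    h = cvxmat(np.zeros((2, 1)))
    A = cvxmat(np.ones((1, 2)))           # x0 + x1 = 1
    b = cvxmat(np.array([1.]))
    sol = solvers.qp(P, q, G, h, A, b)
    return np.ravel(sol['x'])             # approximately [0.5, 0.5]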
"""
Market Data Presenter.
This module contains implementations of the DataPresenter abstract class, which
is responsible for presenting data in the form of mxnet tensors. Each
implementation presents a different subset of the available data, allowing
different models to make use of similar data.
"""
from typing import Dict, List, Optional, Tuple
from abc import abstractmethod
import pandas as pd
import numpy as np
from mxnet import ndarray as nd
from . import providers, utils
class DataPresenter:
"""
Abstract class defining the DataProvider API.
"""
@abstractmethod
def get_training_batch(self, size: int):
"""
Returns a batch of training data, partitioned from the validation data,
of size +size+.
"""
@abstractmethod
def get_validation_batch(self, size: int):
"""
Returns a batch of validation data, partitioned from the training data,
of size +size+.
"""
@abstractmethod
def data_array(self, timestamp: pd.Timestamp):
"""
Returns the data associated with a single +timestamp+ in mxnet form
"""
@abstractmethod
def data_frame(self, timestamp: pd.Timestamp):
"""
Returns the data associated with a single +timestamp+ in pandas form.
"""
@abstractmethod
def data_features(self) -> List[str]:
"""
Returns a list of data features in the same order as presented in the
frames.
"""
class IntradayPresenter:
"""
Loads data consisting only of intraday information, guaranteed to keep all
within market hours.
"""
# All it does is load data - no other calls necessary
# pylint: disable=too-few-public-methods
def __init__(self, provider: providers.DataProvider, *, window: int = 45,
valid_seed: int = 0, lookahead: int = 10,
normalize: bool = True, features: Dict[str, bool] = {},
**kwargs):
"""
Init function. Takes a +provider+ from which it extracts data and
a variety of other arguments. See info files for examples.
"""
# pylint: disable=too-many-instance-attributes
# Store basic setup parameters
self.provider = provider
self._window = window
self._valid_seed = valid_seed
self._lookahead = lookahead
self._normalize = normalize
self._features = [feat for feat in features if features[feat]]
self._outputs = []
# Collect and decide features
for feature in self._features:
# First handle special features
if feature == 'macd':
self._outputs.append('macd_signal')
if feature == 'vortex':
self._outputs.extend(['vortex+', 'vortex-'])
continue
if feature == 'stochastic':
self._outputs.extend(['%K', '%D'])
continue
if feature == 'williams':
self._outputs.append('%R')
continue
if feature == 'dysart':
self._outputs.extend(['pvi', 'nvi'])
continue
if feature == 'bollinger':
self._outputs.extend(['bollinger+', 'bollinger=', 'bollinger-'])
continue
# Then add all others
self._outputs.append(feature)
# Decide range of possible dates in advance
self._first = provider.first()
# TODO don't limit this anymore
self._latest = provider.latest() - pd.to_timedelta(2, unit='day')
# Cache for already processed data to cut down on disk usage
self._train_cache = {}
self._val_cache = {}
# Cache of holidays to prevent continuously recalculating them
self._holidays = utils.trading_holidays(self._first - pd.to_timedelta(1, unit='day'),
self._latest)
self._half_days = utils.trading_half_days(self._first - pd.to_timedelta(1, unit='day'),
self._latest)
def get_training_batch(self, size: int) -> Tuple[nd.NDArray, nd.NDArray]:
"""
Returns a batch of training data, partitioned from the validation data,
of size +size+.
"""
return self._get_batch(size, validation=False)
def get_validation_batch(self, size: int) -> Tuple[nd.NDArray, nd.NDArray]:
"""
Returns a batch of validation data, partitioned from the training data,
of size +size+.
"""
return self._get_batch(size, validation=True)
def data_array(self, timestamp: pd.Timestamp) -> nd.NDArray:
"""
Returns the data associated with a single +timestamp+ in mxnet form
"""
start_time = timestamp - pd.to_timedelta(self._window, unit='min')
return self._get_data(start_time, False)[0]
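# Worked example of the windowing above, assuming the default window of 45 minutes
# and an intraday frame that starts at the 09:30 market open:
# data_array(pd.Timestamp('2019-06-03 10:15')) computes a start_time of 09:30, so
# _get_data returns rows 0..44, i.e. the 45 one-minute bars ending just before the
# requested timestamp. (The timestamp here is purely illustrative.)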
def data_frame(self, timestamp: pd.Timestamp):
"""
Returns the data associated with a single +timestamp+ in pandas form.
"""
data = self._extract_daily_data(timestamp)
if data is None:
return None
return data.loc[timestamp, :]
def _get_data(self, time: pd.Timestamp, validation: bool) \
-> Tuple[nd.NDArray, nd.NDArray]:
"""
Returns a single data sample starting at a given +time+. Uses
+validation+ to distinguish between training and validation sets.
NOTE: This function assumes that the entire data window is available.
If a time provided is too late to obtain a full window, behavior
is UNPREDICTABLE.
"""
# Check if the sample has already been cached.
day = time.floor('D')
start_index = (time.hour - 9) * 60 + (time.minute - 30)
end_index = start_index + self._window
if validation and day in self._val_cache:
data, target = self._val_cache[day]
return data[start_index: end_index], target[start_index: end_index]
if not validation and day in self._train_cache:
data, target = self._train_cache[day]
return data[start_index: end_index], target[start_index: end_index]
        # Otherwise generate, cache, and return it
data, target = self._to_daily_input_data(day)
if validation:
self._val_cache[day] = (data, target)
else:
self._train_cache[day] = (data, target)
return data[start_index: end_index], target[start_index: end_index]
def _to_daily_input_data(self, date: pd.Timestamp) \
-> Tuple[nd.NDArray, nd.NDArray]:
"""
Transforms a set of intraday data for a +date+ to an array appropriate
for input to the model, and a target set of predictions against which
to compare outputs.
"""
        # Gather the requested data components. Note that this seemingly
# over-complicated method guarantees that they remain in the order
# prescribed by the feature list.
datas = []
for feat in self._outputs:
if feat == "high":
datas.append(_to_intraday_high(date, self.provider,
normalize=self._normalize))
elif feat == "low":
datas.append(_to_intraday_low(date, self.provider,
normalize=self._normalize))
elif feat == "change":
datas.append(_to_intraday_change(date, self.provider,
normalize=self._normalize))
elif feat == "open":
datas.append(_to_intraday_open(date, self.provider,
normalize=self._normalize))
elif feat == "volume":
datas.append(_to_intraday_volume(date, self.provider,
normalize=self._normalize))
elif feat == "time":
datas.append(_to_intraday_time(date, self.provider,
normalize=self._normalize))
elif feat == "macd":
# For MACD, include both MACD and its signal
macd, macd_signal = _to_intraday_macd(date, self.provider,
normalize=self._normalize)
datas.extend([macd_signal, macd])
elif feat == "mass_index":
datas.append(_to_intraday_mass_index(date, self.provider))
elif feat == "trix15":
datas.append(_to_intraday_trix(date, self.provider, 15))
elif feat == "vortex+":
vortex_up, vortex_down = _to_intraday_vortex(date,
self.provider, 25)
datas.extend([vortex_up, vortex_down])
elif feat == "%K":
pK, pD = _to_intraday_stochastic(date, self.provider, 30)
datas.extend([pK, pD])
elif feat == "rsi":
datas.append(_to_intraday_rsi(date, self.provider, 14))
elif feat == "%R":
                # The Williams %R is mathematically equivalent to %K - 1 (the
                # negative of 1 - %K). It is recomputed here with a shorter period.
pK, _ = _to_intraday_stochastic(date, self.provider, 10)
datas.append(pK - 1)
elif feat == "accdist":
datas.append(_to_intraday_accdist(date, self.provider))
elif feat == "mfi":
datas.append(_to_intraday_mfi(date, self.provider, 30))
elif feat == "vpt":
datas.append(_to_intraday_vpt(date, self.provider))
elif feat == "obv":
datas.append(_to_intraday_obv(date, self.provider))
elif feat == "pvi":
pvi, nvi = _to_intraday_dysart(date, self.provider)
datas.extend([pvi, nvi])
elif feat == "bollinger+":
b_top, b_mid, b_bottom = _to_intraday_bollinger(date,
self.provider,
30, 2)
datas.extend([b_top, b_mid, b_bottom])
elif feat == "ultimate":
datas.append(_to_intraday_ultimate(date, self.provider))
elif feat == "cci":
datas.append(_to_intraday_cci(date, self.provider))
elif feat == "target":
datas.append(_to_intraday_target(date, self.provider,
self._lookahead,
normalize=self._normalize))
# Gather target data and return data/target arrays
target = _to_intraday_target(date, self.provider, self._lookahead,
normalize=self._normalize)
return nd.stack(*datas, axis=1), target.reshape(-1, 1)
def _extract_daily_data(self, date: pd.Timestamp) -> Optional[pd.DataFrame]:
"""
Gets the market data for a given day, restricted to market hours.
"""
data = self.provider.intraday(date)
if data is None or data.empty:
return None
return data
def _get_batch(self, batch_size: int, validation: bool = False) \
-> Tuple[nd.NDArray, nd.NDArray]:
"""
Gets a random batch of data of size +batch_size+. Returns a tuple of
data and target predictions. If +validation+ is set, prevents these
dates from being drawn for non-validation batches.
"""
# Define a Callable for testing appropriate dates
def _is_suitable_time(time: pd.Timestamp) -> bool:
"""
Returns whether the market is open at a given +time+ for the
required window.
"""
# First, confirm that this date matches the right type
day = time.floor(freq='D')
is_validation_date = (day.dayofyear % 10 == self._valid_seed)
if validation != is_validation_date:
return False
# Ensure it's on weekdays and during market hours. Note that we
# discard the last 10 minutes of trading because they are both
# dangerous for day trading and provide no good way to train the
# 10 minute output for the model.
if time.weekday() > 4:
return False
if (time.hour * 60 + time.minute) < 9 * 60 + 30:
return False
if (time.hour * 60 + time.minute + self._window) > 15 * 60 - self._lookahead:
return False
            # Check against holidays. Note that for the sake of sanity, we
# don't include half days.
if day in self._holidays or day in self._half_days:
return False
return True
# Next, generate arrays of random dates within the last two years,
# recording appropriate ones to form an array of size +batch_size+
timestamps = pd.Series()
while True:
random_times = pd.to_datetime(np.random.randint(low=self._first.value,
high=self._latest.value,
size=(100),
dtype='int64')).to_series()
suitable_mask = random_times.apply(_is_suitable_time)
timestamps = pd.concat([timestamps, random_times.loc[suitable_mask]])
if len(timestamps) >= batch_size:
timestamps = timestamps[0 : batch_size]
break
index_array = pd.to_datetime(timestamps)
# Next, gather all data into batches with axes (batch, window, data...)
datas, targets = [], []
for timestamp in index_array:
data, target = self._get_data(timestamp, validation)
datas.append(data)
targets.append(target)
data_array, target_array = nd.stack(*datas), nd.stack(*targets)
# Return the data
return data_array, target_array
def data_features(self) -> List[str]:
"""
Returns a list of data features in the same order as presented in the
frames.
"""
return self._outputs
def _get_intraday_data(date: pd.Timestamp, provider: providers.DataProvider) \
-> pd.DataFrame:
"""
    Gets the intraday dataframe limited to market hours for a given +date+
and +provider+.
"""
# First, get data and limit it to market hours
data = provider.intraday(date)
if data is None or data.empty:
raise RuntimeError(f"Something went wrong - empty data array for {date}!")
start = data.index[0].replace(hour=9, minute=30)
end = data.index[0].replace(hour=16, minute=0)
# Next, resample the data by the minute and interpolate missing values
data = data.loc[data.index.isin(pd.date_range(start=start, end=end, freq='min'))]
data = data.resample('min')
data = data.interpolate(method='time').copy()
return data
def _to_intraday_high(date: pd.Timestamp, provider: providers.DataProvider,
normalize: bool = True) -> nd.NDArray:
"""
Returns an ndarray consisting of the per-minute high of a data series for
a given +date+ and +provider+. If +normalize+, it is divided by the
open price.
"""
data = _get_intraday_data(date, provider)
high = ((data.high - data.open) / data.open) if normalize else data.high
return nd.array(high.values, utils.try_gpu(0))
def _to_intraday_low(date: pd.Timestamp, provider: providers.DataProvider,
normalize: bool = True) -> nd.NDArray:
"""
    Returns an ndarray consisting of the per-minute low of a data series for
a given +date+ and +provider+. If +normalize+, it is divided by the
open price.
"""
data = _get_intraday_data(date, provider)
low = ((data.low - data.open) / data.open) if normalize else data.low
return nd.array(low.values, utils.try_gpu(0))
def _to_intraday_change(date: pd.Timestamp, provider: providers.DataProvider,
normalize: bool = True) -> nd.NDArray:
"""
    Returns an ndarray consisting of the per-minute price change of a data
    series for a given +date+ and +provider+. If +normalize+, the change is
    expressed relative to the previous close; otherwise the raw close is returned.
"""
data = _get_intraday_data(date, provider)
close_prev = data.close.shift(periods=1, fill_value=data.close[0])
close = ((data.close - close_prev) / close_prev) if normalize else data.close
return nd.array(close.values, utils.try_gpu(0))
def _to_intraday_open(date: pd.Timestamp, provider: providers.DataProvider,
normalize: bool = True) -> nd.NDArray:
"""
Returns an ndarray consisting of the per-minute open of a data series for
a given +date+ and +provider+. If +normalize+, it is divided by the
daily open price.
"""
data = _get_intraday_data(date, provider)
open = (data.open / data.open.iloc[0]) if normalize else data.open
return nd.array(open.values, utils.try_gpu(0))
def _to_intraday_volume(date: pd.Timestamp, provider: providers.DataProvider,
normalize: bool = True) -> nd.NDArray:
"""
    Returns an ndarray consisting of the per-minute volume of a data series for
a given +date+ and +provider+. If +normalize+, it is divided by the
average volume.
"""
data = _get_intraday_data(date, provider)
vol = data.volume / data.volume.mean() if normalize else data.volume
return nd.array(vol.values, utils.try_gpu(0))
def _to_intraday_time(date: pd.Timestamp, provider: providers.DataProvider,
normalize: bool = True) -> nd.NDArray:
"""
Returns an ndarray consisting of the trading minute of a data series for
a given +date+ and +provider+. If +normalize+, it is normalized so that
9:30 is 0 and 16:00 is 1
"""
data = _get_intraday_data(date, provider)
minute = data.index.hour * 60 + data.index.minute - (9 * 60 + 30)
    tempus = (minute / (60 * 6 + 30)) if normalize else minute  # 390 trading minutes: 9:30-16:00
return nd.array(tempus.values, utils.try_gpu(0))
def _to_intraday_macd(date: pd.Timestamp, provider: providers.DataProvider,
normalize: bool = True) -> Tuple[nd.NDArray, nd.NDArray]:
"""
Returns a pair of ndarrays consisting of the per-minute MACD of a data
series for a given +date+ and +provider+, and a signal for the same. If
    +normalize+, both are divided by the daily open price.
"""
# First, calculate the MACD via exponential moving averages
data = _get_intraday_data(date, provider)
ewm12 = pd.Series.ewm(data['close'], span=12).mean()
ewm26 = pd.Series.ewm(data['close'], span=26).mean()
    macd = ewm12 - ewm26  # MACD line: 12-period EMA minus 26-period EMA
# Next, calculate the signal line
signal = pd.Series.ewm(macd, span=9).mean()
# Return both
return nd.array(macd.values, utils.try_gpu(0)), \
nd.array(signal.values, utils.try_gpu(0))
def _to_intraday_trix(date: pd.Timestamp, provider: providers.DataProvider,
                      period: int) -> nd.NDArray:
    """
    Returns an ndarray containing the TRIX for a given +date+ and +provider+,
    averaged across a given +period+.
    """
    # First, get the triple-smoothed +period+-length exponential moving average
data = _get_intraday_data(date, provider)
ewm1 = pd.Series.ewm(data['close'], span=period).mean()
ewm2 = pd.Series.ewm(ewm1, span=period).mean()
ewm3 = pd.Series.ewm(ewm2, span=period).mean()
# Return the percentage change from last period
ewm3_yesterday = ewm3.shift(periods=1, fill_value=ewm3[0])
trix = (ewm3 / ewm3_yesterday) - 1
return nd.array(trix.values, utils.try_gpu(0))
def _to_intraday_mass_index(date: pd.Timestamp,
provider: providers.DataProvider) -> nd.NDArray:
"""
Returns an ndarray consisting of the per-minute mass index of a data series
for a given +date+ and +provider+.
"""
# First, calculate the difference between high and low
data = _get_intraday_data(date, provider)
diff = data['high'] - data['low']
# Next, calculate the moving average of the difference (and its moving
# average)
ewm9 = pd.Series.ewm(diff, span=9).mean()
ewm99 = pd.Series.ewm(ewm9, span=9).mean()
ratio = ewm9/ewm99
# Sum and return
mass_index = ratio.rolling(25).sum()/250
return nd.array(mass_index.values, utils.try_gpu(0))
def _to_intraday_vortex(date: pd.Timestamp, provider: providers.DataProvider,
period: int) -> Tuple[nd.NDArray, nd.NDArray]:
"""
Returns a pair of ndarrays consisting of the positive and negative vortex
indicators of a data series for a given +date+ and +provider+, taken over
a given +period+.
"""
# First, calculate the True Range
data = _get_intraday_data(date, provider)
prev_close = data.close.shift(periods=1, fill_value=data.close[0])
high_low = (data.high - data.low).abs()
low_close = (data.low - prev_close).abs()
high_close = (data.high - prev_close).abs()
    # True Range is the greatest of the three candidate ranges
    true_range = pd.concat([high_low, low_close, high_close], axis=1).max(axis=1)
    # Next, calculate the vortex movements
prev_low = data.low.shift(periods=1, fill_value=data.low[0])
prev_high = data.high.shift(periods=1, fill_value=data.high[0])
vm_up = (data.high - prev_low).abs()
vm_down = (data.low - prev_high).abs()
# Finally, calculate the indicator itself
true_range_sum = true_range.rolling(period).sum()
vm_up_sum = vm_up.rolling(period).sum()
vm_down_sum = vm_down.rolling(period).sum()
vi_up = vm_up_sum / true_range_sum
vi_down = vm_down_sum / true_range_sum
# Return VI+ and VI- values
return nd.array(vi_up.values, utils.try_gpu(0)), \
nd.array(vi_down.values, utils.try_gpu(0))
def _to_intraday_rsi(date: pd.Timestamp, provider: providers.DataProvider,
period: int) -> nd.NDArray:
"""
Returns an ndarray consisting of the per-minute Relative Strength Index of
a data series for a given +date+ and +provider+ over a given +period+.
"""
# First, calculate the per-period up and down movements
data = _get_intraday_data(date, provider)
prev_close = data.close.shift(periods=1, fill_value=data.close[0])
movement = data.close - prev_close
move_up = movement.where(movement > 0, 0)
move_down = -movement.where(movement < 0, 0)
# Calculate the relative strength and relative strength index
ewm_up = pd.Series.ewm(move_up, span=period).mean()
ewm_down = pd.Series.ewm(move_down, span=period).mean()
rs = ewm_up/ewm_down
rsi = 1 - 1/(1 + rs)
return nd.array(rsi.values, utils.try_gpu(0))
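# Note on the RSI above: RS is the ratio of exponential moving averages of the
# up and down moves, and RSI = 1 - 1 / (1 + RS), i.e. the conventional 0-100
# oscillator rescaled to [0, 1], which keeps it on the same bounded scale as
# the other normalized features.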
def _to_intraday_stochastic(date: pd.Timestamp, provider: providers.DataProvider,
period: int) -> Tuple[nd.NDArray, nd.NDArray]:
"""
Returns a pair of ndarrays consisting of the %K and %D values associated
with the stochastic oscillator framework for a given +date+ and +provider+,
taken over a given +period+.
"""
    # First, get the rolling highs and lows over the previous +period+ time periods.
data = _get_intraday_data(date, provider)
high = data.high.rolling(period).max()
low = data.low.rolling(period).min()
# Next, calculate the %K and %D
pK = (data.close - low) / (high - low)
pD = pK.rolling(3).mean()
# Return them
return nd.array(pK.values, utils.try_gpu(0)), \
nd.array(pD.values, utils.try_gpu(0))
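# For reference, the stochastic oscillator computed above is
#   %K = (close - lowest low) / (highest high - lowest low) over +period+ samples
#   %D = 3-sample simple moving average of %K
# and both series are already on a [0, 1] scale.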
def _to_intraday_accdist(date: pd.Timestamp, provider: providers.DataProvider) \
-> nd.NDArray:
"""
Returns an ndarray consisting of the per-minute Accumulation/Distribution
Index of a data series for a given +date+ and +provider+.
"""
# First, get the data
data = _get_intraday_data(date, provider)
# Next, calculate the Current Money Flow Volume
cmfv = (2 * data.close) - (data.high + data.low)
cmfv *= (data.volume / 1000) / (0.0001 + data.high - data.low)
# Now generate the Acc/Dist index for each timestamp
accdist = np.empty(len(cmfv))
accdist[0] = cmfv.iloc[0]
for i in range(1, len(cmfv)):
accdist[i] = accdist[i - 1] + cmfv.iloc[i]
# Return the Acc/Dist index
return nd.array(accdist / np.linalg.norm(accdist), utils.try_gpu(0))
def _to_intraday_mfi(date: pd.Timestamp, provider: providers.DataProvider,
period: int) -> nd.NDArray:
"""
Returns an ndarray consisting of the per-minute Money Flow Index of a data
    series for a given +date+ and +provider+ across a given +period+.
"""
# First, get the data
data = _get_intraday_data(date, provider)
# Next, calculate the typical price and money_flow
typical_price = (data.high + data.low + data.close) / 3
money_flow = typical_price * data.volume
# Find the positive and negative money flows
prev_typical_price = typical_price.shift(periods=1,
fill_value=typical_price[0])
positive_flow = money_flow.where(typical_price > prev_typical_price, 0)
negative_flow = money_flow.where(typical_price < prev_typical_price, 0)
# Sum over the window and return the ratio
positive = positive_flow.rolling(period).sum()
negative = negative_flow.rolling(period).sum()
mfi = positive / (positive + negative)
return nd.array(mfi.values, utils.try_gpu(0))
def _to_intraday_vpt(date: pd.Timestamp, provider: providers.DataProvider) \
-> nd.NDArray:
"""
Returns an ndarray consisting of the per-minute Volume Price Trend of a
data series for a given +date+ and +provider+.
"""
# First, get the data
data = _get_intraday_data(date, provider)
# Next, multiply the change by the volume
prev_close = data.close.shift(periods=1, fill_value=data.close[0])
vpt = data.volume * (data.close - prev_close) / prev_close
# Return the VPT
return nd.array(np.cumsum(vpt.values), utils.try_gpu(0))
def _to_intraday_obv(date: pd.Timestamp, provider: providers.DataProvider,
period: int = 45) -> nd.NDArray:
"""
Returns an ndarray consisting of the per-minute On Balance Volume of a
data series for a given +date+ and +provider+.
"""
# First, get the data
data = _get_intraday_data(date, provider)
# Get the positive and negative volume
prev_close = data.close.shift(periods=1, fill_value=data.close[0])
vol_pos = data.volume.where((data.close - prev_close) > 0, 0)
vol_neg = -data.volume.where((data.close - prev_close) < 0, 0)
# Cumulatively sum them
cum_vol = np.cumsum(vol_pos.values + vol_neg.values)
# Return the OBV
return nd.array(cum_vol, utils.try_gpu(0))
def _to_intraday_dysart(date: pd.Timestamp, provider: providers.DataProvider) \
-> Tuple[nd.NDArray, nd.NDArray]:
"""
    Returns the Dysart Negative and Positive Volume Indices for a data series
    for a given +date+ and +provider+.
"""
# First, get the data
data = _get_intraday_data(date, provider)
# Next, get the relative price changes
prev_close = data.close.shift(periods=1, fill_value=data.close[0])
change = (data.close - prev_close) / prev_close
# Separate decreased and increased volume days
prev_volume = data.volume.shift(periods=1, fill_value=data.volume[0])
vol_inc = change.where(data.volume > prev_volume, 0)
vol_dec = change.where(data.volume < prev_volume, 0)
# Perform cumulative sum to generate PVI and NVI
    pvi = np.cumsum(vol_inc.values)
    nvi = np.cumsum(vol_dec.values)
    # Return the PVI and NVI
    return nd.array(pvi, utils.try_gpu(0)), \
           nd.array(nvi, utils.try_gpu(0))
import re
import cv2
import numpy as np
import os
import matplotlib.pyplot as plt
from scipy.special import comb, perm
def calculate_2():
r_3s = []
delta = 0.01
for ploy in datas:
n = 3
c_x, c_y = ploy[0]
r_3_sum = 0
        remain = 0  # length left over from the previous segment
dots = []
for i in range(len(ploy[1])):
x1, y1 = ploy[1][i]
x2, y2 = ploy[1][(i + 1) % len(ploy[1])]
if x1 == x2:
y1, x1 = ploy[1][i]
y2, x2 = ploy[1][(i + 1) % len(ploy[1])]
k = (y2 - y1) / (x2 - x1)
b = y2 - k * x2
remain_x = remain / (k ** 2 + 1)
delta_x = delta / (k ** 2 + 1)
for j in range(round((np.math.sqrt((x1 - x2) ** 2 + (y1 - y2) ** 2)-remain) / delta - 0.5)):
dot_x = x1 + remain_x + j * delta_x
dot_y = k * dot_x + b
dots.append([dot_x - c_x, dot_y - c_y])
remain = np.math.sqrt((x1 - x2) ** 2 + (y1 - y2) ** 2) % 0.1
# print(i, round(np.math.sqrt((x1 - x2) ** 2 + (y1 - y2) ** 2) / delta - 0.5))
for i in range(len(dots) - 1):
r_3 = (dots[i][0]**2 + dots[i][1]**2)**((n-1)/2)
r_3 *= np.math.sqrt(((dots[i][0]-dots[i+1][0])**2 + (dots[i][1]-dots[i+1][1])**2))
r_3_sum += r_3
print(r_3_sum)
r_3s.append(r_3_sum)
def calculate_4():
r_3s = []
m = 10000
for ploy in datas:
n = 4
c_x, c_y = ploy[0]
r_3_sum = 0
for i in range(len(ploy[1])):
x1, y1 = ploy[1][i]
x2, y2 = ploy[1][(i + 1) % len(ploy[1])]
l = np.math.sqrt((x2-x1)**2 + (y2-y1)**2)/m
for j in range(m):
dot_x = x1 + (x2 - x1) / m * (j+1/2) - c_x
dot_y = y1 + (y2 - y1) / m * (j+1/2) - c_y
r_3_sum += (dot_y**2 + dot_x**2)**((n-1)/2)*l
print(str(r_3_sum) + '\t' + str((r_3_sum / np.math.pi / 2) ** (1/n)))
r_3s.append(r_3_sum)
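# calculate_4 above is a brute-force approximation of the boundary integral
# I_n = integral of |r|**(n-1) ds around the polygon, with r measured from the
# reference point (c_x, c_y): every edge is cut into m sub-segments of length l
# and |r|**(n-1) * l is accumulated at each midpoint. calculate_3 and
# calculate_5 evaluate the same integral per edge in closed form (n = 3 and
# n = 2*j + 1 respectively), and the printed (sum / (2*pi)) ** (1/n) is the
# radius of the circle that would give the same integral.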
def calculate_5():
j = 2
r_5s = []
for ploy in datas:
r_5_sum = 0
c_x, c_y = ploy[0]
for i in range(len(ploy[1])):
x1, y1 = ploy[1][i]
x2, y2 = ploy[1][(i + 1) % len(ploy[1])]
x1 -= c_x
x2 -= c_x
y1 -= c_y
y2 -= c_y
if x1 == x2:
temp = x1
x1 = y1
y1 = temp
temp = x2
x2 = y2
y2 = temp
c = x1 - x2
b = y1 - y2
a = np.math.sqrt(b**2 + c**2) / abs(c)**(j*2+1)
j_sum = 0
for p in range(j+1):
q_sum = 0
for q in range(2*p+1):
q_value = comb(2*p, q)
q_value *= b**q * c**(2*j - 2*p)
q_value *= (c*y1 - b*x1)**(2*p - q)
q_value *= x2**(2*j - 2*p + q + 1) - x1**(2*j - 2*p + q + 1)
q_value /= 2*j - 2*p + q + 1
q_sum += q_value
j_sum += comb(j, p) * q_sum
r_5_sum += abs(a*j_sum)
r_5s.append(r_5_sum)
print(str(r_5_sum) + '\t' + str((r_5_sum / np.math.pi / 2) ** (1/(2*j+1))))
def calculate_3():
r_3s = []
for ploy in datas:
r_3_sum = 0
c_x, c_y = ploy[0]
for i in range(len(ploy[1])):
x1, y1 = ploy[1][i]
x2, y2 = ploy[1][(i + 1) % len(ploy[1])]
x1 -= c_x
x2 -= c_x
y1 -= c_y
y2 -= c_y
if x1 == x2:
temp = x1; x1 = y1; y1 = temp
temp = x2; x2 = y2; y2 = temp
a = x1 - x2
b = y1 - y2
r3 = (a**2+b**2)*(x2**3-x1**3)/3
r3 += 2*(b)*(x1*y2 - x2*y1)*(x2**2 - x1**2)/2
r3 += (-a)*(x1*(b) + y1*(-a))**2
r3 *= np.math.sqrt(b**2+a**2)/a**3
r3 = abs(r3)
r_3_sum += r3
r_3s.append(r_3_sum)
print(str(r_3_sum) + '\t' + str((r_3_sum / np.math.pi / 2) ** (1/3)))
# print((r_3_sum / np.math.pi) ** (1/3))
def add_circle():
p_num = 6
delta_theta = 2 * np.math.pi / p_num
pts = []
for i in range(p_num):
theta = i * delta_theta
        x = 1 * np.math.cos(theta)
import os
import pickle
import librosa
import warnings
import numpy as np
import pandas as pd
warnings.filterwarnings('ignore')
from scipy.stats import skew, kurtosis
from pychorus import find_and_output_chorus
from flask import Flask, request, json, render_template
# Create flask app
app = Flask(__name__)
# Load pkl model
model = pickle.load(open('Your model name here', 'rb'))
@app.route('/')
def home():
return render_template('index.html')
@app.route('/predict', methods = ['POST'])
def predict():
song_link = list(request.form.values())[0]
# get features from songs
data = []
d, cols = extract_features(song_link)
data.append(d)
dataset = pd.DataFrame(data, columns=cols)
# select features which we used in ml model
df = pd.read_csv('Data/bestfeatures.csv')
columns = list(df['feature'][df['type']=='best'])
X = dataset[columns]
# making prediction
prediction = model.predict(X)
output = 'Unpopular' if prediction[0] == 0 else 'Popular'
return render_template('index.html', prediction_text = f'The song is {output}')
def statistics(list, feature, columns_name, data):
i = 0
for ele in list:
_skew = skew(ele)
columns_name.append(f'{feature}_kew_{i}')
min = np.min(ele)
columns_name.append(f'{feature}_min_{i}')
        max = np.max(ele)
import numpy as np
import torch
refDict = {}
def integerEncoding(char):
global refDict
if char not in refDict.keys():
try:
refDict[char] = max(refDict.values()) + 1
except:
refDict[char] = 0
#print(refDict)
return refDict[char]
def oneHotEncoding(minibatch,minibatch_size=1,sequence_size=1,len_refDict = 0):
if not len_refDict:
len_refDict = len(refDict)
encoded = np.zeros((sequence_size,minibatch_size,len_refDict))
for i in range(len(minibatch)):
for j in range(len(minibatch[i])):
if minibatch[i][j] >= 0:
encoded[j,i,minibatch[i][j]] = 1
pad_length = int(minibatch_size * sequence_size - np.sum(encoded))
return (encoded,pad_length)
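# Illustrative example (hypothetical inputs, assuming refDict already holds at
# least 4 characters so len_refDict=4 is valid):
#   encoded, pad = oneHotEncoding([[0, 1, 2]], minibatch_size=1,
#                                 sequence_size=3, len_refDict=4)
#   encoded.shape == (3, 1, 4) with a single 1 per timestep; pad == 0.
# Padding entries of -99 (appended to the last target chunk below) fail the
# ">= 0" check, stay all-zero, and are counted by pad_length.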
def getMinibatches(inputFile, batch_size, sequence_length):
character_indices = []
trainFile = open("../pa4Data/pa4Data/train.txt","r")
while True:
c1 = trainFile.read(1)
if not c1:
break
integerEncoding(c1)
while True:
c = inputFile.read(1)
if not c:
break
character_indices.append(integerEncoding(c))
character_indices = np.array(character_indices)
#print(character_indices[:200])
#Create 100 character sequences
chunks = []
targetchunks = []
for i in range(len(character_indices)//sequence_length):
chunks.append(character_indices[i*sequence_length:(i+1)*sequence_length])
targetchunks.append(character_indices[(i)*sequence_length+1:(i+1)*sequence_length+1])
i = len(character_indices)//sequence_length - 1
chunks.append(character_indices[(i+1)*sequence_length:])
targetchunks.append(character_indices[(i+1)*sequence_length+1:])
targetchunks[-1] = np.append(targetchunks[-1],np.array([-99]))
minibatches = []
target_minibatches = []
skip_length = int(np.ceil(len(chunks)/batch_size))
for i in range((skip_length)):
minibatches.append([])
target_minibatches.append([])
for i in range(len(chunks)):
minibatches[i%skip_length].append(np.array(chunks[i]))
for i in range(len(targetchunks)):
target_minibatches[i%skip_length].append(np.array(targetchunks[i]))
#for j in range(len(chunks)//batch_size):
# minibatches.append(np.array(chunks[j*batch_size:(j+1)*batch_size]))
# target_minibatches.append(np.array(targetchunks[j*batch_size:(j+1)*batch_size]))
#j = len(chunks)//batch_size -1
#minibatches.append(np.array(chunks[(j+1)*batch_size:]))
#target_minibatches.append(np.array(targetchunks[(j+1)*batch_size:]))
minibatches = np.array(minibatches)
target_minibatches = np.array(target_minibatches)
return zip(minibatches,target_minibatches)
#Unit test
if __name__ == "__main__":
f = open("../pa4Data/pa4Data/train.txt","r")
minibatch_size = 1
sequence_size = 100
iterator = getMinibatches(f,minibatch_size,sequence_size)
loss = torch.nn.CrossEntropyLoss()
first_minibatch = None
second_minibatch = None
for minibatch,target_minibatch in iterator:
if first_minibatch is None:
first_minibatch = minibatch.copy()
elif second_minibatch is None:
second_minibatch = minibatch.copy()
        minibatch = np.array(minibatch)
import copy
import numpy as np
def Is_Point_Close_To_Id_In_Raster(prow, pcol, nrows, ncols, id, raster_array):
"""Check if the point is around grids with value equal to Id in raster_array
Parameters
----------
Returns:
-------
Isclose : logical
True : close to grids with value euqal to id
False : not close to grids with value to id
"""
Isclose = False
n_grids_eq_id = 0
if prow != 0 and prow != nrows - 1 and pcol != 0 and pcol != ncols - 1:
if raster_array[prow - 1, pcol + 1] == id:
Isclose = True
n_grids_eq_id = n_grids_eq_id + 1
if raster_array[prow - 1, pcol - 1] == id:
Isclose = True
n_grids_eq_id = n_grids_eq_id + 1
if raster_array[prow - 1, pcol] == id:
Isclose = True
n_grids_eq_id = n_grids_eq_id + 1
if raster_array[prow, pcol + 1] == id:
Isclose = True
n_grids_eq_id = n_grids_eq_id + 1
if raster_array[prow, pcol - 1] == id:
Isclose = True
n_grids_eq_id = n_grids_eq_id + 1
if raster_array[prow + 1, pcol - 1] == id:
Isclose = True
n_grids_eq_id = n_grids_eq_id + 1
if raster_array[prow + 1, pcol + 1] == id:
Isclose = True
n_grids_eq_id = n_grids_eq_id + 1
if raster_array[prow + 1, pcol] == id:
Isclose = True
n_grids_eq_id = n_grids_eq_id + 1
return Isclose, n_grids_eq_id
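# Illustrative example (hypothetical 3x3 raster): if raster_array[0, 1] == 5 and
# every other cell is 0, then Is_Point_Close_To_Id_In_Raster(1, 1, 3, 3, 5,
# raster_array) returns (True, 1), since exactly one of the eight neighbours of
# (1, 1) equals 5. Points on the raster border always return (False, 0) because
# the neighbour check is skipped for them.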
def CE_mcat4lake2(
cat1, lake, fac, fdir, bsid, nrows, ncols, Pourpoints, noncnlake, str_array
):
cat = copy.copy(cat1)
Non_con_lake_cat = copy.copy(cat1)
Non_con_lake_cat[:, :] = -9999
arlakeid = np.unique(lake)
arlakeid = arlakeid[arlakeid > 0]
noncnlake = np.unique(noncnlake)
noncnlake = noncnlake[noncnlake > 0]
for i in range(0, len(arlakeid)):
lakeid = arlakeid[i]
lrowcol = np.argwhere(lake == lakeid).astype(int)
lakacc = np.full((len(lrowcol), 3), -9999)
lakacc[:, 0] = lrowcol[:, 0]
lakacc[:, 1] = lrowcol[:, 1]
lakacc[:, 2] = fac[lrowcol[:, 0], lrowcol[:, 1]]
lakacc = lakacc[lakacc[:, 2].argsort()]
lorow = lakacc[len(lakacc) - 1, 0]
locol = lakacc[len(lakacc) - 1, 1] ###### lake outlet row and col
arclakeid = cat1[lorow, locol]
pp = Pourpoints[lorow, locol]
pp = np.unique(pp)
pp = pp[pp > 0]
# print(lakeid,len(np.argwhere(noncnlake==lakeid)),pp,Pourpoints[lorow,locol],arclakeid)
if len(pp) == 1:
if arclakeid < 0:
cat[lrowcol[:, 0], lrowcol[:, 1]] = pp
else:
cat[lrowcol[:, 0], lrowcol[:, 1]] = arclakeid
if (
len(np.argwhere(noncnlake == lakeid)) > 0
): ##if lake belong to non connected lakes
if arclakeid < 0:
nonlrowcol = np.argwhere(cat == pp).astype(int)
strids = np.unique(str_array[nonlrowcol[:, 0], nonlrowcol[:, 1]])
if (
len(strids) <= 1
): ## if none connected lake catchment overlay with stream, remove this lake
Non_con_lake_cat[nonlrowcol[:, 0], nonlrowcol[:, 1]] = lakeid
else:
cat[lrowcol[:, 0], lrowcol[:, 1]] = -99
else:
nonlrowcol = np.argwhere(cat == arclakeid).astype(int)
strids = np.unique(str_array[nonlrowcol[:, 0], nonlrowcol[:, 1]])
if (
len(strids) <= 1
): ## if none connected lake catchment overlay with stream, remove this lake
Non_con_lake_cat[nonlrowcol[:, 0], nonlrowcol[:, 1]] = lakeid
else:
cat[lrowcol[:, 0], lrowcol[:, 1]] = -99
#### For some reason pour point was missing in non-contribute catchment
    Pours = np.unique(Pourpoints)
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.4'
# jupytext_version: 1.1.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # s_evaluation_certainty_equiv [<img src="https://www.arpm.co/lab/icons/icon_permalink.png" width=30 height=30 style="display: inline;">](https://www.arpm.co/lab/redirect.php?code=s_evaluation_certainty_equiv&codeLang=Python)
# For details, see [here](https://www.arpm.co/lab/redirect.php?permalink=EBcertequivexputilfun).
# +
import numpy as np
import matplotlib.pyplot as plt
from arpym.tools import histogram_sp
from arpym.statistics import simulate_normal
from arpym.tools import solve_riccati, add_logo
# -
# ## [Input parameters](https://www.arpm.co/lab/redirect.php?permalink=s_evaluation_certainty_equiv-parameters)
j_ = 10**5 # number of scenarios
v_tnow = np.array([1, 1]) # current values
mu = np.array([0, 0]) # instruments P&L's expectations
h = np.array([45, 55]) # portfolio holdings
lambda_ = np.array([1 / 150, 1 / 200, 1 / 300]) # risk aversion parameters
rho = -0.5 # correlation parameter
# standard deviations appearing in the P&L's distributions
sig_11, sig_22 = 0.1, 0.3
sig2 = np.array([[(sig_11) ** 2, rho*sig_11*sig_22],
[rho*sig_11*sig_22, (sig_22) ** 2]])
# ## [Step 1](https://www.arpm.co/lab/redirect.php?permalink=s_evaluation_certainty_equiv-implementation-step01): Simulate j_ scenarios for the instruments P&L's
n_ = len(h) # number of the instruments
# scenarios for the standard normal random variable Z
z = simulate_normal(np.zeros(n_), np.eye(n_), j_)
import numpy as np
import pandas as pd
from utils.adam import Adam
from gym.spaces import Box
from tqdm import trange
def eval_policy(pi, env, num_episodes=50, gamma=0.99, horizon=50, stochastic=False):
rets = []
num_stops = []
avg_damages = []
total_times = []
for i in range(num_episodes):
state = env.reset()
ret = 0
done = False
num_pits = 0
avg_damage = 0
t = 0
while not done and t < horizon:
a, _, _, _ = pi.step(state, stochastic=stochastic)
state, reward, done, info = env.step(a)
#num_pits += (1 if a == 0 else 0)
#tire_damage = state[1]
#avg_damage += tire_damage
ret += gamma ** t * reward
t += 1
if done:
#avg_damage /= t
break
rets.append(ret)
num_stops.append(num_pits)
avg_damages.append(avg_damage)
#total_times.append(env.time)
return rets, num_stops, avg_damages, total_times
def create_batch_trajectories(pi,
env,
batch_size,
horizon,
render=False,
stochastic=True,
gamma=0.99):
rets = []
state_dim = env.observation_space.shape[0]
action_dim = 1
if isinstance(env.action_space, Box):
action_dim = env.action_space.shape[0]
states = np.zeros((batch_size, horizon, state_dim))
actions = np.zeros((batch_size, horizon, action_dim))
rewards = np.zeros((batch_size, horizon))
for batch in range(batch_size):
state = env.reset()
done = False
ret = 0
for t in range(horizon):
action, _, _, _ = pi.step(state, stochastic=stochastic)
next_state, reward, done, info = env.step(action)
states[batch, t] = state
actions[batch, t] = action
rewards[batch, t] = reward
ret += gamma ** t * reward
if done:
# print(rewards[batch])
# print('done')
rewards[batch, t + 1:] = 0
actions[batch, t + 1:] = env.action_space.sample()
states[batch, t + 1:] = next_state
break
state = next_state
rets.append(ret)
return states, actions, rewards, rets
def learn(
pi,
env,
max_iterations=1000,
batch_size=50,
eval_frequency=50,
eval_episodes=50,
horizon=50,
gamma=0.99,
logdir='./',
lr=0.1):
"""
@param pi: Policy to optimize
@type pi: Policy
@param env: Environment
@type env: gym.Env
@param max_iterations: number of gradient steps
@type max_iterations: int
@param batch_size: Number of trajectories in a gradient batch
@type batch_size: int
@param eval_frequency: frequency of evaluation of the policy
@type eval_frequency: int
@param eval_episodes: number of episodes of evaluation
@type eval_episodes: int
@param logdir: directory where to save outputs
@type logdir: str
@param horizon: environment horizon
@type horizon: int
@param gamma: discount factor
@type gamma: float
@param lr: learning rate
@type lr: float
@return: optimized policy
@rtype: Policy
"""
offline_scores = []
online_scores = []
params = pi.get_weights()
best_eval = -np.inf
optimizer = Adam(learning_rate=lr, ascent=True, size=params.shape[0])
df = pd.DataFrame(columns=['disc_return', 'n_pits', 'damage', 'time', 'n_ep'])
df.to_csv(logdir + "offline_scores.csv")
pb = trange(max_iterations)
policy_perf = 0
mean_return = 0
grad_norm = 0
print("Evaluation will be on %d episodes" % eval_episodes)
for it in pb:
params = pi.get_weights()
if it % eval_frequency == 0:
pb.set_description("Ret %f # grad_norm %f # Evaluating..." % (mean_return, grad_norm))
# print("Evaluating policy for %d episodes" % (eval_episodes))
rets, stops, damages, total_times = eval_policy(pi, env, num_episodes=eval_episodes, stochastic=False, horizon=horizon)
mean_return = np.mean(rets)
mean_pit_count = np.mean(stops)
mean_damage = np.mean(damages)
mean_time = np.mean(total_times)
policy_perf = mean_return
# print("Policy performance %f" % (mean_return))
df = pd.DataFrame({'disc_return': [mean_return],
'n_pits': mean_pit_count,
'damage': mean_damage,
'time': mean_pit_count,
'n_ep': eval_episodes})
df.to_csv(logdir + "offline_scores.csv", mode='a', header=False, index=False)
offline_scores.append([mean_return, mean_pit_count, mean_damage, mean_time])
np.save(logdir + 'offline_scores.npy', offline_scores)
pi.save(logdir + 'last')
if mean_return > best_eval:
pi.save(logdir + 'best')
best_eval = mean_return
states, actions, rewards, rets = create_batch_trajectories(pi, env, horizon=horizon, batch_size=batch_size,
stochastic=True, gamma=gamma)
mean_return = np.mean(rets)
online_scores.append([mean_return, np.std(rets)])
np.save(logdir + 'online_scores.npy', online_scores)
grad = compute_gradient(pi, states, actions, rewards, gamma=gamma, num_params=params.shape[0])
#grad_2 = compute_gradient_2(pi, states, actions, rewards, gamma=gamma, num_params=params.shape[0])
#diff = grad_2 - grad
# step = optimizer.update(grad)
step = lr * grad
grad_norm = np.linalg.norm(grad)
pb.set_description("Ret %f # grad_norm %f # Last eval %f" % (mean_return, grad_norm, policy_perf))
# print("Iteration %d \t Return %f \t grad_norm %f" % ((it + 1), mean_return, grad_norm))
pi.set_weights(params + step)
return pi
def gradient_est(pi, batch_size, len_trajectories, states, actions, num_params):
gradients = np.zeros((batch_size, len_trajectories, num_params))
for b in range(batch_size):
for t in range(len_trajectories):
action = actions[b, t]
if np.isnan(action[0]):
gradients[b, t, :] = np.zeros_like(gradients[b, t, :])
else:
state = np.array(states[b, t])
grads = pi.compute_gradients(state, action)[0]
gradients[b, t, :] = grads
# gradients[b, t, :] = pi.compute_grad() (((action - np.dot(param.T, state)).reshape(-1, 1) * np.array(
# [state, state])).T / var_policy).reshape(-1)
return gradients
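# gradient_est returns an array of shape (batch_size, len_trajectories,
# num_params) holding, for every timestep, what pi.compute_gradients reports for
# (state, action) (presumably the score function grad log pi(a_t | s_t)), with
# all-zero rows wherever the recorded action is NaN. compute_gradient below
# starts from the per-timestep discount factors and the rewards to turn these
# scores into a policy-gradient estimate.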
def compute_gradient(pi, states, actions, rewards, num_params, gamma=0.99):
batch_size, horizon, obs_dim = states.shape[:]
    discount_factor_timestep = np.power(gamma * np.ones(horizon), np.arange(horizon))  # gamma**t for t = 0..horizon-1
# -*- mode: python; coding: utf-8 -*-
# Copyright (c) 2018 Radio Astronomy Software Group
# Licensed under the 2-clause BSD License
"""Commonly used utility functions."""
import re
import copy
import warnings
from collections.abc import Iterable
from copy import deepcopy
import numpy as np
from scipy.spatial.distance import cdist
from astropy.time import Time
from astropy.coordinates import Angle
from astropy.utils import iers
from astropy.coordinates import SkyCoord, Distance, EarthLocation
from astropy import units
import erfa
from . import _utils
__all__ = [
"POL_STR2NUM_DICT",
"POL_NUM2STR_DICT",
"CONJ_POL_DICT",
"JONES_STR2NUM_DICT",
"JONES_NUM2STR_DICT",
"LatLonAlt_from_XYZ",
"XYZ_from_LatLonAlt",
"rotECEF_from_ECEF",
"ECEF_from_rotECEF",
"ENU_from_ECEF",
"ECEF_from_ENU",
"phase_uvw",
"unphase_uvw",
"uvcalibrate",
"apply_uvflag",
"get_lst_for_time",
"polstr2num",
"polnum2str",
"jstr2num",
"jnum2str",
"parse_polstr",
"parse_jpolstr",
"conj_pol",
"reorder_conj_pols",
"baseline_to_antnums",
"antnums_to_baseline",
"baseline_index_flip",
"get_baseline_redundancies",
"get_antenna_redundancies",
"collapse",
"mean_collapse",
"absmean_collapse",
"quadmean_collapse",
"or_collapse",
"and_collapse",
]
# fmt: off
# polarization constants
# maps polarization strings to polarization integers
POL_STR2NUM_DICT = {"pI": 1, "pQ": 2, "pU": 3, "pV": 4,
"I": 1, "Q": 2, "U": 3, "V": 4, # support straight stokes names
"rr": -1, "ll": -2, "rl": -3, "lr": -4,
"xx": -5, "yy": -6, "xy": -7, "yx": -8}
# maps polarization integers to polarization strings
POL_NUM2STR_DICT = {1: "pI", 2: "pQ", 3: "pU", 4: "pV",
-1: "rr", -2: "ll", -3: "rl", -4: "lr",
-5: "xx", -6: "yy", -7: "xy", -8: "yx"}
# maps how polarizations change when antennas are swapped
CONJ_POL_DICT = {"xx": "xx", "yy": "yy", "xy": "yx", "yx": "xy",
"ee": "ee", "nn": "nn", "en": "ne", "ne": "en",
"rr": "rr", "ll": "ll", "rl": "lr", "lr": "rl",
"I": "I", "Q": "Q", "U": "U", "V": "V",
"pI": "pI", "pQ": "pQ", "pU": "pU", "pV": "pV"}
# maps jones matrix element strings to jones integers
# Add entries that don't start with "J" to allow shorthand versions
JONES_STR2NUM_DICT = {"Jxx": -5, "Jyy": -6, "Jxy": -7, "Jyx": -8,
"xx": -5, "x": -5, "yy": -6, "y": -6, "xy": -7, "yx": -8,
"Jrr": -1, "Jll": -2, "Jrl": -3, "Jlr": -4,
"rr": -1, "r": -1, "ll": -2, "l": -2, "rl": -3, "lr": -4}
# maps jones integers to jones matrix element strings
JONES_NUM2STR_DICT = {-1: "Jrr", -2: "Jll", -3: "Jrl", -4: "Jlr",
-5: "Jxx", -6: "Jyy", -7: "Jxy", -8: "Jyx"}
# maps uvdata pols to input feed polarizations
POL_TO_FEED_DICT = {"xx": ["x", "x"], "yy": ["y", "y"],
"xy": ["x", "y"], "yx": ["y", "x"],
"ee": ["e", "e"], "nn": ["n", "n"],
"en": ["e", "n"], "ne": ["n", "e"],
"rr": ["r", "r"], "ll": ["l", "l"],
"rl": ["r", "l"], "lr": ["l", "r"]}
# fmt: on
def _get_iterable(x):
"""Return iterable version of input."""
if isinstance(x, Iterable):
return x
else:
return (x,)
def _fits_gethduaxis(hdu, axis):
"""
Make axis arrays for fits files.
Parameters
----------
hdu : astropy.io.fits HDU object
The HDU to make an axis array for.
axis : int
The axis number of interest (1-based).
Returns
-------
ndarray of float
Array of values for the specified axis.
"""
ax = str(axis)
axis_num = hdu.header["NAXIS" + ax]
val = hdu.header["CRVAL" + ax]
delta = hdu.header["CDELT" + ax]
index = hdu.header["CRPIX" + ax] - 1
return delta * (np.arange(axis_num) - index) + val
def _fits_indexhdus(hdulist):
"""
Get a dict of table names and HDU numbers from a FITS HDU list.
Parameters
----------
hdulist : list of astropy.io.fits HDU objects
List of HDUs to get names for
Returns
-------
dict
dictionary with table names as keys and HDU number as values.
"""
tablenames = {}
for i in range(len(hdulist)):
try:
tablenames[hdulist[i].header["EXTNAME"]] = i
except (KeyError):
continue
return tablenames
def _get_fits_extra_keywords(header, keywords_to_skip=None):
"""
Get any extra keywords and return as dict.
Parameters
----------
header : FITS header object
header object to get extra_keywords from.
keywords_to_skip : list of str
list of keywords to not include in extra keywords in addition to standard
FITS keywords.
Returns
-------
dict
dict of extra keywords.
"""
# List standard FITS header items that are still should not be included in
# extra_keywords
# These are the beginnings of FITS keywords to ignore, the actual keywords
# often include integers following these names (e.g. NAXIS1, CTYPE3)
std_fits_substrings = [
"HISTORY",
"SIMPLE",
"BITPIX",
"EXTEND",
"BLOCKED",
"GROUPS",
"PCOUNT",
"BSCALE",
"BZERO",
"NAXIS",
"PTYPE",
"PSCAL",
"PZERO",
"CTYPE",
"CRVAL",
"CRPIX",
"CDELT",
"CROTA",
"CUNIT",
]
if keywords_to_skip is not None:
std_fits_substrings.extend(keywords_to_skip)
extra_keywords = {}
# find all the other header items and keep them as extra_keywords
for key in header:
# check if key contains any of the standard FITS substrings
if np.any([sub in key for sub in std_fits_substrings]):
continue
if key == "COMMENT":
extra_keywords[key] = str(header.get(key))
elif key != "":
extra_keywords[key] = header.get(key)
return extra_keywords
def _check_history_version(history, version_string):
"""Check if version_string is present in history string."""
if version_string.replace(" ", "") in history.replace("\n", "").replace(" ", ""):
return True
else:
return False
def _check_histories(history1, history2):
"""Check if two histories are the same."""
if history1.replace("\n", "").replace(" ", "") == history2.replace(
"\n", ""
).replace(" ", ""):
return True
else:
return False
def _combine_history_addition(history1, history2):
"""
Find extra history to add to have minimal repeats.
Parameters
----------
history1 : str
First history.
history2 : str
Second history
Returns
-------
str
Extra history to add to first history.
"""
# first check if they're the same to avoid more complicated processing.
if _check_histories(history1, history2):
return None
hist2_words = history2.split(" ")
add_hist = ""
test_hist1 = " " + history1 + " "
for i, word in enumerate(hist2_words):
if " " + word + " " not in test_hist1:
add_hist += " " + word
keep_going = i + 1 < len(hist2_words)
while keep_going:
if (hist2_words[i + 1] == " ") or (
" " + hist2_words[i + 1] + " " not in test_hist1
):
add_hist += " " + hist2_words[i + 1]
del hist2_words[i + 1]
keep_going = i + 1 < len(hist2_words)
else:
keep_going = False
if add_hist == "":
add_hist = None
return add_hist
def baseline_to_antnums(baseline, Nants_telescope):
"""
Get the antenna numbers corresponding to a given baseline number.
Parameters
----------
baseline : int or array_like of ints
baseline number
Nants_telescope : int
number of antennas
Returns
-------
int or array_like of int
first antenna number(s)
int or array_like of int
second antenna number(s)
"""
if Nants_telescope > 2048:
raise Exception(
"error Nants={Nants}>2048 not supported".format(Nants=Nants_telescope)
)
return_array = isinstance(baseline, (np.ndarray, list, tuple))
ant1, ant2 = _utils.baseline_to_antnums(
np.ascontiguousarray(baseline, dtype=np.int64)
)
if return_array:
return ant1, ant2
else:
return ant1.item(0), ant2.item(0)
def antnums_to_baseline(ant1, ant2, Nants_telescope, attempt256=False):
"""
Get the baseline number corresponding to two given antenna numbers.
Parameters
----------
ant1 : int or array_like of int
first antenna number
ant2 : int or array_like of int
second antenna number
Nants_telescope : int
number of antennas
attempt256 : bool
Option to try to use the older 256 standard used in
many uvfits files (will use 2048 standard if there are more
than 256 antennas). Default is False.
Returns
-------
int or array of int
baseline number corresponding to the two antenna numbers.
"""
if Nants_telescope is not None and Nants_telescope > 2048:
raise Exception(
"cannot convert ant1, ant2 to a baseline index "
"with Nants={Nants}>2048.".format(Nants=Nants_telescope)
)
return_array = isinstance(ant1, (np.ndarray, list, tuple))
baseline = _utils.antnums_to_baseline(
np.ascontiguousarray(ant1, dtype=np.int64),
np.ascontiguousarray(ant2, dtype=np.int64),
attempt256=attempt256,
)
if return_array:
return baseline
else:
return baseline.item(0)
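# Example round trip (illustrative antenna numbers; the exact bit packing of the
# baseline integer is delegated to the compiled _utils helpers):
#   bl = antnums_to_baseline(1, 2, Nants_telescope=128)
#   baseline_to_antnums(bl, Nants_telescope=128)  # -> (1, 2)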
def baseline_index_flip(baseline, Nants_telescope):
"""Change baseline number to reverse antenna order."""
ant1, ant2 = baseline_to_antnums(baseline, Nants_telescope)
return antnums_to_baseline(ant2, ant1, Nants_telescope)
def _x_orientation_rep_dict(x_orientation):
"""Create replacement dict based on x_orientation."""
if x_orientation.lower() == "east" or x_orientation.lower() == "e":
return {"x": "e", "y": "n"}
elif x_orientation.lower() == "north" or x_orientation.lower() == "n":
return {"x": "n", "y": "e"}
else:
raise ValueError("x_orientation not recognized.")
def polstr2num(pol, x_orientation=None):
"""
Convert polarization str to number according to AIPS Memo 117.
Prefer 'pI', 'pQ', 'pU' and 'pV' to make it clear that these are pseudo-Stokes,
not true Stokes, but also supports 'I', 'Q', 'U', 'V'.
Parameters
----------
pol : str
polarization string
x_orientation : str, optional
Orientation of the physical dipole corresponding to what is
labelled as the x polarization ("east" or "north") to allow for
        converting from E/N strings. See corresponding parameter on UVData
for more details.
Returns
-------
int
Number corresponding to string
Raises
------
ValueError
If the pol string cannot be converted to a polarization number.
Warns
-----
UserWarning
If the x_orientation not recognized.
"""
dict_use = copy.deepcopy(POL_STR2NUM_DICT)
if x_orientation is not None:
try:
rep_dict = _x_orientation_rep_dict(x_orientation)
for key, value in POL_STR2NUM_DICT.items():
new_key = key.replace("x", rep_dict["x"]).replace("y", rep_dict["y"])
dict_use[new_key] = value
except ValueError:
warnings.warn("x_orientation not recognized.")
poldict = {k.lower(): v for k, v in dict_use.items()}
if isinstance(pol, str):
out = poldict[pol.lower()]
elif isinstance(pol, Iterable):
out = [poldict[key.lower()] for key in pol]
else:
raise ValueError(
"Polarization {p} cannot be converted to a polarization number.".format(
p=pol
)
)
return out
def polnum2str(num, x_orientation=None):
"""
Convert polarization number to str according to AIPS Memo 117.
Uses 'pI', 'pQ', 'pU' and 'pV' to make it clear that these are pseudo-Stokes,
not true Stokes
Parameters
----------
num : int
polarization number
x_orientation : str, optional
Orientation of the physical dipole corresponding to what is
labelled as the x polarization ("east" or "north") to convert to
        E/N strings. See corresponding parameter on UVData for more details.
Returns
-------
str
String corresponding to polarization number
Raises
------
ValueError
If the polarization number cannot be converted to a polarization string.
Warns
-----
UserWarning
If the x_orientation not recognized.
"""
dict_use = copy.deepcopy(POL_NUM2STR_DICT)
if x_orientation is not None:
try:
rep_dict = _x_orientation_rep_dict(x_orientation)
for key, value in POL_NUM2STR_DICT.items():
new_val = value.replace("x", rep_dict["x"]).replace("y", rep_dict["y"])
dict_use[key] = new_val
except ValueError:
warnings.warn("x_orientation not recognized.")
if isinstance(num, (int, np.int32, np.int64)):
out = dict_use[num]
elif isinstance(num, Iterable):
out = [dict_use[i] for i in num]
else:
raise ValueError(
"Polarization {p} cannot be converted to string.".format(p=num)
)
return out
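# Quick examples based on the dictionaries defined at the top of this module
# (illustrative, not executed at import):
#   polstr2num("xy")  -> -7        polstr2num("pI") -> 1
#   polnum2str(-5)    -> "xx"      polnum2str(-5, x_orientation="east") -> "ee"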
def jstr2num(jstr, x_orientation=None):
"""
Convert jones polarization str to number according to calfits memo.
Parameters
----------
jstr : str
antenna (jones) polarization string
x_orientation : str, optional
Orientation of the physical dipole corresponding to what is
labelled as the x polarization ("east" or "north") to allow for
        converting from E/N strings. See corresponding parameter on UVData
for more details.
Returns
-------
int
antenna (jones) polarization number corresponding to string
Raises
------
ValueError
If the jones string cannot be converted to a polarization number.
Warns
-----
UserWarning
If the x_orientation not recognized.
"""
dict_use = copy.deepcopy(JONES_STR2NUM_DICT)
if x_orientation is not None:
try:
rep_dict = _x_orientation_rep_dict(x_orientation)
for key, value in JONES_STR2NUM_DICT.items():
new_key = key.replace("x", rep_dict["x"]).replace("y", rep_dict["y"])
dict_use[new_key] = value
except ValueError:
warnings.warn("x_orientation not recognized.")
jdict = {k.lower(): v for k, v in dict_use.items()}
if isinstance(jstr, str):
out = jdict[jstr.lower()]
elif isinstance(jstr, Iterable):
out = [jdict[key.lower()] for key in jstr]
else:
raise ValueError(
"Jones polarization {j} cannot be converted to index.".format(j=jstr)
)
return out
def jnum2str(jnum, x_orientation=None):
"""
Convert jones polarization number to str according to calfits memo.
Parameters
----------
num : int
antenna (jones) polarization number
x_orientation : str, optional
Orientation of the physical dipole corresponding to what is
labelled as the x polarization ("east" or "north") to convert to
        E/N strings. See corresponding parameter on UVData for more details.
Returns
-------
str
antenna (jones) polarization string corresponding to number
Raises
------
ValueError
If the jones polarization number cannot be converted to a jones
polarization string.
Warns
-----
UserWarning
If the x_orientation not recognized.
"""
dict_use = copy.deepcopy(JONES_NUM2STR_DICT)
if x_orientation is not None:
try:
rep_dict = _x_orientation_rep_dict(x_orientation)
for key, value in JONES_NUM2STR_DICT.items():
new_val = value.replace("x", rep_dict["x"]).replace("y", rep_dict["y"])
dict_use[key] = new_val
except ValueError:
warnings.warn("x_orientation not recognized.")
if isinstance(jnum, (int, np.int32, np.int64)):
out = dict_use[jnum]
elif isinstance(jnum, Iterable):
out = [dict_use[i] for i in jnum]
else:
raise ValueError(
"Jones polarization {j} cannot be converted to string.".format(j=jnum)
)
return out
def parse_polstr(polstr, x_orientation=None):
"""
Parse a polarization string and return pyuvdata standard polarization string.
See utils.POL_STR2NUM_DICT for options.
Parameters
----------
polstr : str
polarization string
x_orientation : str, optional
Orientation of the physical dipole corresponding to what is
labelled as the x polarization ("east" or "north") to allow for
        converting from E/N strings. See corresponding parameter on UVData
for more details.
Returns
-------
str
AIPS Memo 117 standard string
Raises
------
ValueError
If the pol string cannot be converted to a polarization number.
Warns
-----
UserWarning
If the x_orientation not recognized.
"""
return polnum2str(
polstr2num(polstr, x_orientation=x_orientation), x_orientation=x_orientation
)
def parse_jpolstr(jpolstr, x_orientation=None):
"""
Parse a Jones polarization string and return pyuvdata standard jones string.
See utils.JONES_STR2NUM_DICT for options.
Parameters
----------
jpolstr : str
Jones polarization string
Returns
-------
str
calfits memo standard string
Raises
------
ValueError
If the jones string cannot be converted to a polarization number.
Warns
-----
UserWarning
If the x_orientation not recognized.
"""
return jnum2str(
jstr2num(jpolstr, x_orientation=x_orientation), x_orientation=x_orientation
)
def conj_pol(pol):
"""
Return the polarization for the conjugate baseline.
For example, (1, 2, 'xy') = conj(2, 1, 'yx').
The returned polarization is determined by assuming the antenna pair is
reversed in the data, and finding the correct polarization correlation
which will yield the requested baseline when conjugated. Note this means
changing the polarization for linear cross-pols, but keeping auto-pol
(e.g. xx) and Stokes the same.
Parameters
----------
pol : str or int
Polarization string or integer.
Returns
-------
cpol : str or int
Polarization as if antennas are swapped (type matches input)
"""
cpol_dict = {k.lower(): v for k, v in CONJ_POL_DICT.items()}
if isinstance(pol, str):
cpol = cpol_dict[pol.lower()]
elif isinstance(pol, Iterable):
cpol = [conj_pol(p) for p in pol]
elif isinstance(pol, (int, np.int32, np.int64)):
cpol = polstr2num(cpol_dict[polnum2str(pol).lower()])
else:
raise ValueError("Polarization not recognized, cannot be conjugated.")
return cpol
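# Illustrative examples following CONJ_POL_DICT above:
#   conj_pol("xy") -> "yx"   (cross-pols swap when the antenna order flips)
#   conj_pol("xx") -> "xx"   (auto-pols and pseudo-Stokes are unchanged)
#   conj_pol(-7)   -> -8     (integer codes follow the same mapping)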
def reorder_conj_pols(pols):
"""
Reorder multiple pols, swapping pols that are conjugates of one another.
For example ('xx', 'xy', 'yx', 'yy') -> ('xx', 'yx', 'xy', 'yy')
This is useful for the _key2inds function in the case where an antenna
pair is specified but the conjugate pair exists in the data. The conjugated
data should be returned in the order of the polarization axis, so after
conjugating the data, the pols need to be reordered.
For example, if a file contains antpair (0, 1) and pols 'xy' and 'yx', but
the user requests antpair (1, 0), they should get:
[(1x, 0y), (1y, 0x)] = [conj(0y, 1x), conj(0x, 1y)]
Parameters
----------
pols : array_like of str or int
Polarization array (strings or ints).
Returns
-------
conj_order : ndarray of int
Indices to reorder polarization array.
"""
if not isinstance(pols, Iterable):
raise ValueError("reorder_conj_pols must be given an array of polarizations.")
cpols = np.array([conj_pol(p) for p in pols]) # Array needed for np.where
conj_order = [np.where(cpols == p)[0][0] if p in cpols else -1 for p in pols]
if -1 in conj_order:
raise ValueError(
"Not all conjugate pols exist in the polarization array provided."
)
return conj_order
def LatLonAlt_from_XYZ(xyz, check_acceptability=True):
"""
Calculate lat/lon/alt from ECEF x,y,z.
Parameters
----------
xyz : ndarray of float
numpy array, shape (Npts, 3), with ECEF x,y,z coordinates.
check_acceptability : bool
Flag to check XYZ coordinates are reasonable.
Returns
-------
latitude : ndarray or float
latitude, numpy array (if Npts > 1) or value (if Npts = 1) in radians
longitude : ndarray or float
longitude, numpy array (if Npts > 1) or value (if Npts = 1) in radians
altitude : ndarray or float
altitude, numpy array (if Npts > 1) or value (if Npts = 1) in meters
"""
# convert to a numpy array
xyz = np.asarray(xyz)
if xyz.ndim > 1 and xyz.shape[1] != 3:
raise ValueError("The expected shape of ECEF xyz array is (Npts, 3).")
squeeze = xyz.ndim == 1
if squeeze:
xyz = xyz[np.newaxis, :]
xyz = np.ascontiguousarray(xyz.T, dtype=np.float64)
# checking for acceptable values
if check_acceptability:
norms = np.linalg.norm(xyz, axis=0)
if not all(np.logical_and(norms >= 6.35e6, norms <= 6.39e6)):
raise ValueError("xyz values should be ECEF x, y, z coordinates in meters")
# this helper function returns one 2D array because it is less overhead for cython
lla = _utils._lla_from_xyz(xyz)
if squeeze:
return lla[0, 0], lla[1, 0], lla[2, 0]
return lla[0], lla[1], lla[2]
def XYZ_from_LatLonAlt(latitude, longitude, altitude):
"""
Calculate ECEF x,y,z from lat/lon/alt values.
Parameters
----------
latitude : ndarray or float
latitude, numpy array (if Npts > 1) or value (if Npts = 1) in radians
longitude : ndarray or float
longitude, numpy array (if Npts > 1) or value (if Npts = 1) in radians
altitude : ndarray or float
altitude, numpy array (if Npts > 1) or value (if Npts = 1) in meters
Returns
-------
xyz : ndarray of float
numpy array, shape (Npts, 3), with ECEF x,y,z coordinates.
"""
latitude = np.ascontiguousarray(latitude, dtype=np.float64)
longitude = np.ascontiguousarray(longitude, dtype=np.float64)
altitude = np.ascontiguousarray(altitude, dtype=np.float64)
n_pts = latitude.size
if longitude.size != n_pts:
raise ValueError(
"latitude, longitude and altitude must all have the same length"
)
if altitude.size != n_pts:
raise ValueError(
"latitude, longitude and altitude must all have the same length"
)
xyz = _utils._xyz_from_latlonalt(latitude, longitude, altitude)
xyz = xyz.T
if n_pts == 1:
return xyz[0]
return xyz
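# Example round trip (illustrative values; latitude and longitude in radians,
# altitude in meters):
#   lat, lon, alt = np.deg2rad(-30.7), np.deg2rad(21.4), 1050.0
#   xyz = XYZ_from_LatLonAlt(lat, lon, alt)
#   LatLonAlt_from_XYZ(xyz)  # -> approximately (lat, lon, alt)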
def rotECEF_from_ECEF(xyz, longitude):
"""
Get rotated ECEF positions such that the x-axis goes through the longitude.
Miriad and uvfits expect antenna positions in this frame
(with longitude of the array center/telescope location)
Parameters
----------
xyz : ndarray of float
numpy array, shape (Npts, 3), with ECEF x,y,z coordinates.
longitude : float
longitude in radians to rotate coordinates to
(usually the array center/telescope location).
Returns
-------
ndarray of float
Rotated ECEF coordinates, shape (Npts, 3).
"""
angle = -1 * longitude
rot_matrix = np.array(
[
[np.cos(angle), -1 * np.sin(angle), 0],
[np.sin(angle), np.cos(angle), 0],
[0, 0, 1],
]
)
return rot_matrix.dot(xyz.T).T
def ECEF_from_rotECEF(xyz, longitude):
"""
Calculate ECEF from a rotated ECEF (Inverse of rotECEF_from_ECEF).
Parameters
----------
xyz : ndarray of float
numpy array, shape (Npts, 3), with rotated ECEF x,y,z coordinates.
longitude : float
longitude in radians giving the x direction of the rotated coordinates
(usually the array center/telescope location).
Returns
-------
ndarray of float
ECEF coordinates, shape (Npts, 3).
"""
angle = longitude
rot_matrix = np.array(
[
[np.cos(angle), -1 * np.sin(angle), 0],
[np.sin(angle), np.cos(angle), 0],
[0, 0, 1],
]
)
return rot_matrix.dot(xyz.T).T
def ENU_from_ECEF(xyz, latitude, longitude, altitude):
"""
Calculate local ENU (east, north, up) coordinates from ECEF coordinates.
Parameters
----------
xyz : ndarray of float
numpy array, shape (Npts, 3), with ECEF x,y,z coordinates.
latitude : float
Latitude of center of ENU coordinates in radians.
longitude : float
Longitude of center of ENU coordinates in radians.
altitude : float
        Altitude of center of ENU coordinates in meters.
Returns
-------
ndarray of float
numpy array, shape (Npts, 3), with local ENU coordinates
"""
xyz = np.asarray(xyz)
if xyz.ndim > 1 and xyz.shape[1] != 3:
raise ValueError("The expected shape of ECEF xyz array is (Npts, 3).")
squeeze = False
if xyz.ndim == 1:
squeeze = True
xyz = xyz[np.newaxis, :]
xyz = np.ascontiguousarray(xyz.T, dtype=np.float64)
# check that these are sensible ECEF values -- their magnitudes need to be
# on the order of Earth's radius
ecef_magnitudes = np.linalg.norm(xyz, axis=0)
sensible_radius_range = (6.35e6, 6.39e6)
if np.any(ecef_magnitudes <= sensible_radius_range[0]) or np.any(
ecef_magnitudes >= sensible_radius_range[1]
):
raise ValueError(
"ECEF vector magnitudes must be on the order of the radius of the earth"
)
# the cython utility expects (3, Npts) for faster manipulation
# transpose after we get the array back to match the expected shape
enu = _utils._ENU_from_ECEF(
xyz,
np.ascontiguousarray(latitude, dtype=np.float64),
np.ascontiguousarray(longitude, dtype=np.float64),
np.ascontiguousarray(altitude, dtype=np.float64),
)
enu = enu.T
if squeeze:
enu = np.squeeze(enu)
return enu
def ECEF_from_ENU(enu, latitude, longitude, altitude):
"""
Calculate ECEF coordinates from local ENU (east, north, up) coordinates.
Parameters
----------
enu : ndarray of float
numpy array, shape (Npts, 3), with local ENU coordinates.
latitude : float
Latitude of center of ENU coordinates in radians.
longitude : float
Longitude of center of ENU coordinates in radians.
altitude : float
Altitude of center of ENU coordinates in meters.
Returns
-------
xyz : ndarray of float
numpy array, shape (Npts, 3), with ECEF x,y,z coordinates.
"""
enu = np.asarray(enu)
if enu.ndim > 1 and enu.shape[1] != 3:
raise ValueError("The expected shape of the ENU array is (Npts, 3).")
squeeze = False
if enu.ndim == 1:
squeeze = True
enu = enu[np.newaxis, :]
enu = np.ascontiguousarray(enu.T, dtype=np.float64)
# the cython utility expects (3, Npts) for faster manipulation
# transpose after we get the array back to match the expected shape
xyz = _utils._ECEF_from_ENU(
enu,
np.ascontiguousarray(latitude, dtype=np.float64),
np.ascontiguousarray(longitude, dtype=np.float64),
np.ascontiguousarray(altitude, dtype=np.float64),
)
xyz = xyz.T
if squeeze:
xyz = np.squeeze(xyz)
return xyz
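# Example (illustrative sketch; the reference-location values are made up):
#     import numpy as np
#     lat, lon, alt = np.radians(-30.72), np.radians(21.43), 1051.7
#     enu = np.array([[10.0, 20.0, 0.5]])                    # east, north, up (m)
#     ecef = ECEF_from_ENU(enu, lat, lon, alt)               # shape (1, 3)
#     np.allclose(ENU_from_ECEF(ecef, lat, lon, alt), enu)   # True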
def phase_uvw(ra, dec, initial_uvw):
"""
Calculate phased uvws/positions from unphased ones in an icrs or gcrs frame.
This code expects input uvws or positions relative to the telescope
location in the same frame that ra/dec are in (e.g. icrs or gcrs) and
returns phased ones in the same frame.
Note that this code is nearly identical to ENU_from_ECEF, except that it
uses an arbitrary phasing center rather than a coordinate center.
Parameters
----------
ra : float
Right ascension of phase center.
dec : float
Declination of phase center.
initial_uvw : ndarray of float
Unphased uvws or positions relative to the array center,
shape (Nlocs, 3).
Returns
-------
uvw : ndarray of float
uvw array in the same frame as initial_uvws, ra and dec.
"""
if initial_uvw.ndim == 1:
initial_uvw = initial_uvw[np.newaxis, :]
return _utils._phase_uvw(
np.float64(ra),
np.float64(dec),
np.ascontiguousarray(initial_uvw.T, dtype=np.float64),
).T
def unphase_uvw(ra, dec, uvw):
"""
Calculate unphased uvws/positions from phased ones in an icrs or gcrs frame.
This code expects phased uvws or positions in the same frame that ra/dec
are in (e.g. icrs or gcrs) and returns unphased ones in the same frame.
Parameters
----------
ra : float
Right ascension of phase center.
dec : float
Declination of phase center.
uvw : ndarray of float
Phased uvws or positions relative to the array center,
shape (Nlocs, 3).
Returns
-------
unphased_uvws : ndarray of float
Unphased uvws or positions relative to the array center,
shape (Nlocs, 3).
"""
if uvw.ndim == 1:
uvw = uvw[np.newaxis, :]
return _utils._unphase_uvw(
np.float64(ra), np.float64(dec), np.ascontiguousarray(uvw.T, dtype=np.float64),
).T
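# Example (illustrative): phase_uvw and unphase_uvw apply inverse rotations, so
# a round trip should return the original positions to numerical precision.
#     import numpy as np
#     ra, dec = 1.0, -0.5                            # radians
#     uvw0 = np.array([[100.0, -50.0, 3.0]])         # shape (Nlocs, 3)
#     np.allclose(unphase_uvw(ra, dec, phase_uvw(ra, dec, uvw0)), uvw0)  # True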
def polar2_to_cart3(lon_array, lat_array):
"""
Convert 2D polar coordinates into 3D cartesian coordinates.
This is a simple routine for converting a set of spherical angular coordinates
into a 3D cartesian vectors, where the x-direction is set by the position (0, 0).
Parameters
----------
lon_array : float or ndarray
Longitude coordinates, which increase in the counter-clockwise direction.
Units of radians. Can either be a float or ndarray -- if the latter, must have
the same shape as lat_array.
lat_array : float or ndarray
Latitude coordinates, where 0 falls on the equator of the sphere. Units of
radians. Can either be a float or ndarray -- if the latter, must have the same
shape as lon_array.
Returns
-------
xyz_array : ndarray of float
Cartesian coordinates of the given longitude and latitude on a unit sphere.
Shape is (3, coord_shape), where coord_shape is the shape of lon_array and
lat_array if they were provided as type ndarray, otherwise (3,).
"""
# Check to make sure that we are not playing with mixed types
if type(lon_array) is not type(lat_array):
raise ValueError(
"lon_array and lat_array must either both be floats or ndarrays."
)
if isinstance(lon_array, np.ndarray):
if lon_array.shape != lat_array.shape:
raise ValueError("lon_array and lat_array must have the same shape.")
# Once we know that lon_array and lat_array are of the same shape,
# time to create our 3D set of vectors!
xyz_array = np.array(
[
np.cos(lon_array) * np.cos(lat_array),
np.sin(lon_array) * np.cos(lat_array),
np.sin(lat_array),
],
dtype=float,
)
return xyz_array
def cart3_to_polar2(xyz_array):
"""
Convert 3D cartesian coordinates into 2D polar coordinates.
This is a simple routine for converting a set of 3D cartesian vectors into
spherical coordinates, where the position (0, 0) lies along the x-direction.
Parameters
----------
xyz_array : ndarray of float
Cartesian coordinates, need not be of unit vector length. Shape is
(3, coord_shape).
Returns
-------
lon_array : ndarray of float
Longitude coordinates, which increase in the counter-clockwise direction.
Units of radians, shape is (coord_shape,).
lat_array : ndarray of float
Latitude coordinates, where 0 falls on the equator of the sphere. Units of
radians, shape is (coord_shape,).
"""
if not isinstance(xyz_array, np.ndarray):
raise ValueError("xyz_array must be an ndarray.")
if xyz_array.ndim == 0:
raise ValueError("xyz_array must have ndim > 0")
if xyz_array.shape[0] != 3:
raise ValueError("xyz_array must be length 3 across the zeroth axis.")
# The longitude coord is relatively easy to calculate, just take the X and Y
# components and find the arctangent of the pair.
lon_array = np.mod(np.arctan2(xyz_array[1], xyz_array[0]), 2.0 * np.pi, dtype=float)
# If we _knew_ that xyz_array was always of length 1, then this call could be a much
# simpler one to arcsin. But to make this generic, we'll use the length of the XY
# component along with arctan2.
lat_array = np.arctan2(
xyz_array[2], np.sqrt((xyz_array[0:2] ** 2.0).sum(axis=0)), dtype=float
)
# Return the two arrays
return lon_array, lat_array
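# Example (illustrative): converting spherical angles to unit vectors and back.
#     import numpy as np
#     lon = np.array([0.0, np.pi / 2])
#     lat = np.array([0.0, np.pi / 4])
#     xyz = polar2_to_cart3(lon, lat)                # shape (3, 2), unit vectors
#     new_lon, new_lat = cart3_to_polar2(xyz)
#     np.allclose(new_lon, lon) and np.allclose(new_lat, lat)  # True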
def _rotate_matmul_wrapper(xyz_array, rot_matrix, n_rot):
"""
Apply a rotation matrix to a series of vectors.
This is a simple convenience function which wraps numpy's matmul function for use
with various vector rotation functions in this module. This code could, in
principle, be replaced by a cythonized piece of code, although the matmul function
is _pretty_ well optimized already. This function is not meant to be called by
users, but is instead used by multiple higher-level utility functions (namely those
that perform rotations).
Parameters
----------
xyz_array : ndarray of floats
Array of vectors to be rotated. When nrot > 1, shape may be (n_rot, 3, n_vec)
or (1, 3, n_vec); the latter is useful when performing multiple rotations
on a fixed set of vectors. If nrot = 1, shape may be (1, 3, n_vec), (3, n_vec),
or (3,).
rot_matrix : ndarray of floats
Series of rotation matrices to be applied to the stack of vectors. Must be
of shape (n_rot, 3, 3)
n_rot : int
Number of individual rotation matrices to be applied.
Returns
-------
rotated_xyz : ndarray of floats
Array of vectors that have been rotated, of shape (n_rot, 3, n_vectors,).
"""
# Do a quick check to make sure that things look sensible
if rot_matrix.shape != (n_rot, 3, 3):
raise ValueError(
"rot_matrix must be of shape (n_rot, 3, 3), where n_rot=%i." % n_rot
)
if (xyz_array.ndim == 3) and (
(xyz_array.shape[0] not in [1, n_rot]) or (xyz_array.shape[-2] != 3)
):
raise ValueError("Misshaped xyz_array - expected shape (n_rot, 3, n_vectors).")
if (xyz_array.ndim < 3) and (xyz_array.shape[0] != 3):
raise ValueError("Misshaped xyz_array - expected shape (3, n_vectors) or (3,).")
rotated_xyz = np.matmul(rot_matrix, xyz_array)
return rotated_xyz
def _rotate_one_axis(xyz_array, rot_amount, rot_axis):
"""
Rotate an array of 3D positions around a single axis (x, y, or z).
This function performs a basic rotation of 3D vectors about one of the principal
axes -- the x-axis, the y-axis, or the z-axis.
Note that the rotations here obey the right-hand rule -- that is to say, from the
perspective of the positive side of the axis of rotation, a positive rotation will
cause points on the plane intersecting this axis to move in a counter-clockwise
fashion.
Parameters
----------
xyz_array : ndarray of float
Set of 3-dimensional vectors to be rotated, in typical right-handed cartesian
order, e.g. (x, y, z). Shape is (Nrot, 3, Nvectors).
rot_amount : float or ndarray of float
Amount (in radians) to rotate the given set of coordinates. Can either be a
single float (or ndarray of shape (1,)) if rotating all vectors by the same
amount, otherwise expected to be shape (Nrot,).
rot_axis : int
Axis around which the rotation is applied. 0 is the x-axis, 1 is the y-axis,
and 2 is the z-axis.
Returns
-------
rotated_xyz : ndarray of float
Set of rotated 3-dimensional vectors, shape (Nrot, 3, Nvector).
"""
# If rot_amount is None or all zeros, then this is just one big old no-op.
if (rot_amount is None) or np.all(rot_amount == 0.0):
if np.ndim(xyz_array) == 1:
return deepcopy(xyz_array[np.newaxis, :, np.newaxis])
elif np.ndim(xyz_array) == 2:
return deepcopy(xyz_array[np.newaxis, :, :])
else:
return deepcopy(xyz_array)
# Check and see how big of a rotation matrix we need
n_rot = 1 if (not isinstance(rot_amount, np.ndarray)) else (rot_amount.shape[0])
n_vec = xyz_array.shape[-1]
# The promotion of values to float64 is to suppress numerical precision issues,
# since the matrix math can - in limited circumstances - introduce precision errors
# of order 10x the limiting numerical precision of the float. For a float32/single,
# that's a part in 1e6 (~arcsec-level errors), but for a float64 it translates to
# a part in 1e15.
rot_matrix = np.zeros((3, 3, n_rot), dtype=np.float64)
# Figure out which pieces of the matrix we need to update
temp_jdx = (rot_axis + 1) % 3
temp_idx = (rot_axis + 2) % 3
# Fill in the rotation matrices accordingly
rot_matrix[rot_axis, rot_axis] = 1
rot_matrix[temp_idx, temp_idx] = np.cos(rot_amount, dtype=np.float64)
rot_matrix[temp_jdx, temp_jdx] = rot_matrix[temp_idx, temp_idx]
rot_matrix[temp_idx, temp_jdx] = np.sin(rot_amount, dtype=np.float64)
rot_matrix[temp_jdx, temp_idx] = -rot_matrix[temp_idx, temp_jdx]
# The rot matrix was shape (3, 3, n_rot) to help speed up filling in the elements
# of each matrix, but now we want to flip it into its proper shape of (n_rot, 3, 3)
rot_matrix = np.transpose(rot_matrix, axes=[2, 0, 1])
if (n_rot == 1) and (n_vec == 1) and (xyz_array.ndim == 3):
# This is a special case where we allow the rotation axis to "expand" along
# the 0th axis of the rot_amount arrays. For xyz_array, if n_vectors = 1
# but n_rot !=1, then it's a lot faster (by about 10x) to "switch it up" and
# swap the n_vector and n_rot axes, and then swap them back once everything
# else is done.
return np.transpose(
_rotate_matmul_wrapper(
np.transpose(xyz_array, axes=[2, 1, 0]), rot_matrix, n_rot,
),
axes=[2, 1, 0],
)
else:
return _rotate_matmul_wrapper(xyz_array, rot_matrix, n_rot)
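# Example (illustrative): rotating the x unit vector by +90 degrees about the
# z-axis should, by the right-hand rule, give the y unit vector.
#     import numpy as np
#     vec = np.array([[1.0], [0.0], [0.0]])          # shape (3, n_vectors=1)
#     rot = _rotate_one_axis(vec, np.pi / 2, 2)      # shape (1, 3, 1)
#     np.allclose(rot[0, :, 0], [0.0, 1.0, 0.0])     # True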
def _rotate_two_axis(xyz_array, rot_amount1, rot_amount2, rot_axis1, rot_axis2):
"""
Rotate an array of 3D positions sequentially around a pair of axes (x, y, or z).
This function performs a sequential pair of basic rotations of 3D vectors about
the principal axes -- the x-axis, the y-axis, or the z-axis.
Note that the rotations here obey the right-hand rule -- that is to say, from the
perspective of the positive side of the axis of rotation, a positive rotation will
cause points on the plane intersecting this axis to move in a counter-clockwise
fashion.
Parameters
----------
xyz_array : ndarray of float
Set of 3-dimensional vectors to be rotated, in typical right-handed cartesian
order, e.g. (x, y, z). Shape is (Nrot, 3, Nvectors).
rot_amount1 : float or ndarray of float
Amount (in radians) of rotation to apply during the first rotation of the
sequence, to the given set of coordinates. Can either be a single float (or
ndarray of shape (1,)) if rotating all vectors by the same amount, otherwise
expected to be shape (Nrot,).
rot_amount2 : float or ndarray of float
Amount (in radians) of rotation to apply during the second rotation of the
sequence, to the given set of coordinates. Can either be a single float (or
ndarray of shape (1,)) if rotating all vectors by the same amount, otherwise
expected to be shape (Nrot,).
rot_axis1 : int
Axis around which the first rotation is applied. 0 is the x-axis, 1 is the
y-axis, and 2 is the z-axis.
rot_axis2 : int
Axis around which the second rotation is applied. 0 is the x-axis, 1 is the
y-axis, and 2 is the z-axis.
Returns
-------
rotated_xyz : ndarray of float
Set of rotated 3-dimensional vectors, shape (Nrot, 3, Nvector).
"""
# Capture some special cases upfront, where we can save ourselves a bit of work
no_rot1 = (rot_amount1 is None) or np.all(rot_amount1 == 0.0)
no_rot2 = (rot_amount2 is None) or np.all(rot_amount2 == 0.0)
if no_rot1 and no_rot2:
# If rot_amount is None, then this is just one big old no-op.
return deepcopy(xyz_array)
elif no_rot1:
# If rot_amount1 is None, then ignore it and just work w/ the 2nd rotation
return _rotate_one_axis(xyz_array, rot_amount2, rot_axis2)
elif no_rot2:
# If rot_amount2 is None, then ignore it and just work w/ the 1st rotation
return _rotate_one_axis(xyz_array, rot_amount1, rot_axis1)
elif rot_axis1 == rot_axis2:
# Capture the case where someone wants to do a sequence of rotations on the same
# axis. Also known as just rotating a single axis.
return _rotate_one_axis(xyz_array, rot_amount1 + rot_amount2, rot_axis1)
# Figure out how many individual rotation matrices we need, accounting for the
# fact that these can either be floats or ndarrays.
n_rot = max(
rot_amount1.shape[0] if isinstance(rot_amount1, np.ndarray) else 1,
rot_amount2.shape[0] if isinstance(rot_amount2, np.ndarray) else 1,
)
n_vec = xyz_array.shape[-1]
# The promotion of values to float64 is to suppress numerical precision issues,
# since the matrix math can - in limited circumstances - introduce precision errors
# of order 10x the limiting numerical precision of the float. For a float32/single,
# that's a part in 1e6 (~arcsec-level errors), but for a float64 it translates to
# a part in 1e15.
rot_matrix = np.empty((3, 3, n_rot), dtype=np.float64)
# There are two permutations per pair of axes -- when the pair is right-hand
# oriented vs left-hand oriented. Check here which one it is. For example,
# rotating first on the x-axis, second on the y-axis is considered a
# "right-handed" pair, whereas z-axis first, then y-axis would be considered
# a "left-handed" pair.
lhd_order = np.mod(rot_axis2 - rot_axis1, 3) != 1
temp_idx = [
np.mod(rot_axis1 - lhd_order, 3),
np.mod(rot_axis1 + 1 - lhd_order, 3),
np.mod(rot_axis1 + 2 - lhd_order, 3),
]
# We're using lots of sin and cos calculations -- doing them once upfront saves
# quite a bit of time by eliminating redundant calculations
sin_lo = np.sin(rot_amount2 if lhd_order else rot_amount1, dtype=np.float64)
cos_lo = np.cos(rot_amount2 if lhd_order else rot_amount1, dtype=np.float64)
sin_hi = np.sin(rot_amount1 if lhd_order else rot_amount2, dtype=np.float64)
cos_hi = np.cos(rot_amount1 if lhd_order else rot_amount2, dtype=np.float64)
# Take care of the diagonal terms first, since they aren't actually affected by the
# order of rotational operations
rot_matrix[temp_idx[0], temp_idx[0]] = cos_hi
rot_matrix[temp_idx[1], temp_idx[1]] = cos_lo
rot_matrix[temp_idx[2], temp_idx[2]] = cos_lo * cos_hi
# Now time for the off-diagonal terms, as a set of 3 pairs. The rotation matrix
# for a left-hand oriented pair of rotation axes (e.g., x-rot, then y-rot) is just
# a transpose of the right-hand orientation of the same pair (e.g., y-rot, then
# x-rot).
rot_matrix[temp_idx[0 + lhd_order], temp_idx[1 - lhd_order]] = sin_lo * sin_hi
rot_matrix[temp_idx[0 - lhd_order], temp_idx[lhd_order - 1]] = (
cos_lo * sin_hi * ((-1.0) ** lhd_order)
)
rot_matrix[temp_idx[1 - lhd_order], temp_idx[0 + lhd_order]] = 0.0
rot_matrix[temp_idx[1 + lhd_order], temp_idx[2 - lhd_order]] = sin_lo * (
(-1.0) ** (1 + lhd_order)
)
rot_matrix[temp_idx[lhd_order - 1], temp_idx[0 - lhd_order]] = sin_hi * (
(-1.0) ** (1 + lhd_order)
)
rot_matrix[temp_idx[2 - lhd_order], temp_idx[1 + lhd_order]] = (
sin_lo * cos_hi * ((-1.0) ** (lhd_order))
)
# The rot matrix was shape (3, 3, n_rot) to help speed up filling in the elements
# of each matrix, but now we want to flip it into its proper shape of (n_rot, 3, 3)
rot_matrix = np.transpose(rot_matrix, axes=[2, 0, 1])
if (n_rot == 1) and (n_vec == 1) and (xyz_array.ndim == 3):
# This is a special case where we allow the rotation axis to "expand" along
# the 0th axis of the rot_amount arrays. For xyz_array, if n_vectors = 1
# but n_rot !=1, then it's a lot faster (by about 10x) to "switch it up" and
# swap the n_vector and n_rot axes, and then swap them back once everything
# else is done.
return np.transpose(
_rotate_matmul_wrapper(
np.transpose(xyz_array, axes=[2, 1, 0]), rot_matrix, n_rot,
),
axes=[2, 1, 0],
)
else:
return _rotate_matmul_wrapper(xyz_array, rot_matrix, n_rot)
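# Example (illustrative): a two-axis rotation should agree with applying the two
# corresponding single-axis rotations in sequence (first rot_axis1, then rot_axis2).
#     import numpy as np
#     vec = np.array([[1.0], [0.0], [0.0]])                       # shape (3, 1)
#     a, b = 0.3, -1.1
#     combined = _rotate_two_axis(vec, a, b, 2, 1)
#     sequential = _rotate_one_axis(_rotate_one_axis(vec, a, 2), b, 1)
#     np.allclose(combined, sequential)                            # True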
def calc_uvw(
app_ra=None,
app_dec=None,
frame_pa=None,
lst_array=None,
use_ant_pos=True,
uvw_array=None,
antenna_positions=None,
antenna_numbers=None,
ant_1_array=None,
ant_2_array=None,
old_app_ra=None,
old_app_dec=None,
old_frame_pa=None,
telescope_lat=None,
telescope_lon=None,
from_enu=False,
to_enu=False,
):
"""
Calculate an array of baseline coordinates, in either uvw or ENU.
This routine is meant as a convenience function for producing baseline coordinates
based under a few different circumstances:
1) Calculating ENU coordinates using antenna positions
2) Calculating uvw coordinates at a given sky position using antenna positions
3) Converting from ENU coordinates to uvw coordinates
4) Converting from uvw coordinates to ENU coordinates
5) Converting from uvw coordinates at one sky position to another sky position
Different conversion pathways have different parameters that are required.
Parameters
----------
app_ra : ndarray of float
Apparent RA of the target phase center, required if calculating baseline
coordinates in uvw-space (vs ENU-space). Shape is (Nblts,), units are
radians.
app_dec : ndarray of float
Apparent declination of the target phase center, required if calculating
baseline coordinates in uvw-space (vs ENU-space). Shape is (Nblts,),
units are radians.
frame_pa : ndarray of float
Position angle between the great circle of declination in the apparent frame
versus that of the reference frame, used for making sure that "North" on
the derived maps points towards a particular celestial pole (not just the
topocentric one). Required if not deriving baseline coordinates from antenna
positions, from_enu=False, and a value for old_frame_pa is given. Shape is
(Nblts,), units are radians.
old_app_ra : ndarray of float
Apparent RA of the previous phase center, required if not deriving baseline
coordinates from antenna positions and from_enu=False. Shape is (Nblts,),
units are radians.
old_app_dec : ndarray of float
Apparent declination of the previous phase center, required if not deriving
baseline coordinates from antenna positions and from_enu=False. Shape is
(Nblts,), units are radians.
old_frame_pa : ndarray of float
Frame position angle of the previous phase center, required if not deriving
baseline coordinates from antenna positions, from_enu=False, and a value
for frame_pa is supplied. Shape is (Nblts,), units are radians.
lst_array : ndarray of float
Local apparent sidereal time, required if deriving baseline coordinates from
antenna positions, or converting to/from ENU coordinates. Shape is (Nblts,).
use_ant_pos : bool
Switch to determine whether to derive uvw values from the antenna positions
(if set to True), or to use the previously calculated uvw coordinates to derive
the new baseline vectors (if set to False). Default is True.
uvw_array : ndarray of float
Array of previous baseline coordinates (in either uvw or ENU), required if
not deriving new coordinates from antenna positions. Shape is (Nblts, 3).
antenna_positions : ndarray of float
List of antenna positions relative to array center in ECEF coordinates,
required if not providing `uvw_array`. Shape is (Nants, 3).
antenna_numbers: ndarray of int
List of antenna numbers, ordered in the same way as `antenna_positions` (e.g.,
`antenna_numbers[0]` should give the number of the antenna that resides at the ECEF
position given by `antenna_positions[0]`). Shape is (Nants,), required if not
providing `uvw_array`. Contains all unique entries of the joint set of
`ant_1_array` and `ant_2_array`.
ant_1_array : ndarray of int
Antenna number of the first antenna in the baseline pair, for all baselines
Required if not providing `uvw_array`, shape is (Nblts,).
ant_2_array : ndarray of int
Antenna number of the second antenna in the baseline pair, for all baselines
Required if not providing `uvw_array`, shape is (Nblts,).
telescope_lat : float
Latitude of the phase center, units radians, required if deriving baseline
coordinates from antenna positions, or converting to/from ENU coordinates.
telescope_lon : float
Longitude of the phase center, units radians, required if deriving baseline
coordinates from antenna positions, or converting to/from ENU coordinates.
from_enu : boolean
Set to True if uvw_array is expressed in ENU coordinates. Default is False.
to_enu : boolean
Set to True if you would like the output expressed in ENU coordinates. Default
is False.
Returns
-------
new_coords : ndarray of float64
Set of baseline coordinates, shape (Nblts, 3).
"""
if to_enu:
if lst_array is None and not use_ant_pos:
raise ValueError(
"Must include lst_array to calculate baselines in ENU coordinates!"
)
if telescope_lat is None:
raise ValueError(
"Must include telescope_lat to calculate baselines "
"in ENU coordinates!"
)
else:
if ((app_ra is None) or (app_dec is None)) and frame_pa is None:
raise ValueError(
"Must include both app_ra and app_dec, or frame_pa to calculate "
"baselines in uvw coordinates!"
)
if use_ant_pos:
# Assume at this point we are dealing w/ antenna positions
if antenna_positions is None:
raise ValueError("Must include antenna_positions if use_ant_pos=True.")
if (ant_1_array is None) or (ant_2_array is None) or (antenna_numbers is None):
raise ValueError(
"Must include ant_1_array, ant_2_array, and antenna_numbers "
"setting use_ant_pos=True."
)
if lst_array is None and not to_enu:
raise ValueError(
"Must include lst_array if use_ant_pos=True and not calculating "
"baselines in ENU coordinates."
)
if telescope_lon is None:
raise ValueError("Must include telescope_lon if use_ant_pos=True.")
ant_dict = {ant_num: idx for idx, ant_num in enumerate(antenna_numbers)}
ant_1_index = np.array([ant_dict[idx] for idx in ant_1_array], dtype=int)
ant_2_index = np.array([ant_dict[idx] for idx in ant_2_array], dtype=int)
N_ants = antenna_positions.shape[0]
# Use the app_ra, app_dec, and lst_array arrays to figure out how many unique
# rotations are actually needed. If the ratio of Nblts to number of unique
# entries is favorable, we can just rotate the antenna positions and save
# ourselves a bit of work.
if to_enu:
# If to_enu, skip all this -- there's only one unique ha + dec combo
unique_mask = np.zeros(len(ant_1_index), dtype=np.bool_)
unique_mask[0] = True
else:
unique_mask = np.append(
True,
(
((lst_array[:-1] - app_ra[:-1]) != (lst_array[1:] - app_ra[1:]))
| (app_dec[:-1] != app_dec[1:])
),
)
# GHA -> Hour Angle as measured at Greenwich (because antenna coords are
# centered such that x-plane intersects the meridian at longitude 0).
if to_enu:
# Unphased coordinates appear to be stored in ENU coordinates -- that's
# equivalent to calculating uvw's based on zenith. We can use that to our
# advantage and spoof the gha and dec based on telescope lon and lat
unique_gha = np.zeros(1) - telescope_lon
unique_dec = np.zeros(1) + telescope_lat
unique_pa = None
else:
unique_gha = (lst_array[unique_mask] - app_ra[unique_mask]) - telescope_lon
unique_dec = app_dec[unique_mask]
unique_pa = 0.0 if frame_pa is None else frame_pa[unique_mask]
# Transpose the ant vectors so that they are in the proper shape
ant_vectors = np.transpose(antenna_positions)[np.newaxis, :, :]
# Apply rotations, and then reorganize the ndarray so that you can access
# individual antenna vectors quickly.
ant_rot_vectors = np.reshape(
np.transpose(
_rotate_one_axis(
_rotate_two_axis(ant_vectors, unique_gha, unique_dec, 2, 1),
unique_pa,
0,
),
axes=[0, 2, 1],
),
(-1, 3),
)
unique_mask[0] = False
unique_map = np.cumsum(unique_mask) * N_ants
new_coords = (
ant_rot_vectors[unique_map + ant_2_index]
- ant_rot_vectors[unique_map + ant_1_index]
)
else:
if uvw_array is None:
raise ValueError("Must include uvw_array if use_ant_pos=False.")
if from_enu:
if to_enu:
# Well this was pointless... returning your uvws unharmed
return uvw_array
# Unphased coordinates appear to be stored in ENU coordinates -- that's
# equivalent to calculating uvw's based on zenith. We can use that to our
# advantage and spoof old_app_ra and old_app_dec based on lst_array and
# telescope_lat
if telescope_lat is None:
raise ValueError(
"Must include telescope_lat if moving between "
'ENU (i.e., "unphased") and uvw coordinates!'
)
if lst_array is None:
raise ValueError(
'Must include lst_array if moving between ENU (i.e., "unphased") '
"and uvw coordinates!"
)
else:
if (old_frame_pa is None) and not (frame_pa is None or to_enu):
raise ValueError(
"Must include old_frame_pa values if data are phased and "
"applying new position angle values (frame_pa)."
)
if ((old_app_ra is None) and not (app_ra is None or to_enu)) or (
(old_app_dec is None) and not (app_dec is None or to_enu)
):
raise ValueError(
"Must include old_app_ra and old_app_dec values when data are "
"already phased and phasing to a new position."
)
# For this operation, all we need is the delta-ha coverage, which _should_ be
# entirely encapsulated by the change in RA.
if (app_ra is None) and (old_app_ra is None):
gha_delta_array = 0.0
else:
gha_delta_array = (lst_array if from_enu else old_app_ra) - (
lst_array if to_enu else app_ra
)
# Notice below there's an axis re-orientation here, to go from uvw -> XYZ,
# where X is pointing in the direction of the source. This is mostly here
# for convenience and code legibility -- a slightly different pair of
# rotations would give you the same result w/o needing to cycle the axes.
# Up front, we want to trap the corner-case where the sky position you are
# phasing up to hasn't changed, just the position angle (i.e., which way is
# up on the map). This is a much easier transform to handle.
if np.all(gha_delta_array == 0.0) and np.all(old_app_dec == app_dec):
new_coords = _rotate_one_axis(
uvw_array[:, [2, 0, 1], np.newaxis],
frame_pa - (0.0 if old_frame_pa is None else old_frame_pa),
0,
)[:, :, 0]
else:
new_coords = _rotate_two_axis(
_rotate_two_axis( # Yo dawg, I heard you like rotation matrices...
uvw_array[:, [2, 0, 1], np.newaxis],
0.0 if (from_enu or old_frame_pa is None) else (-old_frame_pa),
(-telescope_lat) if from_enu else (-old_app_dec),
0,
1,
),
gha_delta_array,
telescope_lat if to_enu else app_dec,
2,
1,
)
# One final rotation applied here, to compensate for the fact that we want
# the Dec-axis of our image (Fourier dual to the v-axis) to be aligned with
# the chosen frame, if we are not in ENU coordinates
if not to_enu:
new_coords = _rotate_one_axis(new_coords, frame_pa, 0)
# Finally drop the now-vestigial last axis of the array
new_coords = new_coords[:, :, 0]
# There's one last task to do, which is to re-align the axes from projected
# XYZ -> uvw, where X (which points towards the source) falls on the w axis,
# and Y and Z fall on the u and v axes, respectively.
return new_coords[:, [1, 2, 0]]
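# Example (illustrative sketch; all values below are made up): computing the ENU
# baseline vector for a single antenna pair directly from antenna positions.
#     import numpy as np
#     enu_baselines = calc_uvw(
#         use_ant_pos=True,
#         to_enu=True,
#         antenna_positions=np.array([[0.0, 0.0, 0.0], [10.0, 0.0, 0.0]]),
#         antenna_numbers=np.array([0, 1]),
#         ant_1_array=np.array([0]),
#         ant_2_array=np.array([1]),
#         telescope_lat=np.radians(-30.7),
#         telescope_lon=np.radians(21.4),
#     )
#     # enu_baselines has shape (Nblts, 3) -- here (1, 3).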
def transform_sidereal_coords(
lon,
lat,
in_coord_frame,
out_coord_frame,
in_coord_epoch=None,
out_coord_epoch=None,
time_array=None,
):
"""
Transform a given set of coordinates from one sidereal coordinate frame to another.
Uses astropy to convert coordinates from one sidereal frame into another.
This function will support transforms from several frames, including GCRS,
FK5 (i.e., J2000), FK4 (i.e., B1950), Galactic, Supergalactic, CIRS, HCRS, and
a few others (basically anything that doesn't require knowing the observers
location on Earth/other celestial body).
Parameters
----------
lon_coord : float or ndarray of floats
Longitudinal coordinate to be transformed, typically expressed as the right
ascension, in units of radians. Can either be a float, or an ndarray of
floats with shape (Ncoords,). Must agree with lat_coord.
lat_coord : float or ndarray of floats
Latitudinal coordinate to be transformed, typically expressed as the
declination, in units of radians. Can either be a float, or an ndarray of
floats with shape (Ncoords,). Must agree with lon_coord.
in_coord_frame : string
Reference frame for the provided coordinates. Expected to match a list of
those supported within the astropy SkyCoord object. An incomplete list includes
'gcrs', 'fk4', 'fk5', 'galactic', 'supergalactic', 'cirs', and 'hcrs'.
out_coord_frame : string
Reference frame to output coordinates in. Expected to match a list of
those supported within the astropy SkyCoord object. An incomplete list includes
'gcrs', 'fk4', 'fk5', 'galactic', 'supergalactic', 'cirs', and 'hcrs'.
in_coord_epoch : float
Epoch for the input coordinate frame. Optional parameter, only required
when using either the FK4 (B1950) or FK5 (J2000) coordinate systems. Units are
in fractional years.
out_coord_epoch : float
Epoch for the output coordinate frame. Optional parameter, only required
when using either the FK4 (B1950) or FK5 (J2000) coordinate systems. Units are
in fractional years.
time_array : float or ndarray of floats
Julian date(s) to which the coordinates correspond, only used in frames
with annual motion terms (e.g., aberration in GCRS). Can either be a float,
or an ndarray of floats with shape (Ntimes,), assuming that either lat_coord
and lon_coord are floats, or that Ntimes == Ncoords.
Returns
-------
new_lon : float or ndarray of floats
Longitudinal coordinates, in units of radians. Output will be an ndarray
if any inputs were, with shape (Ncoords,) or (Ntimes,), depending on inputs.
new_lat : float or ndarray of floats
Latitudinal coordinates, in units of radians. Output will be an ndarray
if any inputs were, with shape (Ncoords,) or (Ntimes,), depending on inputs.
"""
lon_coord = lon * units.rad
lat_coord = lat * units.rad
# Check here to make sure that lat_coord and lon_coord are the same length,
# either 1 or len(time_array)
if lat_coord.shape != lon_coord.shape:
raise ValueError("lon and lat must be the same shape.")
if lon_coord.ndim == 0:
lon_coord.shape += (1,)
lat_coord.shape += (1,)
# Check to make sure that we have a properly formatted epoch for our in-bound
# coordinate frame
in_epoch = None
if isinstance(in_coord_epoch, str) or isinstance(in_coord_epoch, Time):
# If its a string or a Time object, we don't need to do anything more
in_epoch = Time(in_coord_epoch)
elif in_coord_epoch is not None:
if in_coord_frame.lower() in ["fk4", "fk4noeterms"]:
in_epoch = Time(in_coord_epoch, format="byear")
else:
in_epoch = Time(in_coord_epoch, format="jyear")
# Now do the same for the outbound frame
out_epoch = None
if isinstance(out_coord_epoch, str) or isinstance(out_coord_epoch, Time):
# If its a string or a Time object, we don't need to do anything more
out_epoch = Time(out_coord_epoch)
elif out_coord_epoch is not None:
if out_coord_frame.lower() in ["fk4", "fk4noeterms"]:
out_epoch = Time(out_coord_epoch, format="byear")
else:
out_epoch = Time(out_coord_epoch, format="jyear")
# Make sure that time array matched up with what we expect. Thanks to astropy
# weirdness, time_array has to be the same length as lat/lon coords
rep_time = False
rep_crds = False
if time_array is None:
time_obj_array = None
else:
if isinstance(time_array, Time):
time_obj_array = time_array
else:
time_obj_array = Time(time_array, format="jd", scale="utc")
if (time_obj_array.size != 1) and (lon_coord.size != 1):
if time_obj_array.shape != lon_coord.shape:
raise ValueError(
"Shape of time_array must match that of "
"lat_coord/lon_coord if len(time_array) > 1."
)
else:
rep_crds = (time_obj_array.size != 1) and (lon_coord.size == 1)
rep_time = (time_obj_array.size == 1) and (lon_coord.size != 1)
if rep_crds:
lon_coord = np.repeat(lon_coord, len(time_array))
lat_coord = np.repeat(lat_coord, len(time_array))
if rep_time:
time_obj_array = Time(
np.repeat(time_obj_array.jd, len(lon_coord)), format="jd", scale="utc",
)
coord_object = SkyCoord(
lon_coord,
lat_coord,
frame=in_coord_frame,
equinox=in_epoch,
obstime=time_obj_array,
)
# Easiest, most general way to transform to the new frame is to create a dummy
# SkyCoord with all the attributes needed -- note that we particularly need this
# in order to use a non-standard equinox/epoch
new_coord = coord_object.transform_to(
SkyCoord(0, 0, unit="rad", frame=out_coord_frame, equinox=out_epoch)
)
return new_coord.spherical.lon.rad, new_coord.spherical.lat.rad
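# Example (illustrative): converting an ICRS position toward the Galactic center
# into Galactic coordinates.
#     import numpy as np
#     gal_lon, gal_lat = transform_sidereal_coords(
#         np.radians(266.405), np.radians(-28.936), "icrs", "galactic",
#     )
#     # gal_lon and gal_lat come back in radians and should both be close to
#     # zero (up to wrapping of the longitude into [0, 2*pi)).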
def transform_icrs_to_app(
time_array,
ra,
dec,
telescope_loc,
epoch=2000.0,
pm_ra=None,
pm_dec=None,
vrad=None,
dist=None,
astrometry_library="erfa",
):
"""
Transform a set of coordinates in ICRS to topocentric/apparent coordinates.
This utility uses one of three libraries (astropy, NOVAS, or ERFA) to calculate
the apparent (i.e., topocentric) coordinates of a source at a given time and
location, given a set of coordinates expressed in the ICRS frame. These coordinates
are most typically used for defining the phase center of the array (i.e., calculating
baseline vectors).
As of astropy v4.2, the agreement between the three libraries is consistent down to
the level of better than 1 mas, with the values produced by astropy and pyERFA
consistent to better than 10 µas (this is not surprising, given that astropy uses
pyERFA under the hood for astrometry). ERFA is the default as it outputs
coordinates natively in the apparent frame (whereas NOVAS and astropy do not), as
well as the fact that of the three libraries, it produces results the fastest.
Parameters
----------
time_array : float or array-like of float
Julian dates to calculate coordinate positions for. Can either be a single
float, or an array-like of shape (Ntimes,).
ra : float or array-like of float
ICRS RA of the celestial target, expressed in units of radians. Can either
be a single float or array of shape (Ntimes,), although this must be consistent
with other parameters (with the exception of telescope location parameters).
dec : float or array-like of float
ICRS Dec of the celestial target, expressed in units of radians. Can either
be a single float or array of shape (Ntimes,), although this must be consistent
with other parameters (with the exception of telescope location parameters).
telescope_loc : array-like of floats or EarthLocation
ITRF latitude, longitude, and altitude (rel to sea-level) of the phase center
of the array. Can either be provided as an astropy EarthLocation, or a tuple
of shape (3,) containing (in order) the latitude, longitude, and altitude,
in units of radians, radians, and meters, respectively.
epoch : int or float or str or Time object
Epoch of the coordinate data supplied, only used when supplying proper motion
values. If supplying a number, it will be assumed to be in Julian years. Default
is J2000.0.
pm_ra : float or array-like of float
Proper motion in RA of the source, expressed in units of milliarcsec / year.
Proper motion values are applied relative to the J2000 (i.e., RA/Dec ICRS
values should be set to their expected values when the epoch is 2000.0).
Can either be a single float or array of shape (Ntimes,), although this must
be consistent with other parameters (namely ra_coord and dec_coord). Note that
units are in dRA/dt, not cos(Dec)*dRA/dt. Not required.
pm_dec : float or array-like of float
Proper motion in Dec of the source, expressed in units of milliarcsec / year.
Proper motion values are applied relative to the J2000 (i.e., RA/Dec ICRS
values should be set to their expected values when the epoch is 2000.0).
Can either be a single float or array of shape (Ntimes,), although this must
be consistent with other parameters (namely ra_coord and dec_coord). Not
required.
vrad : float or array-like of float
Radial velocity of the source, expressed in units of km / sec. Can either be
a single float or array of shape (Ntimes,), although this must be consistent
with other parameters (namely ra_coord and dec_coord). Not required.
dist : float or array-like of float
Distance of the source, expressed in units of parsecs. Can either be a single
float or array of shape (Ntimes,), although this must be consistent with other
parameters (namely ra_coord and dec_coord). Not required.
astrometry_library : str
Library used for running the coordinate conversions. Allowed options are
'erfa' (which uses pyERFA), 'novas' (which uses the python-novas library),
and 'astropy' (which uses the astropy utilities). Default is erfa.
Returns
-------
app_ra : ndarray of floats
Apparent right ascension coordinates, in units of radians, of shape (Ntimes,).
app_dec : ndarray of floats
Apparent declination coordinates, in units of radians, of shape (Ntimes,).
"""
# Make sure that the library requested is actually permitted
if astrometry_library not in ["erfa", "novas", "astropy"]:
raise ValueError(
"Requested coordinate transformation library is not supported, please "
"select either 'erfa', 'novas', or 'astropy' for astrometry_library."
)
ra_coord = ra * units.rad
dec_coord = dec * units.rad
# Check here to make sure that ra_coord and dec_coord are the same length,
# either 1 or len(time_array)
multi_coord = ra_coord.size != 1
if ra_coord.shape != dec_coord.shape:
raise ValueError("ra and dec must be the same shape.")
pm_ra_coord = None if pm_ra is None else pm_ra * (units.mas / units.yr)
pm_dec_coord = None if pm_dec is None else pm_dec * (units.mas / units.yr)
d_coord = (
None if (dist is None or np.all(dist == 0.0)) else Distance(dist * units.pc)
)
v_coord = None if vrad is None else vrad * (units.km / units.s)
opt_list = [pm_ra_coord, pm_dec_coord, d_coord, v_coord]
opt_names = ["pm_ra", "pm_dec", "dist", "vrad"]
# Check the optional inputs, make sure that they're sensible
for item, name in zip(opt_list, opt_names):
if item is not None:
if ra_coord.shape != item.shape:
raise ValueError("%s must be the same shape as ra and dec." % name)
if isinstance(telescope_loc, EarthLocation):
site_loc = telescope_loc
else:
site_loc = EarthLocation.from_geodetic(
telescope_loc[1] * (180.0 / np.pi),
telescope_loc[0] * (180.0 / np.pi),
height=telescope_loc[2],
)
# Useful for both astropy and novas methods, the latter of which gives easy
# access to the IERS data that we want.
if isinstance(time_array, Time):
time_obj_array = time_array
else:
time_obj_array = Time(time_array, format="jd", scale="utc")
if time_obj_array.size != 1:
if (time_obj_array.shape != ra_coord.shape) and multi_coord:
raise ValueError(
"time_array must be of either of length 1 (single "
"float) or same length as ra and dec."
)
elif time_obj_array.ndim == 0:
# Make the array at least 1-dimensional so we don't run into indexing
# issues later.
time_obj_array = Time([time_obj_array])
# Check to make sure that we have a properly formatted epoch for our in-bound
# coordinate frame
coord_epoch = None
if isinstance(epoch, str) or isinstance(epoch, Time):
# If its a string or a Time object, we don't need to do anything more
coord_epoch = Time(epoch)
elif epoch is not None:
coord_epoch = Time(epoch, format="jyear")
# Note if time_array is a single element
multi_time = time_obj_array.size != 1
# Get IERS data, which is needed for NOVAS and ERFA
polar_motion_data = iers.earth_orientation_table.get()
pm_x_array, pm_y_array = polar_motion_data.pm_xy(time_obj_array)
delta_x_array, delta_y_array = polar_motion_data.dcip_xy(time_obj_array)
pm_x_array = pm_x_array.to_value("arcsec")
pm_y_array = pm_y_array.to_value("arcsec")
delta_x_array = delta_x_array.to_value("marcsec")
delta_y_array = delta_y_array.to_value("marcsec")
# Catch the case where we don't have CIP delta values yet (they don't typically have
# predictive values like the polar motion does)
delta_x_array[np.isnan(delta_x_array)] = 0.0
delta_y_array[np.isnan(delta_y_array)] = 0.0
# If the source was instantiated w/ floats, it'll be a 0-dim object, which will
# throw errors if we try to treat it as an array. Reshape to a 1D array of len 1
# so that all the calls can be uniform
if ra_coord.ndim == 0:
ra_coord.shape += (1,)
dec_coord.shape += (1,)
if pm_ra_coord is not None:
pm_ra_coord.shape += (1,)
pm_dec_coord.shape += (1,)
if d_coord is not None:
d_coord.shape += (1,)
if v_coord is not None:
v_coord.shape += (1,)
# If there is an epoch and a proper motion, apply that motion now
if astrometry_library == "astropy":
# Astropy doesn't have (oddly enough) a way of getting at the apparent RA/Dec
directly, but we can cheat this by going to AltAz, and then converting back
# to apparent RA/Dec using the telescope lat and LAST.
if (epoch is not None) and (pm_ra is not None) and (pm_dec is not None):
# astropy is a bit weird in how it handles proper motion, so rather than
# fight with it to do it all in one step, we separate it into two: first
# apply proper motion to ICRS, then transform to topocentric.
sky_coord = SkyCoord(
ra=ra_coord,
dec=dec_coord,
pm_ra_cosdec=pm_ra_coord * np.cos(dec_coord),
pm_dec=pm_dec_coord,
frame="icrs",
)
sky_coord = sky_coord.apply_space_motion(dt=(time_obj_array - coord_epoch))
ra_coord = sky_coord.ra
dec_coord = sky_coord.dec
if d_coord is not None:
d_coord = d_coord.repeat(ra_coord.size)
if v_coord is not None:
v_coord = v_coord.repeat(ra_coord.size)
sky_coord = SkyCoord(
ra=ra_coord,
dec=dec_coord,
distance=d_coord,
radial_velocity=v_coord,
frame="icrs",
)
azel_data = sky_coord.transform_to(
SkyCoord(
np.zeros_like(time_obj_array) * units.rad,
np.zeros_like(time_obj_array) * units.rad,
location=site_loc,
obstime=time_obj_array,
frame="altaz",
)
)
app_ha, app_dec = erfa.ae2hd(
azel_data.az.rad, azel_data.alt.rad, site_loc.lat.rad,
)
app_ra = np.mod(
time_obj_array.sidereal_time("apparent", longitude=site_loc.lon).rad
- app_ha,
2 * np.pi,
)
elif astrometry_library == "novas":
# Import the NOVAS library only if it's needed/available.
try:
from novas import compat as novas
from novas.compat import eph_manager
import novas_de405 # noqa
except ImportError as e: # pragma: no cover
raise ImportError(
"novas and/or novas_de405 are not installed but is required for "
"NOVAS functionality"
) from e
# Call is needed to load high-precision ephem data in NOVAS
jd_start, jd_end, number = eph_manager.ephem_open()
# Define the obs location, which is needed to calculate the diurnal aberration term
# and polar wobble corrections
site_loc = novas.make_on_surface(
site_loc.lat.deg, # latitude in deg
site_loc.lon.deg, # Longitude in deg
site_loc.height.to_value("m"), # Height in meters
0.0, # Temperature, set to 0 for now (no atm refrac)
0.0, # Pressure, set to 0 for now (no atm refrac)
)
# NOVAS wants things in terrestrial time and UT1
tt_time_array = time_obj_array.tt.jd
ut1_time_array = time_obj_array.ut1.jd
gast_array = time_obj_array.sidereal_time("apparent", "greenwich").rad
if np.any(tt_time_array < jd_start) or np.any(tt_time_array > jd_end):
raise ValueError(
"No current support for JPL ephems outside of 1700 - 2300 AD. "
"Check back later (or possibly earlier)..."
)
app_ra = np.zeros(tt_time_array.shape)
#!/usr/bin/env python
# vim: set fileencoding=utf-8 :
# <NAME> <<EMAIL>>
#
# Copyright (C) 2011-2014 Idiap Research Institute, Martigny, Switzerland
"""Tests the I-Vector trainer
"""
import numpy
import numpy.linalg
import numpy.random
import nose.tools
from bob.learn.em import GMMMachine, GMMStats, IVectorMachine, IVectorTrainer
import bob.learn.em
### Test class inspired by an implementation of Chris McCool
### <NAME> (<EMAIL>)
class IVectorTrainerPy():
"""An IVector extractor"""
def __init__(self, convergence_threshold=0.001, max_iterations=10,
compute_likelihood=False, sigma_update=False, variance_floor=1e-5):
self.m_convergence_threshold = convergence_threshold
self.m_max_iterations = max_iterations
self.m_compute_likelihood = compute_likelihood
self.m_sigma_update = sigma_update
self.m_variance_floor = variance_floor
def initialize(self, machine, data):
ubm = machine.ubm
self.m_dim_c = ubm.shape[0]
self.m_dim_d = ubm.shape[1]
self.m_dim_t = machine.t.shape[1]
self.m_meansupervector = ubm.mean_supervector
t = numpy.random.randn(self.m_dim_c*self.m_dim_d, self.m_dim_t)
machine.t = t
machine.sigma = machine.ubm.variance_supervector
def e_step(self, machine, data):
n_samples = len(data)
self.m_acc_Nij_Sigma_wij2 = {}
self.m_acc_Fnorm_Sigma_wij = {}
self.m_acc_Snorm = numpy.zeros(shape=(self.m_dim_c*self.m_dim_d,), dtype=numpy.float64)
self.m_N = numpy.zeros(shape=(self.m_dim_c,), dtype=numpy.float64)
for c in range(self.m_dim_c):
self.m_acc_Nij_Sigma_wij2[c] = numpy.zeros(shape=(self.m_dim_t,self.m_dim_t), dtype=numpy.float64)
self.m_acc_Fnorm_Sigma_wij[c] = numpy.zeros(shape=(self.m_dim_d,self.m_dim_t), dtype=numpy.float64)
for n in range(n_samples):
Nij = data[n].n
Fij = data[n].sum_px
Sij = data[n].sum_pxx
# Estimate latent variables
TtSigmaInv_Fnorm = machine.__compute_TtSigmaInvFnorm__(data[n])
I_TtSigmaInvNT = machine.__compute_Id_TtSigmaInvT__(data[n])
Fnorm = numpy.zeros(shape=(self.m_dim_c*self.m_dim_d,), dtype=numpy.float64)
Snorm = numpy.zeros(shape=(self.m_dim_c*self.m_dim_d,), dtype=numpy.float64)
# Compute normalized statistics
for c in range(self.m_dim_c):
start = c*self.m_dim_d
end = (c+1)*self.m_dim_d
Fc = Fij[c,:]
Sc = Sij[c,:]
mc = self.m_meansupervector[start:end]
Fc_mc = Fc * mc
Nc_mc_mcT = Nij[c] * mc * mc
Fnorm[start:end] = Fc - Nij[c] * mc
Snorm[start:end] = Sc - (2 * Fc_mc) + Nc_mc_mcT
# Latent variables
I_TtSigmaInvNT_inv = numpy.linalg.inv(I_TtSigmaInvNT)
E_w_ij = numpy.dot(I_TtSigmaInvNT_inv, TtSigmaInv_Fnorm)
E_w_ij2 = I_TtSigmaInvNT_inv + numpy.outer(E_w_ij, E_w_ij)
# Do the accumulation for each component
self.m_acc_Snorm = self.m_acc_Snorm + Snorm # (dim_c*dim_d)
for c in range(self.m_dim_c):
start = c*self.m_dim_d
end = (c+1)*self.m_dim_d
current_Fnorm = Fnorm[start:end] # (dim_d)
self.m_acc_Nij_Sigma_wij2[c] = self.m_acc_Nij_Sigma_wij2[c] + Nij[c] * E_w_ij2 # (dim_t, dim_t)
self.m_acc_Fnorm_Sigma_wij[c] = self.m_acc_Fnorm_Sigma_wij[c] + numpy.outer(current_Fnorm, E_w_ij) # (dim_d, dim_t)
self.m_N[c] = self.m_N[c] + Nij[c]
def m_step(self, machine, data):
A = self.m_acc_Nij_Sigma_wij2
T = numpy.zeros(shape=(self.m_dim_c*self.m_dim_d,self.m_dim_t), dtype=numpy.float64)
Told = machine.t
if self.m_sigma_update:
sigma = numpy.zeros(shape=self.m_acc_Snorm.shape, dtype=numpy.float64)
for c in range(self.m_dim_c):
start = c*self.m_dim_d
end = (c+1)*self.m_dim_d
# T update
A = self.m_acc_Nij_Sigma_wij2[c].transpose()
B = self.m_acc_Fnorm_Sigma_wij[c].transpose()
if numpy.array_equal(A, numpy.zeros(A.shape)):
X = numpy.zeros(shape=(self.m_dim_t,self.m_dim_d), dtype=numpy.float64)
else:
X = numpy.linalg.solve(A, B)
import numpy as np
from numpy import sin, cos, einsum
from scipy.special import j0, j1, jn_zeros
from rssympim.constants import constants as consts
# Commented out until MPI implementation is ready
from mpi4py import MPI as mpi
# # #
#
# A Note On Indexing Conventions
#
# # #
#
# This class relies heavily on the einsum numpy function.
#
# * the z mode is always indexed as 'z'
# * the r mode is always indexed as 'r'
# * the particle number is always indexed as 'p'
#
# This improves readability and makes debugging easier.
#
# # #
class field_data(object):
"""
Class that stores the field data and can compute particle-field interactions
Parameters
----------
L: float (cm)
Length (z) of domain
R: float (cm)
Radius of domain
n_modes_z: int
Number of longitudinal modes to be computed
n_modes_r: int
Number of radial modes to be computed
"""
def __init__(self, L, R, n_modes_z, n_modes_r):
self.n_modes_r = n_modes_r
self.n_modes_z = n_modes_z
self.domain_L = L
self.domain_R = R
self.kr = jn_zeros(0, self.n_modes_r)/R
self.kz = np.pi * np.arange(1, self.n_modes_z + 1) / L
self.oneOkr = 1./self.kr
self.oneOkz = 1./self.kz
# Needed for the normalization
zero_zeros = jn_zeros(0, self.n_modes_r)
self.omega_coords = np.zeros((self.n_modes_z,self.n_modes_r, 2))
self.dc_coords = np.zeros((self.n_modes_z, self.n_modes_r, 2))
self.mode_mass = np.ones((self.n_modes_z, self.n_modes_r))
self.omega = np.zeros((self.n_modes_z,self.n_modes_r))
for idx_r in range(0,self.n_modes_r):
for idx_z in range(0,self.n_modes_z):
self.omega[idx_z,idx_r]= \
np.sqrt(self.kr[idx_r]**2 +self.kz[idx_z]**2)
# Integral of cos^2(k_z z)*J_0(k_r r)^2 over the domain volume
self.mode_mass[idx_z, idx_r] = R*R*L*(j1(zero_zeros[idx_r]))**2/(4.*consts.c)
self.omegaOtwokz = 0.5 * np.einsum('z, zr -> zr', 1. / self.kz, self.omega)
self.omegaOtwokr = 0.5 * np.einsum('r, zr -> zr', 1. / self.kr, self.omega)
self.oneOomega = 1./self.omega
self.kzOomega = einsum('z, zr -> zr', self.kz, self.oneOomega)
self.krOomega = einsum('r, zr -> zr', self.kr, self.oneOomega)
self.delta_P_dc = np.zeros((self.n_modes_z,self.n_modes_r))
self.delta_P_omega = np.zeros((self.n_modes_z,self.n_modes_r))
# Particles are tent functions with widths the narrowest of the
# k-vectors for each direction. Default for now is to have the
# particle widths be half the shortest wavelength, which should
# resolve the wave physics reasonably well.
self.ptcl_width_z = .25*2.*np.pi/max(self.kz)
self.ptcl_width_r = .25*2.*np.pi/max(self.kr)
self.shape_function_z = np.exp(-0.5*(self.kz*self.ptcl_width_z)**2)
# With the conducting boundaries, unphysically large fields can
# get trapped on the conducting surfaces. To squelch this, we put
# a tanh-function envelope on the fields to make the fields go to
# zero quickly but smoothly on the boundaries.
self.tanh_width = np.max(self.kz)
self.z_mean = 0.5*self.domain_L
# Create the mpi communicator
self.comm = mpi.COMM_WORLD
def convolved_j0(self, _x, delta_x):
"""
Use Romberg integration to approximate the convolution integral
with j0 to fourth order in the particle size
Parameters
----------
_x: float (cm)
a 2darray of macroparticle phases k_x*x, of shape (n_modes,particles.np)
delta_x: float(cm)
macroparticle width - 1darray of reals
Returns
-------
A 2darray of reals, of shape (n_modes, particles.np)
"""
return (
j0(_x - 0.5 * delta_x) +
4.*j0(_x) +
j0(_x + 0.5 * delta_x)
) / 6.
def convolved_j1(self, _x, delta_x):
"""
Use Romberg integration to approximate the convolution integral
with j1 to fourth order in the particle size
Parameters
----------
_x: float (cm)
a 2darray of macroparticle phases k_x*x, of shape (n_modes,particles.np)
delta_x: float(cm)
macroparticle width - 1darray of reals
Returns
-------
A 2darray of reals, of shape (n_modes, particles.np)
"""
return (
j1(_x-0.5*delta_x) +
4.*j1(_x) +
j1(_x+0.5*delta_x)
)/6.
def int_convolved_j1(self, _x, delta_x):
"""
Analytic integral of the convolved_j1 Romberg approximation
with j1 to fourth order in the particle size
Parameters
----------
_x: float (cm)
a 2darray of macroparticle phases k_x*x, of shape (n_modes,particles.np)
delta_x: float(cm)
macroparticle width - 1darray of reals
Returns
-------
A 2darray of reals, of shape (n_modes, particles.np)
"""
return -(
j0(_x - 0.5*delta_x) +
4.*j0(_x) +
j0(_x + 0.5*delta_x)
)/6.
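# Example (illustrative; assumes mpi4py is importable, since the constructor
# creates an MPI communicator): for a vanishing particle width the convolved
# Bessel functions reduce to the bare j0/j1 evaluated at the same phase.
#     import numpy as np
#     from scipy.special import j0
#     fld = field_data(L=10.0, R=5.0, n_modes_z=4, n_modes_r=4)
#     x = np.linspace(0.0, 3.0, 16)
#     np.allclose(fld.convolved_j0(x, 0.0), j0(x))  # True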
def compute_S_r_kick(self, r, z, qOc, **kwargs):
"""
Evaluates the radial kicks for a set of particles
Parameters
----------
r: float (cm)
a 1darray of macroparticle coordinates, of shape (particles.np,)
z: float(cm)
a 1darray of macroparticle coordinates, of shape (particles.np,)
qOc: float(cm)
a 1darray of macroparticle charge:mass ratios, of shape (particles.np,)
Returns
-------
A length-4 list of 1darrays, each of shape (particles.np,)
"""
# Calculate the convolution quantities we need
kr_cross_r = einsum('r, p -> rp', self.kr, r)
# z does not change between S_r and S_r-inverse, so only need to compute once
if kwargs['inverse'] == False:
self.kz_cross_z = einsum('z, p -> zp', self.kz, z)
self.convolved_sin = einsum('zp, z -> zp', sin(self.kz_cross_z), self.shape_function_z)
self.d_convolved_sin_dz = einsum('zp, z -> zp', cos(self.kz_cross_z), self.kz*self.shape_function_z)
# same here
self.delta_r = np.ones(np.size(r))
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import torch
import torch.nn as nn
import numpy as np
import json
from json import encoder
import random
import string
import time
import os
import sys
import misc.utils as utils
from misc.rewards import init_scorer, cal_cider,get_scores_separate
import pandas as pd
import numpy as np
import random
bad_endings = ['a','an','the','in','for','at','of','with','before','after','on','upon','near','to','is','are','am']
bad_endings += ['the']
def count_bad(sen):
sen = sen.split(' ')
if sen[-1] in bad_endings:
return 1
else:
return 0
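# Example (illustrative):
#     count_bad('a man standing in front of')         # 1 ("of" is a bad ending)
#     count_bad('a man standing in front of a tree')  # 0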
def language_eval(dataset, preds, model_id, split):
import sys
sys.path.append("coco-caption")
if 'coco' in dataset:
annFile = 'coco-caption/annotations/captions_val2014.json'
elif 'flickr30k' in dataset or 'f30k' in dataset:
annFile = 'coco-caption/f30k_captions4eval.json'
elif 'person' in dataset:
annFile='coco-caption/person_captions4eval.json'
from pycocotools.coco import COCO
from pycocoevalcap.eval import COCOEvalCap
# encoder.FLOAT_REPR = lambda o: format(o, '.3f')
if not os.path.isdir('eval_results'):
os.mkdir('eval_results')
cache_path = os.path.join('eval_results/', '.cache_'+ model_id + '_' + split + '.json')
best_cider=0
#gdindex=[0,1,2,3,4]
gdindex=[-1]
cider_list =[]
for i in gdindex:
annFile='coco-caption/person_captions4eval_'+str(i)+'.json'
print(annFile)
coco = COCO(annFile)
valids = coco.getImgIds()
# filter results to only those in MSCOCO validation set (will be about a third)
preds_filt = [p for p in preds if p['image_id'] in valids]
print('using %d/%d predictions' % (len(preds_filt), len(preds)))
json.dump(preds_filt, open(cache_path, 'w')) # serialize to temporary json file. Sigh, COCO API...
cocoRes = coco.loadRes(cache_path)
cocoEval = COCOEvalCap(coco, cocoRes)
cocoEval.params['image_id'] = cocoRes.getImgIds()
cocoEval.evaluate()
cider_list.append(cocoEval.eval['CIDEr'])
# create output dictionary
if cocoEval.eval['CIDEr']>=best_cider:
best_cider = cocoEval.eval['CIDEr']
out = {}
for metric, score in cocoEval.eval.items():
out[metric] = score
imgToEval = cocoEval.imgToEval
# collect SPICE_sub_score
#for k in imgToEval.values()[0]['SPICE'].keys():
# if k != 'All':
# out['SPICE_'+k] = np.array([v['SPICE'][k]['f'] for v in imgToEval.values()])
# out['SPICE_'+k] = (out['SPICE_'+k][out['SPICE_'+k]==out['SPICE_'+k]]).mean()
for p in preds_filt:
image_id, caption = p['image_id'], p['caption']
imgToEval[image_id]['caption'] = caption
#update predictions
for i in range(len(preds)):
if preds[i]['image_id'] in imgToEval:
preds[i]['eval'] = imgToEval[preds[i]['image_id']]
out['bad_count_rate'] = sum([count_bad(_['caption']) for _ in preds_filt]) / float(len(preds_filt))
else:
continue
outfile_path = os.path.join('eval_results/', model_id + '_' + split + '.json')
with open(outfile_path, 'w') as outfile:
json.dump({'overall': out, 'imgToEval': imgToEval}, outfile)
cider_list=np.array(cider_list)
print("min:",np.min(cider_list)," max:",np.max(cider_list)," mean:",np.mean(cider_list)," std:",np.std(cider_list))
return out
def eval_split(model, crit, loader, eval_kwargs={}):
verbose = eval_kwargs.get('verbose', True)
verbose_beam = eval_kwargs.get('verbose_beam', 1)
verbose_loss = eval_kwargs.get('verbose_loss', 1)
num_images = eval_kwargs.get('num_images', eval_kwargs.get('val_images_use', -1))
split = eval_kwargs.get('split', 'val')
lang_eval = eval_kwargs.get('language_eval', 0)
rank_eval = eval_kwargs.get('rank_eval', 0)
dataset = eval_kwargs.get('dataset', 'person')
beam_size = eval_kwargs.get('beam_size', 1)
remove_bad_endings = eval_kwargs.get('remove_bad_endings', 0)
os.environ["REMOVE_BAD_ENDINGS"] = str(remove_bad_endings) # Use this nasty way to make other code clean since it's a global configuration
use_joint=eval_kwargs.get('use_joint', 0)
init_scorer('person-'+split+'-words')
# Make sure in the evaluation mode
model.eval()
loader.reset_iterator(split)
n = 0
loss = 0
losses={}
loss_sum = 0
loss_evals = 1e-8
predictions = []
visual={"image_id":[],"personality":[],"generation":[],"gd":[],"densecap":[],"Bleu1_gen/cap":[],"Bleu2_gen/cap":[],"Bleu3_gen/cap":[],"Bleu4_gen/cap":[],"Cider_gen/cap":[],"Bleu1_gen/gd":[],"Bleu2_gen/gd":[],"Bleu3_gen/gd":[],"Bleu4_gen/gd":[],"Cider_gen/gd":[],"Bleu1_cap/gd":[],"Bleu2_cap/gd":[],"Bleu3_cap/gd":[],"Bleu4_cap/gd":[],"Cider_cap/gd":[], "Bleu1_gd/gen":[],"Bleu2_gd/gen":[],"Bleu3_gd/gen":[],"Bleu4_gd/gen":[],"Cider_gd/gen":[]}
if split=='change':
visual['new_personality']=[]
minopt=0
verbose_loss = True
while True:
data = loader.get_batch(split)
n = n + loader.batch_size
if data.get('labels', None) is not None and verbose_loss:
# forward the model to get loss
tmp = [data['fc_feats'], data['att_feats'],data['densecap'], data['labels'], data['masks'], data['att_masks'], data['personality']]
tmp = [_.cuda() if _ is not None else _ for _ in tmp]
fc_feats, att_feats,densecap, labels, masks, att_masks,personality = tmp
with torch.no_grad():
if eval_kwargs.get("use_dl",0)>0:
gen_result, sample_logprobs,alogprobs = model(fc_feats, att_feats,densecap, att_masks,personality, opt={'sample_method':'sample'}, mode='sample')
loss = crit(model(fc_feats, att_feats,densecap, labels, att_masks,personality), alogprobs, labels[:,1:], masks[:,1:]).item()
else:
loss = crit(model(fc_feats, att_feats,densecap, labels, att_masks,personality), labels[:,1:], masks[:,1:])
loss_sum = loss_sum + loss
loss_evals = loss_evals + 1
if use_joint==1:
for k,v in model.loss().items():
if k not in losses:
losses[k] = 0
losses[k] += v
# forward the model to also get generated samples for each image
# Only leave one feature for each image, in case duplicate sample
tmp = [data['fc_feats'][np.arange(loader.batch_size)],
data['att_feats'][np.arange(loader.batch_size)] if data['att_feats'] is not None else None,
data['densecap'][np.arange(loader.batch_size)],
data['att_masks'][np.arange(loader.batch_size)] if data['att_masks'] is not None else None,
data['personality'][np.arange(loader.batch_size)]]
tmp = [_.cuda() if _ is not None else _ for _ in tmp]
fc_feats, att_feats,densecap, att_masks,personality = tmp
if split =='change':
for pindex,pid in personality.nonzero():
personality[pindex][pid]=0
newpid = random.choice(range(1,len(personality)-1))
personality[pindex][newpid]=1
ground_truth = data['labels'][:][:,1:]
# forward the model to also get generated samples for each image
with torch.no_grad():
seq = model(fc_feats, att_feats,densecap, att_masks,personality, opt=eval_kwargs, mode='sample')[0].data
# Print beam search
# if beam_size > 1 and verbose_beam:
# for i in range(loader.batch_size):
# print('\n'.join([utils.decode_sequence(loader.get_vocab(), _['seq'].unsqueeze(0))[0] for _ in model.done_beams[i]]))
# print('--' * 10)
sents = utils.decode_sequence(loader.get_vocab(), seq)
gd_display = utils.decode_sequence(loader.get_vocab(), ground_truth)
for k, s in enumerate(sents):
if beam_size > 1 and verbose_beam:
beam_sents = [utils.decode_sequence(loader.get_vocab(), _['seq'].unsqueeze(0))[0] for _ in model.done_beams[k]]
maxcider=0
mincider=1000
sent =s
for b,sq in enumerate(beam_sents):
current_cider=cal_cider(gd_display[k*loader.seq_per_img:(k+1)*loader.seq_per_img],sq)
if current_cider >= maxcider:
maxcider=current_cider
sentmax=sq
if current_cider <= mincider:
mincider=current_cider
sentmin=sq
if minopt==1:
sent=sentmin
elif minopt==-1:
sent=sentmax
else:
sent=s
else:
sent = s
#print("best sentence: ",sent)
newpidstr = str(personality[k].nonzero()[0].item())
changed_personality =loader.get_personality()[newpidstr]
entry = {'image_id': data['infos'][k]['id']+"_"+data['infos'][k]['personality'], 'caption':sent,'gd':gd_display[k*loader.seq_per_img:(k+1)*loader.seq_per_img]}
if( entry not in predictions ):
densecap_display = utils.decode_sequence(loader.get_vocab(), data['densecap'][k])
allscore = get_scores_separate([densecap_display],[sent]) # gd is the densecap and test is generation, len(common)/len(generation)
for bk in allscore:
visual[bk+"_gen/cap"].append(allscore[bk])
allscore_gd = get_scores_separate([gd_display[k*loader.seq_per_img:(k+1)*loader.seq_per_img]],[sent])
for bkgd in allscore_gd:
visual[bkgd+"_gen/gd"].append(allscore_gd[bkgd])
allscore_capgd = get_scores_separate([gd_display[k*loader.seq_per_img:(k+1)*loader.seq_per_img]],densecap_display)
for cap_bkgd in allscore_capgd:
visual[cap_bkgd+"_cap/gd"].append(allscore_capgd[cap_bkgd])
allscore_gd_flip = get_scores_separate([[sent]],gd_display[k*loader.seq_per_img:(k+1)*loader.seq_per_img])
for bkgd in allscore_gd_flip:
visual[bkgd+"_gd/gen"].append(allscore_gd_flip[bkgd])
visual["image_id"].append(data['infos'][k]['id'])
visual["personality"].append(data['infos'][k]['personality'])
if split=='change':
visual["new_personality"].append(changed_personality)
visual['generation'].append(sent)
visual["gd"].append(gd_display[k*loader.seq_per_img:(k+1)*loader.seq_per_img])
visual["densecap"].append(densecap_display)
if eval_kwargs.get('dump_path', 0) == 1:
entry['file_name'] = data['infos'][k]['file_path']
predictions.append(entry)
if eval_kwargs.get('dump_images', 0) == 1:
# dump the raw image to vis/ folder
cmd = 'cp "' + os.path.join(eval_kwargs['image_root'], data['infos'][k]['file_path']) + '" vis/imgs/img' + str(len(predictions)) + '.jpg' # bit gross
print(cmd)
os.system(cmd)
if verbose:
print('--------------------------------------------------------------------')
if split=='change':
print('image %s{%s--------->%s}: %s' %(entry['image_id'],changed_personality,entry['gd'], entry['caption']))
else:
print('image %s{%s}: %s' %(entry['image_id'],entry['gd'], entry['caption']))
print('--------------------------------------------------------------------')
# if we wrapped around the split or used up val imgs budget then bail
ix0 = data['bounds']['it_pos_now']
ix1 = data['bounds']['it_max']
if num_images != -1:
ix1 = min(ix1, num_images)
for i in range(n - ix1):
predictions.pop()
if verbose:
            print('evaluating validation performance... %d/%d (%f)' %(ix0 - 1, ix1, loss))
if data['bounds']['wrapped']:
break
if num_images >= 0 and n >= num_images:
break
allwords = " ".join(visual['generation'])
allwords = allwords.split(" ")
print("sets length of allwords:",len(set(allwords)))
print("length of allwords:",len(allwords))
print("rate of set/all:",len(set(allwords))/len(allwords))
lang_stats = None
if lang_eval == 1:
lang_stats = language_eval(dataset, predictions, eval_kwargs['id'], split)
df = pd.DataFrame.from_dict(visual)
df.to_csv("visual_res/"+eval_kwargs['id']+"_"+str(split)+"_"+"visual.csv")
if use_joint==1:
ranks = evalrank(model, loader, eval_kwargs) if rank_eval else {}
# Switch back to training mode
model.train()
if use_joint==1:
losses = {k:v/loss_evals for k,v in losses.items()}
losses.update(ranks)
return losses, predictions, lang_stats
return loss_sum/loss_evals, predictions, lang_stats
def encode_data(model, loader, eval_kwargs={}):
num_images = eval_kwargs.get('num_images', eval_kwargs.get('val_images_use', -1))
split = eval_kwargs.get('split', 'val')
dataset = eval_kwargs.get('dataset', 'coco')
# Make sure in the evaluation mode
model.eval()
loader_seq_per_img = loader.seq_per_img
loader.seq_per_img = 5
loader.reset_iterator(split)
n = 0
img_embs = []
cap_embs = []
while True:
data = loader.get_batch(split)
n = n + loader.batch_size
tmp = [data['fc_feats'], data['att_feats'], data['labels'], data['masks']]
tmp = utils.var_wrapper(tmp)
fc_feats, att_feats, labels, masks = tmp
with torch.no_grad():
img_emb = model.vse.img_enc(fc_feats)
cap_emb = model.vse.txt_enc(labels, masks)
# if we wrapped around the split or used up val imgs budget then bail
ix0 = data['bounds']['it_pos_now']
ix1 = data['bounds']['it_max']
if num_images != -1:
ix1 = min(ix1, num_images)
if n > ix1:
img_emb = img_emb[:(ix1-n)*loader.seq_per_img]
cap_emb = cap_emb[:(ix1-n)*loader.seq_per_img]
# preserve the embeddings by copying from gpu and converting to np
img_embs.append(img_emb.data.cpu().numpy().copy())
cap_embs.append(cap_emb.data.cpu().numpy().copy())
if data['bounds']['wrapped']:
break
if num_images >= 0 and n >= num_images:
break
print("%d/%d"%(n,ix1))
img_embs = np.vstack(img_embs)
cap_embs = np.vstack(cap_embs)
assert img_embs.shape[0] == ix1 * loader.seq_per_img
loader.seq_per_img = loader_seq_per_img
return img_embs, cap_embs
def evalrank(model, loader, eval_kwargs={}):
    """
    Evaluate a trained model on either dev or test. If `fold5=True`, 5 fold
    cross-validation is done (only for MSCOCO). Otherwise, the full data is
    used for evaluation.
    """
    num_images = eval_kwargs.get('num_images', eval_kwargs.get('val_images_use', -1))
    split = eval_kwargs.get('split', 'val')
    dataset = eval_kwargs.get('dataset', 'coco')
    fold5 = eval_kwargs.get('fold5', 0)
print('Computing results...')
img_embs, cap_embs = encode_data(model, loader, eval_kwargs)
print('Images: %d, Captions: %d' %
(img_embs.shape[0] / 5, cap_embs.shape[0]))
if not fold5:
# no cross-validation, full evaluation
r, rt = i2t(img_embs, cap_embs, measure='cosine', return_ranks=True)
ri, rti = t2i(img_embs, cap_embs,
measure='cosine', return_ranks=True)
ar = (r[0] + r[1] + r[2]) / 3
ari = (ri[0] + ri[1] + ri[2]) / 3
rsum = r[0] + r[1] + r[2] + ri[0] + ri[1] + ri[2]
print("rsum: %.1f" % rsum)
print("Average i2t Recall: %.1f" % ar)
print("Image to text: %.1f %.1f %.1f %.1f %.1f" % r)
print("Average t2i Recall: %.1f" % ari)
print("Text to image: %.1f %.1f %.1f %.1f %.1f" % ri)
else:
# 5fold cross-validation, only for MSCOCO
results = []
for i in range(5):
r, rt0 = i2t(img_embs[i * 5000:(i + 1) * 5000],
cap_embs[i * 5000:(i + 1) *
5000], measure='cosine',
return_ranks=True)
print("Image to text: %.1f, %.1f, %.1f, %.1f, %.1f" % r)
ri, rti0 = t2i(img_embs[i * 5000:(i + 1) * 5000],
cap_embs[i * 5000:(i + 1) *
5000], measure='cosine',
return_ranks=True)
if i == 0:
rt, rti = rt0, rti0
print("Text to image: %.1f, %.1f, %.1f, %.1f, %.1f" % ri)
ar = (r[0] + r[1] + r[2]) / 3
ari = (ri[0] + ri[1] + ri[2]) / 3
rsum = r[0] + r[1] + r[2] + ri[0] + ri[1] + ri[2]
print("rsum: %.1f ar: %.1f ari: %.1f" % (rsum, ar, ari))
results += [list(r) + list(ri) + [ar, ari, rsum]]
print("-----------------------------------")
print("Mean metrics: ")
mean_metrics = tuple(np.array(results).mean(axis=0).flatten())
print("rsum: %.1f" % (mean_metrics[10] * 6))
print("Average i2t Recall: %.1f" % mean_metrics[11])
print("Image to text: %.1f %.1f %.1f %.1f %.1f" %
mean_metrics[:5])
print("Average t2i Recall: %.1f" % mean_metrics[12])
print("Text to image: %.1f %.1f %.1f %.1f %.1f" %
mean_metrics[5:10])
return {'rsum':rsum, 'i2t_ar':ar, 't2i_ar':ari,
'i2t_r1':r[0], 'i2t_r5':r[1], 'i2t_r10':r[2], 'i2t_medr':r[3], 'i2t_meanr':r[4],
't2i_r1':ri[0], 't2i_r5':ri[1], 't2i_r10':ri[2], 't2i_medr':ri[3], 't2i_meanr':ri[4]}#{'rt': rt, 'rti': rti}
def i2t(images, captions, npts=None, measure='cosine', return_ranks=False):
"""
Images->Text (Image Annotation)
Images: (5N, K) matrix of images
Captions: (5N, K) matrix of captions
"""
if npts is None:
npts = images.shape[0] // 5
index_list = []
ranks = np.zeros(npts)
top1 = np.zeros(npts)
for index in range(npts):
# Get query image
im = images[5 * index].reshape(1, images.shape[1])
# Compute scores
if measure == 'order':
bs = 100
if index % bs == 0:
mx = min(images.shape[0], 5 * (index + bs))
im2 = images[5 * index:mx:5]
d2 = order_sim(torch.Tensor(im2).cuda(),
torch.Tensor(captions).cuda())
d2 = d2.cpu().numpy()
d = d2[index % bs]
else:
d = np.dot(im, captions.T).flatten()
inds = np.argsort(d)[::-1]
index_list.append(inds[0])
# Score
rank = 1e20
for i in range(5 * index, 5 * index + 5, 1):
tmp = np.where(inds == i)[0][0]
if tmp < rank:
rank = tmp
ranks[index] = rank
top1[index] = inds[0]
# Compute metrics
r1 = 100.0 * len(np.where(ranks < 1)[0]) / len(ranks)
r5 = 100.0 * len(np.where(ranks < 5)[0]) / len(ranks)
r10 = 100.0 * len(np.where(ranks < 10)[0]) / len(ranks)
medr = np.floor(np.median(ranks)) + 1
meanr = ranks.mean() + 1
if return_ranks:
return (r1, r5, r10, medr, meanr), (ranks, top1)
else:
return (r1, r5, r10, medr, meanr)
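# Hedged usage sketch for the retrieval metrics (added for illustration and never called by
# the evaluation code): it builds random, L2-normalised embeddings with the assumed layout of
# 5 captions per image and prints the (r1, r5, r10, medr, meanr) tuple that i2t returns. The
# numbers themselves are meaningless because the embeddings are random.
def _demo_i2t_metrics():
    rng = np.random.RandomState(0)
    n_images, dim = 20, 8
    imgs = rng.randn(5 * n_images, dim)   # (5N, K): each image embedding repeated 5 times
    caps = rng.randn(5 * n_images, dim)   # (5N, K): 5 caption embeddings per image
    imgs /= np.linalg.norm(imgs, axis=1, keepdims=True)
    caps /= np.linalg.norm(caps, axis=1, keepdims=True)
    print('i2t (r1, r5, r10, medr, meanr):', i2t(imgs, caps, measure='cosine'))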
def t2i(images, captions, npts=None, measure='cosine', return_ranks=False):
"""
Text->Images (Image Search)
Images: (5N, K) matrix of images
Captions: (5N, K) matrix of captions
"""
if npts is None:
npts = images.shape[0] // 5
ims = np.array([images[i] for i in range(0, len(images), 5)])
ranks = np.zeros(5 * npts)
top1 = np.zeros(5 * npts)
for index in range(npts):
# Get query captions
queries = captions[5 * index:5 * index + 5]
# Compute scores
if measure == 'order':
bs = 100
if 5 * index % bs == 0:
mx = min(captions.shape[0], 5 * index + bs)
q2 = captions[5 * index:mx]
d2 = order_sim(torch.Tensor(ims).cuda(),
torch.Tensor(q2).cuda())
d2 = d2.cpu().numpy()
d = d2[:, (5 * index) % bs:(5 * index) % bs + 5].T
else:
            d = np.dot(queries, ims.T)
# -*- coding: utf-8 -*-
"""
Created on Sun May 24 03:36:18 2020
@author: Danish
"""
import numpy as np
import matplotlib.pyplot as plt
from epsilon_greedy import MAB, simulate_epsilon
from OIV import simulate_oiv
from UCB import simulate_ucb
from BayesianSampling import simulate_bayesian
def simulate_decaying_epsilon(means, N):
mabs = [MAB(means[0]), MAB(means[1]), MAB(means[2])]
samples = np.zeros(N)
explore = []
for i in range(N):
rand = np.random.random()
if rand<1/(i+1):
idx = np.random.choice(3)
explore.append(i)
else:
lst = []
for b in mabs:
lst.append(b.mean)
            idx = np.argmax(lst)
import unittest
from unittest import TestCase
from fusedwind.plant_flow.vt import GenericWindTurbineVT, GenericWindTurbinePowerCurveVT, \
ExtendedWindTurbinePowerCurveVT, WeibullWindRoseVT, GenericWindRoseVT, GenericWindFarmTurbineLayout, WTPC, \
weibull2freq_array
from fusedwind.plant_flow.comp import WeibullWindRose
from fusedwind.fused_helper import init_container
from fusedwind.plant_flow.generate_fake_vt import *
from random import random
from numpy import array, vstack, linspace, pi, floor
import numpy as np
from numpy import ndarray, array, loadtxt, log, zeros, cos, arccos, sin, nonzero, argsort, NaN, mean, ones, vstack, \
linspace, exp, arctan, arange
from numpy import pi, sqrt, dot, diff
from numpy.testing import assert_array_almost_equal, assert_almost_equal
wr_inputs = {
'wind_directions': [0., 30., 60., 90., 120., 150., 180., 210., 240., 270., 300., 330.],
'wind_speeds': [4., 5., 6., 7., 8., 9., 10., 11., 12., 13., 14., 15., 16., 17., 18., 19., 20., 21., 22., 23., 24.,
25.],
'weibull_array': array(
[[0.00000000e+00, 3.59550200e-02, 9.22421500e+00, 2.38867200e+00],
[3.00000000e+01, 3.94968400e-02, 9.88496400e+00, 2.44726600e+00],
[6.00000000e+01, 5.19472500e-02, 9.67463200e+00, 2.43164100e+00],
[9.00000000e+01, 7.01142500e-02, 1.00520300e+01, 2.60351600e+00],
[1.20000000e+02, 8.36171100e-02, 1.01233300e+01, 2.75585900e+00],
[1.50000000e+02, 6.43188100e-02, 9.64359200e+00, 2.59179700e+00],
[1.80000000e+02, 8.63938000e-02, 9.63384700e+00, 2.58007800e+00],
[2.10000000e+02, 1.17646400e-01, 1.05676900e+01, 2.54492200e+00],
[2.40000000e+02, 1.51493800e-01, 1.14521200e+01, 2.46679700e+00],
[2.70000000e+02, 1.47303000e-01, 1.17420100e+01, 2.60351600e+00],
[3.00000000e+02, 1.00075900e-01, 1.16922200e+01, 2.62304700e+00],
[3.30000000e+02, 5.16379700e-02, 1.01387300e+01, 2.32226600e+00]])}
wr_result = array(
[[1.44588988e-03, 3.35727582e-03, 3.81029809e-03, 4.02470558e-03, 3.98990151e-03, 3.73046644e-03,
3.29906324e-03, 2.76423767e-03, 2.19642754e-03, 1.65575871e-03, 1.18426976e-03, 8.03545915e-04,
5.17057409e-04, 3.15388270e-04, 1.82265836e-04, 9.97392412e-05, 5.16484433e-05, 2.52926787e-05,
1.17053815e-05, 5.11599317e-06, 2.11020415e-06, 5.08752361e-07],
[1.30490339e-03, 3.09884391e-03, 3.62718954e-03, 3.96056671e-03, 4.06990314e-03, 3.95614496e-03,
3.64866956e-03, 3.19840948e-03, 2.66737868e-03, 2.11724064e-03, 1.59962624e-03, 1.15013583e-03,
7.86692880e-04, 5.11652765e-04, 3.16234066e-04, 1.85620497e-04, 1.03401615e-04, 5.46258501e-05,
2.73472412e-05, 1.29640581e-05, 5.81491675e-06, 1.50264971e-06],
[1.77962417e-03, 4.19952053e-03, 4.87241564e-03, 5.26922670e-03, 5.35765892e-03, 5.14772140e-03,
4.68765926e-03, 4.05270640e-03, 3.32955013e-03, 2.60048377e-03, 1.93096518e-03, 1.36289927e-03,
9.14038683e-04, 5.82191700e-04, 3.51980669e-04, 2.01858052e-04, 1.09736471e-04, 5.65096359e-05,
2.75447988e-05, 1.26991432e-05, 5.53343327e-06, 1.39586432e-06],
[2.03574440e-03, 4.98929943e-03, 6.05198952e-03, 6.80997401e-03, 7.17340152e-03, 7.10897476e-03,
6.64589168e-03, 5.86836208e-03, 4.89625745e-03, 3.85928948e-03, 2.87201235e-03, 2.01614762e-03,
1.33368507e-03, 8.30343766e-04, 4.85929034e-04, 2.66934444e-04, 1.37448038e-04, 6.62437807e-05,
2.98390332e-05, 1.25432885e-05, 4.91332268e-06, 1.12525886e-06],
[2.15186806e-03, 5.43110528e-03, 6.80657106e-03, 7.86793420e-03, 8.46630869e-03, 8.52083209e-03,
8.03827862e-03, 7.11211238e-03, 5.89954420e-03, 4.58318141e-03, 3.32962029e-03, 2.25796289e-03,
1.42643914e-03, 8.37636776e-04, 4.56170072e-04, 2.29844137e-04, 1.06885088e-04, 4.57615873e-05,
1.79926754e-05, 6.48041565e-06, 2.13264517e-06, 4.17265933e-07],
[2.24784777e-03, 5.46088720e-03, 6.53473087e-03, 7.23314626e-03, 7.47069754e-03, 7.23396208e-03,
6.58303613e-03, 5.63593453e-03, 4.54016275e-03, 3.44009748e-03, 2.44976880e-03, 1.63787627e-03,
1.02684668e-03, 6.02850400e-04, 3.30951610e-04, 1.69634591e-04, 8.10558227e-05, 3.60483982e-05,
1.48978462e-05, 5.71209909e-06, 2.02861392e-06, 4.27007926e-07],
[2.84919811e-03, 6.90561292e-03, 8.24189952e-03, 9.10295837e-03, 9.38562803e-03, 9.07665485e-03,
8.25355050e-03, 7.06455491e-03, 5.69322871e-03, 4.31834242e-03, 3.08069972e-03, 2.06505246e-03,
1.29914820e-03, 7.66078219e-04, 4.22840120e-04, 2.18143544e-04, 1.05033331e-04, 4.71273715e-05,
1.96750146e-05, 7.63101473e-06, 2.74537834e-06, 5.84660929e-07],
[3.15103170e-03, 7.69357254e-03, 9.33336956e-03, 1.05666829e-02, 1.12688451e-02, 1.13813576e-02,
1.09204112e-02, 9.97189992e-03, 8.67355879e-03, 7.18843635e-03, 5.67618809e-03, 4.26887875e-03,
3.05612657e-03, 2.08130177e-03, 1.34730899e-03, 8.28320491e-04, 4.83210345e-04, 2.67221613e-04,
1.39953406e-04, 6.93493727e-05, 3.24798328e-05, 8.68186021e-06],
[3.38534246e-03, 8.22612132e-03, 9.98196140e-03, 1.13899884e-02, 1.23378363e-02, 1.27597240e-02,
1.26435937e-02, 1.20308435e-02, 1.10083382e-02, 9.69411732e-03, 8.21957641e-03, 6.71153300e-03,
5.27736822e-03, 3.99546450e-03, 2.91176859e-03, 2.04191583e-03, 1.37732536e-03, 8.93223652e-04,
5.56674970e-04, 3.33229170e-04, 1.91495304e-04, 6.08835103e-05],
[2.87303824e-03, 7.17906769e-03, 8.99864707e-03, 1.05685790e-02, 1.17476848e-02, 1.24323246e-02,
1.25706437e-02, 1.21697691e-02, 1.12938057e-02, 1.00525910e-02, 8.58338998e-03, 7.02938634e-03,
5.51945197e-03, 4.15307210e-03, 2.99270142e-03, 2.06379390e-03, 1.36094123e-03, 8.57478070e-04,
5.15753711e-04, 2.95876874e-04, 1.61746247e-04, 4.91779082e-05],
[2.02163959e-03, 5.06769635e-03, 6.37296122e-03, 7.50260428e-03, 8.35231542e-03, 8.84484216e-03,
8.94099994e-03, 8.64540798e-03, 8.00521430e-03, 7.10171159e-03, 6.03649545e-03, 4.91515685e-03,
3.83200463e-03, 2.85882995e-03, 2.03944379e-03, 1.39010175e-03, 9.04514324e-04, 5.61332477e-04,
3.31930481e-04, 1.86840603e-04, 1.00013602e-04, 2.98627202e-05],
[1.76963956e-03, 4.12232356e-03, 4.73613444e-03, 5.11273708e-03, 5.23107828e-03, 5.09941651e-03,
4.75207060e-03, 4.24236873e-03, 3.63329069e-03, 2.98774782e-03, 2.36035193e-03, 1.79197095e-03,
1.30756674e-03, 9.17018472e-04, 6.18067439e-04, 4.00285013e-04, 2.49050823e-04, 1.48827861e-04,
8.53960207e-05, 4.70346524e-05, 2.48592270e-05, 7.39661364e-06]])
class test_GenericWindTurbineVT(unittest.TestCase):
def test_init(self):
gwt = GenericWindTurbineVT()
class test_GenericWindTurbinePowerCurveVT(unittest.TestCase):
def test_init(self):
gwtpc = GenericWindTurbinePowerCurveVT()
def test_random(self):
wt = generate_random_GenericWindTurbinePowerCurveVT()
wt.test_consistency()
class test_ExtendedWindTurbinePowerCurveVT(unittest.TestCase):
def test_init(self):
ewtpc = ExtendedWindTurbinePowerCurveVT()
class test_GenericWindRoseVT(unittest.TestCase):
def test_init(self):
gwr = GenericWindRoseVT(**wr_inputs)
assert_almost_equal(gwr.frequency_array, wr_result)
def test_init_default(self):
gwr = GenericWindRoseVT(weibull_array=wr_inputs['weibull_array'])
assert_almost_equal(gwr.wind_speeds, array([ 4., 5., 6., 7., 8., 9., 10., 11., 12., 13., 14.,
15., 16., 17., 18., 19., 20., 21., 22., 23., 24., 25.]))
assert_almost_equal(gwr.wind_directions, gwr.weibull_array[:,0])
def test_change_resolution(self):
gwr = GenericWindRoseVT(**wr_inputs)
nws = int(random()*25)
nwd = int(random()*360)
new_wind_directions = np.linspace(0., 360., nwd)
new_wind_speeds = np.linspace(4., 25., nws)
gwr.change_resolution(wind_directions=new_wind_directions, wind_speeds=new_wind_speeds)
assert_almost_equal(gwr.frequency_array.shape, [nwd, nws])
        assert_almost_equal(gwr.wind_directions, new_wind_directions)
import numpy as np
import torch
from typing import Tuple, List, Dict
from src.utils import load_obj_from_file
from src.utils.data import OmniDataset
from src.utils.data import data_utils
from src.utils.data import iterators
def classify_pad_bath(
ids: List[List[int]],
sizes: List[int],
pad: int
) -> Tuple[np.array, np.array]:
"""
Pad the instances to max length in the batch
:param ids: sequences to pad
:param sizes: size of each sequence
:param pad: pad index
:return:
"""
max_len = max(sizes)
batch_ids = np.array([np.array(seq + [pad] * (max_len - size)) for seq, size in zip(ids, sizes)])
mask = np.not_equal(batch_ids, pad).astype(int)
incremental_indices = np.cumsum(mask, axis=1) * mask
batch_pos = incremental_indices + pad
return batch_ids, batch_pos
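# A minimal sketch (illustrative only, not called anywhere) of what the padding helper
# produces: sequences are right-padded with the pad index, and position ids count up from
# pad + 1 on real tokens while pad positions keep the pad index. The token ids and the pad
# value 1 below are arbitrary choices for the example.
def _demo_classify_pad_bath():
    batch_ids, batch_pos = classify_pad_bath([[5, 6, 7], [8, 9]], [3, 2], pad=1)
    # batch_ids -> [[5, 6, 7], [8, 9, 1]]
    # batch_pos -> [[2, 3, 4], [2, 3, 1]]
    return batch_ids, batch_pos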
def classify_collate(
batch_samples: Tuple[List[List[int]], List[int], List[int]],
tensor_type,
pad: int
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
"""
collate function used to pad a batch of sampled examples
:param batch_samples: examples to pad
:param tensor_type: return tensor type
:param pad: pad index
:return:
"""
input_ids, sizes, labels = zip(*batch_samples)
input_ids, position_ids = classify_pad_bath(input_ids, sizes, pad)
return tensor_type(input_ids), tensor_type(position_ids), tensor_type(labels)
class ClassifyDataset(OmniDataset):
def __init__(self, group: Dict, pad: int, shuffle: bool = True):
super(ClassifyDataset, self).__init__(pad, shuffle)
self.input_ids = group.get("ids")
self.labels = group.get("labels")
self.sizes = np.array(group.get("sizes"))
self.epoch = 1
def __len__(self):
return len(self.input_ids)
def __getitem__(
self,
item
) -> Tuple[List[int], int, int]:
ids = self.input_ids[item]
size = self.sizes[item]
label = self.labels[item]
return (ids, size, label)
def num_tokens(
self,
index: int
    ) -> int:
return self.sizes[index]
def collate(
self,
samples: Tuple[List[List[int]], List[int], List[int]]
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
return classify_collate(samples, self.tensor_type, self.pad_token_id)
def ordered_indices(self) -> np.array:
"""
Get a list or example's indixes ordered randomly or by sizes
"""
if self.shuffle:
indices = np.random.permutation(len(self))
else:
            indices = np.arange(len(self))
indices = indices[np.argsort(self.sizes[indices], kind='mergesort')]
return indices
def ensure_positive_indices(self) -> np.array:
"""
Return a list of indices ordered by positive examples first
"""
indices = np.arange(len(self))
labels = np.array(self.labels)
positive_labels = labels > 0
if len(positive_labels.shape) == 2:
positive_labels = positive_labels.any(axis=1)
assert len(positive_labels.shape) == 1
positive_indices = indices[positive_labels]
negative_indices = indices[np.logical_not(positive_labels)]
if self.shuffle:
np.random.shuffle(positive_indices)
            np.random.shuffle(negative_indices)
import scipy.io as sio
from pathlib import Path
import numpy as np
import pyqtgraph as pg
from pyqtgraph.Qt import QtGui
mask = np.array([np.ones(32), np.ones(32), np.ones(32),np.concatenate((np.zeros(14), np.ones(18))),
np.concatenate((np.zeros(14), np.ones(18))),
np.concatenate((np.zeros(14), np.ones(18))),
np.ones(32), np.ones(32), np.ones(32),
np.concatenate((np.zeros(14), np.ones(18))),
np.ones(32), np.ones(32), np.ones(32),
np.concatenate((np.zeros(14), np.ones(18))),
np.concatenate((np.zeros(14), np.ones(18))),
np.ones(32), np.ones(32), np.ones(32),
np.concatenate((np.zeros(25), np.ones(4), np.zeros(3))),
np.concatenate((np.zeros(25), np.ones(4), np.zeros(3))),
np.concatenate((np.zeros(25), np.ones(4), np.zeros(3))),
                 np.concatenate((np.zeros(25), np.ones(4), np.zeros(3)))])
from abc import ABCMeta, abstractmethod, abstractproperty
from keras import Model, Sequential, Input
from keras.layers import Dense, LSTM, Average, Bidirectional, Dropout, Concatenate
from keras.regularizers import l2
from keras.callbacks import ModelCheckpoint, EarlyStopping
from functools import partial
from pathlib import Path
import statsmodels.api as sm
import keras.backend as K
import pickle
import numpy as np
import GPy as gpy
import random
import gpflow as gpf
import gpflow.multioutput.features as mf
import uuid
import os
from ordinal_tsf.util import to_channels, to_contiguous
MAX_FNAME_LENGTH = 200
LONG_FNAMES_FNAME = 'long_fnames.txt'
class ModelStrategy(object):
"""Provides a common interface for the forecasting strategy to be used at runtime."""
__metaclass__ = ABCMeta
filename = 'tmp_'
@abstractmethod
def fit(self, train_frames, **kwargs): pass
@abstractmethod
def predict(self, inputs, horizon=100, **kwargs): pass
@staticmethod
@abstractmethod
def load(fname, **kwargs): pass
@abstractmethod
def save(self, folder): pass
@staticmethod
@abstractmethod
def get_filename(params): pass
@abstractproperty
def seed_length(self): pass
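# Hedged sketch of the contract concrete strategies implement. _NullStrategy below is purely
# illustrative (it is not referenced anywhere else in this module): it "forecasts" by repeating
# the last seed value, which is just enough to make the required interface explicit.
class _NullStrategy(ModelStrategy):
    id = 'null'

    def __init__(self, lookback=10):
        self.lookback = lookback

    def fit(self, train_frames, **kwargs):
        pass  # nothing to learn

    def predict(self, inputs, horizon=100, **kwargs):
        # repeat the last observed timestep over the requested horizon
        return {'draws': np.repeat(inputs[:, -1:], horizon, axis=1)}

    @staticmethod
    def load(fname, **kwargs):
        return _NullStrategy()

    def save(self, folder):
        pass  # nothing to persist

    @staticmethod
    def get_filename(params):
        return 'null_strategy'

    @property
    def seed_length(self):
        return self.lookback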
class MordredStrategy(ModelStrategy):
"""Implements the ordinal sequence-to-sequence time series forecasting strategy."""
required_spec_keys = ['ordinal_bins', 'units', 'dropout_rate', 'lam', 'horizon', 'lookback']
id = 'mordred'
def __init__(self, ordinal_bins=85, units=64, dropout_rate=0.25, lam=1e-9,
lookback=100, horizon=100, n_channels=1, custom_objs=[]):
# type: (int, int, float, float, int, int, int, list) -> None
self.n_bins = ordinal_bins
self.n_hidden = units
self.dropout_rate = dropout_rate
self.lam = lam
self.lookback = lookback
self.horizon = horizon
self.n_channels = n_channels
self.filename = '{}_{}_bins_{}_hidden_{}_dropout_{}_l2_lookback_{}_horizon_{}_channels_{}'.format(
self.id, self.n_bins, self.n_hidden, self.dropout_rate, self.lam, self.lookback, self.horizon, self.n_channels)
loss = 'categorical_crossentropy'
custom_objs = custom_objs
lstm_spec = {'units': self.n_hidden,
'return_state': True,
'kernel_regularizer': l2(self.lam),
'recurrent_regularizer': l2(self.lam),
'dropout': self.dropout_rate,
'recurrent_dropout': self.dropout_rate}
dense_spec = {'units': self.n_bins,
'activation': 'softmax',
'kernel_regularizer': l2(self.lam)}
infr_init_h = Input(shape=(self.n_hidden,))
infr_init_C = Input(shape=(self.n_hidden,))
if self.n_channels > 1:
all_encoder_inputs = [Input(shape=(None, self.n_bins[i]), name='encoder_channel_{}'.format(i + 1))
for i in range(self.n_channels)]
all_decoder_inputs = [Input(shape=(None, self.n_bins[i]), name='decoder_channel_{}'.format(i + 1))
for i in range(self.n_channels)]
encoder_input = Concatenate(axis=-1)(all_encoder_inputs)
decoder_input = Concatenate(axis=-1)(all_decoder_inputs)
train_inputs = all_encoder_inputs + all_decoder_inputs
encoder_predict_inputs = all_encoder_inputs + [K.learning_phase()]
decoder_predict_inputs = all_decoder_inputs + [infr_init_h, infr_init_C, K.learning_phase()]
else:
encoder_input = Input(shape=(None, self.n_bins))
decoder_input = Input(shape=(None, self.n_bins))
train_inputs = [encoder_input, decoder_input]
encoder_predict_inputs = [encoder_input, K.learning_phase()]
decoder_predict_inputs = [decoder_input, infr_init_h, infr_init_C, K.learning_phase()]
encoder_fwd = LSTM(**lstm_spec)
lstm_spec['go_backwards'] = True
encoder_bkwd = LSTM(**lstm_spec)
_, h_fwd, C_fwd = encoder_fwd(encoder_input)
_, h_bkwd, C_bkwd = encoder_bkwd(encoder_input)
decoder_initial_states = [Average()([h_fwd, h_bkwd]), Average()([C_fwd, C_bkwd])]
lstm_spec['return_sequences'] = True
lstm_spec['go_backwards'] = False
decoder_lstm = LSTM(**lstm_spec)
decoder_output, _, _ = decoder_lstm(decoder_input, initial_state=decoder_initial_states)
infr_decoder_output, infr_h, infr_C = decoder_lstm(decoder_input, initial_state=[infr_init_h, infr_init_C])
if self.dropout_rate > 0.:
decoder_output = Dropout(self.dropout_rate)(decoder_output)
infr_decoder_output = Dropout(self.dropout_rate)(infr_decoder_output)
if self.n_channels > 1:
train_outputs = []
decoder_predict_outputs = []
for i in range(self.n_channels):
dense_spec['units'] = self.n_bins[i]
decoder_dense = Dense(**dense_spec)
train_outputs += [decoder_dense(decoder_output)]
decoder_predict_outputs += [decoder_dense(infr_decoder_output)]
decoder_predict_outputs += [infr_h, infr_C]
else:
decoder_dense = Dense(**dense_spec)
decoded_sequence = decoder_dense(decoder_output)
train_outputs = [decoded_sequence]
inferred_sequence = decoder_dense(infr_decoder_output)
decoder_predict_outputs = [inferred_sequence, infr_h, infr_C]
self.__sequence2sequence = Model(train_inputs, train_outputs)
self.__sequence2sequence.compile(optimizer='nadam', loss=loss, metrics=[loss] + custom_objs)
self.__encoder = Model(encoder_predict_inputs[:-1], decoder_initial_states) # no learning phase
self.__decoder = Model(decoder_predict_inputs[:-1], decoder_predict_outputs)
self.predict_stochastic = K.function(train_inputs + [K.learning_phase()], train_outputs)
self.predict_stochastic_encoder = K.function(encoder_predict_inputs, decoder_initial_states)
self.predict_stochastic_decoder = K.function(decoder_predict_inputs, decoder_predict_outputs)
def fit(self, train_frames, **kwargs):
        # IMPORTANT: assumes train_frames is a numpy array, which technically
# does not allow for channels with different number of bins
batch_size = kwargs.get('batch_size', 256)
val_p = kwargs.get('validation_split', 0.15)
epochs = kwargs.get('epochs', 50)
def get_inputs(x):
if x.ndim > 3:
return [x[:, :self.lookback, :, i] for i in range(x.shape[-1])] + \
[x[:, self.lookback:self.lookback + self.horizon, :, i]
for i in range(x.shape[-1])]
else:
return [x[:, :self.lookback], x[:, self.lookback:self.lookback + self.horizon]]
def get_outputs(x):
if x.ndim > 3:
return [x[:, self.lookback + 1:self.lookback + self.horizon + 1, :, i]
for i in range(x.shape[-1])]
else:
return [x[:, self.lookback + 1:self.lookback + self.horizon + 1]]
train_gen, val_gen, tr_steps, val_steps = train_frames(get_inputs=get_inputs, get_outputs=get_outputs,
batch_size=batch_size, val_p=val_p)
cp_fname = 'cp_{}'.format(''.join([random.choice('0123456789ABCDEF') for _ in range(16)]))
callbacks = [EarlyStopping(monitor='val_loss', min_delta=1e-3, patience=2, verbose=1, mode='min'),
ModelCheckpoint(cp_fname, monitor='val_loss', mode='min',
save_best_only=True,
save_weights_only=True)]
self.__sequence2sequence.fit_generator(train_gen,
steps_per_epoch=tr_steps,
verbose=2,
validation_data=val_gen,
validation_steps=val_steps,
callbacks=callbacks,
epochs=epochs)
self.__sequence2sequence.load_weights(cp_fname)
os.remove(cp_fname)
def predict(self, inputs, predictive_horizon=100, mc_samples=100):
samples = []
if inputs.ndim > 3:
encoder_inputs = [inputs[:, :self.lookback, :, i] for i in range(inputs.shape[3])]
first_decoder_seed = [inputs[:, self.lookback:self.lookback + 1, :, i] for i in range(inputs.shape[3])]
else:
encoder_inputs = [inputs[:, :self.lookback]]
first_decoder_seed = [inputs[:, self.lookback:self.lookback + 1]]
for i_s in range(mc_samples):
h, c = self.predict_stochastic_encoder(encoder_inputs + [True])
decoder_stochastic_output = self.predict_stochastic_decoder(first_decoder_seed + [h, c, True])
seq = [decoder_stochastic_output[:-2]]
for t in range(predictive_horizon-1):
decoder_stochastic_output = self.predict_stochastic_decoder(decoder_stochastic_output + [True])
seq += [decoder_stochastic_output[:-2]]
samples += [np.stack(seq, axis=-1).T.squeeze()]
posterior_mean = np.stack(samples).mean(axis=0).squeeze()
drawn_samples = []
if self.n_channels > 1:
for i_ch in range(self.n_channels):
ch_posterior = posterior_mean.take(i_ch, axis=-1)
ch_samples = [np.random.choice(self.n_bins[i_ch], mc_samples, p=ch_posterior[t])
for t in range(predictive_horizon)]
drawn_samples += [np.stack(ch_samples, axis=-1)]
else:
drawn_samples += [np.random.choice(self.n_bins, mc_samples, p=posterior_mean[t])
for t in range(predictive_horizon)]
drawn_samples = np.stack(drawn_samples, axis=-1)
return {'ordinal_pdf': posterior_mean, 'draws': drawn_samples}
def save(self, folder, fname=None):
if isinstance(self.n_bins, (list,)):
ord_bins = '_'.join(['chbins{}_{}'.format(i_ch+1, b) for i_ch, b in enumerate(self.n_bins)])
else:
ord_bins = self.n_bins
save_obj = {'ordinal_bins': ord_bins,
'units': self.n_hidden,
'dropout_rate': self.dropout_rate,
'lam': self.lam,
'lookback': self.lookback,
'horizon': self.horizon,
'n_channels':self.n_channels}
if fname is None:
fname = MordredStrategy.get_filename(save_obj, folder)
fname = folder + fname
weights_fname = fname + '_weights.h5'
save_obj['weights_fname'] = weights_fname
self.__sequence2sequence.save_weights(weights_fname, overwrite=True)
with open(fname, 'wb') as f:
pickle.dump(save_obj, f)
def set_weights(self, weights_fname):
self.__sequence2sequence.load_weights(weights_fname)
@staticmethod
def get_filename(model_spec, folder='.'):
assert all([k in model_spec for k in MordredStrategy.required_spec_keys])
if isinstance(model_spec['ordinal_bins'], (list,)):
ord_bins = '_'.join(['chbins{}_{}'.format(i_ch+1, b) for i_ch, b in enumerate(model_spec['ordinal_bins'])])
else:
ord_bins = model_spec['ordinal_bins']
fname = 'mordred_{}_bins_{}_hidden_{}_dropout_{}_l2_lookback_{}_horizon_{}_channels_{}'.format(ord_bins,
model_spec['units'],
model_spec['dropout_rate'],
model_spec['lam'],
model_spec['lookback'],
model_spec['horizon'],
model_spec['n_channels'])
return fname[:MAX_FNAME_LENGTH]
@staticmethod
def load(fname, custom_objs = None):
with open(fname, 'rb') as f:
spec = pickle.load(f)
if custom_objs is not None:
spec['custom_objs'] = custom_objs
if 'lambda' in spec:
l = spec.pop('lambda', 0.)
spec['lam'] = l
weights_fname = spec.pop('weights_fname', None)
if type(spec['ordinal_bins']) is not int:
spec['ordinal_bins'] = [int(i) for i in spec['ordinal_bins'].split('_')[1:][::2]]
#print(weights_fname)
assert weights_fname is not None, "Provide a valid weights filename to load model."
model = MordredStrategy(**spec)
model.set_weights(weights_fname)
return model
@property
def seed_length(self):
return self.lookback + 1
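# Hedged usage sketch for MordredStrategy (illustrative only, never called by the module).
# It builds an untrained single-channel model with small, arbitrary sizes and runs predict on
# a random one-hot seed, just to show the expected (1, lookback + 1, n_bins) input layout and
# the 'ordinal_pdf' / 'draws' keys of the returned dictionary; in real use the model would be
# trained first via fit on a windowed frame provider.
def _demo_mordred_predict():
    n_bins, lookback, horizon = 10, 20, 5
    strategy = MordredStrategy(ordinal_bins=n_bins, units=8, dropout_rate=0.1,
                               lookback=lookback, horizon=horizon, n_channels=1)
    seed_bins = np.random.randint(0, n_bins, size=strategy.seed_length)
    seed = np.eye(n_bins)[seed_bins][np.newaxis]   # one-hot seed, shape (1, lookback + 1, n_bins)
    pred = strategy.predict(seed, predictive_horizon=horizon, mc_samples=4)
    return pred['ordinal_pdf'].shape, pred['draws'].shape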
class MordredAutoencoderStrategy(MordredStrategy):
id = 'mordred_autoencoder'
def fit(self, train_frames, **kwargs):
        # IMPORTANT: assumes train_frames is a numpy array, which technically
# does not allow for channels with different number of bins
batch_size = kwargs.get('batch_size', 256)
val_p = kwargs.get('validation_split', 0.15)
epochs = kwargs.get('epochs', 50)
def get_inputs(x):
if x.ndim > 3:
return [x[:, :self.lookback, :, i] for i in range(x.shape[-1])] + \
[x[:, : self.lookback, :, i]
for i in range(x.shape[-1])]
else:
return [x[:, :self.lookback], x[:, :self.lookback ]]
def get_outputs(x):
if x.ndim > 3:
return [x[:, :self.lookback, :, i] for i in range(x.shape[-1])]
else:
return [x[:, :self.lookback]]
train_gen, val_gen, tr_steps, val_steps = train_frames(get_inputs=get_inputs, get_outputs=get_outputs,
batch_size=batch_size, val_p=val_p)
cp_fname = 'cp_{}'.format(''.join([random.choice('0123456789ABCDEF') for _ in range(16)]))
callbacks = [EarlyStopping(monitor='val_loss', min_delta=1e-3, patience=2, verbose=1, mode='min'),
ModelCheckpoint(cp_fname, monitor='val_loss', mode='min',
save_best_only=True,
save_weights_only=True)]
        # self.__sequence2sequence is created in MordredStrategy.__init__, so from this
        # subclass it has to be reached through the parent's name-mangled attribute.
        seq2seq = self._MordredStrategy__sequence2sequence
        seq2seq.fit_generator(train_gen,
                              steps_per_epoch=tr_steps,
                              verbose=2,
                              validation_data=val_gen,
                              validation_steps=val_steps,
                              callbacks=callbacks,
                              epochs=epochs)
        seq2seq.load_weights(cp_fname)
        os.remove(cp_fname)
class MultilayerMordredStrategy(ModelStrategy):
"""Implements the ordinal sequence-to-sequence time series forecasting strategy."""
required_spec_keys = ['ordinal_bins', 'units', 'n_layers', 'dropout_rate', 'lam', 'horizon', 'lookback']
id = 'multilayer_mordred'
def __init__(self, ordinal_bins=85, n_layers=2, units=64, dropout_rate=0.25, lam=1e-9,
lookback=100, horizon=100, n_channels=1, custom_objs=[]):
# type: (int, int, float, float, int, int, int, list) -> None
self.n_bins = ordinal_bins
self.n_hidden = units
self.dropout_rate = dropout_rate
self.lam = lam
self.lookback = lookback
self.horizon = horizon
self.n_layers = n_layers
self.n_channels = n_channels
self.filename = '{}_{}_bins_{}_hidden_{}_dropout_{}_l2_lookback_{}_horizon_{}_channels_{}'.format(
self.id, self.n_bins, self.n_hidden, self.dropout_rate, self.lam, self.lookback, self.horizon, self.n_channels)
loss = 'categorical_crossentropy'
custom_objs = custom_objs
lstm_spec = {'units': self.n_hidden,
'return_state': True,
'kernel_regularizer': l2(self.lam),
'recurrent_regularizer': l2(self.lam),
'dropout': self.dropout_rate,
'recurrent_dropout': self.dropout_rate}
dense_spec = {'units': self.n_bins,
'activation': 'softmax',
'kernel_regularizer': l2(self.lam)}
infr_init_h = Input(shape=(self.n_hidden,))
infr_init_C = Input(shape=(self.n_hidden,))
if self.n_channels > 1:
all_encoder_inputs = [Input(shape=(None, self.n_bins[i]), name='encoder_channel_{}'.format(i + 1))
for i in range(self.n_channels)]
all_decoder_inputs = [Input(shape=(None, self.n_bins[i]), name='decoder_channel_{}'.format(i + 1))
for i in range(self.n_channels)]
encoder_input = Concatenate(axis=-1)(all_encoder_inputs)
decoder_input = Concatenate(axis=-1)(all_decoder_inputs)
train_inputs = all_encoder_inputs + all_decoder_inputs
encoder_predict_inputs = all_encoder_inputs + [K.learning_phase()]
decoder_predict_inputs = all_decoder_inputs + [infr_init_h, infr_init_C, K.learning_phase()]
else:
encoder_input = Input(shape=(None, self.n_bins))
decoder_input = Input(shape=(None, self.n_bins))
train_inputs = [encoder_input, decoder_input]
encoder_predict_inputs = [encoder_input, K.learning_phase()]
decoder_predict_inputs = [decoder_input, infr_init_h, infr_init_C, K.learning_phase()]
lstm_spec['return_sequences'] = False
encoder_fwd = LSTM(**lstm_spec)
lstm_spec['go_backwards'] = True
encoder_bkwd = LSTM(**lstm_spec)
lstm_spec['return_sequences'] = True
prev_encoder_bkwd = LSTM(**lstm_spec)
lstm_spec['go_backwards'] = False
prev_encoder_fwd = LSTM(**lstm_spec)
_, h_fwd, C_fwd = encoder_fwd(prev_encoder_fwd(encoder_input))
_, h_bkwd, C_bkwd = encoder_bkwd(prev_encoder_bkwd(encoder_input))
decoder_initial_states = [Average()([h_fwd, h_bkwd]), Average()([C_fwd, C_bkwd])]
lstm_spec['return_sequences'] = True
lstm_spec['go_backwards'] = False
decoder_lstm = LSTM(**lstm_spec)
decoder_output, _, _ = decoder_lstm(decoder_input, initial_state=decoder_initial_states)
infr_decoder_output, infr_h, infr_C = decoder_lstm(decoder_input, initial_state=[infr_init_h, infr_init_C])
if self.dropout_rate > 0.:
decoder_output = Dropout(self.dropout_rate)(decoder_output)
infr_decoder_output = Dropout(self.dropout_rate)(infr_decoder_output)
if self.n_channels > 1:
train_outputs = []
decoder_predict_outputs = []
for i in range(self.n_channels):
dense_spec['units'] = self.n_hidden
prev_decoder = Dense(**dense_spec)
dense_spec['units'] = self.n_bins[i]
decoder_dense = Dense(**dense_spec)
train_outputs += [decoder_dense(prev_decoder(decoder_output))]
decoder_predict_outputs += [decoder_dense(prev_decoder(infr_decoder_output))]
decoder_predict_outputs += [infr_h, infr_C]
else:
dense_spec['units'] = self.n_hidden
prev_decoder = Dense(**dense_spec)
dense_spec['units'] = self.n_bins
decoder_dense = Dense(**dense_spec)
decoded_sequence = decoder_dense(prev_decoder(decoder_output))
train_outputs = [decoded_sequence]
inferred_sequence = decoder_dense(prev_decoder(infr_decoder_output))
decoder_predict_outputs = [inferred_sequence, infr_h, infr_C]
self.__sequence2sequence = Model(train_inputs, train_outputs)
self.__sequence2sequence.compile(optimizer='nadam', loss=loss, metrics=[loss] + custom_objs)
self.__encoder = Model(encoder_predict_inputs[:-1], decoder_initial_states) # no learning phase
self.__decoder = Model(decoder_predict_inputs[:-1], decoder_predict_outputs)
self.predict_stochastic = K.function(train_inputs + [K.learning_phase()], train_outputs)
self.predict_stochastic_encoder = K.function(encoder_predict_inputs, decoder_initial_states)
self.predict_stochastic_decoder = K.function(decoder_predict_inputs, decoder_predict_outputs)
def fit(self, train_frames, **kwargs):
        # IMPORTANT: assumes train_frames is a numpy array, which technically
# does not allow for channels with different number of bins
batch_size = kwargs.get('batch_size', 256)
val_p = kwargs.get('validation_split', 0.15)
epochs = kwargs.get('epochs', 50)
def get_inputs(x):
if x.ndim > 3:
return [x[:, :self.lookback, :, i] for i in range(x.shape[-1])] + \
[x[:, self.lookback:self.lookback + self.horizon, :, i]
for i in range(x.shape[-1])]
else:
return [x[:, :self.lookback], x[:, self.lookback:self.lookback + self.horizon]]
def get_outputs(x):
if x.ndim > 3:
return [x[:, self.lookback + 1:self.lookback + self.horizon + 1, :, i]
for i in range(x.shape[-1])]
else:
return [x[:, self.lookback + 1:self.lookback + self.horizon + 1]]
train_gen, val_gen, tr_steps, val_steps = train_frames(get_inputs=get_inputs, get_outputs=get_outputs,
batch_size=batch_size, val_p=val_p)
cp_fname = 'cp_{}'.format(''.join([random.choice('0123456789ABCDEF') for _ in range(16)]))
callbacks = [EarlyStopping(monitor='val_loss', min_delta=1e-3, patience=2, verbose=1, mode='min'),
ModelCheckpoint(cp_fname, monitor='val_loss', mode='min',
save_best_only=True,
save_weights_only=True)]
self.__sequence2sequence.fit_generator(train_gen,
steps_per_epoch=tr_steps,
verbose=2,
validation_data=val_gen,
validation_steps=val_steps,
callbacks=callbacks,
epochs=epochs)
self.__sequence2sequence.load_weights(cp_fname)
os.remove(cp_fname)
def predict(self, inputs, predictive_horizon=100, mc_samples=100):
samples = []
if inputs.ndim > 3:
encoder_inputs = [inputs[:, :self.lookback, :, i] for i in range(inputs.shape[3])]
first_decoder_seed = [inputs[:, self.lookback:self.lookback + 1, :, i] for i in range(inputs.shape[3])]
else:
encoder_inputs = [inputs[:, :self.lookback]]
first_decoder_seed = [inputs[:, self.lookback:self.lookback + 1]]
for i_s in range(mc_samples):
h, c = self.predict_stochastic_encoder(encoder_inputs + [True])
decoder_stochastic_output = self.predict_stochastic_decoder(first_decoder_seed + [h, c, True])
seq = [decoder_stochastic_output[:-2]]
for t in range(predictive_horizon-1):
decoder_stochastic_output = self.predict_stochastic_decoder(decoder_stochastic_output + [True])
seq += [decoder_stochastic_output[:-2]]
samples += [np.stack(seq, axis=-1).T.squeeze()]
posterior_mean = np.stack(samples).mean(axis=0).squeeze()
drawn_samples = []
if self.n_channels > 1:
for i_ch in range(self.n_channels):
ch_posterior = posterior_mean.take(i_ch, axis=-1)
ch_samples = [np.random.choice(self.n_bins[i_ch], mc_samples, p=ch_posterior[t])
for t in range(predictive_horizon)]
drawn_samples += [np.stack(ch_samples, axis=-1)]
else:
drawn_samples += [np.random.choice(self.n_bins, mc_samples, p=posterior_mean[t])
for t in range(predictive_horizon)]
drawn_samples = np.stack(drawn_samples, axis=-1)
return {'ordinal_pdf': posterior_mean, 'draws': drawn_samples}
def save(self, folder, fname=None):
if isinstance(self.n_bins, (list,)):
ord_bins = '_'.join(['chbins{}_{}'.format(i_ch+1, b) for i_ch, b in enumerate(self.n_bins)])
else:
ord_bins = self.n_bins
save_obj = {'ordinal_bins': ord_bins,
'units': self.n_hidden,
'dropout_rate': self.dropout_rate,
'lam': self.lam,
'lookback': self.lookback,
'horizon': self.horizon,
'n_channels':self.n_channels,
'n_layers': self.n_layers}
if fname is None:
fname = MultilayerMordredStrategy.get_filename(save_obj, folder)
fname = folder + fname
weights_fname = fname + '_weights.h5'
save_obj['weights_fname'] = weights_fname
self.__sequence2sequence.save_weights(weights_fname, overwrite=True)
with open(fname, 'wb') as f:
pickle.dump(save_obj, f)
def set_weights(self, weights_fname):
self.__sequence2sequence.load_weights(weights_fname)
@staticmethod
def get_filename(model_spec, folder='.'):
assert all([k in model_spec for k in MultilayerMordredStrategy.required_spec_keys])
if isinstance(model_spec['ordinal_bins'], (list,)):
ord_bins = '_'.join(['chbins{}_{}'.format(i_ch+1, b) for i_ch, b in enumerate(model_spec['ordinal_bins'])])
else:
ord_bins = model_spec['ordinal_bins']
fname = 'multilayer_mordred_{}_bins_{}_hidden_{}_layers_{}_dropout_{}_l2_lookback_{}_horizon_{}_channels_{}'.format(ord_bins,
model_spec['units'],
model_spec['n_layers'],
model_spec['dropout_rate'],
model_spec['lam'],
model_spec['lookback'],
model_spec['horizon'],
model_spec['n_channels'])
return fname[:MAX_FNAME_LENGTH]
@staticmethod
def load(fname, custom_objs = None):
with open(fname, 'rb') as f:
spec = pickle.load(f)
if custom_objs is not None:
spec['custom_objs'] = custom_objs
if 'lambda' in spec:
l = spec.pop('lambda', 0.)
spec['lam'] = l
weights_fname = spec.pop('weights_fname', None)
if type(spec['ordinal_bins']) is not int:
spec['ordinal_bins'] = [int(i) for i in spec['ordinal_bins'].split('_')[1:][::2]]
#print(weights_fname)
assert weights_fname is not None, "Provide a valid weights filename to load model."
model = MultilayerMordredStrategy(**spec)
model.set_weights(weights_fname)
return model
@property
def seed_length(self):
return self.lookback + 1
class AttentionMordredStrategy(ModelStrategy):
"""Implements the ordinal sequence-to-sequence time series forecasting strategy."""
required_spec_keys = ['ordinal_bins', 'units', 'dropout_rate', 'lam', 'horizon', 'lookback']
id = 'mordred'
def __init__(self, ordinal_bins=85, units=64, dropout_rate=0.25, lam=1e-9,
lookback=100, horizon=100, n_channels=1, custom_objs=[]):
# type: (int, int, float, float, int, int, int, list) -> None
self.n_bins = ordinal_bins
self.n_hidden = units
self.dropout_rate = dropout_rate
self.lam = lam
self.lookback = lookback
self.horizon = horizon
self.n_channels = n_channels
self.filename = '{}_{}_bins_{}_hidden_{}_dropout_{}_l2_lookback_{}_horizon_{}_channels_{}'.format(
self.id, self.n_bins, self.n_hidden, self.dropout_rate, self.lam, self.lookback, self.horizon, self.n_channels)
loss = 'categorical_crossentropy'
custom_objs = custom_objs
lstm_spec = {'units': self.n_hidden,
'return_state': True,
'kernel_regularizer': l2(self.lam),
'recurrent_regularizer': l2(self.lam),
'dropout': self.dropout_rate,
'recurrent_dropout': self.dropout_rate}
dense_spec = {'units': self.n_bins,
'activation': 'softmax',
'kernel_regularizer': l2(self.lam)}
infr_init_h = Input(shape=(self.n_hidden,))
infr_init_C = Input(shape=(self.n_hidden,))
if self.n_channels > 1:
all_encoder_inputs = [Input(shape=(None, self.n_bins[i]), name='encoder_channel_{}'.format(i + 1))
for i in range(self.n_channels)]
all_decoder_inputs = [Input(shape=(None, self.n_bins[i]), name='decoder_channel_{}'.format(i + 1))
for i in range(self.n_channels)]
encoder_input = Concatenate(axis=-1)(all_encoder_inputs)
decoder_input = Concatenate(axis=-1)(all_decoder_inputs)
train_inputs = all_encoder_inputs + all_decoder_inputs
encoder_predict_inputs = all_encoder_inputs + [K.learning_phase()]
decoder_predict_inputs = all_decoder_inputs + [infr_init_h, infr_init_C, K.learning_phase()]
else:
encoder_input = Input(shape=(None, self.n_bins))
decoder_input = Input(shape=(None, self.n_bins))
train_inputs = [encoder_input, decoder_input]
encoder_predict_inputs = [encoder_input, K.learning_phase()]
decoder_predict_inputs = [decoder_input, infr_init_h, infr_init_C, K.learning_phase()]
encoder_fwd = LSTM(**lstm_spec)
lstm_spec['go_backwards'] = True
encoder_bkwd = LSTM(**lstm_spec)
_, h_fwd, C_fwd = encoder_fwd(encoder_input)
_, h_bkwd, C_bkwd = encoder_bkwd(encoder_input)
decoder_initial_states = [Average()([h_fwd, h_bkwd]), Average()([C_fwd, C_bkwd])]
lstm_spec['return_sequences'] = True
lstm_spec['go_backwards'] = False
decoder_lstm = LSTM(**lstm_spec)
decoder_output, _, _ = decoder_lstm(decoder_input, initial_state=decoder_initial_states)
infr_decoder_output, infr_h, infr_C = decoder_lstm(decoder_input, initial_state=[infr_init_h, infr_init_C])
if self.dropout_rate > 0.:
decoder_output = Dropout(self.dropout_rate)(decoder_output)
infr_decoder_output = Dropout(self.dropout_rate)(infr_decoder_output)
if self.n_channels > 1:
train_outputs = []
decoder_predict_outputs = []
for i in range(self.n_channels):
dense_spec['units'] = self.n_bins[i]
decoder_dense = Dense(**dense_spec)
train_outputs += [decoder_dense(decoder_output)]
decoder_predict_outputs += [decoder_dense(infr_decoder_output)]
decoder_predict_outputs += [infr_h, infr_C]
else:
decoder_dense = Dense(**dense_spec)
decoded_sequence = decoder_dense(decoder_output)
train_outputs = [decoded_sequence]
inferred_sequence = decoder_dense(infr_decoder_output)
decoder_predict_outputs = [inferred_sequence, infr_h, infr_C]
self.__sequence2sequence = Model(train_inputs, train_outputs)
self.__sequence2sequence.compile(optimizer='nadam', loss=loss, metrics=[loss] + custom_objs)
self.__encoder = Model(encoder_predict_inputs[:-1], decoder_initial_states) # no learning phase
self.__decoder = Model(decoder_predict_inputs[:-1], decoder_predict_outputs)
self.predict_stochastic = K.function(train_inputs + [K.learning_phase()], train_outputs)
self.predict_stochastic_encoder = K.function(encoder_predict_inputs, decoder_initial_states)
self.predict_stochastic_decoder = K.function(decoder_predict_inputs, decoder_predict_outputs)
def fit(self, train_frames, **kwargs):
        # IMPORTANT: assumes train_frames is a numpy array, which technically
# does not allow for channels with different number of bins
batch_size = kwargs.get('batch_size', 256)
val_p = kwargs.get('validation_split', 0.15)
epochs = kwargs.get('epochs', 50)
def get_inputs(x):
if x.ndim > 3:
return [x[:, :self.lookback, :, i] for i in range(x.shape[-1])] + \
[x[:, self.lookback:self.lookback + self.horizon, :, i]
for i in range(x.shape[-1])]
else:
return [x[:, :self.lookback], x[:, self.lookback:self.lookback + self.horizon]]
def get_outputs(x):
if x.ndim > 3:
return [x[:, self.lookback + 1:self.lookback + self.horizon + 1, :, i]
for i in range(x.shape[-1])]
else:
return [x[:, self.lookback + 1:self.lookback + self.horizon + 1]]
train_gen, val_gen, tr_steps, val_steps = train_frames(get_inputs=get_inputs, get_outputs=get_outputs,
batch_size=batch_size, val_p=val_p)
cp_fname = 'cp_{}'.format(''.join([random.choice('0123456789ABCDEF') for _ in range(16)]))
callbacks = [EarlyStopping(monitor='val_loss', min_delta=1e-3, patience=2, verbose=1, mode='min'),
ModelCheckpoint(cp_fname, monitor='val_loss', mode='min',
save_best_only=True,
save_weights_only=True)]
self.__sequence2sequence.fit_generator(train_gen,
steps_per_epoch=tr_steps,
verbose=2,
validation_data=val_gen,
validation_steps=val_steps,
callbacks=callbacks,
epochs=epochs)
self.__sequence2sequence.load_weights(cp_fname)
os.remove(cp_fname)
def predict(self, inputs, predictive_horizon=100, mc_samples=100):
samples = []
if inputs.ndim > 3:
encoder_inputs = [inputs[:, :self.lookback, :, i] for i in range(inputs.shape[3])]
first_decoder_seed = [inputs[:, self.lookback:self.lookback + 1, :, i] for i in range(inputs.shape[3])]
else:
encoder_inputs = [inputs[:, :self.lookback]]
first_decoder_seed = [inputs[:, self.lookback:self.lookback + 1]]
for i_s in range(mc_samples):
h, c = self.predict_stochastic_encoder(encoder_inputs + [True])
decoder_stochastic_output = self.predict_stochastic_decoder(first_decoder_seed + [h, c, True])
seq = [decoder_stochastic_output[:-2]]
for t in range(predictive_horizon-1):
decoder_stochastic_output = self.predict_stochastic_decoder(decoder_stochastic_output + [True])
seq += [decoder_stochastic_output[:-2]]
samples += [np.stack(seq, axis=-1).T.squeeze()]
posterior_mean = np.stack(samples).mean(axis=0).squeeze()
drawn_samples = []
if self.n_channels > 1:
for i_ch in range(self.n_channels):
ch_posterior = posterior_mean.take(i_ch, axis=-1)
ch_samples = [np.random.choice(self.n_bins[i_ch], mc_samples, p=ch_posterior[t])
for t in range(predictive_horizon)]
drawn_samples += [np.stack(ch_samples, axis=-1)]
else:
drawn_samples += [np.random.choice(self.n_bins, mc_samples, p=posterior_mean[t])
for t in range(predictive_horizon)]
drawn_samples = np.stack(drawn_samples, axis=-1)
return {'ordinal_pdf': posterior_mean, 'draws': drawn_samples}
def save(self, folder, fname=None):
if isinstance(self.n_bins, (list,)):
ord_bins = '_'.join(['chbins{}_{}'.format(i_ch+1, b) for i_ch, b in enumerate(self.n_bins)])
else:
ord_bins = self.n_bins
save_obj = {'ordinal_bins': ord_bins,
'units': self.n_hidden,
'dropout_rate': self.dropout_rate,
'lam': self.lam,
'lookback': self.lookback,
'horizon': self.horizon,
'n_channels':self.n_channels}
if fname is None:
fname = MordredStrategy.get_filename(save_obj, folder)
fname = folder + fname
weights_fname = fname + '_weights.h5'
save_obj['weights_fname'] = weights_fname
self.__sequence2sequence.save_weights(weights_fname, overwrite=True)
with open(fname, 'wb') as f:
pickle.dump(save_obj, f)
def set_weights(self, weights_fname):
self.__sequence2sequence.load_weights(weights_fname)
@staticmethod
def get_filename(model_spec, folder='.'):
assert all([k in model_spec for k in MordredStrategy.required_spec_keys])
if isinstance(model_spec['ordinal_bins'], (list,)):
ord_bins = '_'.join(['chbins{}_{}'.format(i_ch+1, b) for i_ch, b in enumerate(model_spec['ordinal_bins'])])
else:
ord_bins = model_spec['ordinal_bins']
fname = 'mordred_{}_bins_{}_hidden_{}_dropout_{}_l2_lookback_{}_horizon_{}_channels_{}'.format(ord_bins,
model_spec['units'],
model_spec['dropout_rate'],
model_spec['lam'],
model_spec['lookback'],
model_spec['horizon'],
model_spec['n_channels'])
return fname[:MAX_FNAME_LENGTH]
@staticmethod
def load(fname, custom_objs = None):
with open(fname, 'rb') as f:
spec = pickle.load(f)
if custom_objs is not None:
spec['custom_objs'] = custom_objs
if 'lambda' in spec:
l = spec.pop('lambda', 0.)
spec['lam'] = l
weights_fname = spec.pop('weights_fname', None)
if type(spec['ordinal_bins']) is not int:
spec['ordinal_bins'] = [int(i) for i in spec['ordinal_bins'].split('_')[1:][::2]]
#print(weights_fname)
assert weights_fname is not None, "Provide a valid weights filename to load model."
        model = AttentionMordredStrategy(**spec)
model.set_weights(weights_fname)
return model
@property
def seed_length(self):
return self.lookback + 1
class MordredXStrategy(ModelStrategy):
required_spec_keys = ['n_ar_channels', 'n_exog_channels', 'units', 'dropout_rate', 'lam', 'horizon', 'lookback']
id = 'mordredX'
def __init__(self, ar_ordinal_bins=85, exog_ordinal_bins=85, units=64, dropout_rate=0.25, lam=1e-9,
lookback=100, horizon=100, n_ar_channels=1, n_exog_channels=1, custom_objs=[]):
# type: (int, int, float, float, int, int, int, list) -> None
self.n_ar_bins = ar_ordinal_bins
self.n_exog_bins = exog_ordinal_bins
self.n_hidden = units
self.dropout_rate = dropout_rate
self.lam = lam
self.lookback = lookback
self.horizon = horizon
self.n_ar_channels = n_ar_channels
self.n_exog_channels = n_exog_channels
#self.filename = 'mordredx_{}_bins_{}_hidden_{}_dropout_{}_l2_lookback_{}_horizon_{}_ARchannels_{}_EXchannels_{}'\
# .format(self.n_bins, self.n_hidden, self.dropout_rate, self.lam,
# self.lookback, self.horizon, self.n_ar_channels, self.n_exog_channels)
loss = 'categorical_crossentropy'
custom_objs = custom_objs
lstm_spec = {'units': self.n_hidden,
'return_state': True,
'kernel_regularizer': l2(self.lam),
'recurrent_regularizer': l2(self.lam),
'dropout': self.dropout_rate,
'recurrent_dropout': self.dropout_rate}
dense_spec = {'activation': 'softmax',
'kernel_regularizer': l2(self.lam)}
infr_init_h = Input(shape=(self.n_hidden,))
infr_init_C = Input(shape=(self.n_hidden,))
all_encoder_inputs = [Input(shape=(None, self.n_ar_bins[i]), name='encoder_channel_{}'.format(i + 1))
for i in range(self.n_ar_channels)]
all_exog_encoder_inputs = [Input(shape=(None, self.n_exog_bins[i]), name='exog_encoder_channel_{}'.format(i + 1))
for i in range(self.n_exog_channels)]
all_decoder_inputs = [Input(shape=(None, self.n_ar_bins[i]), name='decoder_channel_{}'.format(i + 1))
for i in range(self.n_ar_channels)]
all_exog_decoder_inputs = [Input(shape=(None, self.n_exog_bins[i]), name='exog_decoder_channel_{}'.format(i + 1))
for i in range(self.n_exog_channels)]
encoder_input = Concatenate(axis=-1)(all_encoder_inputs + all_exog_encoder_inputs)
decoder_input = Concatenate(axis=-1)(all_decoder_inputs + all_exog_decoder_inputs)
train_inputs = all_encoder_inputs + all_exog_encoder_inputs + all_decoder_inputs + all_exog_decoder_inputs
encoder_predict_inputs = all_encoder_inputs + all_exog_encoder_inputs + [K.learning_phase()]
decoder_predict_inputs = all_decoder_inputs + all_exog_decoder_inputs + [infr_init_h,
infr_init_C,
K.learning_phase()]
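        # Bidirectional encoder: run forward and backward LSTMs over the concatenated
        # channels and initialise the decoder with the averaged final hidden/cell states.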
encoder_fwd = LSTM(**lstm_spec)
lstm_spec['go_backwards'] = True
encoder_bkwd = LSTM(**lstm_spec)
_, h_fwd, C_fwd = encoder_fwd(encoder_input)
_, h_bkwd, C_bkwd = encoder_bkwd(encoder_input)
decoder_initial_states = [Average()([h_fwd, h_bkwd]), Average()([C_fwd, C_bkwd])]
lstm_spec['return_sequences'] = True
lstm_spec['go_backwards'] = False
decoder_lstm = LSTM(**lstm_spec)
decoder_output, _, _ = decoder_lstm(decoder_input, initial_state=decoder_initial_states)
infr_decoder_output, infr_h, infr_C = decoder_lstm(decoder_input, initial_state=[infr_init_h, infr_init_C])
if self.dropout_rate > 0.:
decoder_output = Dropout(self.dropout_rate)(decoder_output)
infr_decoder_output = Dropout(self.dropout_rate)(infr_decoder_output)
train_outputs = []
decoder_predict_outputs = []
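        # One softmax head per autoregressive channel, emitting an ordinal-bin
        # distribution at every decoder time step.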
for i in range(self.n_ar_channels):
dense_spec['units'] = self.n_ar_bins[i]
decoder_dense = Dense(**dense_spec)
train_outputs += [decoder_dense(decoder_output)]
decoder_predict_outputs += [decoder_dense(infr_decoder_output)]
decoder_predict_outputs += [infr_h, infr_C]
self.__sequence2sequence = Model(train_inputs, train_outputs)
self.__sequence2sequence.compile(optimizer='nadam', loss=loss, metrics=[loss] + custom_objs)
self.__encoder = Model(encoder_predict_inputs[:-1], decoder_initial_states)
self.__decoder = Model(decoder_predict_inputs[:-1], decoder_predict_outputs)
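        # Backend functions evaluated with learning_phase=1 keep dropout active at
        # prediction time, enabling Monte Carlo dropout sampling in predict().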
self.predict_stochastic = K.function(train_inputs + [K.learning_phase()], train_outputs)
self.predict_stochastic_encoder = K.function(encoder_predict_inputs, decoder_initial_states)
self.predict_stochastic_decoder = K.function(decoder_predict_inputs, decoder_predict_outputs)
def fit(self, train_frames, **kwargs):
        # IMPORTANT: assumes train_frames is an ndarray, which technically
        # does not allow channels with different numbers of bins.
        # Output (AR) channels come before exogenous channels.
batch_size = kwargs.get('batch_size', 256)
val_p = kwargs.get('validation_split', 0.15)
epochs = kwargs.get('epochs', 50)
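        # Encoder consumes the first `lookback` steps, the decoder is teacher-forced on
        # the next `horizon` steps, and the targets are the decoder inputs shifted one
        # step ahead (autoregressive channels only).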
def get_inputs(x_list):
return [x[:, :self.lookback] for x in x_list] + \
[x[:, self.lookback:self.lookback + self.horizon]
for x in x_list]
def get_outputs(x_list, n_ar=1):
return [x[:, self.lookback + 1:self.lookback + self.horizon + 1]
for x in x_list[:n_ar]]
train_gen, val_gen, tr_steps, val_steps = train_frames(get_inputs=get_inputs,
get_outputs=partial(get_outputs, n_ar=self.n_ar_channels),
batch_size=batch_size, val_p=val_p)
cp_fname = 'cp_{}'.format(''.join([random.choice('0123456789ABCDEF') for _ in range(16)]))
callbacks = [EarlyStopping(monitor='val_loss', min_delta=1e-3, patience=2, verbose=1, mode='min'),
ModelCheckpoint(cp_fname, monitor='val_loss', mode='min',
save_best_only=True,
save_weights_only=True)]
self.__sequence2sequence.fit_generator(train_gen,
steps_per_epoch=tr_steps,
verbose=2,
validation_data=val_gen,
validation_steps=val_steps,
callbacks=callbacks,
epochs=epochs)
self.__sequence2sequence.load_weights(cp_fname)
os.remove(cp_fname)
def predict(self, ar_input_list, exog_input_list=[], predictive_horizon=100, mc_samples=100):
#exog_input_list[i] must have at least lookback + predictive_horizon samples
exog_min_length = self.lookback + predictive_horizon
for i_exog, exog_input in enumerate(exog_input_list):
assert exog_input.shape[1] >= exog_min_length, '{} exog input has {} < {} samples'.format(i_exog,
exog_input.shape[1],
exog_min_length)
samples = [[] for _ in range(self.n_ar_channels)]
encoder_inputs = [inputs[:, :self.lookback, :] for inputs in ar_input_list + exog_input_list]
first_decoder_seed = [inputs[:, self.lookback:self.lookback+1, :] for inputs in ar_input_list + exog_input_list]
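        # Monte Carlo rollout: for each sample, encode with dropout active, then step the
        # decoder forward, feeding back its own categorical outputs together with the
        # exogenous input aligned to the current horizon step.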
for i_s in range(mc_samples):
h, c = self.predict_stochastic_encoder(encoder_inputs + [True])
decoder_stochastic_output = self.predict_stochastic_decoder(first_decoder_seed + [h, c, True])
seq = [decoder_stochastic_output[:-2]] # length is number of AR channels
for t in range(1, predictive_horizon):
current_exog_input = [inputs[:, self.lookback+t:self.lookback+t+1, :] for inputs in exog_input_list]
decoder_stochastic_output = self.predict_stochastic_decoder(decoder_stochastic_output[:-2]
+ current_exog_input
+ decoder_stochastic_output[-2:]
+ [True])
seq += [decoder_stochastic_output[:-2]]
for i_ch in range(self.n_ar_channels):
samples[i_ch] += [np.stack([s[i_ch] for s in seq], axis=-1).T.squeeze()]
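        # Average the per-sample categorical distributions to obtain the posterior
        # predictive ordinal pdf, then draw mc_samples bin indices per horizon step.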
posterior_mean = [np.stack(i_samples).mean(axis=0).squeeze() for i_samples in samples]
drawn_samples = []
for i_ch in range(self.n_ar_channels):
ch_posterior = posterior_mean[i_ch]
ch_samples = [np.random.choice(self.n_ar_bins[i_ch], mc_samples, p=ch_posterior[t])
for t in range(predictive_horizon)]
drawn_samples += [np.stack(ch_samples, axis=-1)]
return {'ordinal_pdf': posterior_mean, 'draws': drawn_samples}
def set_weights(self, weights_fname):
self.__sequence2sequence.load_weights(weights_fname)
@staticmethod
def load(fname, custom_objs = None):
with open(fname, 'rb') as f:
spec = pickle.load(f)
if custom_objs is not None:
spec['custom_objs'] = custom_objs
if 'lambda' in spec:
l = spec.pop('lambda', 0.)
spec['lam'] = l
weights_fname = spec.pop('weights_fname', None)
assert weights_fname is not None, "Provide a valid weights filename to load model."
model = MordredXStrategy(**spec)
model.set_weights(weights_fname)
return model
def save(self, folder, fname=None):
save_obj = {'units': self.n_hidden,
'dropout_rate': self.dropout_rate,
'lam': self.lam,
'lookback': self.lookback,
'horizon': self.horizon,
'n_ar_channels': self.n_ar_channels,
'n_exog_channels': self.n_exog_channels,
'ar_ordinal_bins':self.n_ar_bins,
'exog_ordinal_bins':self.n_exog_bins}
if fname is None:
fname = MordredXStrategy.get_filename(save_obj, folder)
fname = folder + fname
weights_fname = fname + '_weights.h5'
save_obj['weights_fname'] = weights_fname
self.__sequence2sequence.save_weights(weights_fname, overwrite=True)
with open(fname, 'wb') as f:
pickle.dump(save_obj, f)
def get_spec(self):
return {'units': self.n_hidden,
'dropout_rate': self.dropout_rate,
'lam': self.lam,
'lookback': self.lookback,
'horizon': self.horizon,
'n_ar_channels': self.n_ar_channels,
'n_exog_channels': self.n_exog_channels,
'ar_ordinal_bins':self.n_ar_bins,
'exog_ordinal_bins':self.n_exog_bins}
@staticmethod
def get_filename(model_spec, folder='.'):
assert all([k in model_spec for k in MordredXStrategy.required_spec_keys])
        fname = 'mordredx_{}_hidden_{}_dropout_{}_l2_lookback_{}_horizon_{}_channels_{}_exog_{}'.format(
            model_spec['units'],
            model_spec['dropout_rate'],
            model_spec['lam'],
            model_spec['lookback'],
            model_spec['horizon'],
            model_spec['n_ar_channels'],
            model_spec['n_exog_channels'])
return fname[:MAX_FNAME_LENGTH]
@property
def seed_length(self):
return self.lookback + 1
class SARIMAXStrategy(ModelStrategy):
filename = ''
id = 'sarimax'
def __init__(self, order, seasonal_order=(0,0,0,0)):
self.order = order
self.seasonal_order = seasonal_order
def fit(self, train_frames, **kwargs):
self.model = sm.tsa.statespace.SARIMAX(train_frames,
order=self.order,
seasonal_order=self.seasonal_order,
enforce_stationarity=False)
self.fit_res = self.model.fit(disp=False)
def predict(self, inputs, predictive_horizon=100, **kwargs):
pred = self.fit_res.get_forecast(steps=predictive_horizon)
return {'posterior_mean':pred.predicted_mean, 'posterior_std':np.sqrt(pred.var_pred_mean)}
@staticmethod
def load(fname, **kwargs):
this = None
        with open(fname, 'rb') as f:
this = pickle.load(f)
return this
def save(self, folder):
params = {'order':self.order, 'seasonal_order':self.seasonal_order}
with open(folder + SARIMAXStrategy.get_filename(params), 'wb') as f:
pickle.dump(self, f)
@staticmethod
def get_filename(params):
# type: (dict) -> str
return 'sarimax_{}_{}'.format(params['order'][0], params['seasonal_order'][0])
@property
def seed_length(self):
return 121
class ContinuousSeq2Seq(ModelStrategy):
"""Implements the ordinal sequence-to-sequence time series forecasting strategy."""
required_spec_keys = ['units', 'dropout_rate', 'lam', 'horizon', 'lookback']
id = 'seq2seq'
def __init__(self, units=64, dropout_rate=0.25, lam=1e-9,
lookback=100, horizon=100, n_channels=1, custom_objs=[]):
# type: (int, float, float, int, int, int, list) -> None
self.n_hidden = units
self.dropout_rate = dropout_rate
self.lam = lam
self.lookback = lookback
self.horizon = horizon
self.n_channels = n_channels
self.filename = 'contseq2seq_{}_hidden_{}_dropout_{}_l2_lookback_{}_horizon_{}_channels_{}'.format(
self.n_hidden, self.dropout_rate, self.lam, self.lookback, self.horizon, self.n_channels)
loss = 'mse'
custom_objs = custom_objs
lstm_spec = {'units': self.n_hidden,
'return_state': True,
'kernel_regularizer': l2(self.lam),
'recurrent_regularizer': l2(self.lam),
'dropout': self.dropout_rate,
'recurrent_dropout': self.dropout_rate}
dense_spec = {'units': self.n_channels,
'activation': 'linear',
'kernel_regularizer': l2(self.lam)}
infr_init_h = Input(shape=(self.n_hidden,))
infr_init_C = Input(shape=(self.n_hidden,))
encoder_input = Input(shape=(None, self.n_channels))
decoder_input = Input(shape=(None, self.n_channels))
train_inputs = [encoder_input, decoder_input]
encoder_predict_inputs = [encoder_input, K.learning_phase()]
decoder_predict_inputs = [decoder_input, infr_init_h, infr_init_C, K.learning_phase()]
encoder_fwd = LSTM(**lstm_spec)
lstm_spec['go_backwards'] = True
encoder_bkwd = LSTM(**lstm_spec)
_, h_fwd, C_fwd = encoder_fwd(encoder_input)
_, h_bkwd, C_bkwd = encoder_bkwd(encoder_input)
decoder_initial_states = [Average()([h_fwd, h_bkwd]), Average()([C_fwd, C_bkwd])]
lstm_spec['return_sequences'] = True
lstm_spec['go_backwards'] = False
decoder_lstm = LSTM(**lstm_spec)
decoder_output, _, _ = decoder_lstm(decoder_input, initial_state=decoder_initial_states)
infr_decoder_output, infr_h, infr_C = decoder_lstm(decoder_input, initial_state=[infr_init_h, infr_init_C])
if self.dropout_rate > 0.:
decoder_output = Dropout(self.dropout_rate)(decoder_output)
infr_decoder_output = Dropout(self.dropout_rate)(infr_decoder_output)
decoder_dense = Dense(**dense_spec)
decoded_sequence = decoder_dense(decoder_output)
train_outputs = [decoded_sequence]
inferred_sequence = decoder_dense(infr_decoder_output)
decoder_predict_outputs = [inferred_sequence, infr_h, infr_C]
self.__sequence2sequence = Model(train_inputs, train_outputs)
self.__sequence2sequence.compile(optimizer='nadam', loss=loss, metrics=[loss] + custom_objs)
self.__encoder = Model(encoder_predict_inputs[:-1], decoder_initial_states)
self.__decoder = Model(decoder_predict_inputs[:-1], decoder_predict_outputs)
self.predict_stochastic = K.function(train_inputs + [K.learning_phase()], train_outputs)
self.predict_stochastic_encoder = K.function(encoder_predict_inputs, decoder_initial_states)
self.predict_stochastic_decoder = K.function(decoder_predict_inputs, decoder_predict_outputs)
def fit(self, train_frames, **kwargs):
cp_fname = 'cp_{}'.format(''.join([random.choice('0123456789ABCDEF') for _ in range(16)]))
callbacks = [EarlyStopping(monitor='val_loss', min_delta=1e-3, patience=2, verbose=1, mode='min'),
ModelCheckpoint(cp_fname, monitor='val_loss', mode='min',
save_best_only=True,
save_weights_only=True)]
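        # Teacher forcing: encoder input is the first `lookback` steps, decoder input the
        # next `horizon` steps, and targets are the decoder inputs shifted one step ahead.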
inputs = [train_frames[:, :self.lookback], train_frames[:, self.lookback:self.lookback + self.horizon]]
outputs = [train_frames[:, self.lookback + 1:self.lookback + self.horizon + 1]]
self.__sequence2sequence.fit(inputs, outputs, verbose=2, callbacks=callbacks, **kwargs)
self.__sequence2sequence.load_weights(cp_fname)
os.remove(cp_fname)
def predict(self, inputs, predictive_horizon=100, mc_samples=100):
samples = []
encoder_inputs = [inputs[:, :self.lookback]]
first_decoder_seed = [inputs[:, self.lookback:self.lookback + 1]]
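        # MC dropout rollout: encode once per sample with dropout active, then repeatedly
        # feed the decoder its own previous output and recurrent state.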
for i_s in range(mc_samples):
h, c = self.predict_stochastic_encoder(encoder_inputs + [True])
decoder_stochastic_output = self.predict_stochastic_decoder(first_decoder_seed + [h, c, True])
seq = [decoder_stochastic_output[:-2]]
for t in range(predictive_horizon-1):
decoder_stochastic_output = self.predict_stochastic_decoder(decoder_stochastic_output + [True])
seq += [decoder_stochastic_output[:-2]]
samples += [np.stack(seq, axis=-1).T.squeeze()]
return {'draws': np.stack(samples)}
def save(self, folder, fname=None):
save_obj = {'units': self.n_hidden,
'dropout_rate': self.dropout_rate,
'lam': self.lam,
'lookback': self.lookback,
'horizon': self.horizon,
'n_channels':self.n_channels}
if fname is None:
fname = ContinuousSeq2Seq.get_filename(save_obj)
fname = folder + fname
weights_fname = fname + '_weights.h5'
save_obj['weights_fname'] = weights_fname
self.__sequence2sequence.save_weights(weights_fname, overwrite=True)
with open(fname, 'wb') as f:
pickle.dump(save_obj, f)
def set_weights(self, weights_fname):
self.__sequence2sequence.load_weights(weights_fname)
@staticmethod
def get_filename(model_spec):
assert all([k in model_spec for k in ContinuousSeq2Seq.required_spec_keys])
return 'seq2seq_{}_hidden_{}_dropout_{}_l2_lookback_{}_horizon_{}_channels_{}'.format(model_spec['units'],
model_spec['dropout_rate'],
model_spec['lam'],
model_spec['lookback'],
model_spec['horizon'],
model_spec['n_channels'])
@staticmethod
def load(fname, custom_objs = None):
        with open(fname, 'rb') as f:
spec = pickle.load(f)
if custom_objs is not None:
spec['custom_objs'] = custom_objs
if 'lambda' in spec:
l = spec.pop('lambda', 0.)
spec['lam'] = l
weights_fname = spec.pop('weights_fname', None)
#print(weights_fname)
assert weights_fname is not None, "Provide a valid weights filename to load model."
model = ContinuousSeq2Seq(**spec)
model.set_weights(weights_fname)
return model
@property
def seed_length(self):
return self.lookback + 1
class GPStrategy(ModelStrategy):
"""Implements the autoregressive Gaussian Process time series forecasting strategy."""
id = 'argp'
n_max_train = 10000
def __init__(self, ker, lookback=100, horizon=1, fname='tmp', n_channels=1):
self.ker = ker + gpy.kern.White(lookback)
self.lookback = lookback
self.horizon = horizon
self.fname = 'gp_{}'.format(fname) # TODO: DEFINE KERNEL STR
self.model = None
@staticmethod
def load(fname):
        with open(fname, 'rb') as f:
obj = pickle.load(f)
return obj
def save(self, folder, fname=None):
if fname is None:
fname = self.fname
with open(folder + fname, 'wb') as f:
pickle.dump(self, f)
def fit(self, train_frames, restarts=1):
if train_frames.shape[0] > self.n_max_train:
print("Time series is too long!") #Training on first {} samples".format(self.n_max_train)
self.model = gpy.models.GPRegression(train_frames[:self.n_max_train, :self.lookback, 0], # TODO: attractor compatibility
train_frames[:self.n_max_train, self.lookback:self.lookback+1, 0],
self.ker)
if restarts > 1:
self.model.optimize_restarts(restarts)
else:
self.model.optimize()
def predict(self, inputs, predictive_horizon=100, mc_samples=100):
pred_inputs = inputs[:, :, 0]
assert pred_inputs.ndim == 2 # TODO: reshape for attractor compatibility
assert self.model is not None
pred_mean, pred_var = self.model.predict(pred_inputs)
pred_sigma = np.sqrt(pred_var)
samples = np.random.normal(loc=pred_mean, scale=pred_sigma, size=(mc_samples, 1))
draws = np.hstack((np.repeat(pred_inputs, axis=0, repeats=mc_samples), samples))
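        # Autoregressive rollout: each subsequent step conditions on the last
        # `seed_length` values of every sampled trajectory.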
for i in range(predictive_horizon - 1):
pred_mu, pred_var = self.model.predict(draws[:, -self.seed_length:])
pred_sigma = np.sqrt(pred_var)#.clip(0.) # TODO: sigma greater than 0
samples = np.random.normal(loc=pred_mu, scale=pred_sigma)
draws = np.hstack((draws, samples))
return {'draws': draws[:, self.seed_length:]} # TODO: attractor compatibility
@staticmethod
def get_filename(params):
# type: (dict) -> str
return 'gp_{}'.format(params.get('fname', 'tmp'))
@property
def seed_length(self):
return self.lookback
class GPFlowStrategy(ModelStrategy):
"""Implements the autoregressive Gaussian Process time series forecasting strategy and GPflow."""
id = 'argpflow'
n_max_train = 10000
def __init__(self, ker, lookback=100, horizon=1, fname='tmp', n_channels=1, model=None, x_ranges=None):
self.ker = ker + gpf.kernels.White(lookback)
self.lookback = lookback
self.horizon = horizon
if 'argpflow' not in fname:
self.fname = 'argpflow_{}'.format(fname)
else:
self.fname = fname
self.model = model
self.x_ranges = x_ranges
@staticmethod
def load(fname):
svr = gpf.saver.Saver()
model = svr.load(fname,
context=gpf.saver.SaverContext(autocompile=False))
model.clear()
model.compile()
horizon = 1
n_channels = model.Y.shape[-1]
lookback = model.X.shape[-1] // n_channels
x_ranges = [np.linspace(model.Y.value.min(axis=0), model.Y.value.max(axis=0), 1000)]
this_fname = fname.split('/')[-1]
return GPFlowStrategy(model.kern, lookback, horizon, this_fname, n_channels, model=model, x_ranges=x_ranges)
def save(self, folder, fname='coreg_gp_tmp', overwrite=True):
full_fname = '{}/{}'.format(folder, fname)
if os.path.isfile(full_fname):
if overwrite:
os.remove(full_fname)
else:
                print('File already exists; enable the overwrite flag to replace it.')
return -1
svr = gpf.saver.Saver()
svr.save(folder + fname, self.model)
def fit(self, train_frames, restarts=1):
if train_frames.shape[0] > self.n_max_train:
print("Time series is too long!") #Training on first {} samples".format(self.n_max_train)
#self.model = gpy.models.GPRegression(train_frames[:self.n_max_train, :self.lookback, 0],
# train_frames[:self.n_max_train, self.lookback:self.lookback+1, 0],
# self.ker)
X = train_frames[:self.n_max_train, :self.lookback, 0]
Y = train_frames[:self.n_max_train, self.lookback:self.lookback + 1, 0]
self.x_ranges = [np.linspace(Y.min(), Y.max(), 1000)]
self.model = gpf.models.GPR(X, Y, kern=self.ker)
gpf.train.ScipyOptimizer().minimize(self.model)
def predict(self, inputs, predictive_horizon=100, mc_samples=100):
pred_inputs = inputs[:, -self.seed_length:, 0]
assert pred_inputs.ndim == 2 # TODO: reshape for attractor compatibility
assert self.model is not None
pred_mean, pred_var = self.model.predict_y(pred_inputs)
pred_sigma = np.sqrt(pred_var)
samples = np.random.normal(loc=pred_mean, scale=pred_sigma, size=(mc_samples, 1))
draws = np.hstack((np.repeat(pred_inputs, axis=0, repeats=mc_samples), samples))
for i in range(predictive_horizon - 1):
pred_mu, pred_var = self.model.predict_y(draws[:, -self.seed_length:])
pred_sigma = np.sqrt(pred_var)#.clip(0.) # TODO: sigma greater than 0
samples = np.random.normal(loc=pred_mu, scale=pred_sigma)
draws = np.hstack((draws, samples))
return {'draws': draws[:, -predictive_horizon:]}
@staticmethod
def get_filename(params):
# type: (dict) -> str
return 'argpflow_{}'.format(params.get('fname', 'tmp'))
@property
def seed_length(self):
return self.lookback
class FactorisedGPStrategy(ModelStrategy):
"""Implements the autoregressive Gaussian Process time series forecasting strategy."""
id = 'factorised_argp'
n_max_train = 10000
def __init__(self, ker, lookback=100, horizon=1, fname='tmp', n_channels=1):
self.ker = ker + gpy.kern.White(lookback)
self.lookback = lookback
self.horizon = horizon
self.fname = 'factorised_argp_{}'.format(fname) # TODO: DEFINE KERNEL STR
self.model = None
@staticmethod
def load(fname):
        with open(fname, 'rb') as f:
obj = pickle.load(f)
return obj
def save(self, folder, fname=None):
if fname is None:
fname = self.fname
with open(folder + fname, 'wb') as f:
pickle.dump(self, f)
def fit(self, train_frames, restarts=1):
if train_frames.shape[0] > self.n_max_train:
print("Time series is too long! Training on first {} samples".format(self.n_max_train))
self.model = gpy.models.GPRegression(train_frames[:self.n_max_train, :self.lookback, 0],
# TODO: attractor compatibility
train_frames[:self.n_max_train, self.lookback:self.lookback + 1, 0],
self.ker)
if restarts > 1:
self.model.optimize_restarts(restarts)
else:
self.model.optimize()
def predict(self, inputs, predictive_horizon=100, mc_samples=100):
pred_inputs = inputs[:, :, 0]
assert pred_inputs.ndim == 2 # TODO: reshape for attractor compatibility
assert self.model is not None
pred_mean, pred_var = self.model.predict(pred_inputs)
pred_sigma = np.sqrt(pred_var)
samples = np.random.normal(loc=pred_mean, scale=pred_sigma, size=(mc_samples, 1))
draws = np.hstack((np.repeat(pred_inputs, axis=0, repeats=mc_samples), samples))
for i in range(predictive_horizon - 1):
pred_mu, pred_var = self.model.predict(draws[:, -self.seed_length:])
pred_sigma = np.sqrt(pred_var) # .clip(0.) # TODO: sigma greater than 0
samples = np.random.normal(loc=pred_mu, scale=pred_sigma)
draws = np.hstack((draws, samples))
return {'draws': draws[:, self.seed_length:]} # TODO: attractor compatibility
@staticmethod
def get_filename(params):
# type: (dict) -> str
return '{}_{}'.format('factorised_argp', params.get('fname', 'tmp'))
@property
def seed_length(self):
return self.lookback
class CoregionalisedGPStrategy(ModelStrategy):
"""Implements the autoregressive mixture-of-kernels Gaussian Process time series forecasting strategy."""
id = 'coreg_argp'
n_max_train = 4000
n_max_iter = 15000
default_p_inducing = 0.2
def __init__(self, ker, lookback=100, horizon=1, fname='', n_channels=1, model=None, x_ranges=None):
self.model = model
if 'coreg_argp' not in fname:
self.fname = 'coreg_argp_{}'.format(fname)
else:
self.fname = fname
self.ker = ker
self.lookback = lookback
self.horizon = horizon
self.n_channels = n_channels
self.x_ranges = x_ranges
@staticmethod
def load(fname):
svr = gpf.saver.Saver()
model = svr.load(fname,
context=gpf.saver.SaverContext(autocompile=False))
model.clear()
model.compile()
horizon = 1
n_channels = model.Y.shape[-1]
lookback = model.X.shape[-1] // n_channels
x_mins = model.Y.value.min(axis=0)
x_max = model.Y.value.max(axis=0)
x_ranges = [np.linspace(xmi, xma, 1000) for xmi, xma in zip(x_mins, x_max)]
this_fname = fname.split('/')[-1]
return CoregionalisedGPStrategy(model.kern,
lookback,
horizon,
this_fname,
n_channels,
model=model,
x_ranges=x_ranges)
def save(self, folder, fname='coreg_gp_tmp', overwrite=True):
full_fname = '{}/{}'.format(folder, fname)
if os.path.isfile(full_fname):
if overwrite:
os.remove(full_fname)
else:
                print('File already exists; enable the overwrite flag to replace it.')
return -1
svr = gpf.saver.Saver()
svr.save(folder + fname, self.model)
def fit(self, train_frames, feature_func=None, n_inducing=200, init_mu_var=None):
if train_frames.shape[0] > self.n_max_train:
print("Training on last {} samples".format(self.n_max_train))
X, Y = to_contiguous(train_frames[-self.n_max_train:, :self.lookback]), \
to_contiguous(train_frames[-self.n_max_train:, -self.horizon:])
else:
X, Y = to_contiguous(train_frames[:, :self.lookback]), \
to_contiguous(train_frames[:, -self.horizon:])
idx_inducing = np.arange(X.shape[0])
np.random.shuffle(idx_inducing)
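        # Build inducing features via the user-supplied feature_func and fit a sparse
        # variational GP (SVGP) with a Gaussian likelihood; q_mu / q_sqrt may be
        # warm-started through init_mu_var. (idx_inducing above is shuffled but unused.)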
feature = feature_func(X, n_inducing, self.n_channels)
if init_mu_var is None:
self.model = gpf.models.SVGP(X, Y, self.ker, gpf.likelihoods.Gaussian(), feat=feature)
else:
self.model = gpf.models.SVGP(X, Y,
self.ker,
gpf.likelihoods.Gaussian(),
feat=feature,
q_mu=init_mu_var['q_mu'],
q_sqrt=init_mu_var['q_sqrt'])
x_mins = Y.min(axis=0)
x_max = Y.max(axis=0)
self.x_ranges = [np.linspace(xmi, xma, 1000) for xmi, xma in zip(x_mins, x_max)]
opt = gpf.train.ScipyOptimizer()
opt.minimize(self.model, disp=True, maxiter=self.n_max_iter)
def predict(self, inputs, predictive_horizon=100, mc_samples=100):
# inputs must be (1, lookback, channels)
if inputs.ndim < 3:
pred_seed = inputs[np.newaxis, :].copy()
else:
pred_seed = inputs.copy()
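        # First step: flatten the seed window to the GP input layout, predict per-channel
        # mean/variance, and draw mc_samples Gaussian samples for the next time step.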
mu_0, sig_0 = self.model.predict_y(to_contiguous(pred_seed[0]).T)
pred_samples = np.random.normal(mu_0, np.sqrt(sig_0), (mc_samples, 1, self.n_channels))
        # NOTE: the source is truncated at this point; completing the concatenation of the
        # repeated seed with the newly drawn samples (mirroring the other strategies) is an
        # assumption about the original code.
        pred_input = np.concatenate([np.repeat(pred_seed, repeats=mc_samples, axis=0),
                                     pred_samples], axis=1)
# -*- coding: utf-8 -*-
#
# Test proper functionality of Syncopy's `BaseData` class + helpers
#
# Builtin/3rd party package imports
import os
import tempfile
import h5py
import time
import pytest
import numpy as np
from numpy.lib.format import open_memmap
from memory_profiler import memory_usage
# Local imports
from syncopy.datatype import AnalogData
import syncopy.datatype as spd
from syncopy.datatype.base_data import VirtualData
from syncopy.shared.errors import SPYValueError, SPYTypeError
from syncopy.tests.misc import is_win_vm, is_slurm_node
# Construct decorators for skipping certain tests
skip_in_vm = pytest.mark.skipif(is_win_vm(), reason="running in Win VM")
skip_in_slurm = pytest.mark.skipif(is_slurm_node(), reason="running on cluster node")
class TestVirtualData():
# Allocate test-dataset
nChannels = 5
nSamples = 30
data = np.arange(1, nChannels * nSamples + 1).reshape(nSamples, nChannels)
def test_alloc(self):
with tempfile.TemporaryDirectory() as tdir:
fname = os.path.join(tdir, "vdat")
np.save(fname, self.data)
dmap = open_memmap(fname + ".npy")
# illegal type
with pytest.raises(SPYTypeError):
VirtualData({})
# 2darray expected
d3 = np.ones((2, 3, 4))
np.save(fname + "3", d3)
d3map = open_memmap(fname + "3.npy")
with pytest.raises(SPYValueError):
VirtualData([d3map])
# rows/cols don't match up
with pytest.raises(SPYValueError):
VirtualData([dmap, dmap.T])
# check consistency of VirtualData object
for vk in range(2, 6):
vdata = VirtualData([dmap] * vk)
assert vdata.dtype == dmap.dtype
assert vdata.M == dmap.shape[0]
assert vdata.N == vk * dmap.shape[1]
# Delete all open references to file objects b4 closing tmp dir
del dmap, vdata, d3map
def test_retrieval(self):
with tempfile.TemporaryDirectory() as tdir:
fname = os.path.join(tdir, "vdat.npy")
fname2 = os.path.join(tdir, "vdat2.npy")
np.save(fname, self.data)
            np.save(fname2, self.data * 2)