import numpy as np
import pybullet as p
from .env import AssistiveEnv
from .agents import furniture
from .agents.furniture import Furniture
class FeedingNewEnv(AssistiveEnv):
def __init__(self, robot, human):
super(FeedingNewEnv, self).__init__(robot=robot, human=human, task='feeding', obs_robot_len=(18 + len(robot.controllable_joint_indices) - (len(robot.wheel_joint_indices) if robot.mobile else 0)), obs_human_len=(19 + len(human.controllable_joint_indices)))
def step(self, action):
if self.human.controllable:
action = np.concatenate([action['robot'], action['human']])
self.take_step(action)
obs = self._get_obs()
reward_food, food_mouth_velocities, food_hit_human_reward = self.get_food_rewards()
# Get human preferences
end_effector_velocity = np.linalg.norm(self.robot.get_velocity(self.robot.right_end_effector))
preferences_score = self.human_preferences(end_effector_velocity=end_effector_velocity, total_force_on_human=self.total_force_on_human, tool_force_at_target=self.spoon_force_on_human, food_hit_human_reward=food_hit_human_reward, food_mouth_velocities=food_mouth_velocities)
spoon_pos, spoon_orient = self.tool.get_base_pos_orient()
reward_distance_mouth_target = -np.linalg.norm(self.target_pos - spoon_pos) # Penalize robot for distance between the spoon and human mouth.
reward_action = -np.linalg.norm(action) # Penalize actions
reward = self.config('distance_weight')*reward_distance_mouth_target + self.config('action_weight')*reward_action + self.config('food_reward_weight')*reward_food + preferences_score
# print(self.config('distance_weight')*reward_distance_mouth_target, self.config('action_weight')*reward_action, self.config('food_reward_weight')*reward_food, preferences_score)
if self.gui and reward_food != 0:
print('Task success:', self.task_success, 'Food reward:', reward_food)
info = {'total_force_on_human': self.total_force_on_human, 'task_success': int(self.task_success >= self.total_food_count*self.config('task_success_threshold')), 'action_robot_len': self.action_robot_len, 'action_human_len': self.action_human_len, 'obs_robot_len': self.obs_robot_len, 'obs_human_len': self.obs_human_len}
done = self.iteration >= 200
if not self.human.controllable:
return obs, reward, done, info
else:
# Co-optimization with both human and robot controllable
return obs, {'robot': reward, 'human': reward}, {'robot': done, 'human': done, '__all__': done}, {'robot': info, 'human': info}
def get_total_force(self):
robot_force_on_human = np.sum(self.robot.get_contact_points(self.human)[-1])
spoon_force_on_human = np.sum(self.tool.get_contact_points(self.human)[-1])
return robot_force_on_human, spoon_force_on_human
def get_food_rewards(self):
# Check all food particles to see if they have left the spoon or entered the person's mouth
# Give the robot a reward or penalty depending on food particle status
food_reward = 0
food_hit_human_reward = 0
food_mouth_velocities = []
foods_to_remove = []
foods_active_to_remove = []
for f in self.foods:
food_pos, food_orient = f.get_base_pos_orient()
distance_to_mouth = np.linalg.norm(self.target_pos - food_pos)
import numpy as np
## Channel: EEPR4 memory channel and AWGN
class Channel_vit(object):
def __init__(self, channel_machine, dummy_list, ini_state):
self.channel_machine = channel_machine
self.dummy_list = dummy_list
self.ini_state = ini_state
self.len_dummy = self.dummy_list[0].shape[1]
self.num_input_sym = int(self.channel_machine['in_out'].shape[1] / 2)
def e2pr4_channel(self, x):
'''
Input: (1, length) array
Output: (1, len + dummy_len) array
Mapping: channel state machine to zero state
'''
# the last self.len_dummy slots of x are reserved for dummy bits that drive the channel back to the zero state
length = x.shape[1] - self.len_dummy
y = np.zeros((1, x.shape[1]))
# Memory channel
state = self.ini_state
for i in range(0, length, self.num_input_sym):
set_in = np.where(self.channel_machine['state_machine'][:, 0]==state)[0]
idx_in = set_in[np.where(self.channel_machine['in_out'][set_in, 0]==x[:, i])[0]]
y[:, i] = self.channel_machine['in_out'][idx_in, 1]
state = self.channel_machine['state_machine'][idx_in, 1]
# Dummy bits to zero state
path_dummy = self.dummy_list[state[0]]
for i in range(0, self.len_dummy, self.num_input_sym):
set_in = np.where(self.channel_machine['state_machine'][:, 0]==state)[0]
idx_in = (set_in[np.where(self.channel_machine['state_machine'][set_in, 1]==
path_dummy[:, i])[0]])
y[:, i+length] = self.channel_machine['in_out'][idx_in, 1]
state = path_dummy[:, i]
return y
def awgn(self, x, snr):
scaling_para = 0.25
sigma = np.sqrt(scaling_para * 10 ** (- snr * 1.0 / 10))
return x + sigma * np.random.normal(0, 1, x.shape)
## Channel: EEPR4 memory channel and AWGN
class Channel_bcjr(object):
def __init__(self, channel_machine, ini_state):
self.channel_machine = channel_machine
self.ini_state = ini_state
self.num_input_sym = int(self.channel_machine['in_out'].shape[1] / 2)
def e2pr4_channel(self, x):
'''
Input: (1, length) array
Output: (1, len + dummy_len) array
Mapping: channel state machine to zero state
'''
length = x.shape[1]
y = np.zeros((1, length))
# Memory channel
state = self.ini_state
for i in range(0, length, self.num_input_sym):
set_in = np.where(self.channel_machine['state_machine'][:, 0]==state)[0]
idx_in = set_in[np.where(self.channel_machine['in_out'][set_in, 0]==x[:, i])[0]]
y[:, i] = self.channel_machine['in_out'][idx_in, 1]
state = self.channel_machine['state_machine'][idx_in, 1]
return y
def awgn(self, x, snr):
sigma = np.sqrt(0.25 * 10 ** (- snr * 1.0 / 10))
return x + sigma * np.random.normal(0, 1, x.shape)
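## --- Hedged standalone sketch (not from the original file): the AWGN model above.
## Both classes derive the noise std from a target SNR in dB assuming a signal
## power of 0.25 (the `scaling_para`), since SNR_dB = 10*log10(0.25 / sigma**2)
## inverts to sigma = sqrt(0.25 * 10**(-SNR_dB/10)).
import numpy as np

snr_db = 10.0
sigma = np.sqrt(0.25 * 10 ** (-snr_db / 10.0))   # ~0.158 for 10 dB
x = np.ones((1, 8))                              # stand-in channel output
y = x + sigma * np.random.normal(0, 1, x.shape)  # noisy observation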
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
braggutils: utilities around Bragg's law ($ n \lambda = 2 d \sin \theta $)
"""
import warnings
import numpy as np
import logging
try:
import scipy.constants.codata as const
HAS_CODATA = True
h = const.value("Planck constant in eV s") # eV s
c = const.value("speed of light in vacuum") # m s^-1
HC = h * c
except ImportError:
HAS_CODATA = False
HC = 1.2398418743309972e-06 # eV * m
# GLOBAL VARIABLES
HKL_MAX = 30 # maximum hkl index considered
SI_ALAT = 5.431065 # Ang at 25C
GE_ALAT = 5.6579060 # Ang at 25C
INSB_ALAT = 6.48 # cubic
SIO2_A = 4.913 # alpha-quartz, hexagonal
SIO2_C = 5.405
_logger = logging.getLogger(__name__)
def ev2wlen(energy):
"""convert photon energy (E, eV) to wavelength ($\lambda$, \AA$^{-1}$)"""
return (HC / energy) * 1e10
def wlen2ev(wlen):
"""convert photon wavelength ($\lambda$, \AA$^{-1}$) to energy (E, eV)"""
return (HC / wlen) * 1e10
def kev2wlen(energy):
"""convert photon energy (E, keV) to wavelength ($\lambda$, \AA$^{-1}$)"""
return (HC / energy) * 1e7
def wlen2kev(wlen):
"""convert photon wavelength ($\lambda$, \AA$^{-1}$) to energy (E, keV)"""
return (HC / wlen) * 1e7
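# --- Hedged usage sketch: sanity-check values for the converters above,
# assuming the CODATA value of HC defined at the top of this module.
# >>> ev2wlen(10000)    # 10 keV photon -> wavelength in Angstrom
# 1.2398418743309972
# >>> wlen2ev(1.0)      # 1 Angstrom -> energy in eV
# 12398.418743309972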
def kev2ang(ene, d=0, deg=True):
"""energy (keV) to Bragg angle (deg/rad) for given d-spacing (\AA)"""
if d == 0:
_logger.error("kev2deg: d-spacing is 0")
return 0
else:
_ang = np.arcsin((kev2wlen(ene)) / (2 * d))
if deg is True:
_ang = np.rad2deg(_ang)
return _ang
def ang2kev(theta, d=0, deg=True):
"""Bragg angle (deg/rad) to energy (keV) for given d-spacing (\AA)"""
if deg is True:
theta = np.deg2rad(theta)
return wlen2kev(2 * d * np.sin(theta))
def bragg_ev(theta, d, n=1):
"""return the Bragg energy (eV) for a given d-spacing (\AA) and angle (deg)"""
return wlen2ev((2 * d * np.sin(np.deg2rad(theta))) / n)
def theta_b(wlen, d, n=1):
"""return the Bragg angle, $\theta_{B}$, (deg) for a given wavelength
(\AA$^{-1}$) and d-spacing (\AA)"""
if not (d == 0):
try:
with warnings.catch_warnings():
warnings.simplefilter("ignore")
_thb = np.rad2deg(np.arcsin(((wlen * n) / (2 * d))))
return _thb
except Exception:
return 0
else:
return 0
def bragg_th(ene, d, n=1):
"""return the Bragg angle, $\theta_{B}$, (deg) for a given energy (eV)
and d-spacing (\AA)"""
return theta_b(ev2wlen(ene), d, n=n)
def xray_bragg(element, line, dspacing, retAll=False):
"""return the Bragg angle for a given element/line and crystal d-spacing"""
from sloth.utils.xdata import xray_line
line_ene = xray_line(element, line)
try:
theta = bragg_th(line_ene, dspacing)
except Exception:
theta = "-"
pass
if retAll:
return (element, line, line_ene, theta)
else:
return theta
def cotdeg(theta):
"""return the cotangent (= cos/sin) of theta given in degrees"""
dtheta = np.deg2rad(theta)
return np.cos(dtheta) / np.sin(dtheta)
def de_bragg(theta, dth):
"""energy resolution $\frac{\Delta E}{E}$ from derivative of Bragg's law
$|\frac{\Delta E}{E}| = |\frac{\Delta \theta}{\theta} = \Delta \theta \cot(\theta)|$
"""
return dth * cotdeg(theta)
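# --- Hedged usage sketch: a 1e-4 rad rocking width at a 75 deg Bragg angle
# gives a relative bandwidth of dth * cot(75 deg):
# >>> de_bragg(75, 1e-4)   # ~2.68e-05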
def sqrt1over(d2m):
if d2m == 0:
return 0
else:
return np.sqrt(1 / d2m)
def d_cubic(a, hkl, **kws):
"""d-spacing for a cubic lattice"""
h, k, l = hkl[0], hkl[1], hkl[2]
d2m = (h ** 2 + k ** 2 + l ** 2) / a ** 2
return sqrt1over(d2m)
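# --- Hedged usage sketch: for a cubic lattice d = a / sqrt(h**2 + k**2 + l**2),
# e.g. the Si(111) reflection using SI_ALAT defined above:
# >>> d_cubic(SI_ALAT, (1, 1, 1))   # ~3.1356 Angstrom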
def d_tetragonal(a, c, hkl, **kws):
"""d-spacing for a tetragonal lattice"""
h, k, l = hkl[0], hkl[1], hkl[2]
d2m = (h ** 2 + k ** 2) / a ** 2 + (l ** 2 / c ** 2)
return sqrt1over(d2m)
def d_orthorhombic(a, b, c, hkl, **kws):
"""d-spacing for an orthorhombic lattice"""
h, k, l = hkl[0], hkl[1], hkl[2]
d2m = (h ** 2 / a ** 2) + (k ** 2 / b ** 2) + (l ** 2 / c ** 2)
return sqrt1over(d2m)
def d_hexagonal(a, c, hkl, **kws):
"""d-spacing for an hexagonal lattice"""
h, k, l = hkl[0], hkl[1], hkl[2]
d2m = 4.0 / 3.0 * ((h ** 2 + h * k + k ** 2) / a ** 2) + (l ** 2 / c ** 2)
return sqrt1over(d2m)
def d_monoclinic(a, b, c, beta, hkl, **kws):
"""d-spacing for a monoclinic lattice"""
h, k, l = hkl[0], hkl[1], hkl[2]
rbeta = np.deg2rad(beta)
d2m = (
1.0
/ np.sin(rbeta) ** 2
* (
(h ** 2 / a ** 2)
+ ((k ** 2 * np.sin(rbeta) ** 2) / b ** 2)
+ (l ** 2 / c ** 2)
- ((2 * h * l * np.cos(rbeta) / (a * c)))
)
)
return sqrt1over(d2m)
def d_triclinic(a, b, c, alpha, beta, gamma, hkl, **kws):
"""d-spacing for a triclinic lattice"""
h, k, l = hkl[0], hkl[1], hkl[2]
ralpha = np.deg2rad(alpha)
rbeta = np.deg2rad(beta)
rgamma = np.deg2rad(gamma)
cosralpha = np.cos(ralpha)
# -*- coding: utf-8 -*-
#-----------------------------------------------------------------------------------------
# Name: rigidbody.py
# Purpose: Utility functions useful for computer graphics, especially related to
# rigid body transformations
#
# Author: <NAME>
#
# Created: 07/11/2012
# Modified: 03/30/2017
# Copyright: (c) <NAME>, 2012 - 2017
# Licence: MIT License
#-----------------------------------------------------------------------------------------
"""utility functions related to rigid body transformations for both computer vision and
computer graphics
"""
from __future__ import print_function, division
import numpy as _np
import sympy as _sy
import numpy.linalg as _lalg  # used by the test functions at the bottom of this file
import numpy.testing as _nt  # used by the test functions at the bottom of this file
def dual_matrix(vec):
"""dual matrix (or the hat operator in skew theory), which is the skew-symmetric
matrix associated with the 3x1 vector
Parameters
----------
vec : ndarray
3-element numpy array in :math:`\mathbb{R}^3` space
Returns
-------
vec_hat : numpy matrix
the skew-symmetric matrix, a square matrix, associated with the vector ``vec``.
Notes
-----
Given a vector :math:`v = [v_1, v_2, v_3]^T \in \mathbb{R}^3` the hat operator is
.. math::
\hat{v} =
\\left[\\begin{array}{ccc}
0 & -v_3 & v_2 \\\\
v_3 & 0 & -v_1 \\\\
-v_2 & v_1 & 0
\end{array}\\right]
This isomorphism is represented mathematically as
:math:`\\bigwedge : \mathbb{R}^3 \\rightarrow so(3); u \\mapsto \\hat{u}`
Examples
--------
The following example also demonstrates a property of the dual matrix
:math:`\\hat{u}`, that the column (and row) vectors span the subspace
orthogonal to the vector :math:`u`
>>> u = np.array([1, 2, 3])
>>> uhat = cg.dual_matrix(u)
>>> uhat
matrix([[ 0., -3., 2.],
[ 3., 0., -1.],
[-2., 1., 0.]])
>>> np.dot(uhat, u)
matrix([[ 0., 0., 0.]])
>>> np.dot(uhat.T, u)
matrix([[ 0., 0., 0.]])
"""
x, y, z = vec.reshape(-1)
return _np.matrix(((0.0, -z, y), (z, 0.0, -x), (-y, x, 0.0)))
def skew(vec):
"""return the skew-symmetric matrix from 3x1 vector ``vec``.
Parameters
----------
vec : ndarray
3-element numpy array in :math:`\mathbb{R}^3` space
Returns
-------
vec_hat : ndarray
the skew-symmetric matrix, a square matrix, associated with the vector ``vec``.
Notes
-----
This functions is same as ``dual_matrix()``, except that it returns ndarray.
"""
return _np.asarray(dual_matrix(vec))
def rotMat2D(angle, atype='r'):
"""returns rotation matrix to rotate a vector/point in 2-D by `angle` about the
origin in counter-clockwise direction
Positive `angle` corresponds to rotation in counter-clockwise direction.
Usage: ``rotMat2D(angle [,atype]) -> R``
Parameters
----------
angle : float
the angle of rotation
atype : string ('r' or 'd')
r = radians (default)
d = degrees
Returns
-------
r : numpy matrix
the rotation matrix
Notes
-----
The rotation matrix, :math:`R \in SO(2)`, returned is the following form:
.. math::
R(\\theta) =
\\left[\\begin{array}{lr}
cos(\\theta) & - sin(\\theta) \\\\
sin(\\theta) & cos(\\theta)
\end{array}\\right]
The rotation matrix :math:`R` rotates points/vectors in the xy-Cartesian plane
counter-clockwise by an angle :math:`\\theta` about the origin of the cartesian
coordinate system.
To perform the rotation using the matrix :math:`R`, the position of each
point must be represented by a column vector :math:`v`, containing the coordinates
of the point. A rotated vector is obtained by using the matrix multiplication
:math:`Rv`.
"""
if atype=='d':
angle = _np.radians(angle)
r = _np.matrix(((_np.cos(angle),-_np.sin(angle)),
(_np.sin(angle), _np.cos(angle))))
return r
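# --- Hedged usage sketch: rotating a point on the x-axis by 90 deg CCW moves
# it onto the y-axis (column-vector convention, as described in the docstring).
# >>> v = _np.matrix([[1.0], [0.0]])
# >>> (rotMat2D(90, 'd') * v).round(6).T
# matrix([[0., 1.]])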
def rotMat3D(axis, angle, atype='r', tol=1e-12):
"""returns 3D rotation matrix for rotating a vector/point about an arbitrary
`axis` by `angle` in RHS.
Parameters
----------
axis : 3-tuple
(x, y, z) represent the arbitrary axis about which to rotate
angle : float
the rotation angle.
atype : string
the unit of the rotation angle. ``r`` = radian (default),
``d`` = degree.
tol : float (default=1e-12)
set values below absolute of ``tol`` to zero
Returns
-------
r : numpy matrix
the 3x3 rotation matrix.
Notes
-----
the rotation matrix is computed using the Rodrigues' rotation formula [1]_, [2]_:
.. math::
R(\\theta) = I cos(\\theta) + sin(\\theta) \\hat{k} + (1 - cos(\\theta))kk^T
where, :math:`\\theta` is the angle of rotation, and :math:`k` is the axis about
which the rotation is to be performed.
References
----------
.. [1] Axis-angle representation : https://en.wikipedia.org/wiki/Axis%E2%80%93angle_representation
.. [2] Rodrigues' rotation formula : https://en.wikipedia.org/wiki/Rodrigues%27_rotation_formula
"""
t = _np.radians(angle) if atype=='d' else angle
cos, sin, I = _np.cos, _np.sin, _np.identity
k = _np.array(axis).reshape(3, 1)  # NOTE: axis is assumed to be a unit vector
r = cos(t)*I(3) + sin(t)*dual_matrix(k) + (1 - cos(t))*k*k.T
r[_np.abs(r) < tol] = 0.0
return r
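# --- Hedged usage sketch: Rodrigues' formula with a unit z-axis reproduces a
# plain z-rotation, mapping the x unit vector onto the y unit vector.
# >>> r = rotMat3D((0, 0, 1), 90, 'd')
# >>> (r * _np.matrix([[1.0], [0.0], [0.0]])).T
# matrix([[0., 1., 0.]])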
def rot2(theta, deg=True):
"""returns 2D rotation matrix :math:`R \in SO(2)` to rotate a vector/point in a
plane in counter-clockwise direction
Parameters
----------
theta : float
the angle of rotation
deg : bool
``True`` = degree (default), ``False`` = radians
Returns
-------
r : ndarray
the rotation matrix
Notes
-----
Same as the function ``rotMat2D()`` except for a slight change in the input parameter
specification, plus ``rot2()`` returns ``ndarray`` instead of numpy matrix.
See the function's docstring for details.
See Also
--------
rotMat2D()
"""
atype = 'd' if deg else 'r'
return _np.asarray(rotMat2D(angle=theta, atype=atype))
def rotX(theta, deg=True):
"""3D matrix :math:`R \in SO(3)` for rotating a vector/point about the x-axis
Parameters
----------
theta : float
the angle of rotation about x-axis
deg : bool
``True`` = degree (default), ``False`` = radians
Returns
-------
r : numpy 3x3 matrix
the rotation matrix
Notes
-----
The rotation matrix, :math:`R \in SO(3)`, returned is the following form:
.. math::
R(\\theta) =
\\left[\\begin{array}{ccc}
1 & 0 & 0 \\\\
0 & cos(\\theta) & - sin(\\theta)\\\\
0 & sin(\\theta) & cos(\\theta)
\end{array}\\right]
See also: `rotMat3D()` for rotation about an arbitrary axis using axis angle formula
"""
axis = (1, 0, 0)
angle = _np.deg2rad(theta) if deg else theta
return rotMat3D(axis, angle)
def rotY(theta, deg=True):
"""returns 3D matrix :math:`R \in SO(3)` for rotating a vector/point about the y-axis
Parameters
----------
theta : float
the angle of rotation about y-axis
deg : bool
``True`` = degree (default), ``False`` = radians
Returns
-------
r : numpy 3x3 matrix
the rotation matrix
Notes
-----
The rotation matrix, :math:`R \in SO(3)`, returned is the following form:
.. math::
R(\\theta) =
\\left[\\begin{array}{ccc}
cos(\\theta) & 0 & sin(\\theta)\\\\
0 & 1 & 0 \\\\
-sin(\\theta) & 0 & cos(\\theta)
\end{array}\\right]
See also: `rotMat3D()` for rotation about an arbitrary axis using axis angle formula
"""
axis = (0, 1, 0)
angle = _np.deg2rad(theta) if deg else theta
return rotMat3D(axis, angle)
def rotZ(theta, deg=True):
"""returns 3D matrix :math:`R \in SO(3)` for rotating a vector/point about the z-axis
Parameters
----------
theta : float
the angle of rotation about x-axis
deg : bool
``True`` = degree (default), ``False`` = radians
Returns
-------
r : numpy 3x3 matrix
the rotation matrix
Notes
-----
The rotation matrix, :math:`R \in SO(3)`, returned is the following form:
.. math::
R(\\theta) =
\\left[\\begin{array}{ccc}
cos(\\theta) & -sin(\\theta) & 0 \\\\
sin(\\theta) & cos(\\theta) & 0 \\\\
0 & 0 & 1
\end{array}\\right]
See also: `rotMat3D()` for rotation about an arbitrary axis using axis angle formula
"""
axis = (0, 0, 1)
angle = _np.deg2rad(theta) if deg else theta
return rotMat3D(axis, angle)
def euler2rot(angle1, angle2, angle3, order='X-Y-Z', ertype='extrinsic', deg=True):
"""returns rotation matrix from Euler angles
Parameters
----------
angle1 : float
angle for first rotation
angle2 : float
angle for second rotation
angle3 : float
angle for third rotation
order : string
valid string sequence that specifies the order of rotation. Examples are 'X-Y-Z',
'Z-Y-Z', 'z-x-z', 'z-x-y'. Furthermore, if `ertype` is "intrinsic", then 'X-Y-Z'
returns :math:`R=R_x(\\theta_1) R_y(\\theta_2) R_z(\\theta_3)` and if `ertype` is "extrinsic",
then the same sequence, 'X-Y-Z', returns :math:`R=R_z(\\theta_3) R_y(\\theta_2) R_x(\\theta_1)`
ertype : string ('extrinsic' or 'intrinsic')
the type of elemental rotations. `extrinsic` represent rotations about the axes of
the fixed original coordinate system, `intrinsic` (or body-fixed) represent rotations
about the axes of the rotating coordinate system attached to the rigid body
deg : bool
`True` = degree (default), `False` = radians
Returns
-------
r : ndarray
the rotation matrix
Notes
-----
1. In the context of this function "Euler angles" constitutes both the Proper Euler angles
and the Tait-Bryan angles.
2. The order of the input angles are specified in the order of rotations (corresponding
to the `order`). They are not specified with respect to any particular axis.
References
----------
.. [1] Euler angles: https://en.wikipedia.org/wiki/Euler_angles
"""
X = rotX
Y = rotY
Z = rotZ
order = order.upper()
order = order.split('-')
if not set(order).issubset({'X', 'Y', 'Z'}):
raise ValueError('Incorrect order parameter ({}) specified.'.format(order))
if ertype == 'extrinsic':
order.reverse()
composition = '{}(angle3, deg)*{}(angle2, deg)*{}(angle1, deg)'.format(*order)
elif ertype == 'intrinsic':
composition = '{}(angle1, deg)*{}(angle2, deg)*{}(angle3, deg)'.format(*order)
else:
raise ValueError('Incorrect elemental rotation parameter ({}) specified.'.format(ertype))
#print(composition)
return eval(composition)
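# --- Hedged usage sketch: an extrinsic X-Y-Z sequence equals the intrinsic
# Z-Y-X sequence read with the angles in reverse order.
# >>> r_ext = euler2rot(20, 30, 40, 'X-Y-Z', 'extrinsic')   # rotZ(40)*rotY(30)*rotX(20)
# >>> r_int = euler2rot(40, 30, 20, 'Z-Y-X', 'intrinsic')   # same composition
# >>> _np.allclose(r_ext, r_int)
# True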
def se2(x, y, theta=0, deg=True):
"""returns homogeneous transformation matrix SE(2) for planar translation and rotation
Parameters
----------
x : float
translation along x-axis
y : float
translation along y-axis
theta : float
angle of rotation in the plane
deg : bool
``True`` = degree (default), ``False`` = radians
Returns
-------
T : numpy matrix
homogeneous 3x3 transformation matrix of the form:
.. math::
T(x, y, \\theta) =
\\left[\\begin{array}{ccc}
cos(\\theta) & - sin(\\theta) & x \\\\
sin(\\theta) & cos(\\theta) & y \\\\
0 & 0 & 1
\end{array}\\right]
Example
-------
>>> T = rg.se2(1, 2, 30)
matrix([[ 0.8660254, -0.5 , 1. ],
[ 0.5 , 0.8660254, 2. ],
[ 0. , 0. , 1. ]])
References
----------
.. [1] Robotics, Vision and Control: Fundamental Algorithms in MATLAB, <NAME>
.. [2] Code: https://github.com/petercorke/robotics-toolbox-matlab
"""
T = homo(rot2(theta, deg))
T[:2, 2] = [x, y]
return _np.matrix(T)
#%% Utility functions
def homo(m):
"""returns a homogeneous matrix constructed from the matrix `m`
Parameters
----------
m : ndarray or numpy matrix
a 2x2 or 3x3 ndarray or matrix. Usually `m` is SO(2) or SO(3).
Returns
-------
h : ndarray or numpy matrix
a 3x3 or 4x4 homogeneous matrix
"""
rows, cols = m.shape
assert rows == cols
h = _np.eye(rows + 1, cols + 1)
h[:rows, :cols] = m[:, :]
if isinstance(m, _np.matrix):
return _np.matrix(h)
else:
return h
def rot2euler(r, order='X-Y-Z'):
"""returns the Euler angles corresponding to the rotation matrix
Parameters
----------
r : ndarray
3x3 rotation matrix
order : string
only 'X-Y-Z' & 'Z-Y-X' extrinsic rotations, which correspond to
'Rz(ψ)Ry(θ)Rx(ϕ)' & 'Rx(ϕ)Ry(θ)Rz(ψ)' respectively are implemented.
These compositions also correspond to ZYX and XYZ intrinsic rotations
respectively.
Returns
-------
phi : float
angle w.r.t. x-axis or roll, in radians
theta : float
angle w.r.t. y-axis or pitch, in radians
psi : float
angle w.r.t. z-axis or yaw, in radians
Reference
---------
The function "RotationMatrixToEulerAngles(R)" in VSRS_to_JSON.py
Note
----
If the pitch angle theta corresponding to the rotation matrix is very
near 90°, we approach a gimbal-lock situation, and there are infinitely
many solutions.
"""
def almost_equal_to_zero(val):
return abs(val) < 1e-12
psi = 0.0 # yaw
theta = 0.0 # pitch
phi = 0.0 # roll
if order == 'X-Y-Z': # extrinsic XYZ or intrinsic ZYX
if almost_equal_to_zero(r[0, 0]) and almost_equal_to_zero(r[1, 0]):
# Gimbal-lock; infinite solutions possible, return one solution
psi = _np.arctan2(r[1, 2], r[0, 2])
if r[2, 0] < 0.0:
theta = _np.pi / 2
else:
theta = -_np.pi / 2
phi = 0.0
else:
psi = _np.arctan2(r[1, 0], r[0, 0])
if almost_equal_to_zero(r[0, 0]):
theta = _np.arctan2(-r[2, 0], r[1, 0] / _np.sin(psi))
else:
theta = _np.arctan2(-r[2, 0], r[0, 0] / _np.cos(psi))
phi = _np.arctan2(r[2, 1], r[2, 2])
elif order == 'Z-Y-X': # extrinsic ZYX or intrinsic XYZ
if almost_equal_to_zero(r[1, 2]) and almost_equal_to_zero(r[2, 2]):
# Gimbal-lock; infinite solutions possible, return one solution
phi = _np.arctan2(r[0, 1], r[1, 1])
if r[2, 2] < 0.0:
theta = -_np.pi / 2
else:
theta = _np.pi / 2
psi = 0.0
else:
phi = _np.arctan2(-r[1, 2], r[2, 2])
if almost_equal_to_zero(r[2, 2]):
theta = _np.arctan2(r[0, 2], -r[1, 2] / _np.sin(phi))  # r[1, 2] (not r[1, 0]) carries -sin(phi)*cos(theta)
else:
theta = _np.arctan2(r[0, 2], r[2, 2] / _np.cos(phi))
psi = _np.arctan2(-r[0, 1], r[0, 0])
else: # order different from 'X-Y-Z' and 'Z-Y-X'
raise NotImplementedError("To be implemented")
return phi, theta, psi
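# --- Hedged usage sketch: round-tripping euler2rot() through rot2euler().
# Note that rot2euler() returns radians while euler2rot() defaults to degrees.
# >>> r = euler2rot(10, 20, 30, 'X-Y-Z', 'extrinsic')
# >>> phi, theta, psi = rot2euler(_np.asarray(r), 'X-Y-Z')
# >>> _np.round(_np.rad2deg([phi, theta, psi]), 6)
# array([10., 20., 30.])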
#%% Symbolic Computation functions (experimental, requires Sympy)
def rotX_symbolic(angle='ϕ'):
"""
Example
-------
>>> import sympy as sy
>>> rotX_symbolic('phi') # same as rg.rotX_symbolic()
+- -+
| 1 0 0 |
| 0 cos(𝜙) sin(𝜙) |
| 0 −sin(𝜙) cos(𝜙) |
+- -+
"""
if isinstance(angle, _sy.Symbol):
t = angle
else:
t = _sy.symbols(angle, real=True)
r = _sy.Matrix(((1, 0, 0 ),
(0, _sy.cos(t), -_sy.sin(t)),
(0, _sy.sin(t), _sy.cos(t)),
))
return r
def rotY_symbolic(angle='θ'):
if isinstance(angle, _sy.Symbol):
t = angle
else:
t = _sy.symbols(angle, real=True)
r = _sy.Matrix(((_sy.cos(t), 0, _sy.sin(t)),
( 0, 1, 0 ),
(-_sy.sin(t), 0, _sy.cos(t)),
))
return r
def rotZ_symbolic(angle='ψ'):
if isinstance(angle, _sy.Symbol):
t = angle
else:
t = _sy.symbols(angle, real=True)
r = _sy.Matrix(((_sy.cos(t), -_sy.sin(t), 0),
(_sy.sin(t), _sy.cos(t), 0),
( 0, 0, 1),
))
return r
def euler2rot_symbolic(angle1='ϕ', angle2='θ', angle3='ψ', order='X-Y-Z', ertype='extrinsic'):
"""returns symbolic expression for the composition of elementary rotation matrices
Parameters
----------
angle1 : string or sympy.Symbol
angle representing first rotation
angle2 : string or sympy.Symbol
angle representing second rotation
angle3 : string or sympy.Symbol
angle representing third rotation
order : string
valid string sequence that specifies the order of rotation. See `euler2rot()`
for details
ertype : string ('extrinsic' or 'intrinsic') See `euler2rot()` for details
the type of elemental rotations.
deg : bool
`True` = degree (default), `False` = radians
Example
-------
>>> R = euler2rot_symbolic('1', '2', '3', 'X-Y-Z' , 'intrinsic')
>>> c, s = sy.symbols('c, s', cls=sy.Function)
>>> R.subs({sy.cos:c, sy.sin:s})
Matrix([
[ c(2)*c(3), -c(2)*s(3), s(2)],
[ c(1)*s(3) + c(3)*s(1)*s(2), c(1)*c(3) - s(1)*s(2)*s(3), -c(2)*s(1)],
[-c(1)*c(3)*s(2) + s(1)*s(3), c(1)*s(2)*s(3) + c(3)*s(1), c(1)*c(2)]])
Note
----
The order of the input angles are specified in the order of rotations (corresponding
to the `order`). They are not specified with respect to any particular axis.
"""
X = rotX_symbolic
Y = rotY_symbolic
Z = rotZ_symbolic
order = order.split('-')
if ertype == 'extrinsic':
order.reverse()
composition = '{}(angle3)*{}(angle2)*{}(angle1)'.format(*order)
elif ertype == 'intrinsic':
composition = '{}(angle1)*{}(angle2)*{}(angle3)'.format(*order)
else:
raise ValueError('Incorrect elemental rotation parameter.')
#print(composition)
return eval(composition)
#%% TEST FUNCTIONS
def _test_dual_matrix():
"""basic test for dual_matrix() function
"""
v = _np.array([1, 2, 3])
dm = _np.matrix(([[ 0., -3., 2.],
[ 3., 0., -1.],
[-2., 1., 0.]]))
_nt.assert_array_almost_equal(dm, dual_matrix(v), decimal=6)
print("test dual_matrix() successful")
def _test_skew():
"""basic test for skew() function
"""
v = _np.array([1, 2, 3])
dm = dual_matrix(v)
sk = skew(v)
_nt.assert_almost_equal(dm, sk, decimal=6)
print("test skew() successful")
def _test_rotMat2D():
"""test the function rotMat2D()
"""
# simple test
angle = 45 # 45 degrees
r = rotMat2D(angle,'d')
v = 1/_np.sqrt(2)
rExp = _np.matrix([[v, -v],[v, v]])
_nt.assert_array_almost_equal(r, rExp) # this is probably a good way to test for float values
angRadians = _np.deg2rad(45)
rr = rotMat2D(angRadians)
_nt.assert_array_almost_equal(rr, rExp)
# product of two rotation matrices
randomAngle = lambda: _np.random.randint(0, 91)  # upper bound exclusive; matches the deprecated random_integers(0, 90)
ra1, ra2 = randomAngle(), randomAngle()
ra3 = ra1 + ra2
r1 = rotMat2D(ra1, 'd')
r2 = rotMat2D(ra2, 'd')
r3 = rotMat2D(ra3, 'd')
r2r1 = r2*r1
r1r2 = r1*r2
_nt.assert_array_almost_equal(r2r1, r1r2)
_nt.assert_array_almost_equal(r2r1, r3)
# rotation matrix properties
_nt.assert_almost_equal(_lalg.det(r2), 1.0, decimal=8) # det() = +1
_nt.assert_array_almost_equal(r2*r2.T, _np.identity(2)) # orthogonal matrix
_nt.assert_array_almost_equal(r2.T, _lalg.inv(r2)) # inverse = transpose
print("test rotMat2D() successful")
def _test_rotMat3D():
"""test the function rotMat3D
"""
randomAngle = lambda: _np.random.randint(0, 91)  # upper bound exclusive; matches the deprecated random_integers(0, 90)
angle = randomAngle()
r = rotMat3D((1, 0, 0), angle, 'd')
c, s = _np.cos(_np.deg2rad(angle)), _np.sin(_np.deg2rad(angle))
rExp = _np.matrix([[ 1.0, 0.0, 0.0],
[ 0.0, c, -s],
[ 0.0, s, c]])
_nt.assert_array_almost_equal(r, rExp, decimal=8)
# rotation matrix properties
_nt.assert_almost_equal(_lalg.det(r), 1.0, decimal=8) # det() = +1
_nt.assert_array_almost_equal(r*r.T, _np.identity(3)) # orthogonal matrix
_nt.assert_array_almost_equal(r.T, _lalg.inv(r)) # inverse = transpose
print("test rotMat3D() successful")
def _test_rot2():
theta = 15.0
r1 = rot2(theta)
r2 = rot2(_np.deg2rad(theta), deg=False)
_nt.assert_array_almost_equal(r1, r2)
print("test rot2() successful")
def _test_rotX():
theta = 15.0
r1 = rotX(theta)
assert isinstance(r1, _np.matrix)
r2 = rotX(_np.deg2rad(theta), deg=False)
_nt.assert_array_almost_equal(r1, r2)
print("test rotX() successful")
def _test_rotY():
theta = 15.0
r1 = rotY(theta)
assert isinstance(r1, _np.matrix)
r2 = rotY(_np.deg2rad(theta), deg=False)
_nt.assert_array_almost_equal(r1, r2)
print("test rotY() successful")
def _test_rotZ():
theta = 15.0
r1 = rotZ(theta)
assert isinstance(r1, _np.matrix)
r2 = rotZ(_np.deg2rad(theta), deg=False)
_nt.assert_array_almost_equal(r1, r2)
print("test rotZ() successful")
def _test_euler2rot():
# check with verified, known result
r = euler2rot(0.1, 0.2, 0.3, 'Z-Y-Z', 'intrinsic', False)
assert isinstance(r, _np.matrix)
rexp = _np.matrix([[ 0.902113 , -0.38355704, 0.19767681],
[ 0.3875172 , 0.92164909, 0.01983384],
[-0.18979606, 0.0587108 , 0.98006658]])
_nt.assert_array_almost_equal(r, rexp, decimal=6)
# validate intrinsic rotation
r = euler2rot(20, 30, 40, 'X-Y-Z', 'intrinsic')
rexp = rotX(20)*rotY(30)*rotZ(40)
_nt.assert_array_almost_equal(r, rexp, decimal=6)
r = euler2rot(20, 30, 40, 'Z-Y-Z', 'intrinsic')
rexp = rotZ(20)*rotY(30)*rotZ(40)
_nt.assert_array_almost_equal(r, rexp, decimal=6)
# validate extrinsic rotations
r = euler2rot(20, 30, 40, 'Z-Y-Z', 'extrinsic')
rexp = rotZ(40)*rotY(30)*rotZ(20)
_nt.assert_array_almost_equal(r, rexp, decimal=6)
r = euler2rot(20, 30, 40, 'X-Y-Z', 'extrinsic')
rexp = rotZ(40)*rotY(30)*rotX(20)
_nt.assert_array_almost_equal(r, rexp, decimal=6)
import sparse_numeric_table as spt
import numpy as np
import pandas as pd
import tempfile
import os
EXAMPLE_TABLE_STRUCTURE = {
'elementary_school': {
'lunchpack_size': {'dtype': '<f8'},
'num_friends': {'dtype': '<i8'},
},
'high_school': {
'time_spent_on_homework': {'dtype': '<f8'},
'num_best_friends': {'dtype': '<i8'},
},
'university': {
'num_missed_classes': {'dtype': '<i8'},
'num_fellow_students': {'dtype': '<i8'},
},
}
def _make_example_table(prng, size, start_index=0):
"""
Children start in elementary school. 10% progress to high school, and 10%
of those progress to university.
At each stage of their education, statistics are collected that map to
columns, while every child is represented by a row -- making this a
typical example of a sparse table.
"""
t = {}
t['elementary_school'] = spt.dict_to_recarray(
{
spt.IDX: start_index + np.arange(size).astype(spt.IDX_DTYPE),
'lunchpack_size': prng.uniform(size=size).astype('<f8'),
'num_friends': prng.uniform(
low=0,
high=5,
size=size).astype('<i8'),
}
)
high_school_size = size//10
t['high_school'] = spt.dict_to_recarray(
{
spt.IDX: prng.choice(
t['elementary_school'][spt.IDX],
size=high_school_size,
replace=False),
'time_spent_on_homework': 100 + 100*prng.uniform(
size=high_school_size).astype('<f8'),
'num_best_friends': prng.uniform(
low=0,
high=5,
size=high_school_size).astype('<i8'),
}
)
university_size = high_school_size//10
t['university'] = spt.dict_to_recarray(
{
spt.IDX: prng.choice(
t['high_school'][spt.IDX],
size=university_size,
replace=False),
'num_missed_classes': 100*prng.uniform(
size=university_size).astype('<i8'),
'num_fellow_students': prng.uniform(
low=0,
high=5,
size=university_size).astype('<i8'),
}
)
spt.assert_structure_keys_are_valid(structure=EXAMPLE_TABLE_STRUCTURE)
spt.assert_table_has_structure(table=t, structure=EXAMPLE_TABLE_STRUCTURE)
return t
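# --- Hedged usage sketch: the three levels share the spt.IDX column, and each
# level keeps roughly a tenth of the rows of the previous one.
# >>> prng = np.random.Generator(np.random.MT19937(seed=42))
# >>> t = _make_example_table(prng=prng, size=1000)
# >>> [t[level].shape[0] for level in t]
# [1000, 100, 10]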
def test_from_records():
prng = np.random.Generator(np.random.MT19937(seed=0))
rnd = prng.uniform
# define what your table will look like
# -------------------------------------
structure = {
"A": {
"a": {"dtype": '<f8'},
"b": {"dtype": '<f8'},
},
"B": {
"c": {"dtype": '<f8'},
"d": {"dtype": '<f8'},
},
"C": {
"e": {"dtype": '<f8'},
},
}
# populate the table using records
# --------------------------------
with tempfile.TemporaryDirectory(prefix='test_sparse_table') as tmp:
num_jobs = 100
n = 5
job_result_paths = []
for j in range(num_jobs):
# map the population of the sparse table onto many jobs
# -----------------------------------------------------
i = j*n
table_records = {}
table_records["A"] = []
table_records["A"].append({spt.IDX: i+0, "a": rnd(), "b": rnd()})
table_records["A"].append({spt.IDX: i+1, "a": rnd(), "b": rnd()})
table_records["A"].append({spt.IDX: i+2, "a": rnd(), "b": rnd()})
table_records["A"].append({spt.IDX: i+3, "a": rnd(), "b": rnd()})
table_records["A"].append({spt.IDX: i+4, "a": rnd(), "b": rnd()})
table_records["B"] = []
table_records["B"].append({spt.IDX: i+0, "c": rnd(), "d": 5*rnd()})
table_records["B"].append({spt.IDX: i+3, "c": rnd(), "d": 5*rnd()})
table_records["C"] = []
if rnd() > 0.9:
table_records["C"].append({spt.IDX: i+3, "e": -rnd()})
table = spt.table_of_records_to_sparse_numeric_table(
table_records=table_records,
structure=structure)
path = os.path.join(tmp, '{:06d}.tar'.format(j))
job_result_paths.append(path)
spt.write(path=path, table=table, structure=structure)
# reduce
# ------
full_table = spt.concatenate_files(
list_of_table_paths=job_result_paths,
structure=structure)
spt.assert_table_has_structure(
table=full_table,
structure=structure)
def test_write_read_full_table():
prng = np.random.Generator(np.random.MT19937(seed=1337))
my_table = _make_example_table(prng=prng, size=1000*1000)
with tempfile.TemporaryDirectory(prefix='test_sparse_table') as tmp:
path = os.path.join(tmp, 'my_table.tar')
spt.write(
path=path,
table=my_table,
structure=EXAMPLE_TABLE_STRUCTURE)
my_table_back = spt.read(
path=path,
structure=EXAMPLE_TABLE_STRUCTURE)
spt.assert_tables_are_equal(my_table, my_table_back)
# no structure
path_nos = os.path.join(tmp, 'my_table_no_structure.tar')
spt.write(path=path_nos, table=my_table)
my_table_back_nos = spt.read(path=path_nos)
spt.assert_tables_are_equal(my_table, my_table_back_nos)
spt.assert_table_has_structure(
table=my_table_back_nos,
structure=EXAMPLE_TABLE_STRUCTURE)
def test_write_read_empty_table():
prng = np.random.Generator(np.random.MT19937(seed=1337))
empty_table = _make_example_table(prng=prng, size=0)
with tempfile.TemporaryDirectory(prefix='test_sparse_table') as tmp:
path = os.path.join(tmp, 'my_empty_table.tar')
spt.write(
path=path,
table=empty_table,
structure=EXAMPLE_TABLE_STRUCTURE)
my_table_back = spt.read(
path=path,
structure=EXAMPLE_TABLE_STRUCTURE)
spt.assert_tables_are_equal(empty_table, my_table_back)
# no structure
path_nos = os.path.join(tmp, 'my_empty_table_no_structure.tar')
spt.write(path=path_nos, table=empty_table)
my_table_back_nos = spt.read(path=path_nos)
spt.assert_tables_are_equal(empty_table, my_table_back_nos)
spt.assert_table_has_structure(
table=my_table_back_nos,
structure=EXAMPLE_TABLE_STRUCTURE)
def test_merge_common():
prng = np.random.Generator(np.random.MT19937(seed=1337))
my_table = _make_example_table(prng=prng, size=1000*1000)
common_indices = spt.intersection(
[my_table[lvl][spt.IDX] for lvl in my_table]
)
my_common_table = spt.cut_table_on_indices(
table=my_table,
common_indices=common_indices
)
my_sorted_common_table = spt.sort_table_on_common_indices(
table=my_common_table,
common_indices=common_indices
)
np.testing.assert_array_equal(
my_sorted_common_table['elementary_school'][spt.IDX],
my_sorted_common_table['high_school'][spt.IDX]
)
np.testing.assert_array_equal(
my_sorted_common_table['elementary_school'][spt.IDX],
my_sorted_common_table['university'][spt.IDX]
)
my_common_df = spt.make_rectangular_DataFrame(table=my_sorted_common_table)
np.testing.assert_array_equal(
my_sorted_common_table['elementary_school'][spt.IDX],
my_common_df[spt.IDX]
)
def test_merge_across_all_levels_random_order_indices():
prng = np.random.Generator(np.random.MT19937(seed=1337))
size = 1000*1000
my_table = _make_example_table(prng=prng, size=size)
has_elementary_school = my_table['elementary_school'][spt.IDX]
has_high_school = my_table['high_school'][spt.IDX]
has_university = my_table['university'][spt.IDX]
has_big_lunchpack = my_table['elementary_school'][spt.IDX][
my_table['elementary_school']['lunchpack_size'] > 0.5]
has_2best_friends = my_table['high_school'][spt.IDX][
my_table['high_school']['num_best_friends'] >= 2]
cut_indices = np.intersect1d(has_elementary_school, has_high_school)
cut_indices = np.intersect1d(cut_indices, has_university)
cut_indices = np.intersect1d(cut_indices, has_big_lunchpack)
cut_indices = np.intersect1d(cut_indices, has_2best_friends)
prng.shuffle(cut_indices)  # use the seeded generator for reproducibility
cut_table = spt.cut_table_on_indices(
table=my_table,
common_indices=cut_indices,
level_keys=['elementary_school', 'high_school', 'university'])
sorted_cut_table = spt.sort_table_on_common_indices(
table=cut_table,
common_indices=cut_indices
)
np.testing.assert_array_equal(
sorted_cut_table['elementary_school'][spt.IDX],
sorted_cut_table['high_school'][spt.IDX]
)
np.testing.assert_array_equal(
sorted_cut_table['elementary_school'][spt.IDX],
sorted_cut_table['university'][spt.IDX]
)
np.testing.assert_array_equal(
sorted_cut_table['elementary_school'][spt.IDX],
cut_indices
)
def test_merge_random_order_indices():
prng = np.random.Generator(np.random.MT19937(seed=1337))
size = 1000*1000
my_table = _make_example_table(prng=prng, size=size)
has_elementary_school = my_table['elementary_school'][spt.IDX]
has_high_school = my_table['high_school'][spt.IDX]
has_big_lunchpack = my_table['elementary_school'][spt.IDX][
my_table['elementary_school']['lunchpack_size'] > 0.5]
has_2best_friends = my_table['high_school'][spt.IDX][
my_table['high_school']['num_best_friends'] >= 2]
cut_indices = np.intersect1d(has_elementary_school, has_high_school)
cut_indices = np.intersect1d(cut_indices, has_big_lunchpack)
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Fri Feb 17 12:38:44 2017
@author: ahefny, zmarinho
"""
from __future__ import print_function
from collections import defaultdict
import numpy as np
import theano
import theano.printing
import theano.tensor as T
import theano.tensor.slinalg
from theano.tensor.nlinalg import matrix_inverse
import rpsp.rpspnets.psr_lite.psr_base as psr_base
import rpsp.rpspnets.psr_lite.rnn_filter as rnn_filter
import rpsp.rpspnets.psr_lite.utils.nn as nn
from rpsp.rpspnets.psr_lite.utils.nn import cg_solve, cg_solve_batch, neumann_inv, neumann_inv_batch, \
batched_matrix_inverse
from rpsp.rpspnets.psr_lite.utils.nn import reshape_mat_f
class RFFPSR_RNN(rnn_filter.BaseRNNFilter):
'''
Theano wrapper of RFFPSR.
'''
def __init__(self, psr, optimizer='sgd', optimizer_step=1.0,
optimizer_iterations=0, val_trajs=0,
optimizer_min_step=1e-5, rng=None, opt_h0=False,
psr_norm='I', psr_cond='kbr', psr_iter=0, psr_smooth='I'):
rnn_filter.BaseRNNFilter.__init__(self, psr.state_dimension, psr.horizon_length,
optimizer, optimizer_step, optimizer_iterations,
optimizer_min_step, val_trajs, rng=rng, opt_h0=opt_h0)
self._psr_iter = psr_iter
self._psr_cond = psr_cond
self._state_norm = psr_norm
smooth_toks = psr_smooth.split('_')
self._state_smooth = smooth_toks[0]
if len(smooth_toks)>1:
self._state_smooth_coeff = float(smooth_toks[1])
self._f_obs = None
self._f_act = None
self._f_fut_act = None
self._reset_psr(psr)
self._obs_dim = 0
solve_dict = defaultdict(lambda: self._tf_solve_inverse, {'kbrcg': self._tf_solve_cg, 'kbrMIA': self._tf_solve_mia, 'I': self._tf_solve_ignore})
solve_dict_batch = defaultdict(lambda: self._tf_solve_inverse_batch, {'kbrcg': self._tf_solve_cg_batch, 'kbrMIA': self._tf_solve_mia_batch, 'I': self._tf_solve_ignore})
self._solve = solve_dict[self._psr_cond]
self._solve_batch = solve_dict_batch[self._psr_cond]
self._norm_method = defaultdict(lambda: self._t_state_noop , {'l2': self._t_state_l2norm,
'l2clamp': self._t_clamp_state_l2norm,
'coord':self._t_clamp_state_coord})[self._state_norm]
self._smooth = defaultdict(lambda: self._t_state_noop, {'interp': self._t_state_interpolate})[self._state_smooth]
self._max_state_norm2 = 100.0
self._max_state_norm = 10.0
self._max_state_coord = 10.0
self._min_state_coord = 1e-6
def _t_rff(self, x, V):
y = T.dot(x, V)
return T.concatenate([T.sin(y), T.cos(y)], axis=y.ndim-1) / T.sqrt(V.shape[1].astype(theano.config.floatX))
def _t_rffpca(self, fext, name):
'''
Given an RFFPCA feature extractor return:
- A handle to an equivalent symbolic function.for vectors
- A shared variable storing projection matrix.
- A shared variable storing RFF matrix.
'''
U = theano.shared(name='U_%s' % name, value=fext._U.astype(theano.config.floatX))
V = theano.shared(name='V_%s' % name, value=fext._base_extractor._V.astype(theano.config.floatX))
f = lambda x: T.dot(self._t_rff(x, V), U)
return f, U, V
def set_psr(self, rff_psr):
self._rffpsr = rff_psr
self._fut = self._rffpsr._fut
self._feat_dim = self._rffpsr._feat_dim
self._state_dim = self._rffpsr.state_dimension
self._fext_fut_act = self._rffpsr._fext_fut_act
self._fext_act = self._rffpsr._fext_act
self._fext_obs = self._rffpsr._fext_obs
self._feat_dim = self._rffpsr._feat_dim
return
#overrides
def _load(self, params):
print('load rffpsr rnn')
self._rffpsr._load(params['rffpsr'])
self._reset_psr(self._rffpsr)
return
#overrides
def _save(self):
params={}
params['rffpsr'] = self._rffpsr._save()
return params
def _reset_psr(self, psr):
self.set_psr(psr)
self._f_obs = lambda x: x
self._f_act = lambda x: x
self._f_fut_act = lambda x: x
return
def train(self, traj_obs, traj_act, traj_act_probs=None, on_unused_input='raise'):
self._reset_psr(self._rffpsr)
return rnn_filter.BaseRNNFilter.train(self, traj_obs, traj_act, traj_act_probs=traj_act_probs, on_unused_input=on_unused_input)
def _process_traj(self, traj_obs, traj_act):
if traj_obs.shape[0] <= self._fut + 3:
return None
else:
data = psr_base.extract_timewins([traj_obs], [traj_act], self._fut, 1)[0]
return self._process_obs(data.obs), \
self._process_act(data.act), \
self._process_fut_act(data.fut_act), \
data.fut_obs
def _process_obs(self, obs):
ofeat = self._fext_obs.process(obs)
assert not np.isnan(ofeat).any(), 'obsfeat contains NaN'
assert not np.isinf(ofeat).any(), 'obsfeat contains inf'
return ofeat
def _process_act(self, act):
afeat = self._fext_act.process(act)
assert not np.isnan(afeat).any(), 'actfeat contains NaN'
assert not np.isinf(afeat).any(), 'actfeat contains inf'
return afeat
def _process_fut_act(self, fut_act):
futafeat = self._fext_fut_act.process(fut_act)
assert not np.isnan(futafeat).any(), 'futafeat contains NaN'
assert not np.isinf(futafeat).any(), 'futafeat contains inf'
return futafeat
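# --- Hedged standalone sketch (not from the original file): what `_t_rff`
# computes. Random Fourier features phi(x) = [sin(xV), cos(xV)] / sqrt(D)
# approximate an RBF kernel exp(-||x - y||**2 / 2) when the columns of V are
# drawn i.i.d. from N(0, I); the dimensions and bandwidth here are illustrative.
import numpy as np

def _rff(x, V):
    y = x @ V
    return np.concatenate([np.sin(y), np.cos(y)], axis=-1) / np.sqrt(V.shape[1])

_rng = np.random.default_rng(0)
_d, _D = 3, 5000                                    # input dim, number of features
_V = _rng.standard_normal((_d, _D))                 # unit-bandwidth RBF frequencies
_x, _y = _rng.standard_normal(_d), _rng.standard_normal(_d)
approx = _rff(_x, _V) @ _rff(_y, _V)                # feature-space inner product
exact = np.exp(-np.linalg.norm(_x - _y) ** 2 / 2)   # kernel value; error ~ 1/sqrt(D)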
# -*- coding: utf-8 -*-
import random as rn
rn.seed(2)
from numpy.random import seed
seed(2)
from tensorflow import set_random_seed
set_random_seed(2)
import tensorflow as tf
session_conf = tf.ConfigProto(intra_op_parallelism_threads=1, inter_op_parallelism_threads=1)
from keras import backend as K
sess = tf.Session(graph=tf.get_default_graph(), config=session_conf)
K.set_session(sess)
import os
import numpy as np
def load_data(name='freq', get_all_special_data=True):
if name == 'freq':
path = 'D:/Python/TAR/dataset/metadata/bags of words'
elif name == 'chi2':
path = 'D:/Python/TAR/chi2_scores/metadata/bags_of_words'
elif name == 'tfidf':
path = 'D:/Python/TAR/tf-idf_scores/metadata/bags_of_words'
else:
raise ValueError
train_negative = path + '/negative'
train_positive = path + '/positive'
test_negative = path + '/negative_test'
test_positive = path + '/positive_test'
special_path = 'D:/Python/TAR/special-data/bags of words'
special_train_negative = special_path + '/negative/'
special_train_positive = special_path + '/positive/'
special_test_negative = special_path + '/negative_test/'
special_test_positive = special_path + '/positive_test/'
#
# load train data
#
train = []
train_X = []
train_S = []
train_y = []
os.chdir(train_negative)
negative_files = os.listdir()
#print('negative train files:', len(negative_files))
for txtfile in negative_files:
with open(txtfile, 'r', encoding='utf8') as file:
vector = file.readlines()
vector = [int(token[:-1]) for token in vector] # remove '\n', convert values to int
special_vector = []
with open(special_train_negative + txtfile, 'r', encoding='utf-8') as sf:
special_vector = sf.readlines()
special_vector = [float(token[:-1]) for token in special_vector]
if not get_all_special_data:
special_vector = [special_vector[1], special_vector[4], special_vector[5], special_vector[8]]
train.append([np.array(vector), np.array(special_vector), np.array([1, 0])])
os.chdir(train_positive)
positive_files = os.listdir()
#print('positive train files:', len(positive_files))
for txtfile in positive_files:
with open(txtfile, 'r', encoding='utf8') as file:
vector = file.readlines()
vector = [int(token[:-1]) for token in vector] # remove '\n', convert values to int
special_vector = []
with open(special_train_positive + txtfile, 'r', encoding='utf-8') as sf:
special_vector = sf.readlines()
special_vector = [float(token[:-1]) for token in special_vector]
if not get_all_special_data:
special_vector = [special_vector[1], special_vector[4], special_vector[5], special_vector[8]]
# oversample the positive class: each positive sample is appended five times
train.append([np.array(vector), np.array(special_vector), np.array([0, 1])])
train.append([np.array(vector), np.array(special_vector), np.array([0, 1])])
train.append([np.array(vector), np.array(special_vector), np.array([0, 1])])
train.append([np.array(vector), np.array(special_vector), np.array([0, 1])])
train.append([np.array(vector), np.array(special_vector), np.array([0, 1])])
train = np.array(train)
#np.random.shuffle(train)
# don't shuffle here, shuffle data controlably when necessary
for sample in train:
train_X.append(sample[0])
train_S.append(sample[1])
train_y.append(sample[2])
#
# load test data
#
test = []
test_X = []
test_S = []
test_y = []
os.chdir(test_negative)
negative_files = os.listdir()
#print('negative test files:', len(negative_files))
for txtfile in negative_files:
with open(txtfile, 'r', encoding='utf8') as file:
vector = file.readlines()
vector = [int(token[:-1]) for token in vector] # remove '\n', convert values to int
special_vector = []
with open(special_test_negative + txtfile, 'r', encoding='utf-8') as sf:
special_vector = sf.readlines()
special_vector = [float(token[:-1]) for token in special_vector]
if not get_all_special_data:
special_vector = [special_vector[1], special_vector[4], special_vector[5], special_vector[8]]
test.append([np.array(vector), np.array(special_vector), np.array([1, 0])])
os.chdir(test_positive)
positive_files = os.listdir()
#print('positive test files:', len(positive_files))
for txtfile in positive_files:
with open(txtfile, 'r', encoding='utf8') as file:
vector = file.readlines()
vector = [int(token[:-1]) for token in vector] # remove '\n', convert values to int
special_vector = []
with open(special_test_positive + txtfile, 'r', encoding='utf-8') as sf:
special_vector = sf.readlines()
special_vector = [float(token[:-1]) for token in special_vector]
if not get_all_special_data:
special_vector = [special_vector[1], special_vector[4], special_vector[5], special_vector[8]]
test.append([np.array(vector), np.array(special_vector), np.array([0, 1])])
test = np.array(test)
#np.random.shuffle(test)
for sample in test:
test_X.append(sample[0])
test_S.append(sample[1])
test_y.append(sample[2])
#print('len(test_y) =', len(test_y))
return np.array(train_X), np.array(train_S), np.array(train_y), np.array(test_X), np.array(test_S), np.array(test_y)
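# --- Hedged usage sketch: the loader returns train and test splits of
# bag-of-words vectors (X), hand-crafted features (S), and one-hot labels (y).
# The hard-coded D:/ paths above are machine-specific, so this only runs there.
# train_X, train_S, train_y, test_X, test_S, test_y = load_data(name='freq')
# assert train_X.shape[0] == train_S.shape[0] == train_y.shape[0]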
import unittest
import qteasy as qt
import pandas as pd
from pandas import Timestamp
import numpy as np
import math
from numpy import int64
import itertools
import datetime
from qteasy.utilfuncs import list_to_str_format, regulate_date_format, time_str_format, str_to_list
from qteasy.utilfuncs import maybe_trade_day, is_market_trade_day, prev_trade_day, next_trade_day
from qteasy.utilfuncs import next_market_trade_day, unify, mask_to_signal, list_or_slice, labels_to_dict
from qteasy.utilfuncs import weekday_name, prev_market_trade_day, is_number_like, list_truncate, input_to_list
from qteasy.space import Space, Axis, space_around_centre, ResultPool
from qteasy.core import apply_loop
from qteasy.built_in import SelectingFinanceIndicator, TimingDMA, TimingMACD, TimingCDL, TimingTRIX
from qteasy.tsfuncs import income, indicators, name_change, get_bar
from qteasy.tsfuncs import stock_basic, trade_calendar, new_share, get_index
from qteasy.tsfuncs import balance, cashflow, top_list, index_indicators, composite
from qteasy.tsfuncs import future_basic, future_daily, options_basic, options_daily
from qteasy.tsfuncs import fund_basic, fund_net_value, index_basic, stock_company
from qteasy.evaluate import eval_alpha, eval_benchmark, eval_beta, eval_fv
from qteasy.evaluate import eval_info_ratio, eval_max_drawdown, eval_sharp
from qteasy.evaluate import eval_volatility
from qteasy.tafuncs import bbands, dema, ema, ht, kama, ma, mama, mavp, mid_point
from qteasy.tafuncs import mid_price, sar, sarext, sma, t3, tema, trima, wma, adx, adxr
from qteasy.tafuncs import apo, bop, cci, cmo, dx, macd, macdext, aroon, aroonosc
from qteasy.tafuncs import macdfix, mfi, minus_di, minus_dm, mom, plus_di, plus_dm
from qteasy.tafuncs import ppo, roc, rocp, rocr, rocr100, rsi, stoch, stochf, stochrsi
from qteasy.tafuncs import trix, ultosc, willr, ad, adosc, obv, atr, natr, trange
from qteasy.tafuncs import avgprice, medprice, typprice, wclprice, ht_dcperiod
from qteasy.tafuncs import ht_dcphase, ht_phasor, ht_sine, ht_trendmode, cdl2crows
from qteasy.tafuncs import cdl3blackcrows, cdl3inside, cdl3linestrike, cdl3outside
from qteasy.tafuncs import cdl3starsinsouth, cdl3whitesoldiers, cdlabandonedbaby
from qteasy.tafuncs import cdladvanceblock, cdlbelthold, cdlbreakaway, cdlclosingmarubozu
from qteasy.tafuncs import cdlconcealbabyswall, cdlcounterattack, cdldarkcloudcover
from qteasy.tafuncs import cdldoji, cdldojistar, cdldragonflydoji, cdlengulfing
from qteasy.tafuncs import cdleveningdojistar, cdleveningstar, cdlgapsidesidewhite
from qteasy.tafuncs import cdlgravestonedoji, cdlhammer, cdlhangingman, cdlharami
from qteasy.tafuncs import cdlharamicross, cdlhighwave, cdlhikkake, cdlhikkakemod
from qteasy.tafuncs import cdlhomingpigeon, cdlidentical3crows, cdlinneck
from qteasy.tafuncs import cdlinvertedhammer, cdlkicking, cdlkickingbylength
from qteasy.tafuncs import cdlladderbottom, cdllongleggeddoji, cdllongline, cdlmarubozu
from qteasy.tafuncs import cdlmatchinglow, cdlmathold, cdlmorningdojistar, cdlmorningstar
from qteasy.tafuncs import cdlonneck, cdlpiercing, cdlrickshawman, cdlrisefall3methods
from qteasy.tafuncs import cdlseparatinglines, cdlshootingstar, cdlshortline, cdlspinningtop
from qteasy.tafuncs import cdlstalledpattern, cdlsticksandwich, cdltakuri, cdltasukigap
from qteasy.tafuncs import cdlthrusting, cdltristar, cdlunique3river, cdlupsidegap2crows
from qteasy.tafuncs import cdlxsidegap3methods, beta, correl, linearreg, linearreg_angle
from qteasy.tafuncs import linearreg_intercept, linearreg_slope, stddev, tsf, var, acos
from qteasy.tafuncs import asin, atan, ceil, cos, cosh, exp, floor, ln, log10, sin, sinh
from qteasy.tafuncs import sqrt, tan, tanh, add, div, max, maxindex, min, minindex, minmax
from qteasy.tafuncs import minmaxindex, mult, sub, sum
from qteasy.history import get_financial_report_type_raw_data, get_price_type_raw_data
from qteasy.history import stack_dataframes, dataframe_to_hp, HistoryPanel
from qteasy.database import DataSource
from qteasy.strategy import Strategy, SimpleTiming, RollingTiming, SimpleSelecting, FactoralSelecting
from qteasy._arg_validators import _parse_string_kwargs, _valid_qt_kwargs
from qteasy.blender import _exp_to_token, blender_parser, signal_blend
class TestCost(unittest.TestCase):
def setUp(self):
self.amounts = np.array([10000., 20000., 10000.])
self.op = np.array([0., 1., -0.33333333])
self.amounts_to_sell = np.array([0., 0., -3333.3333])
self.cash_to_spend = np.array([0., 20000., 0.])
self.prices = np.array([10., 20., 10.])
self.r = qt.Cost(0.0)
def test_rate_creation(self):
"""测试对象生成"""
print('testing rates objects\n')
self.assertIsInstance(self.r, qt.Cost, 'Type should be Rate')
self.assertEqual(self.r.buy_fix, 0)
self.assertEqual(self.r.sell_fix, 0)
def test_rate_operations(self):
"""测试交易费率对象"""
self.assertEqual(self.r['buy_fix'], 0.0, 'Item got is incorrect')
self.assertEqual(self.r['sell_fix'], 0.0, 'Item got is wrong')
self.assertEqual(self.r['buy_rate'], 0.003, 'Item got is incorrect')
self.assertEqual(self.r['sell_rate'], 0.001, 'Item got is incorrect')
self.assertEqual(self.r['buy_min'], 5., 'Item got is incorrect')
self.assertEqual(self.r['sell_min'], 0.0, 'Item got is incorrect')
self.assertEqual(self.r['slipage'], 0.0, 'Item got is incorrect')
self.assertEqual(np.allclose(self.r.calculate(self.amounts),
[0.003, 0.003, 0.003]),
True,
'fee calculation wrong')
def test_rate_fee(self):
"""测试买卖交易费率"""
self.r.buy_rate = 0.003
self.r.sell_rate = 0.001
self.r.buy_fix = 0.
self.r.sell_fix = 0.
self.r.buy_min = 0.
self.r.sell_min = 0.
self.r.slipage = 0.
print('\nSell result with fixed rate = 0.001 and moq = 0:')
print(self.r.get_selling_result(self.prices, self.amounts_to_sell))
test_rate_fee_result = self.r.get_selling_result(self.prices, self.amounts_to_sell)
self.assertIs(np.allclose(test_rate_fee_result[0], [0., 0., -3333.3333]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_fee_result[1], 33299.999667, msg='result incorrect')
self.assertAlmostEqual(test_rate_fee_result[2], 33.333332999999996, msg='result incorrect')
print('\nSell result with fixed rate = 0.001 and moq = 1:')
print(self.r.get_selling_result(self.prices, self.amounts_to_sell, 1.))
test_rate_fee_result = self.r.get_selling_result(self.prices, self.amounts_to_sell, 1)
self.assertIs(np.allclose(test_rate_fee_result[0], [0., 0., -3333]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_fee_result[1], 33296.67, msg='result incorrect')
self.assertAlmostEqual(test_rate_fee_result[2], 33.33, msg='result incorrect')
print('\nSell result with fixed rate = 0.001 and moq = 100:')
print(self.r.get_selling_result(self.prices, self.amounts_to_sell, 100))
test_rate_fee_result = self.r.get_selling_result(self.prices, self.amounts_to_sell, 100)
self.assertIs(np.allclose(test_rate_fee_result[0], [0., 0., -3300]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_fee_result[1], 32967.0, msg='result incorrect')
self.assertAlmostEqual(test_rate_fee_result[2], 33, msg='result incorrect')
print('\nPurchase result with fixed rate = 0.003 and moq = 0:')
print(self.r.get_purchase_result(self.prices, self.cash_to_spend, 0))
test_rate_fee_result = self.r.get_purchase_result(self.prices, self.cash_to_spend, 0)
self.assertIs(np.allclose(test_rate_fee_result[0], [0., 997.00897308, 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_fee_result[1], -20000.0, msg='result incorrect')
self.assertAlmostEqual(test_rate_fee_result[2], 59.82053838484547, msg='result incorrect')
print('\nPurchase result with fixed rate = 0.003 and moq = 1:')
print(self.r.get_purchase_result(self.prices, self.cash_to_spend, 1))
test_rate_fee_result = self.r.get_purchase_result(self.prices, self.cash_to_spend, 1)
self.assertIs(np.allclose(test_rate_fee_result[0], [0., 997., 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_fee_result[1], -19999.82, msg='result incorrect')
self.assertAlmostEqual(test_rate_fee_result[2], 59.82, msg='result incorrect')
print('\nPurchase result with fixed rate = 0.003 and moq = 100:')
print(self.r.get_purchase_result(self.prices, self.cash_to_spend, 100))
test_rate_fee_result = self.r.get_purchase_result(self.prices, self.cash_to_spend, 100)
self.assertIs(np.allclose(test_rate_fee_result[0], [0., 900., 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_fee_result[1], -18054., msg='result incorrect')
self.assertAlmostEqual(test_rate_fee_result[2], 54.0, msg='result incorrect')
def test_min_fee(self):
"""测试最低交易费用"""
self.r.buy_rate = 0.
self.r.sell_rate = 0.
self.r.buy_fix = 0.
self.r.sell_fix = 0.
self.r.buy_min = 300
self.r.sell_min = 300
self.r.slipage = 0.
print('\npurchase result with fixed cost rate with min fee = 300 and moq = 0:')
print(self.r.get_purchase_result(self.prices, self.cash_to_spend, 0))
test_min_fee_result = self.r.get_purchase_result(self.prices, self.cash_to_spend, 0)
self.assertIs(np.allclose(test_min_fee_result[0], [0., 985, 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_min_fee_result[1], -20000.0, msg='result incorrect')
self.assertAlmostEqual(test_min_fee_result[2], 300.0, msg='result incorrect')
print('\npurchase result with fixed cost rate with min fee = 300 and moq = 10:')
print(self.r.get_purchase_result(self.prices, self.cash_to_spend, 10))
test_min_fee_result = self.r.get_purchase_result(self.prices, self.cash_to_spend, 10)
self.assertIs(np.allclose(test_min_fee_result[0], [0., 980, 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_min_fee_result[1], -19900.0, msg='result incorrect')
self.assertAlmostEqual(test_min_fee_result[2], 300.0, msg='result incorrect')
print('\npurchase result with fixed cost rate with min fee = 300 and moq = 100:')
print(self.r.get_purchase_result(self.prices, self.cash_to_spend, 100))
test_min_fee_result = self.r.get_purchase_result(self.prices, self.cash_to_spend, 100)
self.assertIs(np.allclose(test_min_fee_result[0], [0., 900, 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_min_fee_result[1], -18300.0, msg='result incorrect')
self.assertAlmostEqual(test_min_fee_result[2], 300.0, msg='result incorrect')
print('\nselling result with fixed cost rate with min fee = 300 and moq = 0:')
print(self.r.get_selling_result(self.prices, self.amounts_to_sell))
test_min_fee_result = self.r.get_selling_result(self.prices, self.amounts_to_sell)
self.assertIs(np.allclose(test_min_fee_result[0], [0, 0, -3333.3333]), True, 'result incorrect')
self.assertAlmostEqual(test_min_fee_result[1], 33033.333)
self.assertAlmostEqual(test_min_fee_result[2], 300.0)
print('\nselling result with fixed cost rate with min fee = 300 and moq = 1:')
print(self.r.get_selling_result(self.prices, self.amounts_to_sell, 1))
test_min_fee_result = self.r.get_selling_result(self.prices, self.amounts_to_sell, 1)
self.assertIs(np.allclose(test_min_fee_result[0], [0, 0, -3333]), True, 'result incorrect')
self.assertAlmostEqual(test_min_fee_result[1], 33030)
self.assertAlmostEqual(test_min_fee_result[2], 300.0)
print('\nselling result with fixed cost rate with min fee = 300 and moq = 100:')
print(self.r.get_selling_result(self.prices, self.amounts_to_sell, 100))
test_min_fee_result = self.r.get_selling_result(self.prices, self.amounts_to_sell, 100)
self.assertIs(np.allclose(test_min_fee_result[0], [0, 0, -3300]), True, 'result incorrect')
self.assertAlmostEqual(test_min_fee_result[1], 32700)
self.assertAlmostEqual(test_min_fee_result[2], 300.0)
def test_rate_with_min(self):
"""测试最低交易费用对其他交易费率参数的影响"""
self.r.buy_rate = 0.0153
self.r.sell_rate = 0.01
self.r.buy_fix = 0.
self.r.sell_fix = 0.
self.r.buy_min = 300
self.r.sell_min = 333
self.r.slipage = 0.
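        # the larger of the rate-based fee and the minimum fee should apply
        # (price of 20 assumed, as set in self.prices): at moq = 0 the rate fee is
        # 984.93 * 20 * 0.0153 ≈ 301.39 > 300, so the rate fee wins; at moq = 10,
        # 980 shares give a rate fee of 299.88 < 300, so the 300 minimum applies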
print('\npurchase result with fixed cost rate with buy_rate = 0.0153, min fee = 300 and moq = 0:')
print(self.r.get_purchase_result(self.prices, self.cash_to_spend, 0))
test_rate_with_min_result = self.r.get_purchase_result(self.prices, self.cash_to_spend, 0)
self.assertIs(np.allclose(test_rate_with_min_result[0], [0., 984.9305624, 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_with_min_result[1], -20000.0, msg='result incorrect')
self.assertAlmostEqual(test_rate_with_min_result[2], 301.3887520929774, msg='result incorrect')
print('\npurchase result with fixed cost rate with buy_rate = 0.0153, min fee = 300 and moq = 10:')
print(self.r.get_purchase_result(self.prices, self.cash_to_spend, 10))
test_rate_with_min_result = self.r.get_purchase_result(self.prices, self.cash_to_spend, 10)
self.assertIs(np.allclose(test_rate_with_min_result[0], [0., 980, 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_with_min_result[1], -19900.0, msg='result incorrect')
self.assertAlmostEqual(test_rate_with_min_result[2], 300.0, msg='result incorrect')
print('\npurchase result with fixed cost rate with buy_rate = 0.0153, min fee = 300 and moq = 100:')
print(self.r.get_purchase_result(self.prices, self.cash_to_spend, 100))
test_rate_with_min_result = self.r.get_purchase_result(self.prices, self.cash_to_spend, 100)
self.assertIs(np.allclose(test_rate_with_min_result[0], [0., 900, 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_with_min_result[1], -18300.0, msg='result incorrect')
self.assertAlmostEqual(test_rate_with_min_result[2], 300.0, msg='result incorrect')
print('\nselling result with fixed cost rate with sell_rate = 0.01, min fee = 333 and moq = 0:')
print(self.r.get_selling_result(self.prices, self.amounts_to_sell))
test_rate_with_min_result = self.r.get_selling_result(self.prices, self.amounts_to_sell)
self.assertIs(np.allclose(test_rate_with_min_result[0], [0, 0, -3333.3333]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_with_min_result[1], 32999.99967)
self.assertAlmostEqual(test_rate_with_min_result[2], 333.33333)
print('\nselling result with fixed cost rate with sell_rate = 0.01, min fee = 333 and moq = 1:')
print(self.r.get_selling_result(self.prices, self.amounts_to_sell, 1))
test_rate_with_min_result = self.r.get_selling_result(self.prices, self.amounts_to_sell, 1)
self.assertIs(np.allclose(test_rate_with_min_result[0], [0, 0, -3333]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_with_min_result[1], 32996.7)
self.assertAlmostEqual(test_rate_with_min_result[2], 333.3)
print('\nselling result with fixed cost rate with sell_rate = 0.01, min fee = 333 and moq = 100:')
print(self.r.get_selling_result(self.prices, self.amounts_to_sell, 100))
test_rate_with_min_result = self.r.get_selling_result(self.prices, self.amounts_to_sell, 100)
self.assertIs(np.allclose(test_rate_with_min_result[0], [0, 0, -3300]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_with_min_result[1], 32667.0)
self.assertAlmostEqual(test_rate_with_min_result[2], 333.0)
def test_fixed_fee(self):
"""测试固定交易费用"""
self.r.buy_rate = 0.
self.r.sell_rate = 0.
self.r.buy_fix = 200
self.r.sell_fix = 150
self.r.buy_min = 0
self.r.sell_min = 0
self.r.slipage = 0
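        # with zero rates a flat fee should be charged per trade regardless of size:
        # selling 3333.3333 shares at an assumed price of 10 yields
        # 33333.333 - 150 = 33183.333; buying with 20000 cash at an assumed price
        # of 20 leaves (20000 - 200) / 20 = 990 shares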
print('\nselling result of fixed cost with fixed fee = 150 and moq=0:')
print(self.r.get_selling_result(self.prices, self.amounts_to_sell, 0))
test_fixed_fee_result = self.r.get_selling_result(self.prices, self.amounts_to_sell)
self.assertIs(np.allclose(test_fixed_fee_result[0], [0, 0, -3333.3333]), True, 'result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[1], 33183.333, msg='result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[2], 150.0, msg='result incorrect')
print('\nselling result of fixed cost with fixed fee = 150 and moq=100:')
print(self.r.get_selling_result(self.prices, self.amounts_to_sell, 100))
test_fixed_fee_result = self.r.get_selling_result(self.prices, self.amounts_to_sell, 100)
self.assertIs(np.allclose(test_fixed_fee_result[0], [0, 0, -3300.]), True,
                      f'result incorrect, {test_fixed_fee_result[0]} does not equal [0, 0, -3300.]')
self.assertAlmostEqual(test_fixed_fee_result[1], 32850., msg='result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[2], 150., msg='result incorrect')
print('\npurchase result of fixed cost with fixed fee = 200:')
print(self.r.get_purchase_result(self.prices, self.cash_to_spend, 0))
test_fixed_fee_result = self.r.get_purchase_result(self.prices, self.cash_to_spend, 0)
self.assertIs(np.allclose(test_fixed_fee_result[0], [0., 990., 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[1], -20000.0, msg='result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[2], 200.0, msg='result incorrect')
print('\npurchase result of fixed cost with fixed fee = 200:')
print(self.r.get_purchase_result(self.prices, self.cash_to_spend, 100))
test_fixed_fee_result = self.r.get_purchase_result(self.prices, self.cash_to_spend, 100)
self.assertIs(np.allclose(test_fixed_fee_result[0], [0., 900., 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[1], -18200.0, msg='result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[2], 200.0, msg='result incorrect')
def test_slipage(self):
"""测试交易滑点"""
self.r.buy_fix = 0
self.r.sell_fix = 0
self.r.buy_min = 0
self.r.sell_min = 0
self.r.buy_rate = 0.003
self.r.sell_rate = 0.001
self.r.slipage = 1E-9
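        # slippage should slightly worsen the effective trade cost on top of the
        # rate fee: the purchase fee asserted below (≈60.218) exceeds the
        # no-slippage fee of ≈59.82 asserted earlier; the exact slippage formula
        # is defined in the cost model, not in this test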
        print('\npurchase result of fixed rate = 0.003 and slippage = 1E-9 and moq = 0:')
print(self.r.get_purchase_result(self.prices, self.cash_to_spend, 0))
        print('\npurchase result of fixed rate = 0.003 and slippage = 1E-9 and moq = 100:')
print(self.r.get_purchase_result(self.prices, self.cash_to_spend, 100))
        print('\nselling result with fixed rate = 0.001 and slippage = 1E-9:')
print(self.r.get_selling_result(self.prices, self.amounts_to_sell))
test_fixed_fee_result = self.r.get_selling_result(self.prices, self.amounts_to_sell)
        self.assertIs(np.allclose(test_fixed_fee_result[0], [0, 0, -3333.3333]), True,
                      f'{test_fixed_fee_result[0]} does not equal [0, 0, -3333.3333]')
        self.assertAlmostEqual(test_fixed_fee_result[1], 33298.88855591,
                               msg=f'{test_fixed_fee_result[1]} does not equal 33298.88855591')
        self.assertAlmostEqual(test_fixed_fee_result[2], 34.44444409,
                               msg=f'{test_fixed_fee_result[2]} does not equal 34.44444409')
test_fixed_fee_result = self.r.get_purchase_result(self.prices, self.cash_to_spend, 0)
self.assertIs(np.allclose(test_fixed_fee_result[0], [0., 996.98909294, 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[1], -20000.0, msg='result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[2], 60.21814121353513, msg='result incorrect')
test_fixed_fee_result = self.r.get_purchase_result(self.prices, self.cash_to_spend, 100)
self.assertIs(np.allclose(test_fixed_fee_result[0], [0., 900., 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[1], -18054.36, msg='result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[2], 54.36, msg='result incorrect')
class TestSpace(unittest.TestCase):
def test_creation(self):
"""
test if creation of space object is fine
"""
        # first group of inputs: output a Space with two discr axes over [0, 10]
print('testing space objects\n')
# pars_list = [[(0, 10), (0, 10)],
# [[0, 10], [0, 10]]]
#
# types_list = ['discr',
# ['discr', 'discr']]
#
# input_pars = itertools.product(pars_list, types_list)
# for p in input_pars:
# # print(p)
# s = qt.Space(*p)
# b = s.boes
# t = s.types
# # print(s, t)
# self.assertIsInstance(s, qt.Space)
# self.assertEqual(b, [(0, 10), (0, 10)], 'boes incorrect!')
# self.assertEqual(t, ['discr', 'discr'], 'types incorrect')
#
pars_list = [[(0, 10), (0, 10)],
[[0, 10], [0, 10]]]
types_list = ['foo, bar',
['foo', 'bar']]
input_pars = itertools.product(pars_list, types_list)
for p in input_pars:
# print(p)
s = Space(*p)
b = s.boes
t = s.types
# print(s, t)
self.assertEqual(b, [(0, 10), (0, 10)], 'boes incorrect!')
self.assertEqual(t, ['enum', 'enum'], 'types incorrect')
pars_list = [[(0, 10), (0, 10)],
[[0, 10], [0, 10]]]
types_list = [['discr', 'foobar']]
input_pars = itertools.product(pars_list, types_list)
for p in input_pars:
# print(p)
s = Space(*p)
b = s.boes
t = s.types
# print(s, t)
self.assertEqual(b, [(0, 10), (0, 10)], 'boes incorrect!')
self.assertEqual(t, ['discr', 'enum'], 'types incorrect')
pars_list = [(0., 10), (0, 10)]
s = Space(pars=pars_list, par_types=None)
self.assertEqual(s.types, ['conti', 'discr'])
self.assertEqual(s.dim, 2)
self.assertEqual(s.size, (10.0, 11))
self.assertEqual(s.shape, (np.inf, 11))
self.assertEqual(s.count, np.inf)
self.assertEqual(s.boes, [(0., 10), (0, 10)])
pars_list = [(0., 10), (0, 10)]
s = Space(pars=pars_list, par_types='conti, enum')
self.assertEqual(s.types, ['conti', 'enum'])
self.assertEqual(s.dim, 2)
self.assertEqual(s.size, (10.0, 2))
self.assertEqual(s.shape, (np.inf, 2))
self.assertEqual(s.count, np.inf)
self.assertEqual(s.boes, [(0., 10), (0, 10)])
pars_list = [(1, 2), (2, 3), (3, 4)]
s = Space(pars=pars_list)
self.assertEqual(s.types, ['discr', 'discr', 'discr'])
self.assertEqual(s.dim, 3)
self.assertEqual(s.size, (2, 2, 2))
self.assertEqual(s.shape, (2, 2, 2))
self.assertEqual(s.count, 8)
self.assertEqual(s.boes, [(1, 2), (2, 3), (3, 4)])
pars_list = [(1, 2, 3), (2, 3, 4), (3, 4, 5)]
s = Space(pars=pars_list)
self.assertEqual(s.types, ['enum', 'enum', 'enum'])
self.assertEqual(s.dim, 3)
self.assertEqual(s.size, (3, 3, 3))
self.assertEqual(s.shape, (3, 3, 3))
self.assertEqual(s.count, 27)
self.assertEqual(s.boes, [(1, 2, 3), (2, 3, 4), (3, 4, 5)])
pars_list = [((1, 2, 3), (2, 3, 4), (3, 4, 5))]
s = Space(pars=pars_list)
self.assertEqual(s.types, ['enum'])
self.assertEqual(s.dim, 1)
self.assertEqual(s.size, (3,))
self.assertEqual(s.shape, (3,))
self.assertEqual(s.count, 3)
pars_list = ((1, 2, 3), (2, 3, 4), (3, 4, 5))
s = Space(pars=pars_list)
self.assertEqual(s.types, ['enum', 'enum', 'enum'])
self.assertEqual(s.dim, 3)
self.assertEqual(s.size, (3, 3, 3))
self.assertEqual(s.shape, (3, 3, 3))
self.assertEqual(s.count, 27)
self.assertEqual(s.boes, [(1, 2, 3), (2, 3, 4), (3, 4, 5)])
def test_extract(self):
"""
:return:
"""
pars_list = [(0, 10), (0, 10)]
types_list = ['discr', 'discr']
s = Space(pars=pars_list, par_types=types_list)
extracted_int, count = s.extract(3, 'interval')
extracted_int_list = list(extracted_int)
print('extracted int\n', extracted_int_list)
self.assertEqual(count, 16, 'extraction count wrong!')
self.assertEqual(extracted_int_list, [(0, 0), (0, 3), (0, 6), (0, 9), (3, 0), (3, 3),
(3, 6), (3, 9), (6, 0), (6, 3), (6, 6), (6, 9),
(9, 0), (9, 3), (9, 6), (9, 9)],
'space extraction wrong!')
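        # with 'interval' extraction and step 3, each discr axis [0, 10] yields the
        # grid {0, 3, 6, 9}, hence 4 * 4 = 16 points in total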
extracted_rand, count = s.extract(10, 'rand')
extracted_rand_list = list(extracted_rand)
self.assertEqual(count, 10, 'extraction count wrong!')
print('extracted rand\n', extracted_rand_list)
for point in list(extracted_rand_list):
self.assertEqual(len(point), 2)
self.assertLessEqual(point[0], 10)
self.assertGreaterEqual(point[0], 0)
self.assertLessEqual(point[1], 10)
self.assertGreaterEqual(point[1], 0)
pars_list = [(0., 10), (0, 10)]
s = Space(pars=pars_list, par_types=None)
extracted_int2, count = s.extract(3, 'interval')
self.assertEqual(count, 16, 'extraction count wrong!')
extracted_int_list2 = list(extracted_int2)
self.assertEqual(extracted_int_list2, [(0, 0), (0, 3), (0, 6), (0, 9), (3, 0), (3, 3),
(3, 6), (3, 9), (6, 0), (6, 3), (6, 6), (6, 9),
(9, 0), (9, 3), (9, 6), (9, 9)],
'space extraction wrong!')
print('extracted int list 2\n', extracted_int_list2)
self.assertIsInstance(extracted_int_list2[0][0], float)
self.assertIsInstance(extracted_int_list2[0][1], (int, int64))
extracted_rand2, count = s.extract(10, 'rand')
self.assertEqual(count, 10, 'extraction count wrong!')
extracted_rand_list2 = list(extracted_rand2)
print('extracted rand list 2:\n', extracted_rand_list2)
for point in extracted_rand_list2:
self.assertEqual(len(point), 2)
self.assertIsInstance(point[0], float)
self.assertLessEqual(point[0], 10)
self.assertGreaterEqual(point[0], 0)
self.assertIsInstance(point[1], (int, int64))
self.assertLessEqual(point[1], 10)
self.assertGreaterEqual(point[1], 0)
pars_list = [(0., 10), ('a', 'b')]
s = Space(pars=pars_list, par_types='enum, enum')
extracted_int3, count = s.extract(1, 'interval')
self.assertEqual(count, 4, 'extraction count wrong!')
extracted_int_list3 = list(extracted_int3)
self.assertEqual(extracted_int_list3, [(0., 'a'), (0., 'b'), (10, 'a'), (10, 'b')],
'space extraction wrong!')
print('extracted int list 3\n', extracted_int_list3)
self.assertIsInstance(extracted_int_list3[0][0], float)
self.assertIsInstance(extracted_int_list3[0][1], str)
extracted_rand3, count = s.extract(3, 'rand')
self.assertEqual(count, 3, 'extraction count wrong!')
extracted_rand_list3 = list(extracted_rand3)
print('extracted rand list 3:\n', extracted_rand_list3)
for point in extracted_rand_list3:
self.assertEqual(len(point), 2)
self.assertIsInstance(point[0], (float, int))
self.assertLessEqual(point[0], 10)
self.assertGreaterEqual(point[0], 0)
self.assertIsInstance(point[1], str)
self.assertIn(point[1], ['a', 'b'])
pars_list = [((0, 10), (1, 'c'), ('a', 'b'), (1, 14))]
s = Space(pars=pars_list, par_types='enum')
extracted_int4, count = s.extract(1, 'interval')
self.assertEqual(count, 4, 'extraction count wrong!')
extracted_int_list4 = list(extracted_int4)
        expected = [(0, 10), (1, 'c'), ('a', 'b'), (1, 14)]
        for ext_item, exp_item in zip(extracted_int_list4, expected):
            print(ext_item, exp_item)
        self.assertTrue(all([tuple(ext_item) == exp_item
                             for ext_item, exp_item in zip(extracted_int_list4, expected)]))
print('extracted int list 4\n', extracted_int_list4)
self.assertIsInstance(extracted_int_list4[0], tuple)
extracted_rand4, count = s.extract(3, 'rand')
self.assertEqual(count, 3, 'extraction count wrong!')
extracted_rand_list4 = list(extracted_rand4)
print('extracted rand list 4:\n', extracted_rand_list4)
for point in extracted_rand_list4:
self.assertEqual(len(point), 2)
self.assertIsInstance(point[0], (int, str))
self.assertIn(point[0], [0, 1, 'a'])
self.assertIsInstance(point[1], (int, str))
self.assertIn(point[1], [10, 14, 'b', 'c'])
self.assertIn(point, [(0., 10), (1, 'c'), ('a', 'b'), (1, 14)])
pars_list = [((0, 10), (1, 'c'), ('a', 'b'), (1, 14)), (1, 4)]
s = Space(pars=pars_list, par_types='enum, discr')
extracted_int5, count = s.extract(1, 'interval')
self.assertEqual(count, 16, 'extraction count wrong!')
extracted_int_list5 = list(extracted_int5)
        for item, item2 in extracted_int_list5:
            print(item, item2)
            # each point pairs one enum value with one discr value from (1, 4)
            self.assertIn(item, [(0, 10), (1, 'c'), ('a', 'b'), (1, 14)])
            self.assertIn(item2, [1, 2, 3, 4])
print('extracted int list 5\n', extracted_int_list5)
self.assertIsInstance(extracted_int_list5[0], tuple)
extracted_rand5, count = s.extract(5, 'rand')
self.assertEqual(count, 5, 'extraction count wrong!')
extracted_rand_list5 = list(extracted_rand5)
print('extracted rand list 5:\n', extracted_rand_list5)
for point in extracted_rand_list5:
self.assertEqual(len(point), 2)
self.assertIsInstance(point[0], tuple)
print(f'type of point[1] is {type(point[1])}')
self.assertIsInstance(point[1], (int, np.int64))
self.assertIn(point[0], [(0., 10), (1, 'c'), ('a', 'b'), (1, 14)])
print(f'test incremental extraction')
pars_list = [(10., 250), (10., 250), (10., 250), (10., 250), (10., 250), (10., 250)]
s = Space(pars_list)
ext, count = s.extract(64, 'interval')
self.assertEqual(count, 4096)
points = list(ext)
        # all points have been extracted; build ten subspaces around ten of them
        # check that each subspace is a Space contained in s, extract a point set with interval 32, and verify the count
for point in points[1000:1010]:
subspace = s.from_point(point, 64)
self.assertIsInstance(subspace, Space)
self.assertTrue(subspace in s)
self.assertEqual(subspace.dim, 6)
self.assertEqual(subspace.types, ['conti', 'conti', 'conti', 'conti', 'conti', 'conti'])
ext, count = subspace.extract(32)
            sub_points = list(ext)  # avoid shadowing the outer points list
self.assertGreaterEqual(count, 512)
self.assertLessEqual(count, 4096)
print(f'\n---------------------------------'
f'\nthe space created around point <{point}> is'
f'\n{subspace.boes}'
f'\nand extracted {count} points, the first 5 are:'
                  f'\n{sub_points[:5]}')
def test_axis_extract(self):
# test axis object with conti type
axis = Axis((0., 5))
self.assertIsInstance(axis, Axis)
self.assertEqual(axis.axis_type, 'conti')
self.assertEqual(axis.axis_boe, (0., 5.))
self.assertEqual(axis.count, np.inf)
self.assertEqual(axis.size, 5.0)
self.assertTrue(np.allclose(axis.extract(1, 'int'), [0., 1., 2., 3., 4.]))
self.assertTrue(np.allclose(axis.extract(0.5, 'int'), [0., 0.5, 1., 1.5, 2., 2.5, 3., 3.5, 4., 4.5]))
extracted = axis.extract(8, 'rand')
self.assertEqual(len(extracted), 8)
self.assertTrue(all([(0 <= item <= 5) for item in extracted]))
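        # for a 'conti' axis, extract(n, 'int') appears to treat n as the step size
        # from the lower bound (step 1 -> [0, 1, 2, 3, 4], step 0.5 -> 10 points),
        # while 'rand' draws uniformly within the bounds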
# test axis object with discrete type
axis = Axis((1, 5))
self.assertIsInstance(axis, Axis)
self.assertEqual(axis.axis_type, 'discr')
self.assertEqual(axis.axis_boe, (1, 5))
self.assertEqual(axis.count, 5)
self.assertEqual(axis.size, 5)
self.assertTrue(np.allclose(axis.extract(1, 'int'), [1, 2, 3, 4, 5]))
self.assertRaises(ValueError, axis.extract, 0.5, 'int')
extracted = axis.extract(8, 'rand')
self.assertEqual(len(extracted), 8)
self.assertTrue(all([(item in [1, 2, 3, 4, 5]) for item in extracted]))
# test axis object with enumerate type
axis = Axis((1, 5, 7, 10, 'A', 'F'))
self.assertIsInstance(axis, Axis)
self.assertEqual(axis.axis_type, 'enum')
self.assertEqual(axis.axis_boe, (1, 5, 7, 10, 'A', 'F'))
self.assertEqual(axis.count, 6)
self.assertEqual(axis.size, 6)
self.assertEqual(axis.extract(1, 'int'), [1, 5, 7, 10, 'A', 'F'])
self.assertRaises(ValueError, axis.extract, 0.5, 'int')
extracted = axis.extract(8, 'rand')
self.assertEqual(len(extracted), 8)
self.assertTrue(all([(item in [1, 5, 7, 10, 'A', 'F']) for item in extracted]))
def test_from_point(self):
"""测试从一个点生成一个space"""
# 生成一个space,指定space中的一个点以及distance,生成一个sub-space
pars_list = [(0., 10), (0, 10)]
s = Space(pars=pars_list, par_types=None)
self.assertEqual(s.types, ['conti', 'discr'])
self.assertEqual(s.dim, 2)
self.assertEqual(s.size, (10., 11))
self.assertEqual(s.shape, (np.inf, 11))
self.assertEqual(s.count, np.inf)
self.assertEqual(s.boes, [(0., 10), (0, 10)])
print('create subspace from a point in space')
p = (3, 3)
distance = 2
subspace = s.from_point(p, distance)
self.assertIsInstance(subspace, Space)
self.assertEqual(subspace.types, ['conti', 'discr'])
self.assertEqual(subspace.dim, 2)
self.assertEqual(subspace.size, (4.0, 5))
self.assertEqual(subspace.shape, (np.inf, 5))
self.assertEqual(subspace.count, np.inf)
self.assertEqual(subspace.boes, [(1, 5), (1, 5)])
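        # the subspace bounds are centre ± distance, clipped to the parent space:
        # (3 - 2, 3 + 2) -> (1, 5) on both axes; the conti axis still has infinitely
        # many points while the discr axis has 5 integer points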
print('create subspace from a 6 dimensional discrete space')
s = Space(pars=[(10, 250), (10, 250), (10, 250), (10, 250), (10, 250), (10, 250)])
p = (15, 200, 150, 150, 150, 150)
d = 10
subspace = s.from_point(p, d)
self.assertIsInstance(subspace, Space)
self.assertEqual(subspace.types, ['discr', 'discr', 'discr', 'discr', 'discr', 'discr'])
self.assertEqual(subspace.dim, 6)
self.assertEqual(subspace.volume, 65345616)
self.assertEqual(subspace.size, (16, 21, 21, 21, 21, 21))
self.assertEqual(subspace.shape, (16, 21, 21, 21, 21, 21))
self.assertEqual(subspace.count, 65345616)
self.assertEqual(subspace.boes, [(10, 25), (190, 210), (140, 160), (140, 160), (140, 160), (140, 160)])
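        # count check: the first axis (10, 25) holds 16 integers and each of the
        # other five axes holds 21, so 16 * 21**5 = 65,345,616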
print('create subspace from a 6 dimensional continuous space')
s = Space(pars=[(10., 250), (10., 250), (10., 250), (10., 250), (10., 250), (10., 250)])
p = (15, 200, 150, 150, 150, 150)
d = 10
subspace = s.from_point(p, d)
self.assertIsInstance(subspace, Space)
self.assertEqual(subspace.types, ['conti', 'conti', 'conti', 'conti', 'conti', 'conti'])
self.assertEqual(subspace.dim, 6)
self.assertEqual(subspace.volume, 48000000)
self.assertEqual(subspace.size, (15.0, 20.0, 20.0, 20.0, 20.0, 20.0))
self.assertEqual(subspace.shape, (np.inf, np.inf, np.inf, np.inf, np.inf, np.inf))
self.assertEqual(subspace.count, np.inf)
self.assertEqual(subspace.boes, [(10, 25), (190, 210), (140, 160), (140, 160), (140, 160), (140, 160)])
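        # for conti axes the volume is the product of the interval lengths:
        # 15 * 20**5 = 48,000,000, while shape and count remain infinite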
print('create subspace with different distances on each dimension')
s = Space(pars=[(10., 250), (10., 250), (10., 250), (10., 250), (10., 250), (10., 250)])
p = (15, 200, 150, 150, 150, 150)
d = [10, 5, 5, 10, 10, 5]
subspace = s.from_point(p, d)
self.assertIsInstance(subspace, Space)
self.assertEqual(subspace.types, ['conti', 'conti', 'conti', 'conti', 'conti', 'conti'])
self.assertEqual(subspace.dim, 6)
self.assertEqual(subspace.volume, 6000000)
self.assertEqual(subspace.size, (15.0, 10.0, 10.0, 20.0, 20.0, 10.0))
self.assertEqual(subspace.shape, (np.inf, np.inf, np.inf, np.inf, np.inf, np.inf))
self.assertEqual(subspace.count, np.inf)
self.assertEqual(subspace.boes, [(10, 25), (195, 205), (145, 155), (140, 160), (140, 160), (145, 155)])
class TestCashPlan(unittest.TestCase):
def setUp(self):
self.cp1 = qt.CashPlan(['2012-01-01', '2010-01-01'], [10000, 20000], 0.1)
self.cp1.info()
self.cp2 = qt.CashPlan(['20100501'], 10000)
self.cp2.info()
self.cp3 = qt.CashPlan(pd.date_range(start='2019-01-01',
freq='Y',
periods=12),
[i * 1000 + 10000 for i in range(12)],
0.035)
self.cp3.info()
def test_creation(self):
self.assertIsInstance(self.cp1, qt.CashPlan, 'CashPlan object creation wrong')
self.assertIsInstance(self.cp2, qt.CashPlan, 'CashPlan object creation wrong')
self.assertIsInstance(self.cp3, qt.CashPlan, 'CashPlan object creation wrong')
# test __repr__()
print(self.cp1)
print(self.cp2)
print(self.cp3)
# test __str__()
self.cp1.info()
self.cp2.info()
self.cp3.info()
        # test assertion errors
self.assertRaises(AssertionError, qt.CashPlan, '2016-01-01', [10000, 10000])
self.assertRaises(KeyError, qt.CashPlan, '2020-20-20', 10000)
def test_properties(self):
self.assertEqual(self.cp1.amounts, [20000, 10000], 'property wrong')
self.assertEqual(self.cp1.first_day, Timestamp('2010-01-01'))
self.assertEqual(self.cp1.last_day, Timestamp('2012-01-01'))
self.assertEqual(self.cp1.investment_count, 2)
self.assertEqual(self.cp1.period, 730)
self.assertEqual(self.cp1.dates, [Timestamp('2010-01-01'), Timestamp('2012-01-01')])
self.assertEqual(self.cp1.ir, 0.1)
self.assertAlmostEqual(self.cp1.closing_value, 34200)
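        # closing_value compounds each investment to the last investment date:
        # 20000 * 1.1**2 + 10000 = 24200 + 10000 = 34200 for cp1 (annual
        # compounding over the two-year period assumed)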
self.assertAlmostEqual(self.cp2.closing_value, 10000)
self.assertAlmostEqual(self.cp3.closing_value, 220385.3483685)
self.assertIsInstance(self.cp1.plan, pd.DataFrame)
self.assertIsInstance(self.cp2.plan, pd.DataFrame)
self.assertIsInstance(self.cp3.plan, pd.DataFrame)
def test_operation(self):
cp_self_add = self.cp1 + self.cp1
cp_add = self.cp1 + self.cp2
cp_add_int = self.cp1 + 10000
cp_mul_int = self.cp1 * 2
cp_mul_float = self.cp2 * 1.5
cp_mul_time = 3 * self.cp2
cp_mul_time2 = 2 * self.cp1
cp_mul_time3 = 2 * self.cp3
cp_mul_float2 = 2. * self.cp3
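        # the assertions below exercise the CashPlan operators: plan * number scales
        # the amounts, while int * plan (integer on the left) replicates the whole
        # plan that many times with the dates extended forward; float * plan scales
        # amounts just like plan * float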
self.assertIsInstance(cp_self_add, qt.CashPlan)
self.assertEqual(cp_self_add.amounts, [40000, 20000])
self.assertEqual(cp_add.amounts, [20000, 10000, 10000])
self.assertEqual(cp_add_int.amounts, [30000, 20000])
self.assertEqual(cp_mul_int.amounts, [40000, 20000])
self.assertEqual(cp_mul_float.amounts, [15000])
self.assertEqual(cp_mul_float.dates, [Timestamp('2010-05-01')])
self.assertEqual(cp_mul_time.amounts, [10000, 10000, 10000])
self.assertEqual(cp_mul_time.dates, [Timestamp('2010-05-01'),
Timestamp('2011-05-01'),
Timestamp('2012-04-30')])
self.assertEqual(cp_mul_time2.amounts, [20000, 10000, 20000, 10000])
self.assertEqual(cp_mul_time2.dates, [Timestamp('2010-01-01'),
Timestamp('2012-01-01'),
Timestamp('2014-01-01'),
Timestamp('2016-01-01')])
self.assertEqual(cp_mul_time3.dates, [Timestamp('2019-12-31'),
Timestamp('2020-12-31'),
Timestamp('2021-12-31'),
Timestamp('2022-12-31'),
Timestamp('2023-12-31'),
Timestamp('2024-12-31'),
Timestamp('2025-12-31'),
Timestamp('2026-12-31'),
Timestamp('2027-12-31'),
Timestamp('2028-12-31'),
Timestamp('2029-12-31'),
Timestamp('2030-12-31'),
Timestamp('2031-12-29'),
Timestamp('2032-12-29'),
Timestamp('2033-12-29'),
Timestamp('2034-12-29'),
Timestamp('2035-12-29'),
Timestamp('2036-12-29'),
Timestamp('2037-12-29'),
Timestamp('2038-12-29'),
Timestamp('2039-12-29'),
Timestamp('2040-12-29'),
Timestamp('2041-12-29'),
Timestamp('2042-12-29')])
self.assertEqual(cp_mul_float2.dates, [Timestamp('2019-12-31'),
Timestamp('2020-12-31'),
Timestamp('2021-12-31'),
Timestamp('2022-12-31'),
Timestamp('2023-12-31'),
Timestamp('2024-12-31'),
Timestamp('2025-12-31'),
Timestamp('2026-12-31'),
Timestamp('2027-12-31'),
Timestamp('2028-12-31'),
Timestamp('2029-12-31'),
Timestamp('2030-12-31')])
self.assertEqual(cp_mul_float2.amounts, [20000.0,
22000.0,
24000.0,
26000.0,
28000.0,
30000.0,
32000.0,
34000.0,
36000.0,
38000.0,
40000.0,
42000.0])
class TestPool(unittest.TestCase):
def setUp(self):
self.p = ResultPool(5)
self.items = ['first', 'second', (1, 2, 3), 'this', 24]
self.perfs = [1, 2, 3, 4, 5]
self.additional_result1 = ('abc', 12)
self.additional_result2 = ([1, 2], -1)
self.additional_result3 = (12, 5)
def test_create(self):
self.assertIsInstance(self.p, ResultPool)
def test_operation(self):
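        # ResultPool(5) keeps at most 5 items ranked by their perf values: cut()
        # trims the pool back to capacity, keeping the largest perfs by default
        # and the smallest when keep_largest=False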
self.p.in_pool(self.additional_result1[0], self.additional_result1[1])
self.p.cut()
self.assertEqual(self.p.item_count, 1)
self.assertEqual(self.p.items, ['abc'])
for item, perf in zip(self.items, self.perfs):
self.p.in_pool(item, perf)
self.assertEqual(self.p.item_count, 6)
self.assertEqual(self.p.items, ['abc', 'first', 'second', (1, 2, 3), 'this', 24])
self.p.cut()
self.assertEqual(self.p.items, ['second', (1, 2, 3), 'this', 24, 'abc'])
self.assertEqual(self.p.perfs, [2, 3, 4, 5, 12])
self.p.in_pool(self.additional_result2[0], self.additional_result2[1])
self.p.in_pool(self.additional_result3[0], self.additional_result3[1])
self.assertEqual(self.p.item_count, 7)
self.p.cut(keep_largest=False)
self.assertEqual(self.p.items, [[1, 2], 'second', (1, 2, 3), 'this', 24])
self.assertEqual(self.p.perfs, [-1, 2, 3, 4, 5])
class TestCoreSubFuncs(unittest.TestCase):
"""Test all functions in core.py"""
def setUp(self):
pass
def test_input_to_list(self):
print('Testing input_to_list() function')
input_str = 'first'
self.assertEqual(qt.utilfuncs.input_to_list(input_str, 3), ['first', 'first', 'first'])
self.assertEqual(qt.utilfuncs.input_to_list(input_str, 4), ['first', 'first', 'first', 'first'])
self.assertEqual(qt.utilfuncs.input_to_list(input_str, 2, None), ['first', 'first'])
input_list = ['first', 'second']
self.assertEqual(qt.utilfuncs.input_to_list(input_list, 3), ['first', 'second', None])
self.assertEqual(qt.utilfuncs.input_to_list(input_list, 4, 'padder'), ['first', 'second', 'padder', 'padder'])
self.assertEqual(qt.utilfuncs.input_to_list(input_list, 1), ['first', 'second'])
self.assertEqual(qt.utilfuncs.input_to_list(input_list, -5), ['first', 'second'])
def test_point_in_space(self):
sp = Space([(0., 10.), (0., 10.), (0., 10.)])
p1 = (5.5, 3.2, 7)
p2 = (-1, 3, 10)
self.assertTrue(p1 in sp)
print(f'point {p1} is in space {sp}')
self.assertFalse(p2 in sp)
print(f'point {p2} is not in space {sp}')
sp = Space([(0., 10.), (0., 10.), range(40, 3, -2)], 'conti, conti, enum')
p1 = (5.5, 3.2, 8)
self.assertTrue(p1 in sp)
print(f'point {p1} is in space {sp}')
def test_space_in_space(self):
print('test if a space is in another space')
sp = Space([(0., 10.), (0., 10.), (0., 10.)])
sp2 = Space([(0., 10.), (0., 10.), (0., 10.)])
self.assertTrue(sp2 in sp)
self.assertTrue(sp in sp2)
print(f'space {sp2} is in space {sp}\n'
f'and space {sp} is in space {sp2}\n'
f'they are equal to each other\n')
sp2 = Space([(0, 5.), (2, 7.), (3., 9.)])
self.assertTrue(sp2 in sp)
self.assertFalse(sp in sp2)
print(f'space {sp2} is in space {sp}\n'
f'and space {sp} is not in space {sp2}\n'
f'{sp2} is a sub space of {sp}\n')
sp2 = Space([(0, 5), (2, 7), (3., 9)])
self.assertFalse(sp2 in sp)
self.assertFalse(sp in sp2)
print(f'space {sp2} is not in space {sp}\n'
f'and space {sp} is not in space {sp2}\n'
f'they have different types of axes\n')
sp = Space([(0., 10.), (0., 10.), range(40, 3, -2)])
self.assertFalse(sp in sp2)
self.assertFalse(sp2 in sp)
print(f'space {sp2} is not in space {sp}\n'
f'and space {sp} is not in space {sp2}\n'
f'they have different types of axes\n')
def test_space_around_centre(self):
sp = Space([(0., 10.), (0., 10.), (0., 10.)])
p1 = (5.5, 3.2, 7)
ssp = space_around_centre(space=sp, centre=p1, radius=1.2)
print(ssp.boes)
print('\ntest multiple diameters:')
self.assertEqual(ssp.boes, [(4.3, 6.7), (2.0, 4.4), (5.8, 8.2)])
ssp = space_around_centre(space=sp, centre=p1, radius=[1, 2, 1])
print(ssp.boes)
self.assertEqual(ssp.boes, [(4.5, 6.5), (1.2000000000000002, 5.2), (6.0, 8.0)])
print('\ntest points on edge:')
ssp = space_around_centre(space=sp, centre=p1, radius=3.9)
print(ssp.boes)
self.assertEqual(ssp.boes, [(1.6, 9.4), (0.0, 7.1), (3.1, 10.0)])
print('\ntest enum spaces')
sp = Space([(0, 100), range(40, 3, -2)], 'discr, enum')
p1 = [34, 12]
ssp = space_around_centre(space=sp, centre=p1, radius=5, ignore_enums=False)
self.assertEqual(ssp.boes, [(29, 39), (22, 20, 18, 16, 14, 12, 10, 8, 6, 4)])
print(ssp.boes)
print('\ntest enum space and ignore enum axis')
ssp = space_around_centre(space=sp, centre=p1, radius=5)
self.assertEqual(ssp.boes, [(29, 39),
(40, 38, 36, 34, 32, 30, 28, 26, 24, 22, 20, 18, 16, 14, 12, 10, 8, 6, 4)])
print(sp.boes)
def test_get_stock_pool(self):
print(f'start test building stock pool function\n')
share_basics = stock_basic(fields='ts_code,symbol,name,area,industry,market,list_date,exchange')
print(f'\nselect all stocks by area')
stock_pool = qt.get_stock_pool(area='上海')
print(f'{len(stock_pool)} shares selected, first 5 are: {stock_pool[0:5]}\n'
f'check if all stock areas are "上海"\n'
f'{share_basics[np.isin(share_basics.ts_code, stock_pool)].head()}')
self.assertTrue(share_basics[np.isin(share_basics.ts_code, stock_pool)]['area'].eq('上海').all())
print(f'\nselect all stocks by multiple areas')
stock_pool = qt.get_stock_pool(area='贵州,北京,天津')
print(f'\n{len(stock_pool)} shares selected, first 5 are: {stock_pool[0:5]}\n'
f'check if all stock areas are in list of ["贵州", "北京", "天津"]\n'
f'{share_basics[np.isin(share_basics.ts_code, stock_pool)].head()}')
self.assertTrue(share_basics[np.isin(share_basics.ts_code, stock_pool)]['area'].isin(['贵州',
'北京',
'天津']).all())
print(f'\nselect all stocks by area and industry')
stock_pool = qt.get_stock_pool(area='四川', industry='银行, 金融')
print(f'\n{len(stock_pool)} shares selected, first 5 are: {stock_pool[0:5]}\n'
f'check if all stock areas are "四川", and industry in ["银行", "金融"]\n'
f'{share_basics[np.isin(share_basics.ts_code, stock_pool)].head()}')
self.assertTrue(share_basics[np.isin(share_basics.ts_code, stock_pool)]['industry'].isin(['银行', '金融']).all())
self.assertTrue(share_basics[np.isin(share_basics.ts_code, stock_pool)]['area'].isin(['四川']).all())
print(f'\nselect all stocks by industry')
stock_pool = qt.get_stock_pool(industry='银行, 金融')
print(f'\n{len(stock_pool)} shares selected, first 5 are: {stock_pool[0:5]}\n'
f'check if all stocks industry in ["银行", "金融"]\n'
f'{share_basics[np.isin(share_basics.ts_code, stock_pool)].head()}')
self.assertTrue(share_basics[np.isin(share_basics.ts_code, stock_pool)]['industry'].isin(['银行', '金融']).all())
print(f'\nselect all stocks by market')
stock_pool = qt.get_stock_pool(market='主板')
print(f'\n{len(stock_pool)} shares selected, first 5 are: {stock_pool[0:5]}\n'
f'check if all stock market is "主板"\n'
f'{share_basics[np.isin(share_basics.ts_code, stock_pool)].head()}')
self.assertTrue(share_basics[np.isin(share_basics.ts_code, stock_pool)]['market'].isin(['主板']).all())
print(f'\nselect all stocks by market and list date')
stock_pool = qt.get_stock_pool(date='2000-01-01', market='主板')
print(f'\n{len(stock_pool)} shares selected, first 5 are: {stock_pool[0:5]}\n'
f'check if all stock market is "主板", and list date after "2000-01-01"\n'
f'{share_basics[np.isin(share_basics.ts_code, stock_pool)].head()}')
self.assertTrue(share_basics[np.isin(share_basics.ts_code, stock_pool)]['market'].isin(['主板']).all())
self.assertTrue(share_basics[np.isin(share_basics.ts_code, stock_pool)]['list_date'].le('2000-01-01').all())
print(f'\nselect all stocks by list date')
stock_pool = qt.get_stock_pool(date='1997-01-01')
print(f'\n{len(stock_pool)} shares selected, first 5 are: {stock_pool[0:5]}\n'
f'check if all list date after "1997-01-01"\n'
f'{share_basics[np.isin(share_basics.ts_code, stock_pool)].head()}')
self.assertTrue(share_basics[np.isin(share_basics.ts_code, stock_pool)]['list_date'].le('1997-01-01').all())
print(f'\nselect all stocks by exchange')
stock_pool = qt.get_stock_pool(exchange='SSE')
print(f'\n{len(stock_pool)} shares selected, first 5 are: {stock_pool[0:5]}\n'
f'check if all exchanges are "SSE"\n'
f'{share_basics[np.isin(share_basics.ts_code, stock_pool)].head()}')
self.assertTrue(share_basics[np.isin(share_basics.ts_code, stock_pool)]['exchange'].eq('SSE').all())
print(f'\nselect all stocks by industry, area and list date')
industry_list = ['银行', '全国地产', '互联网', '环境保护', '区域地产',
'酒店餐饮', '运输设备', '综合类', '建筑工程', '玻璃',
'家用电器', '文教休闲', '其他商业', '元器件', 'IT设备',
'其他建材', '汽车服务', '火力发电', '医药商业', '汽车配件',
'广告包装', '轻工机械', '新型电力', '多元金融', '饲料']
area_list = ['深圳', '北京', '吉林', '江苏', '辽宁', '广东',
'安徽', '四川', '浙江', '湖南', '河北', '新疆',
'山东', '河南', '山西', '江西', '青海', '湖北',
'内蒙', '海南', '重庆', '陕西', '福建', '广西',
'上海']
stock_pool = qt.get_stock_pool(date='19980101',
industry=industry_list,
area=area_list)
print(f'\n{len(stock_pool)} shares selected, first 5 are: {stock_pool[0:5]}\n'
f'check if all exchanges are "SSE"\n'
f'{share_basics[np.isin(share_basics.ts_code, stock_pool)].head()}')
self.assertTrue(share_basics[np.isin(share_basics.ts_code, stock_pool)]['list_date'].le('1998-01-01').all())
self.assertTrue(share_basics[np.isin(share_basics.ts_code, stock_pool)]['industry'].isin(industry_list).all())
self.assertTrue(share_basics[np.isin(share_basics.ts_code, stock_pool)]['area'].isin(area_list).all())
self.assertRaises(KeyError, qt.get_stock_pool, industry=25)
self.assertRaises(KeyError, qt.get_stock_pool, share_name='000300.SH')
self.assertRaises(KeyError, qt.get_stock_pool, markets='SSE')
class TestEvaluations(unittest.TestCase):
"""Test all evaluation functions in core.py"""
    # the manually calculated reference results below are recorded in an Excel file
def setUp(self):
"""用np.random生成测试用数据,使用cumsum()模拟股票走势"""
self.test_data1 = pd.DataFrame([5.34892759, 5.65768696, 5.79227076, 5.56266871, 5.88189632,
6.24795001, 5.92755558, 6.38748165, 6.31331899, 5.86001665,
5.61048472, 5.30696736, 5.40406792, 5.03180571, 5.37886353,
5.78608307, 6.26540339, 6.59348026, 6.90943801, 6.70911677,
6.33015954, 6.06697417, 5.9752499, 6.45786408, 6.95273763,
6.7691991, 6.70355481, 6.28048969, 6.61344541, 6.24620003,
6.47409983, 6.4522311, 6.8773094, 6.99727832, 6.59262674,
6.59014938, 6.63758237, 6.38331869, 6.09902105, 6.35390109,
6.51993567, 6.87244592, 6.83963485, 7.08797815, 6.88003144,
6.83657323, 6.97819483, 7.01600276, 7.12554256, 7.58941523,
7.61014457, 7.21224091, 7.48174399, 7.66490854, 7.51371968,
7.11586198, 6.97147399, 6.67453301, 6.2042138, 6.33967015,
6.22187938, 5.98426993, 6.37096079, 6.55897161, 6.26422645,
6.69363762, 7.12668015, 6.83232926, 7.30524081, 7.4262041,
7.54031383, 7.17545919, 7.20659257, 7.44886016, 7.37094393,
6.88011022, 7.08142491, 6.74992833, 6.5967097, 6.21336693,
6.35565105, 6.82347596, 6.44773408, 6.84538053, 6.47966466,
6.09699528, 5.63927014, 6.01081024, 6.20585303, 6.60528206,
7.01594726, 7.03684251, 6.76574977, 7.08740846, 6.65336462,
7.07126686, 6.80058956, 6.79241977, 6.47843472, 6.39245474],
columns=['value'])
self.test_data2 = pd.DataFrame([5.09276527, 4.83828592, 4.6000911, 4.63170487, 4.63566451,
4.50546921, 4.96390044, 4.64557907, 4.25787855, 3.76585551,
3.38826334, 3.76243422, 4.06365426, 3.87084726, 3.91400935,
4.13438822, 4.27064542, 4.56776104, 5.03800296, 5.31070529,
5.39902276, 5.21186286, 5.05683114, 4.68842046, 5.11895168,
5.27151571, 5.72294993, 6.09961056, 6.26569635, 6.48806151,
6.16058885, 6.2582459, 6.38934791, 6.57831057, 6.19508831,
5.70155153, 5.20435735, 5.36538825, 5.40450056, 5.2227697,
5.37828693, 5.53058991, 6.02996797, 5.76802181, 5.66166713,
6.07988994, 5.61794367, 5.63218151, 6.10728013, 6.0324168,
6.27164431, 6.27551239, 6.52329665, 7.00470007, 7.34163113,
7.33699083, 7.67661334, 8.09395749, 7.68086668, 7.58341161,
7.46219819, 7.58671899, 7.19348298, 7.40088323, 7.47562005,
7.93342043, 8.2286081, 8.3521632, 8.43590025, 8.34977395,
8.57563095, 8.81586328, 9.08738649, 9.01542031, 8.8653815,
9.21763111, 9.04233017, 8.59533999, 8.47590075, 8.70857222,
8.78890756, 8.92697606, 9.35743773, 9.68280866, 10.15622021,
10.55908549, 10.6337894, 10.55197128, 10.65435176, 10.54611045,
10.19432562, 10.48320884, 10.36176768, 10.03186854, 10.23656092,
10.0062843, 10.13669686, 10.30758958, 9.87904176, 10.05126375],
columns=['value'])
self.test_data3 = pd.DataFrame([5.02851874, 5.20700348, 5.02410709, 5.49836387, 5.06834371,
5.10956737, 5.15314979, 5.02256472, 5.09746382, 5.23909247,
4.93410336, 4.96316186, 5.40026682, 5.7353255, 5.53438319,
5.79092139, 5.67528173, 5.89840855, 5.75379463, 6.10855386,
5.77322365, 5.84538021, 5.6103973, 5.7518655, 5.49729695,
5.13610628, 5.30524121, 5.68093462, 5.73251319, 6.04420783,
6.26929843, 6.59610234, 6.09872345, 6.25475121, 6.72927396,
6.91395783, 7.00693283, 7.36217783, 7.71516676, 7.67580263,
7.62477511, 7.73600568, 7.53457914, 7.46170277, 7.83658014,
8.11481319, 8.03705544, 7.64948845, 7.52043731, 7.67247943,
7.46511982, 7.43541798, 7.58856517, 7.9392717, 8.25406287,
7.77031632, 8.03223447, 7.86799055, 7.57630999, 7.33230519,
7.22378732, 6.85972264, 7.17548456, 7.5387846, 7.2392632,
6.8455644, 6.59557185, 6.6496796, 6.73685623, 7.18598015,
7.13619128, 6.88060157, 7.1399681, 7.30308077, 6.94942434,
7.0247815, 7.37567798, 7.50080197, 7.59719284, 7.14520561,
7.29913484, 7.79551341, 8.15497781, 8.40456095, 8.86516528,
8.53042688, 8.94268762, 8.52048006, 8.80036284, 8.91602364,
9.19953385, 8.70828953, 8.24613093, 8.18770453, 7.79548389,
7.68627967, 7.23205036, 6.98302636, 7.06515819, 6.95068113],
columns=['value'])
self.test_data4 = pd.DataFrame([4.97926539, 5.44016005, 5.45122915, 5.74485615, 5.45600553,
5.44858945, 5.2435413, 5.47315161, 5.58464303, 5.36179749,
5.38236326, 5.29614981, 5.76523508, 5.75102892, 6.15316618,
6.03852528, 6.01442228, 5.70510182, 5.22748133, 5.46762379,
5.78926267, 5.8221362, 5.61236849, 5.30615725, 5.24200611,
5.41042642, 5.59940342, 5.28306781, 4.99451932, 5.08799266,
5.38865647, 5.58229139, 5.33492845, 5.48206276, 5.09721379,
5.39190493, 5.29965087, 5.0374415, 5.50798022, 5.43107577,
5.22759507, 4.991809, 5.43153084, 5.39966868, 5.59916352,
5.66412137, 6.00611838, 5.63564902, 5.66723484, 5.29863863,
4.91115153, 5.3749929, 5.75082334, 6.08308148, 6.58091182,
6.77848803, 7.19588758, 7.64862286, 7.99818347, 7.91824794,
8.30341071, 8.45984973, 7.98700002, 8.18924931, 8.60755649,
8.66233396, 8.91018407, 9.0782739, 9.33515448, 8.95870245,
8.98426422, 8.50340317, 8.64916085, 8.93592407, 8.63145745,
8.65322862, 8.39543204, 8.37969997, 8.23394504, 8.04062872,
7.91259763, 7.57252171, 7.72670114, 7.74486117, 8.06908188,
7.99166889, 7.92155906, 8.39956136, 8.80181323, 8.47464091,
8.06557064, 7.87145573, 8.0237959, 8.39481998, 8.68525692,
8.81185461, 8.98632237, 9.0989835, 8.89787405, 8.86508591],
columns=['value'])
self.test_data5 = pd.DataFrame([4.50258923, 4.35142568, 4.07459514, 3.87791297, 3.73715985,
3.98455684, 4.07587908, 4.00042472, 4.28276612, 4.01362051,
4.13713565, 4.49312372, 4.48633159, 4.4641207, 4.13444605,
3.79107217, 4.22941629, 4.56548511, 4.92472163, 5.27723158,
5.67409193, 6.00176917, 5.88889928, 5.55256103, 5.39308314,
5.2610492, 5.30738908, 5.22222408, 4.90332238, 4.57499908,
4.96097146, 4.81531011, 4.39115442, 4.63200662, 5.04588813,
4.67866025, 5.01705123, 4.83562258, 4.60381702, 4.66187576,
4.41292828, 4.86604507, 4.42280124, 4.07517294, 4.16317319,
4.10316596, 4.42913598, 4.06609666, 3.96725913, 4.15965746,
4.12379564, 4.04054068, 3.84342851, 3.45902867, 3.17649855,
3.09773586, 3.5502119, 3.66396995, 3.66306483, 3.29131401,
2.79558533, 2.88319542, 3.03671098, 3.44645857, 3.88167161,
3.57961874, 3.60180276, 3.96702102, 4.05429995, 4.40056979,
4.05653231, 3.59600456, 3.60792477, 4.09989922, 3.73503663,
4.01892626, 3.94597242, 3.81466605, 3.71417992, 3.93767156,
4.42806557, 4.06988106, 4.03713636, 4.34408673, 4.79810156,
5.18115011, 4.89798406, 5.3960077, 5.72504875, 5.61894017,
5.1958197, 4.85275896, 5.17550207, 4.71548987, 4.62408567,
4.55488535, 4.36532649, 4.26031979, 4.25225607, 4.58627048],
columns=['value'])
self.test_data6 = pd.DataFrame([5.08639513, 5.05761083, 4.76160923, 4.62166504, 4.62923183,
4.25070173, 4.13447513, 3.90890013, 3.76687608, 3.43342482,
3.67648224, 3.6274775, 3.9385404, 4.39771627, 4.03199346,
3.93265288, 3.50059789, 3.3851961, 3.29743973, 3.2544872,
2.93692949, 2.70893003, 2.55461976, 2.20922332, 2.29054475,
2.2144714, 2.03726827, 2.39007617, 2.29866155, 2.40607111,
2.40440444, 2.79374649, 2.66541922, 2.27018079, 2.08505127,
2.55478864, 2.22415625, 2.58517923, 2.58802256, 2.94870959,
2.69301739, 2.19991535, 2.69473146, 2.64704637, 2.62753542,
2.14240825, 2.38565154, 1.94592117, 2.32243877, 2.69337246,
2.51283854, 2.62484451, 2.15559054, 2.35410875, 2.31219177,
1.96018265, 2.34711266, 2.58083322, 2.40290041, 2.20439791,
2.31472425, 2.16228248, 2.16439749, 2.20080737, 1.73293206,
1.9264407, 2.25089861, 2.69269101, 2.59296687, 2.1420998,
1.67819153, 1.98419023, 2.14479494, 1.89055376, 1.96720648,
1.9916694, 2.37227761, 2.14446036, 2.34573903, 1.86162546,
2.1410721, 2.39204939, 2.52529064, 2.47079939, 2.9299031,
3.09452923, 2.93276708, 3.21731309, 3.06248964, 2.90413406,
2.67844632, 2.45621213, 2.41463398, 2.7373913, 3.14917045,
3.4033949, 3.82283446, 4.02285451, 3.7619638, 4.10346795],
columns=['value'])
self.test_data7 = pd.DataFrame([4.75233583, 4.47668283, 4.55894263, 4.61765848, 4.622892,
4.58941116, 4.32535872, 3.88112797, 3.47237806, 3.50898953,
3.82530406, 3.6718017, 3.78918195, 4.1800752, 4.01818557,
4.40822582, 4.65474654, 4.89287256, 4.40879274, 4.65505126,
4.36876403, 4.58418934, 4.75687172, 4.3689799, 4.16126498,
4.0203982, 3.77148242, 3.38198096, 3.07261764, 2.9014741,
2.5049543, 2.756105, 2.28779058, 2.16986991, 1.8415962,
1.83319008, 2.20898291, 2.00128981, 1.75747025, 1.26676663,
1.40316876, 1.11126484, 1.60376367, 1.22523829, 1.58816681,
1.49705679, 1.80244138, 1.55128293, 1.35339409, 1.50985759,
1.0808451, 1.05892796, 1.43414812, 1.43039101, 1.73631655,
1.43940867, 1.82864425, 1.71088265, 2.12015154, 2.45417128,
2.84777618, 2.7925612, 2.90975121, 3.25920745, 3.13801182,
3.52733677, 3.65468491, 3.69395211, 3.49862035, 3.24786017,
3.64463138, 4.00331929, 3.62509565, 3.78013949, 3.4174012,
3.76312271, 3.62054004, 3.67206716, 3.60596058, 3.38636199,
3.42580676, 3.32921095, 3.02976759, 3.28258676, 3.45760838,
3.24917528, 2.94618304, 2.86980011, 2.63191259, 2.39566759,
2.53159917, 2.96273967, 3.25626185, 2.97425402, 3.16412191,
3.58280763, 3.23257727, 3.62353556, 3.12806399, 2.92532313],
columns=['value'])
        # build a 500-point test series to exercise evaluation when there are more than 250 data points
self.long_data = pd.DataFrame([9.879, 9.916, 10.109, 10.214, 10.361, 10.768, 10.594, 10.288,
10.082, 9.994, 10.125, 10.126, 10.384, 10.734, 10.4, 10.87,
11.338, 11.061, 11.415, 11.724, 12.077, 12.196, 12.064, 12.423,
12.19, 11.729, 11.677, 11.448, 11.485, 10.989, 11.242, 11.239,
11.113, 11.075, 11.471, 11.745, 11.754, 11.782, 12.079, 11.97,
12.178, 11.95, 12.438, 12.612, 12.804, 12.952, 12.612, 12.867,
12.832, 12.832, 13.015, 13.315, 13.249, 12.904, 12.776, 12.64,
12.543, 12.287, 12.225, 11.844, 11.985, 11.945, 11.542, 11.871,
12.245, 12.228, 12.362, 11.899, 11.962, 12.374, 12.816, 12.649,
12.252, 12.579, 12.3, 11.988, 12.177, 12.312, 12.744, 12.599,
12.524, 12.82, 12.67, 12.876, 12.986, 13.271, 13.606, 13.82,
14.161, 13.833, 13.831, 14.137, 13.705, 13.414, 13.037, 12.759,
12.642, 12.948, 13.297, 13.483, 13.836, 14.179, 13.709, 13.655,
13.198, 13.508, 13.953, 14.387, 14.043, 13.987, 13.561, 13.391,
12.923, 12.555, 12.503, 12.292, 11.877, 12.34, 12.141, 11.687,
11.992, 12.458, 12.131, 11.75, 11.739, 11.263, 11.762, 11.976,
11.578, 11.854, 12.136, 12.422, 12.311, 12.56, 12.879, 12.861,
12.973, 13.235, 13.53, 13.531, 13.137, 13.166, 13.31, 13.103,
13.007, 12.643, 12.69, 12.216, 12.385, 12.046, 12.321, 11.9,
11.772, 11.816, 11.871, 11.59, 11.518, 11.94, 11.803, 11.924,
12.183, 12.136, 12.361, 12.406, 11.932, 11.684, 11.292, 11.388,
11.874, 12.184, 12.002, 12.16, 11.741, 11.26, 11.123, 11.534,
11.777, 11.407, 11.275, 11.679, 11.62, 11.218, 11.235, 11.352,
11.366, 11.061, 10.661, 10.582, 10.899, 11.352, 11.792, 11.475,
11.263, 11.538, 11.183, 10.936, 11.399, 11.171, 11.214, 10.89,
10.728, 11.191, 11.646, 11.62, 11.195, 11.178, 11.18, 10.956,
11.205, 10.87, 11.098, 10.639, 10.487, 10.507, 10.92, 10.558,
10.119, 9.882, 9.573, 9.515, 9.845, 9.852, 9.495, 9.726,
10.116, 10.452, 10.77, 11.225, 10.92, 10.824, 11.096, 11.542,
11.06, 10.568, 10.585, 10.884, 10.401, 10.068, 9.964, 10.285,
10.239, 10.036, 10.417, 10.132, 9.839, 9.556, 9.084, 9.239,
9.304, 9.067, 8.587, 8.471, 8.007, 8.321, 8.55, 9.008,
9.138, 9.088, 9.434, 9.156, 9.65, 9.431, 9.654, 10.079,
10.411, 10.865, 10.51, 10.205, 10.519, 10.367, 10.855, 10.642,
10.298, 10.622, 10.173, 9.792, 9.995, 9.904, 9.771, 9.597,
9.506, 9.212, 9.688, 10.032, 9.723, 9.839, 9.918, 10.332,
10.236, 9.989, 10.192, 10.685, 10.908, 11.275, 11.72, 12.158,
12.045, 12.244, 12.333, 12.246, 12.552, 12.958, 13.11, 13.53,
13.123, 13.138, 13.57, 13.389, 13.511, 13.759, 13.698, 13.744,
13.467, 13.795, 13.665, 13.377, 13.423, 13.772, 13.295, 13.073,
12.718, 12.388, 12.399, 12.185, 11.941, 11.818, 11.465, 11.811,
12.163, 11.86, 11.935, 11.809, 12.145, 12.624, 12.768, 12.321,
12.277, 11.889, 12.11, 12.606, 12.943, 12.945, 13.112, 13.199,
13.664, 14.051, 14.189, 14.339, 14.611, 14.656, 15.112, 15.086,
15.263, 15.021, 15.346, 15.572, 15.607, 15.983, 16.151, 16.215,
16.096, 16.089, 16.32, 16.59, 16.657, 16.752, 16.583, 16.743,
16.373, 16.662, 16.243, 16.163, 16.491, 16.958, 16.977, 17.225,
17.637, 17.344, 17.684, 17.892, 18.036, 18.182, 17.803, 17.588,
17.101, 17.538, 17.124, 16.787, 17.167, 17.138, 16.955, 17.148,
17.135, 17.635, 17.718, 17.675, 17.622, 17.358, 17.754, 17.729,
17.576, 17.772, 18.239, 18.441, 18.729, 18.319, 18.608, 18.493,
18.069, 18.122, 18.314, 18.423, 18.709, 18.548, 18.384, 18.391,
17.988, 17.986, 17.653, 17.249, 17.298, 17.06, 17.36, 17.108,
17.348, 17.596, 17.46, 17.635, 17.275, 17.291, 16.933, 17.337,
17.231, 17.146, 17.148, 16.751, 16.891, 17.038, 16.735, 16.64,
16.231, 15.957, 15.977, 16.077, 16.054, 15.797, 15.67, 15.911,
16.077, 16.17, 15.722, 15.258, 14.877, 15.138, 15., 14.811,
14.698, 14.407, 14.583, 14.704, 15.153, 15.436, 15.634, 15.453,
15.877, 15.696, 15.563, 15.927, 16.255, 16.696, 16.266, 16.698,
16.365, 16.493, 16.973, 16.71, 16.327, 16.605, 16.486, 16.846,
16.935, 17.21, 17.389, 17.546, 17.773, 17.641, 17.485, 17.794,
17.354, 16.904, 16.675, 16.43, 16.898, 16.819, 16.921, 17.201,
17.617, 17.368, 17.864, 17.484],
columns=['value'])
self.long_bench = pd.DataFrame([9.7, 10.179, 10.321, 9.855, 9.936, 10.096, 10.331, 10.662,
10.59, 11.031, 11.154, 10.945, 10.625, 10.233, 10.284, 10.252,
10.221, 10.352, 10.444, 10.773, 10.904, 11.104, 10.797, 10.55,
10.943, 11.352, 11.641, 11.983, 11.696, 12.138, 12.365, 12.379,
11.969, 12.454, 12.947, 13.119, 13.013, 12.763, 12.632, 13.034,
12.681, 12.561, 12.938, 12.867, 13.202, 13.132, 13.539, 13.91,
13.456, 13.692, 13.771, 13.904, 14.069, 13.728, 13.97, 14.228,
13.84, 14.041, 13.963, 13.689, 13.543, 13.858, 14.118, 13.987,
13.611, 14.028, 14.229, 14.41, 14.74, 15.03, 14.915, 15.207,
15.354, 15.665, 15.877, 15.682, 15.625, 15.175, 15.105, 14.893,
14.86, 15.097, 15.178, 15.293, 15.238, 15., 15.283, 14.994,
14.907, 14.664, 14.888, 15.297, 15.313, 15.368, 14.956, 14.802,
14.506, 14.257, 14.619, 15.019, 15.049, 14.625, 14.894, 14.978,
15.434, 15.578, 16.038, 16.107, 16.277, 16.365, 16.204, 16.465,
16.401, 16.895, 17.057, 16.621, 16.225, 16.075, 15.863, 16.292,
16.551, 16.724, 16.817, 16.81, 17.192, 16.86, 16.745, 16.707,
16.552, 16.133, 16.301, 16.08, 15.81, 15.75, 15.909, 16.127,
16.457, 16.204, 16.329, 16.748, 16.624, 17.011, 16.548, 16.831,
16.653, 16.791, 16.57, 16.778, 16.928, 16.932, 17.22, 16.876,
17.301, 17.422, 17.689, 17.316, 17.547, 17.534, 17.409, 17.669,
17.416, 17.859, 17.477, 17.307, 17.245, 17.352, 17.851, 17.412,
17.144, 17.138, 17.085, 16.926, 16.674, 16.854, 17.064, 16.95,
16.609, 16.957, 16.498, 16.552, 16.175, 15.858, 15.697, 15.781,
15.583, 15.36, 15.558, 16.046, 15.968, 15.905, 16.358, 16.783,
17.048, 16.762, 17.224, 17.363, 17.246, 16.79, 16.608, 16.423,
15.991, 15.527, 15.147, 14.759, 14.792, 15.206, 15.148, 15.046,
15.429, 14.999, 15.407, 15.124, 14.72, 14.713, 15.022, 15.092,
14.982, 15.001, 14.734, 14.713, 14.841, 14.562, 15.005, 15.483,
15.472, 15.277, 15.503, 15.116, 15.12, 15.442, 15.476, 15.789,
15.36, 15.764, 16.218, 16.493, 16.642, 17.088, 16.816, 16.645,
16.336, 16.511, 16.2, 15.994, 15.86, 15.929, 16.316, 16.416,
16.746, 17.173, 17.531, 17.627, 17.407, 17.49, 17.768, 17.509,
17.795, 18.147, 18.63, 18.945, 19.021, 19.518, 19.6, 19.744,
19.63, 19.32, 18.933, 19.297, 19.598, 19.446, 19.236, 19.198,
19.144, 19.159, 19.065, 19.032, 18.586, 18.272, 18.119, 18.3,
17.894, 17.744, 17.5, 17.083, 17.092, 16.864, 16.453, 16.31,
16.681, 16.342, 16.447, 16.715, 17.068, 17.067, 16.822, 16.673,
16.675, 16.592, 16.686, 16.397, 15.902, 15.597, 15.357, 15.162,
15.348, 15.603, 15.283, 15.257, 15.082, 14.621, 14.366, 14.039,
13.957, 14.141, 13.854, 14.243, 14.414, 14.033, 13.93, 14.104,
14.461, 14.249, 14.053, 14.165, 14.035, 14.408, 14.501, 14.019,
14.265, 14.67, 14.797, 14.42, 14.681, 15.16, 14.715, 14.292,
14.411, 14.656, 15.094, 15.366, 15.055, 15.198, 14.762, 14.294,
13.854, 13.811, 13.549, 13.927, 13.897, 13.421, 13.037, 13.32,
13.721, 13.511, 13.999, 13.529, 13.418, 13.881, 14.326, 14.362,
13.987, 14.015, 13.599, 13.343, 13.307, 13.689, 13.851, 13.404,
13.577, 13.395, 13.619, 13.195, 12.904, 12.553, 12.294, 12.649,
12.425, 11.967, 12.062, 11.71, 11.645, 12.058, 12.136, 11.749,
11.953, 12.401, 12.044, 11.901, 11.631, 11.396, 11.036, 11.244,
10.864, 11.207, 11.135, 11.39, 11.723, 12.084, 11.8, 11.471,
11.33, 11.504, 11.295, 11.3, 10.901, 10.494, 10.825, 11.054,
10.866, 10.713, 10.875, 10.846, 10.947, 11.422, 11.158, 10.94,
10.521, 10.36, 10.411, 10.792, 10.472, 10.305, 10.525, 10.853,
10.556, 10.72, 10.54, 10.583, 10.299, 10.061, 10.004, 9.903,
9.796, 9.472, 9.246, 9.54, 9.456, 9.177, 9.484, 9.557,
9.493, 9.968, 9.536, 9.39, 8.922, 8.423, 8.518, 8.686,
8.771, 9.098, 9.281, 8.858, 9.027, 8.553, 8.784, 8.996,
9.379, 9.846, 9.855, 9.502, 9.608, 9.761, 9.409, 9.4,
9.332, 9.34, 9.284, 8.844, 8.722, 8.376, 8.775, 8.293,
8.144, 8.63, 8.831, 8.957, 9.18, 9.601, 9.695, 10.018,
9.841, 9.743, 9.292, 8.85, 9.316, 9.288, 9.519, 9.738,
9.289, 9.785, 9.804, 10.06, 10.188, 10.095, 9.739, 9.881,
9.7, 9.991, 10.391, 10.002],
columns=['value'])
def test_performance_stats(self):
"""test the function performance_statistics()
"""
pass
def test_fv(self):
print(f'test with test data and empty DataFrame')
self.assertAlmostEqual(eval_fv(self.test_data1), 6.39245474)
self.assertAlmostEqual(eval_fv(self.test_data2), 10.05126375)
self.assertAlmostEqual(eval_fv(self.test_data3), 6.95068113)
self.assertAlmostEqual(eval_fv(self.test_data4), 8.86508591)
self.assertAlmostEqual(eval_fv(self.test_data5), 4.58627048)
self.assertAlmostEqual(eval_fv(self.test_data6), 4.10346795)
self.assertAlmostEqual(eval_fv(self.test_data7), 2.92532313)
self.assertAlmostEqual(eval_fv(pd.DataFrame()), -np.inf)
print(f'Error testing')
self.assertRaises(AssertionError, eval_fv, 15)
self.assertRaises(KeyError,
eval_fv,
pd.DataFrame([1, 2, 3], columns=['non_value']))
def test_max_drawdown(self):
print(f'test with test data and empty DataFrame')
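        # eval_max_drawdown returns (drawdown, peak index, valley index, recovery index);
        # for test_data1: (7.66490854 - 5.63927014) / 7.66490854 ≈ 0.264274 with the
        # peak at 53, the valley at 86, and a NaN recovery since the peak is never regained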
self.assertAlmostEqual(eval_max_drawdown(self.test_data1)[0], 0.264274308)
self.assertEqual(eval_max_drawdown(self.test_data1)[1], 53)
self.assertEqual(eval_max_drawdown(self.test_data1)[2], 86)
self.assertTrue(np.isnan(eval_max_drawdown(self.test_data1)[3]))
self.assertAlmostEqual(eval_max_drawdown(self.test_data2)[0], 0.334690849)
self.assertEqual(eval_max_drawdown(self.test_data2)[1], 0)
self.assertEqual(eval_max_drawdown(self.test_data2)[2], 10)
self.assertEqual(eval_max_drawdown(self.test_data2)[3], 19)
self.assertAlmostEqual(eval_max_drawdown(self.test_data3)[0], 0.244452899)
self.assertEqual(eval_max_drawdown(self.test_data3)[1], 90)
self.assertEqual(eval_max_drawdown(self.test_data3)[2], 99)
self.assertTrue(np.isnan(eval_max_drawdown(self.test_data3)[3]))
self.assertAlmostEqual(eval_max_drawdown(self.test_data4)[0], 0.201849684)
self.assertEqual(eval_max_drawdown(self.test_data4)[1], 14)
self.assertEqual(eval_max_drawdown(self.test_data4)[2], 50)
self.assertEqual(eval_max_drawdown(self.test_data4)[3], 54)
self.assertAlmostEqual(eval_max_drawdown(self.test_data5)[0], 0.534206456)
self.assertEqual(eval_max_drawdown(self.test_data5)[1], 21)
self.assertEqual(eval_max_drawdown(self.test_data5)[2], 60)
self.assertTrue(np.isnan(eval_max_drawdown(self.test_data5)[3]))
self.assertAlmostEqual(eval_max_drawdown(self.test_data6)[0], 0.670062689)
self.assertEqual(eval_max_drawdown(self.test_data6)[1], 0)
self.assertEqual(eval_max_drawdown(self.test_data6)[2], 70)
self.assertTrue(np.isnan(eval_max_drawdown(self.test_data6)[3]))
self.assertAlmostEqual(eval_max_drawdown(self.test_data7)[0], 0.783577449)
self.assertEqual(eval_max_drawdown(self.test_data7)[1], 17)
self.assertEqual(eval_max_drawdown(self.test_data7)[2], 51)
self.assertTrue(np.isnan(eval_max_drawdown(self.test_data7)[3]))
self.assertEqual(eval_max_drawdown(pd.DataFrame()), -np.inf)
print(f'Error testing')
self.assertRaises(AssertionError, eval_fv, 15)
self.assertRaises(KeyError,
eval_fv,
pd.DataFrame([1, 2, 3], columns=['non_value']))
# test max drawdown == 0:
        # TODO: investigate how the divide-by-zero case changes the result
self.assertAlmostEqual(eval_max_drawdown(self.test_data4 - 5)[0], 1.0770474121951792)
self.assertEqual(eval_max_drawdown(self.test_data4 - 5)[1], 14)
self.assertEqual(eval_max_drawdown(self.test_data4 - 5)[2], 50)
def test_info_ratio(self):
reference = self.test_data1
self.assertAlmostEqual(eval_info_ratio(self.test_data2, reference, 'value'), 0.075553316)
self.assertAlmostEqual(eval_info_ratio(self.test_data3, reference, 'value'), 0.018949457)
self.assertAlmostEqual(eval_info_ratio(self.test_data4, reference, 'value'), 0.056328143)
self.assertAlmostEqual(eval_info_ratio(self.test_data5, reference, 'value'), -0.004270068)
self.assertAlmostEqual(eval_info_ratio(self.test_data6, reference, 'value'), 0.009198027)
self.assertAlmostEqual(eval_info_ratio(self.test_data7, reference, 'value'), -0.000890283)
def test_volatility(self):
self.assertAlmostEqual(eval_volatility(self.test_data1), 0.748646166)
self.assertAlmostEqual(eval_volatility(self.test_data2), 0.75527442)
self.assertAlmostEqual(eval_volatility(self.test_data3), 0.654188853)
self.assertAlmostEqual(eval_volatility(self.test_data4), 0.688375814)
self.assertAlmostEqual(eval_volatility(self.test_data5), 1.089989522)
self.assertAlmostEqual(eval_volatility(self.test_data6), 1.775419308)
self.assertAlmostEqual(eval_volatility(self.test_data7), 1.962758406)
self.assertAlmostEqual(eval_volatility(self.test_data1, logarithm=False), 0.750993311)
self.assertAlmostEqual(eval_volatility(self.test_data2, logarithm=False), 0.75571473)
self.assertAlmostEqual(eval_volatility(self.test_data3, logarithm=False), 0.655331424)
self.assertAlmostEqual(eval_volatility(self.test_data4, logarithm=False), 0.692683021)
self.assertAlmostEqual(eval_volatility(self.test_data5, logarithm=False), 1.09602969)
self.assertAlmostEqual(eval_volatility(self.test_data6, logarithm=False), 1.774789504)
self.assertAlmostEqual(eval_volatility(self.test_data7, logarithm=False), 2.003329156)
self.assertEqual(eval_volatility(pd.DataFrame()), -np.inf)
self.assertRaises(AssertionError, eval_volatility, [1, 2, 3])
        # test volatility calculation on long data series
expected_volatility = np.array([np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
0.39955371, 0.39974258, 0.40309866, 0.40486593, 0.4055514,
0.40710639, 0.40708157, 0.40609006, 0.4073625, 0.40835305,
0.41155304, 0.41218193, 0.41207489, 0.41300276, 0.41308415,
0.41292392, 0.41207645, 0.41238397, 0.41229291, 0.41164056,
0.41316317, 0.41348842, 0.41462249, 0.41474574, 0.41652625,
0.41649176, 0.41701556, 0.4166593, 0.41684221, 0.41491689,
0.41435209, 0.41549087, 0.41849338, 0.41998049, 0.41959106,
0.41907311, 0.41916103, 0.42120773, 0.42052391, 0.42111225,
0.42124589, 0.42356445, 0.42214672, 0.42324022, 0.42476639,
0.42621689, 0.42549439, 0.42533678, 0.42539414, 0.42545038,
0.42593637, 0.42652095, 0.42665489, 0.42699563, 0.42798159,
0.42784512, 0.42898006, 0.42868781, 0.42874188, 0.42789631,
0.4277768, 0.42776827, 0.42685216, 0.42660989, 0.42563155,
0.42618281, 0.42606281, 0.42505222, 0.42653242, 0.42555378,
0.42500842, 0.42561939, 0.42442059, 0.42395414, 0.42384356,
0.42319135, 0.42397497, 0.42488579, 0.42449729, 0.42508766,
0.42509878, 0.42456616, 0.42535577, 0.42681884, 0.42688552,
0.42779918, 0.42706058, 0.42792887, 0.42762114, 0.42894045,
0.42977398, 0.42919859, 0.42829041, 0.42780946, 0.42825318,
0.42858952, 0.42858315, 0.42805601, 0.42764751, 0.42744107,
0.42775518, 0.42707283, 0.4258592, 0.42615335, 0.42526286,
0.4248906, 0.42368986, 0.4232565, 0.42265079, 0.42263954,
0.42153046, 0.42132051, 0.41995353, 0.41916605, 0.41914271,
0.41876945, 0.41740175, 0.41583884, 0.41614026, 0.41457908,
0.41472411, 0.41310876, 0.41261041, 0.41212369, 0.41211677,
0.4100645, 0.40852504, 0.40860297, 0.40745338, 0.40698661,
0.40644546, 0.40591375, 0.40640744, 0.40620663, 0.40656649,
0.40727154, 0.40797605, 0.40807137, 0.40808913, 0.40809676,
0.40711767, 0.40724628, 0.40713077, 0.40772698, 0.40765157,
0.40658297, 0.4065991, 0.405011, 0.40537645, 0.40432626,
0.40390177, 0.40237701, 0.40291623, 0.40301797, 0.40324145,
0.40312864, 0.40328316, 0.40190955, 0.40246506, 0.40237663,
0.40198407, 0.401969, 0.40185623, 0.40198313, 0.40005643,
0.39940743, 0.39850438, 0.39845398, 0.39695093, 0.39697295,
0.39663201, 0.39675444, 0.39538699, 0.39331959, 0.39326074,
0.39193287, 0.39157266, 0.39021327, 0.39062591, 0.38917591,
0.38976991, 0.38864187, 0.38872158, 0.38868096, 0.38868377,
0.38842057, 0.38654784, 0.38649517, 0.38600464, 0.38408115,
0.38323049, 0.38260215, 0.38207663, 0.38142669, 0.38003262,
0.37969367, 0.37768092, 0.37732108, 0.37741991, 0.37617779,
0.37698504, 0.37606784, 0.37499276, 0.37533731, 0.37350437,
0.37375172, 0.37385382, 0.37384003, 0.37338938, 0.37212288,
0.37273075, 0.370559, 0.37038506, 0.37062153, 0.36964661,
0.36818564, 0.3656634, 0.36539259, 0.36428672, 0.36502487,
0.3647148, 0.36551435, 0.36409919, 0.36348181, 0.36254383,
0.36166601, 0.36142665, 0.35954942, 0.35846915, 0.35886759,
0.35813867, 0.35642888, 0.35375231, 0.35061783, 0.35078463,
0.34995508, 0.34688918, 0.34548257, 0.34633158, 0.34622833,
0.34652111, 0.34622774, 0.34540951, 0.34418809, 0.34276593,
0.34160916, 0.33811193, 0.33822709, 0.3391685, 0.33883381])
test_volatility = eval_volatility(self.long_data)
test_volatility_roll = self.long_data['volatility'].values
self.assertAlmostEqual(test_volatility, np.nanmean(expected_volatility))
self.assertTrue(np.allclose(expected_volatility, test_volatility_roll, equal_nan=True))
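        # The 250 leading NaNs in expected_volatility suggest a 250-day
        # rolling window; a sketch of the presumed rolling volatility
        # (annualized std of log returns, or simple returns when
        # logarithm=False):
        #     ret = np.log(df['value'] / df['value'].shift(1))
        #     vol = ret.rolling(250).std() * np.sqrt(250)
        # and, matching the assertion above, the scalar result equals the
        # nanmean of the rolling series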
def test_sharp(self):
self.assertAlmostEqual(eval_sharp(self.test_data1, 5, 0), 0.06135557)
self.assertAlmostEqual(eval_sharp(self.test_data2, 5, 0), 0.167858667)
self.assertAlmostEqual(eval_sharp(self.test_data3, 5, 0), 0.09950547)
self.assertAlmostEqual(eval_sharp(self.test_data4, 5, 0), 0.154928241)
self.assertAlmostEqual(eval_sharp(self.test_data5, 5, 0.002), 0.007868673)
self.assertAlmostEqual(eval_sharp(self.test_data6, 5, 0.002), 0.018306537)
self.assertAlmostEqual(eval_sharp(self.test_data7, 5, 0.002), 0.006259971)
        # test sharpe ratio calculation on long data series
expected_sharp = np.array([np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
-0.02346815, -0.02618783, -0.03763912, -0.03296276, -0.03085698,
-0.02851101, -0.02375842, -0.02016746, -0.01107885, -0.01426613,
-0.00787204, -0.01135784, -0.01164232, -0.01003481, -0.00022512,
-0.00046792, -0.01209378, -0.01278892, -0.01298135, -0.01938214,
-0.01671044, -0.02120509, -0.0244281, -0.02416067, -0.02763238,
-0.027579, -0.02372774, -0.02215294, -0.02467094, -0.02091266,
-0.02590194, -0.03049876, -0.02077131, -0.01483653, -0.02488144,
-0.02671638, -0.02561547, -0.01957986, -0.02479803, -0.02703162,
-0.02658087, -0.01641755, -0.01946472, -0.01647757, -0.01280889,
-0.00893643, -0.00643275, -0.00698457, -0.00549962, -0.00654677,
-0.00494757, -0.0035633, -0.00109037, 0.00750654, 0.00451208,
0.00625502, 0.01221367, 0.01326454, 0.01535037, 0.02269538,
0.02028715, 0.02127712, 0.02333264, 0.02273159, 0.01670643,
0.01376513, 0.01265342, 0.02211647, 0.01612449, 0.00856706,
-0.00077147, -0.00268848, 0.00210993, -0.00443934, -0.00411912,
-0.0018756, -0.00867461, -0.00581601, -0.00660835, -0.00861137,
-0.00678614, -0.01188408, -0.00589617, -0.00244323, -0.00201891,
-0.01042846, -0.01471016, -0.02167034, -0.02258554, -0.01306809,
-0.00909086, -0.01233746, -0.00595166, -0.00184208, 0.00750497,
0.01481886, 0.01761972, 0.01562886, 0.01446414, 0.01285826,
0.01357719, 0.00967613, 0.01636272, 0.01458437, 0.02280183,
0.02151903, 0.01700276, 0.01597368, 0.02114336, 0.02233297,
0.02585631, 0.02768459, 0.03519235, 0.04204535, 0.04328161,
0.04672855, 0.05046191, 0.04619848, 0.04525853, 0.05381529,
0.04598861, 0.03947394, 0.04665006, 0.05586077, 0.05617728,
0.06495018, 0.06205172, 0.05665466, 0.06500615, 0.0632062,
0.06084328, 0.05851466, 0.05659229, 0.05159347, 0.0432977,
0.0474047, 0.04231723, 0.03613176, 0.03618391, 0.03591012,
0.03885674, 0.0402686, 0.03846423, 0.04534014, 0.04721458,
0.05130912, 0.05026281, 0.05394312, 0.05529349, 0.05949243,
0.05463304, 0.06195165, 0.06767606, 0.06880985, 0.07048996,
0.07078815, 0.07420767, 0.06773439, 0.0658441, 0.06470875,
0.06302349, 0.06456876, 0.06411282, 0.06216669, 0.067094,
0.07055075, 0.07254976, 0.07119253, 0.06173308, 0.05393352,
0.05681246, 0.05250643, 0.06099845, 0.0655544, 0.06977334,
0.06636514, 0.06177949, 0.06869908, 0.06719767, 0.06178738,
0.05915714, 0.06882277, 0.06756821, 0.06507994, 0.06489791,
0.06553941, 0.073123, 0.07576757, 0.06805446, 0.06063571,
0.05033801, 0.05206971, 0.05540306, 0.05249118, 0.05755587,
0.0586174, 0.05051288, 0.0564852, 0.05757284, 0.06358355,
0.06130082, 0.04925482, 0.03834472, 0.04163981, 0.04648316,
0.04457858, 0.04324626, 0.04328791, 0.04156207, 0.04818652,
0.04972634, 0.06024123, 0.06489556, 0.06255485, 0.06069815,
0.06466389, 0.07081163, 0.07895358, 0.0881782, 0.09374151,
0.08336506, 0.08764795, 0.09080174, 0.08808926, 0.08641158,
0.07811943, 0.06885318, 0.06479503, 0.06851185, 0.07382819,
0.07047903, 0.06658251, 0.07638379, 0.08667974, 0.08867918,
0.08245323, 0.08961866, 0.09905298, 0.0961908, 0.08562706,
0.0839014, 0.0849072, 0.08338395, 0.08783487, 0.09463609,
0.10332336, 0.11806497, 0.11220297, 0.11589097, 0.11678405])
test_sharp = eval_sharp(self.long_data, 5, 0.00035)
self.assertAlmostEqual(np.nanmean(expected_sharp), test_sharp)
self.assertTrue(np.allclose(self.long_data['sharp'].values, expected_sharp, equal_nan=True))
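        # Sharpe ratio is conventionally excess return over volatility; a
        # sketch, assuming the trailing arguments are the investment period
        # in years and the risk-free rate:
        #     sharpe = (annual_return - risk_free) / annual_volatility
        # as with volatility, the scalar result equals the nanmean of the
        # rolling series stored in the 'sharp' column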
def test_beta(self):
reference = self.test_data1
self.assertAlmostEqual(eval_beta(self.test_data2, reference, 'value'), -0.017148939)
self.assertAlmostEqual(eval_beta(self.test_data3, reference, 'value'), -0.042204233)
self.assertAlmostEqual(eval_beta(self.test_data4, reference, 'value'), -0.15652986)
self.assertAlmostEqual(eval_beta(self.test_data5, reference, 'value'), -0.049195532)
self.assertAlmostEqual(eval_beta(self.test_data6, reference, 'value'), -0.026995082)
self.assertAlmostEqual(eval_beta(self.test_data7, reference, 'value'), -0.01147809)
self.assertRaises(TypeError, eval_beta, [1, 2, 3], reference, 'value')
self.assertRaises(TypeError, eval_beta, self.test_data3, [1, 2, 3], 'value')
self.assertRaises(KeyError, eval_beta, self.test_data3, reference, 'not_found_value')
        # test beta calculation on long data series
expected_beta = np.array([np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
-0.04988841, -0.05127618, -0.04692104, -0.04272652, -0.04080598,
-0.0493347, -0.0460858, -0.0416761, -0.03691527, -0.03724924,
-0.03678865, -0.03987324, -0.03488321, -0.02567672, -0.02690303,
-0.03010128, -0.02437967, -0.02571932, -0.02455681, -0.02839811,
-0.03358653, -0.03396697, -0.03466321, -0.03050966, -0.0247583,
-0.01629325, -0.01880895, -0.01480403, -0.01348783, -0.00544294,
-0.00648176, -0.00467036, -0.01135331, -0.0156841, -0.02340763,
-0.02615705, -0.02730771, -0.02906174, -0.02860664, -0.02412914,
-0.02066416, -0.01744816, -0.02185133, -0.02145285, -0.02681765,
-0.02827694, -0.02394581, -0.02744096, -0.02778825, -0.02703065,
-0.03160023, -0.03615371, -0.03681072, -0.04265126, -0.04344738,
-0.04232421, -0.04705272, -0.04533344, -0.04605934, -0.05272737,
-0.05156463, -0.05134196, -0.04730733, -0.04425352, -0.03869831,
-0.04159571, -0.04223998, -0.04346747, -0.04229844, -0.04740093,
-0.04992507, -0.04621232, -0.04477644, -0.0486915, -0.04598224,
-0.04943463, -0.05006391, -0.05362256, -0.04994067, -0.05464769,
-0.05443275, -0.05513493, -0.05173594, -0.04500994, -0.04662891,
-0.03903505, -0.0419592, -0.04307773, -0.03925718, -0.03711574,
-0.03992631, -0.0433058, -0.04533641, -0.0461183, -0.05600344,
-0.05758377, -0.05959874, -0.05605942, -0.06002859, -0.06253002,
-0.06747014, -0.06427915, -0.05931947, -0.05769974, -0.04791515,
-0.05175088, -0.05748039, -0.05385232, -0.05072975, -0.05052637,
-0.05125567, -0.05005785, -0.05325104, -0.04977727, -0.04947867,
-0.05148544, -0.05739156, -0.05742069, -0.06047279, -0.0558414,
-0.06086126, -0.06265151, -0.06411129, -0.06828052, -0.06781762,
-0.07083409, -0.07211207, -0.06799162, -0.06913295, -0.06775162,
-0.0696265, -0.06678248, -0.06867502, -0.06581961, -0.07055823,
-0.06448184, -0.06097973, -0.05795587, -0.0618383, -0.06130145,
-0.06050652, -0.05936661, -0.05749424, -0.0499, -0.05050495,
-0.04962687, -0.05033439, -0.05070116, -0.05422009, -0.05369759,
-0.05548943, -0.05907353, -0.05933035, -0.05927918, -0.06227663,
-0.06011455, -0.05650432, -0.05828134, -0.05620949, -0.05715323,
-0.05482478, -0.05387113, -0.05095559, -0.05377999, -0.05334267,
-0.05220438, -0.04001521, -0.03892434, -0.03660782, -0.04282708,
-0.04324623, -0.04127048, -0.04227559, -0.04275226, -0.04347049,
-0.04125853, -0.03806295, -0.0330632, -0.03155531, -0.03277152,
-0.03304518, -0.03878731, -0.03830672, -0.03727434, -0.0370571,
-0.04509224, -0.04207632, -0.04116198, -0.04545179, -0.04584584,
-0.05287341, -0.05417433, -0.05175836, -0.05005509, -0.04268674,
-0.03442321, -0.03457309, -0.03613426, -0.03524391, -0.03629479,
-0.04361312, -0.02626705, -0.02406115, -0.03046384, -0.03181044,
-0.03375164, -0.03661673, -0.04520779, -0.04926951, -0.05726738,
-0.0584486, -0.06220608, -0.06800563, -0.06797431, -0.07562211,
-0.07481996, -0.07731229, -0.08413381, -0.09031826, -0.09691925,
-0.11018071, -0.11952675, -0.10826026, -0.11173895, -0.10756359,
-0.10775916, -0.11664559, -0.10505051, -0.10606547, -0.09855355,
-0.10004159, -0.10857084, -0.12209301, -0.11605758, -0.11105113,
-0.1155195, -0.11569505, -0.10513348, -0.09611072, -0.10719791,
-0.10843965, -0.11025856, -0.10247839, -0.10554044, -0.10927647,
-0.10645088, -0.09982498, -0.10542734, -0.09631372, -0.08229695])
test_beta_mean = eval_beta(self.long_data, self.long_bench, 'value')
test_beta_roll = self.long_data['beta'].values
self.assertAlmostEqual(test_beta_mean, np.nanmean(expected_beta))
self.assertTrue(np.allclose(test_beta_roll, expected_beta, equal_nan=True))
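        # Beta is conventionally the covariance of the portfolio returns
        # with the reference returns over the variance of the reference
        # returns; the 250 leading NaNs again suggest a 250-day rolling
        # window, so a sketch under those assumptions:
        #     ret = df['value'].pct_change()
        #     ref_ret = reference['value'].pct_change()
        #     beta = ret.rolling(250).cov(ref_ret) / ref_ret.rolling(250).var()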
def test_alpha(self):
reference = self.test_data1
self.assertAlmostEqual(eval_alpha(self.test_data2, 5, reference, 'value', 0.5), 11.63072977)
self.assertAlmostEqual(eval_alpha(self.test_data3, 5, reference, 'value', 0.5), 1.886590071)
self.assertAlmostEqual(eval_alpha(self.test_data4, 5, reference, 'value', 0.5), 6.827021872)
self.assertAlmostEqual(eval_alpha(self.test_data5, 5, reference, 'value', 0.92), -1.192265168)
self.assertAlmostEqual(eval_alpha(self.test_data6, 5, reference, 'value', 0.92), -1.437142359)
self.assertAlmostEqual(eval_alpha(self.test_data7, 5, reference, 'value', 0.92), -1.781311545)
        # test alpha calculation on long data series
expected_alpha = np.array([np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
-0.09418119, -0.11188463, -0.17938358, -0.15588172, -0.1462678,
-0.13089586, -0.10780125, -0.09102891, -0.03987585, -0.06075686,
-0.02459503, -0.04104284, -0.0444565, -0.04074585, 0.02191275,
0.02255955, -0.05583375, -0.05875539, -0.06055551, -0.09648245,
-0.07913737, -0.10627829, -0.12320965, -0.12368335, -0.1506743,
-0.15768033, -0.13638829, -0.13065298, -0.14537834, -0.127428,
-0.15504529, -0.18184636, -0.12652146, -0.09190138, -0.14847221,
-0.15840648, -0.1525789, -0.11859418, -0.14700954, -0.16295761,
-0.16051645, -0.10364859, -0.11961134, -0.10258267, -0.08090148,
-0.05727746, -0.0429945, -0.04672356, -0.03581408, -0.0439215,
-0.03429495, -0.0260362, -0.01075022, 0.04931808, 0.02779388,
0.03984083, 0.08311951, 0.08995566, 0.10522428, 0.16159058,
0.14238174, 0.14759783, 0.16257712, 0.158908, 0.11302115,
0.0909566, 0.08272888, 0.15261884, 0.10546376, 0.04990313,
-0.01284111, -0.02720704, 0.00454725, -0.03965491, -0.03818265,
-0.02186992, -0.06574751, -0.04846454, -0.05204211, -0.06316498,
-0.05095099, -0.08502656, -0.04681162, -0.02362027, -0.02205091,
-0.07706374, -0.10371841, -0.14434688, -0.14797935, -0.09055402,
-0.06739549, -0.08824959, -0.04855888, -0.02291244, 0.04027138,
0.09370505, 0.11472939, 0.10243593, 0.0921445, 0.07662648,
0.07946651, 0.05450718, 0.10497677, 0.09068334, 0.15462924,
0.14231034, 0.10544952, 0.09980256, 0.14035223, 0.14942974,
0.17624102, 0.19035477, 0.2500807, 0.30724652, 0.31768915,
0.35007521, 0.38412975, 0.34356521, 0.33614463, 0.41206165,
0.33999177, 0.28045963, 0.34076789, 0.42220356, 0.42314636,
0.50790423, 0.47713348, 0.42520169, 0.50488411, 0.48705211,
0.46252601, 0.44325578, 0.42640573, 0.37986783, 0.30652822,
0.34503393, 0.2999069, 0.24928617, 0.24730218, 0.24326897,
0.26657905, 0.27861168, 0.26392824, 0.32552649, 0.34177792,
0.37837011, 0.37025267, 0.4030612, 0.41339361, 0.45076809,
0.40383354, 0.47093422, 0.52505036, 0.53614256, 0.5500943,
0.55319293, 0.59021451, 0.52358459, 0.50605947, 0.49359168,
0.47895956, 0.49320243, 0.4908336, 0.47310767, 0.51821564,
0.55105932, 0.57291504, 0.5599809, 0.46868842, 0.39620087,
0.42086934, 0.38317217, 0.45934108, 0.50048866, 0.53941991,
0.50676751, 0.46500915, 0.52993663, 0.51668366, 0.46405428,
0.44100603, 0.52726147, 0.51565458, 0.49186248, 0.49001081,
0.49367648, 0.56422294, 0.58882785, 0.51334664, 0.44386256,
0.35056709, 0.36490029, 0.39205071, 0.3677061, 0.41134736,
0.42315067, 0.35356394, 0.40324562, 0.41340007, 0.46503322,
0.44355762, 0.34854314, 0.26412842, 0.28633753, 0.32335224,
0.30761141, 0.29709569, 0.29570487, 0.28000063, 0.32802547,
0.33967726, 0.42511212, 0.46252357, 0.44244974, 0.42152907,
0.45436727, 0.50482359, 0.57339198, 0.6573356, 0.70912003,
0.60328917, 0.6395092, 0.67015805, 0.64241557, 0.62779142,
0.55028063, 0.46448736, 0.43709245, 0.46777983, 0.51789439,
0.48594916, 0.4456216, 0.52008189, 0.60548684, 0.62792473,
0.56645031, 0.62766439, 0.71829315, 0.69481356, 0.59550329,
0.58133754, 0.59014148, 0.58026655, 0.61719273, 0.67373203,
0.75573056, 0.89501633, 0.8347253, 0.87964685, 0.89015835])
test_alpha_mean = eval_alpha(self.long_data, 100, self.long_bench, 'value')
test_alpha_roll = self.long_data['alpha'].values
self.assertAlmostEqual(test_alpha_mean, np.nanmean(expected_alpha))
self.assertTrue(np.allclose(test_alpha_roll, expected_alpha, equal_nan=True))
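        # Alpha is conventionally Jensen's alpha from the CAPM, i.e. the
        # realized return in excess of what beta alone would predict; a
        # sketch, assuming the final positional argument is the risk-free
        # rate:
        #     alpha = annual_return - (risk_free + beta * (benchmark_return - risk_free))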
def test_calmar(self):
"""test evaluate function eval_calmar()"""
pass
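        # Calmar ratio is conventionally the annualized return divided by
        # the max drawdown; a sketch of what eval_calmar() presumably
        # computes once this test is filled in:
        #     calmar = annual_return / eval_max_drawdown(looped_value)[0]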
def test_benchmark(self):
reference = self.test_data1
tr, yr = eval_benchmark(self.test_data2, reference, 'value')
self.assertAlmostEqual(tr, 0.19509091)
self.assertAlmostEqual(yr, 0.929154957)
tr, yr = eval_benchmark(self.test_data3, reference, 'value')
self.assertAlmostEqual(tr, 0.19509091)
self.assertAlmostEqual(yr, 0.929154957)
tr, yr = eval_benchmark(self.test_data4, reference, 'value')
self.assertAlmostEqual(tr, 0.19509091)
self.assertAlmostEqual(yr, 0.929154957)
tr, yr = eval_benchmark(self.test_data5, reference, 'value')
self.assertAlmostEqual(tr, 0.19509091)
self.assertAlmostEqual(yr, 0.929154957)
tr, yr = eval_benchmark(self.test_data6, reference, 'value')
self.assertAlmostEqual(tr, 0.19509091)
self.assertAlmostEqual(yr, 0.929154957)
tr, yr = eval_benchmark(self.test_data7, reference, 'value')
self.assertAlmostEqual(tr, 0.19509091)
self.assertAlmostEqual(yr, 0.929154957)
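        # tr and yr are presumably the reference's total return over the
        # period and its annualized equivalent, which is why they are
        # identical for every portfolio compared against the same reference;
        # a sketch, assuming calendar-day annualization:
        #     tr = ref['value'].iloc[-1] / ref['value'].iloc[0] - 1
        #     yr = (1 + tr) ** (365 / days_in_period) - 1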
def test_evaluate(self):
pass
class TestLoop(unittest.TestCase):
"""通过一个假设但精心设计的例子来测试loop_step以及loop方法的正确性"""
def setUp(self):
        # carefully designed mock share names, trading dates, and share prices
self.shares = ['share1', 'share2', 'share3', 'share4', 'share5', 'share6', 'share7']
self.dates = ['2016/07/01', '2016/07/04', '2016/07/05', '2016/07/06', '2016/07/07',
'2016/07/08', '2016/07/11', '2016/07/12', '2016/07/13', '2016/07/14',
'2016/07/15', '2016/07/18', '2016/07/19', '2016/07/20', '2016/07/21',
'2016/07/22', '2016/07/25', '2016/07/26', '2016/07/27', '2016/07/28',
'2016/07/29', '2016/08/01', '2016/08/02', '2016/08/03', '2016/08/04',
'2016/08/05', '2016/08/08', '2016/08/09', '2016/08/10', '2016/08/11',
'2016/08/12', '2016/08/15', '2016/08/16', '2016/08/17', '2016/08/18',
'2016/08/19', '2016/08/22', '2016/08/23', '2016/08/24', '2016/08/25',
'2016/08/26', '2016/08/29', '2016/08/30', '2016/08/31', '2016/09/01',
'2016/09/02', '2016/09/05', '2016/09/06', '2016/09/07', '2016/09/08',
'2016/09/09', '2016/09/12', '2016/09/13', '2016/09/14', '2016/09/15',
'2016/09/16', '2016/09/19', '2016/09/20', '2016/09/21', '2016/09/22',
'2016/09/23', '2016/09/26', '2016/09/27', '2016/09/28', '2016/09/29',
'2016/09/30', '2016/10/10', '2016/10/11', '2016/10/12', '2016/10/13',
'2016/10/14', '2016/10/17', '2016/10/18', '2016/10/19', '2016/10/20',
'2016/10/21', '2016/10/23', '2016/10/24', '2016/10/25', '2016/10/26',
'2016/10/27', '2016/10/29', '2016/10/30', '2016/10/31', '2016/11/01',
'2016/11/02', '2016/11/05', '2016/11/06', '2016/11/07', '2016/11/08',
'2016/11/09', '2016/11/12', '2016/11/13', '2016/11/14', '2016/11/15',
'2016/11/16', '2016/11/19', '2016/11/20', '2016/11/21', '2016/11/22']
self.dates = [pd.Timestamp(date_text) for date_text in self.dates]
self.prices = np.array([[5.35, 5.09, 5.03, 4.98, 4.50, 5.09, 4.75],
[5.66, 4.84, 5.21, 5.44, 4.35, 5.06, 4.48],
[5.79, 4.60, 5.02, 5.45, 4.07, 4.76, 4.56],
[5.56, 4.63, 5.50, 5.74, 3.88, 4.62, 4.62],
[5.88, 4.64, 5.07, 5.46, 3.74, 4.63, 4.62],
[6.25, 4.51, 5.11, 5.45, 3.98, 4.25, 4.59],
[5.93, 4.96, 5.15, 5.24, 4.08, 4.13, 4.33],
[6.39, 4.65, 5.02, 5.47, 4.00, 3.91, 3.88],
[6.31, 4.26, 5.10, 5.58, 4.28, 3.77, 3.47],
[5.86, 3.77, 5.24, 5.36, 4.01, 3.43, 3.51],
[5.61, 3.39, 4.93, 5.38, 4.14, 3.68, 3.83],
[5.31, 3.76, 4.96, 5.30, 4.49, 3.63, 3.67],
[5.40, 4.06, 5.40, 5.77, 4.49, 3.94, 3.79],
[5.03, 3.87, 5.74, 5.75, 4.46, 4.40, 4.18],
[5.38, 3.91, 5.53, 6.15, 4.13, 4.03, 4.02],
[5.79, 4.13, 5.79, 6.04, 3.79, 3.93, 4.41],
[6.27, 4.27, 5.68, 6.01, 4.23, 3.50, 4.65],
[6.59, 4.57, 5.90, 5.71, 4.57, 3.39, 4.89],
[6.91, 5.04, 5.75, 5.23, 4.92, 3.30, 4.41],
[6.71, 5.31, 6.11, 5.47, 5.28, 3.25, 4.66],
[6.33, 5.40, 5.77, 5.79, 5.67, 2.94, 4.37],
[6.07, 5.21, 5.85, 5.82, 6.00, 2.71, 4.58],
[5.98, 5.06, 5.61, 5.61, 5.89, 2.55, 4.76],
[6.46, 4.69, 5.75, 5.31, 5.55, 2.21, 4.37],
[6.95, 5.12, 5.50, 5.24, 5.39, 2.29, 4.16],
[6.77, 5.27, 5.14, 5.41, 5.26, 2.21, 4.02],
[6.70, 5.72, 5.31, 5.60, 5.31, 2.04, 3.77],
[6.28, 6.10, 5.68, 5.28, 5.22, 2.39, 3.38],
[6.61, 6.27, 5.73, 4.99, 4.90, 2.30, 3.07],
[6.25, 6.49, 6.04, 5.09, 4.57, 2.41, 2.90],
[6.47, 6.16, 6.27, 5.39, 4.96, 2.40, 2.50],
[6.45, 6.26, 6.60, 5.58, 4.82, 2.79, 2.76],
[6.88, 6.39, 6.10, 5.33, 4.39, 2.67, 2.29],
[7.00, 6.58, 6.25, 5.48, 4.63, 2.27, 2.17],
[6.59, 6.20, 6.73, 5.10, 5.05, 2.09, 1.84],
[6.59, 5.70, 6.91, 5.39, 4.68, 2.55, 1.83],
[6.64, 5.20, 7.01, 5.30, 5.02, 2.22, 2.21],
[6.38, 5.37, 7.36, 5.04, 4.84, 2.59, 2.00],
[6.10, 5.40, 7.72, 5.51, 4.60, 2.59, 1.76],
[6.35, 5.22, 7.68, 5.43, 4.66, 2.95, 1.27],
[6.52, 5.38, 7.62, 5.23, 4.41, 2.69, 1.40],
[6.87, 5.53, 7.74, 4.99, 4.87, 2.20, 1.11],
[6.84, 6.03, 7.53, 5.43, 4.42, 2.69, 1.60],
[7.09, 5.77, 7.46, 5.40, 4.08, 2.65, 1.23],
[6.88, 5.66, 7.84, 5.60, 4.16, 2.63, 1.59],
[6.84, 6.08, 8.11, 5.66, 4.10, 2.14, 1.50],
[6.98, 5.62, 8.04, 6.01, 4.43, 2.39, 1.80],
[7.02, 5.63, 7.65, 5.64, 4.07, 1.95, 1.55],
[7.13, 6.11, 7.52, 5.67, 3.97, 2.32, 1.35],
[7.59, 6.03, 7.67, 5.30, 4.16, 2.69, 1.51],
[7.61, 6.27, 7.47, 4.91, 4.12, 2.51, 1.08],
[7.21, 6.28, 7.44, 5.37, 4.04, 2.62, 1.06],
[7.48, 6.52, 7.59, 5.75, 3.84, 2.16, 1.43],
[7.66, 7.00, 7.94, 6.08, 3.46, 2.35, 1.43],
[7.51, 7.34, 8.25, 6.58, 3.18, 2.31, 1.74],
[7.12, 7.34, 7.77, 6.78, 3.10, 1.96, 1.44],
[6.97, 7.68, 8.03, 7.20, 3.55, 2.35, 1.83],
[6.67, 8.09, 7.87, 7.65, 3.66, 2.58, 1.71],
[6.20, 7.68, 7.58, 8.00, 3.66, 2.40, 2.12],
[6.34, 7.58, 7.33, 7.92, 3.29, 2.20, 2.45],
[6.22, 7.46, 7.22, 8.30, 2.80, 2.31, 2.85],
[5.98, 7.59, 6.86, 8.46, 2.88, 2.16, 2.79],
[6.37, 7.19, 7.18, 7.99, 3.04, 2.16, 2.91],
[6.56, 7.40, 7.54, 8.19, 3.45, 2.20, 3.26],
[6.26, 7.48, 7.24, 8.61, 3.88, 1.73, 3.14],
[6.69, 7.93, 6.85, 8.66, 3.58, 1.93, 3.53],
[7.13, 8.23, 6.60, 8.91, 3.60, 2.25, 3.65],
[6.83, 8.35, 6.65, 9.08, 3.97, 2.69, 3.69],
[7.31, 8.44, 6.74, 9.34, 4.05, 2.59, 3.50],
[7.43, 8.35, 7.19, 8.96, 4.40, 2.14, 3.25],
[7.54, 8.58, 7.14, 8.98, 4.06, 1.68, 3.64],
[7.18, 8.82, 6.88, 8.50, 3.60, 1.98, 4.00],
[7.21, 9.09, 7.14, 8.65, 3.61, 2.14, 3.63],
[7.45, 9.02, 7.30, 8.94, 4.10, 1.89, 3.78],
[7.37, 8.87, 6.95, 8.63, 3.74, 1.97, 3.42],
[6.88, 9.22, 7.02, 8.65, 4.02, 1.99, 3.76],
[7.08, 9.04, 7.38, 8.40, 3.95, 2.37, 3.62],
[6.75, 8.60, 7.50, 8.38, 3.81, 2.14, 3.67],
[6.60, 8.48, 7.60, 8.23, 3.71, 2.35, 3.61],
[6.21, 8.71, 7.15, 8.04, 3.94, 1.86, 3.39],
[6.36, 8.79, 7.30, 7.91, 4.43, 2.14, 3.43],
[6.82, 8.93, 7.80, 7.57, 4.07, 2.39, 3.33],
[6.45, 9.36, 8.15, 7.73, 4.04, 2.53, 3.03],
[6.85, 9.68, 8.40, 7.74, 4.34, 2.47, 3.28],
[6.48, 10.16, 8.87, 8.07, 4.80, 2.93, 3.46],
[6.10, 10.56, 8.53, 7.99, 5.18, 3.09, 3.25],
[5.64, 10.63, 8.94, 7.92, 4.90, 2.93, 2.95],
[6.01, 10.55, 8.52, 8.40, 5.40, 3.22, 2.87],
[6.21, 10.65, 8.80, 8.80, 5.73, 3.06, 2.63],
[6.61, 10.55, 8.92, 8.47, 5.62, 2.90, 2.40],
[7.02, 10.19, 9.20, 8.07, 5.20, 2.68, 2.53],
[7.04, 10.48, 8.71, 7.87, 4.85, 2.46, 2.96],
[6.77, 10.36, 8.25, 8.02, 5.18, 2.41, 3.26],
[7.09, 10.03, 8.19, 8.39, 4.72, 2.74, 2.97],
[6.65, 10.24, 7.80, 8.69, 4.62, 3.15, 3.16],
[7.07, 10.01, 7.69, 8.81, 4.55, 3.40, 3.58],
[6.80, 10.14, 7.23, 8.99, 4.37, 3.82, 3.23],
[6.79, 10.31, 6.98, 9.10, 4.26, 4.02, 3.62],
[6.48, 9.88, 7.07, 8.90, 4.25, 3.76, 3.13],
[6.39, 10.05, 6.95, 8.87, 4.59, 4.10, 2.93]])
        # carefully designed mock PT position-target signals:
self.pt_signals = np.array([[0.000, 0.000, 0.000, 0.000, 0.250, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.250, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.250, 0.100, 0.150],
[0.200, 0.200, 0.000, 0.000, 0.250, 0.100, 0.150],
[0.200, 0.200, 0.100, 0.000, 0.250, 0.100, 0.150],
[0.200, 0.200, 0.100, 0.000, 0.062, 0.100, 0.150],
[0.200, 0.200, 0.100, 0.000, 0.062, 0.100, 0.150],
[0.200, 0.200, 0.100, 0.000, 0.062, 0.100, 0.150],
[0.200, 0.200, 0.100, 0.000, 0.062, 0.100, 0.150],
[0.200, 0.200, 0.100, 0.000, 0.062, 0.100, 0.150],
[0.200, 0.200, 0.100, 0.000, 0.062, 0.100, 0.150],
[0.200, 0.200, 0.100, 0.000, 0.062, 0.100, 0.150],
[0.200, 0.200, 0.100, 0.000, 0.062, 0.100, 0.150],
[0.133, 0.200, 0.100, 0.000, 0.062, 0.100, 0.150],
[0.133, 0.200, 0.100, 0.000, 0.062, 0.100, 0.150],
[0.133, 0.200, 0.100, 0.000, 0.062, 0.100, 0.150],
[0.133, 0.200, 0.100, 0.000, 0.062, 0.100, 0.150],
[0.133, 0.200, 0.050, 0.000, 0.062, 0.100, 0.150],
[0.133, 0.200, 0.050, 0.000, 0.062, 0.100, 0.150],
[0.133, 0.200, 0.050, 0.000, 0.062, 0.100, 0.150],
[0.133, 0.200, 0.050, 0.000, 0.062, 0.100, 0.150],
[0.133, 0.200, 0.050, 0.000, 0.062, 0.100, 0.000],
[0.133, 0.200, 0.050, 0.000, 0.262, 0.100, 0.000],
[0.133, 0.200, 0.050, 0.000, 0.262, 0.100, 0.000],
[0.133, 0.200, 0.050, 0.000, 0.262, 0.100, 0.000],
[0.066, 0.200, 0.050, 0.150, 0.262, 0.100, 0.000],
[0.066, 0.200, 0.050, 0.150, 0.262, 0.100, 0.000],
[0.066, 0.200, 0.050, 0.150, 0.262, 0.100, 0.000],
[0.066, 0.200, 0.050, 0.150, 0.262, 0.100, 0.000],
[0.066, 0.200, 0.050, 0.150, 0.262, 0.100, 0.000],
[0.066, 0.200, 0.250, 0.150, 0.000, 0.300, 0.000],
[0.386, 0.136, 0.170, 0.102, 0.000, 0.204, 0.000],
[0.386, 0.136, 0.170, 0.102, 0.000, 0.204, 0.000],
[0.386, 0.136, 0.170, 0.102, 0.000, 0.204, 0.000],
[0.386, 0.136, 0.170, 0.102, 0.000, 0.204, 0.000],
[0.386, 0.136, 0.170, 0.102, 0.000, 0.204, 0.000],
[0.386, 0.136, 0.170, 0.102, 0.000, 0.204, 0.000],
[0.386, 0.136, 0.170, 0.102, 0.000, 0.204, 0.000],
[0.386, 0.136, 0.170, 0.102, 0.000, 0.204, 0.000],
[0.386, 0.136, 0.170, 0.102, 0.000, 0.204, 0.000],
[0.386, 0.136, 0.170, 0.102, 0.000, 0.204, 0.000],
[0.386, 0.136, 0.170, 0.102, 0.000, 0.204, 0.000],
[0.460, 0.119, 0.149, 0.089, 0.000, 0.179, 0.000],
[0.460, 0.119, 0.149, 0.089, 0.000, 0.179, 0.000],
[0.460, 0.119, 0.149, 0.089, 0.000, 0.179, 0.000],
[0.446, 0.116, 0.145, 0.087, 0.000, 0.087, 0.116],
[0.446, 0.116, 0.145, 0.087, 0.000, 0.087, 0.116],
[0.446, 0.116, 0.145, 0.087, 0.000, 0.087, 0.116],
[0.446, 0.116, 0.145, 0.087, 0.000, 0.087, 0.116],
[0.446, 0.116, 0.145, 0.087, 0.000, 0.087, 0.116],
[0.400, 0.208, 0.130, 0.078, 0.000, 0.078, 0.104],
[0.400, 0.208, 0.130, 0.078, 0.000, 0.078, 0.104],
[0.400, 0.208, 0.130, 0.078, 0.000, 0.078, 0.104],
[0.400, 0.208, 0.130, 0.078, 0.000, 0.078, 0.104],
[0.400, 0.208, 0.130, 0.078, 0.000, 0.078, 0.104],
[0.400, 0.208, 0.130, 0.078, 0.000, 0.078, 0.104],
[0.400, 0.208, 0.130, 0.078, 0.000, 0.078, 0.104],
[0.400, 0.208, 0.130, 0.078, 0.000, 0.078, 0.104],
[0.400, 0.208, 0.130, 0.078, 0.000, 0.078, 0.104],
[0.400, 0.208, 0.130, 0.078, 0.000, 0.078, 0.104],
[0.370, 0.193, 0.120, 0.072, 0.072, 0.072, 0.096],
[0.000, 0.222, 0.138, 0.222, 0.083, 0.222, 0.111],
[0.000, 0.222, 0.138, 0.222, 0.083, 0.222, 0.111],
[0.121, 0.195, 0.121, 0.195, 0.073, 0.195, 0.097],
[0.121, 0.195, 0.121, 0.195, 0.073, 0.195, 0.097],
[0.121, 0.195, 0.121, 0.195, 0.073, 0.195, 0.097],
[0.121, 0.195, 0.121, 0.195, 0.073, 0.195, 0.097],
[0.121, 0.195, 0.121, 0.195, 0.073, 0.195, 0.097],
[0.121, 0.195, 0.121, 0.195, 0.073, 0.195, 0.097],
[0.121, 0.195, 0.121, 0.195, 0.073, 0.195, 0.097],
[0.121, 0.195, 0.121, 0.195, 0.073, 0.195, 0.097],
[0.200, 0.320, 0.200, 0.000, 0.120, 0.000, 0.160],
[0.200, 0.320, 0.200, 0.000, 0.120, 0.000, 0.160],
[0.200, 0.320, 0.200, 0.000, 0.120, 0.000, 0.160],
[0.200, 0.320, 0.200, 0.000, 0.120, 0.000, 0.160],
[0.200, 0.320, 0.200, 0.000, 0.120, 0.000, 0.160],
[0.200, 0.320, 0.200, 0.000, 0.120, 0.000, 0.160],
[0.200, 0.320, 0.200, 0.000, 0.120, 0.000, 0.160],
[0.200, 0.320, 0.200, 0.000, 0.120, 0.000, 0.160],
[0.047, 0.380, 0.238, 0.000, 0.142, 0.000, 0.190],
[0.047, 0.380, 0.238, 0.000, 0.142, 0.000, 0.190],
[0.043, 0.434, 0.217, 0.000, 0.130, 0.000, 0.173],
[0.043, 0.434, 0.217, 0.000, 0.130, 0.000, 0.173],
[0.043, 0.434, 0.217, 0.000, 0.130, 0.000, 0.173],
[0.043, 0.434, 0.217, 0.000, 0.130, 0.000, 0.173],
[0.043, 0.434, 0.217, 0.000, 0.130, 0.000, 0.173],
[0.043, 0.434, 0.217, 0.000, 0.130, 0.000, 0.173],
[0.045, 0.454, 0.227, 0.000, 0.000, 0.000, 0.272],
[0.045, 0.454, 0.227, 0.000, 0.000, 0.000, 0.272],
[0.050, 0.000, 0.250, 0.000, 0.000, 0.000, 0.300],
[0.050, 0.000, 0.250, 0.000, 0.000, 0.000, 0.300],
[0.050, 0.000, 0.250, 0.000, 0.000, 0.000, 0.300],
[0.050, 0.000, 0.250, 0.000, 0.000, 0.000, 0.300],
[0.050, 0.000, 0.250, 0.000, 0.000, 0.000, 0.300],
[0.050, 0.000, 0.250, 0.000, 0.000, 0.000, 0.300],
[0.050, 0.000, 0.250, 0.000, 0.000, 0.000, 0.300],
[0.050, 0.000, 0.250, 0.000, 0.000, 0.000, 0.300],
[0.000, 0.000, 0.400, 0.000, 0.000, 0.000, 0.300],
[0.000, 0.000, 0.400, 0.000, 0.000, 0.000, 0.300],
[0.000, 0.000, 0.400, 0.000, 0.000, 0.000, 0.300]])
        # carefully designed mock PS proportion trading signals, closely mirroring the mock PT signals
self.ps_signals = np.array([[0.000, 0.000, 0.000, 0.000, 0.250, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.100, 0.150],
[0.200, 0.200, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.100, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, -0.750, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[-0.333, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, -0.500, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, -1.000],
[0.000, 0.000, 0.000, 0.000, 0.200, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[-0.500, 0.000, 0.000, 0.150, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.200, 0.000, -1.000, 0.200, 0.000],
[0.500, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.200, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, -0.500, 0.200],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.200, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.150, 0.000, 0.000],
[-1.000, 0.000, 0.000, 0.250, 0.000, 0.250, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.250, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, -1.000, 0.000, -1.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[-0.800, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.100, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, -1.000, 0.000, 0.100],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, -1.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[-1.000, 0.000, 0.150, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000]])
        # carefully designed mock VS share-volume trading signals, similar to the mock PS signals
self.vs_signals = np.array([[000, 000, 000, 000, 500, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 300, 300],
[400, 400, 000, 000, 000, 000, 000],
[000, 000, 250, 000, 000, 000, 000],
[000, 000, 000, 000, -400, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[-200, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, -200, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, -300],
[000, 000, 000, 000, 500, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[-200, 000, 000, 300, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 400, 000, -300, 600, 000],
[500, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[600, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, -400, 600],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 500, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 300, 000, 000],
[-500, 000, 000, 500, 000, 200, 000],
[000, 000, 000, 000, 000, 000, 000],
[500, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, -700, 000, -600, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[-400, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 300, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, -600, 000, 300],
[000, 000, 000, 000, 000, 000, 000],
[000, -300, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[-200, 000, 700, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000]])
        # carefully designed mock multi-price trading signals, simulating 50 trading days of operations on three shares
self.multi_shares = ['000010', '000030', '000039']
self.multi_dates = ['2016/07/01', '2016/07/04', '2016/07/05', '2016/07/06', '2016/07/07',
'2016/07/08', '2016/07/11', '2016/07/12', '2016/07/13', '2016/07/14',
'2016/07/15', '2016/07/18', '2016/07/19', '2016/07/20', '2016/07/21',
'2016/07/22', '2016/07/25', '2016/07/26', '2016/07/27', '2016/07/28',
'2016/07/29', '2016/08/01', '2016/08/02', '2016/08/03', '2016/08/04',
'2016/08/05', '2016/08/08', '2016/08/09', '2016/08/10', '2016/08/11',
'2016/08/12', '2016/08/15', '2016/08/16', '2016/08/17', '2016/08/18',
'2016/08/19', '2016/08/22', '2016/08/23', '2016/08/24', '2016/08/25',
'2016/08/26', '2016/08/29', '2016/08/30', '2016/08/31', '2016/09/01',
'2016/09/02', '2016/09/05', '2016/09/06', '2016/09/07', '2016/09/08']
self.multi_dates = [pd.Timestamp(date_text) for date_text in self.multi_dates]
        # the trading prices operated on include the open, high, and close prices
self.multi_prices_open = np.array([[10.02, 9.88, 7.26],
[10.00, 9.88, 7.00],
[9.98, 9.89, 6.88],
[9.97, 9.75, 6.91],
[9.99, 9.74, np.nan],
[10.01, 9.80, 6.81],
[10.04, 9.62, 6.63],
[10.06, 9.65, 6.45],
[10.06, 9.58, 6.16],
[10.11, 9.67, 6.24],
[10.11, 9.81, 5.96],
[10.07, 9.80, 5.97],
[10.06, 10.00, 5.96],
[10.09, 9.95, 6.20],
[10.03, 10.10, 6.35],
[10.02, 10.06, 6.11],
[10.06, 10.14, 6.37],
[10.08, 9.90, 5.58],
[9.99, 10.20, 5.65],
[10.00, 10.29, 5.65],
[10.03, 9.86, 5.19],
[10.02, 9.48, 5.42],
[10.06, 10.01, 6.30],
[10.03, 10.24, 6.15],
[9.97, 10.26, 6.05],
[9.94, 10.24, 5.89],
[9.83, 10.12, 5.22],
[9.78, 10.65, 5.20],
[9.77, 10.64, 5.07],
[9.91, 10.56, 6.04],
[9.92, 10.42, 6.12],
[9.97, 10.43, 5.85],
[9.91, 10.29, 5.67],
[9.90, 10.30, 6.02],
[9.88, 10.44, 6.04],
[9.91, 10.60, 7.07],
[9.63, 10.67, 7.64],
[9.64, 10.46, 7.99],
[9.57, 10.39, 7.59],
[9.55, 10.90, 8.73],
[9.58, 11.01, 8.72],
[9.61, 11.01, 8.97],
[9.62, np.nan, 8.58],
[9.55, np.nan, 8.71],
[9.57, 10.82, 8.77],
[9.61, 11.02, 8.40],
[9.63, 10.96, 7.95],
[9.64, 11.55, 7.76],
[9.61, 11.74, 8.25],
[9.56, 11.80, 7.51]])
self.multi_prices_high = np.array([[10.07, 9.91, 7.41],
[10.00, 10.04, 7.31],
[10.00, 9.93, 7.14],
[10.00, 10.04, 7.00],
[10.03, 9.84, np.nan],
[10.03, 9.88, 6.82],
[10.04, 9.99, 6.96],
[10.09, 9.70, 6.85],
[10.10, 9.67, 6.50],
[10.14, 9.71, 6.34],
[10.11, 9.85, 6.04],
[10.10, 9.90, 6.02],
[10.09, 10.00, 6.12],
[10.09, 10.20, 6.38],
[10.10, 10.11, 6.43],
[10.05, 10.18, 6.46],
[10.07, 10.21, 6.43],
[10.09, 10.26, 6.27],
[10.10, 10.38, 5.77],
[10.00, 10.47, 6.01],
[10.04, 10.42, 5.67],
[10.04, 10.07, 5.67],
[10.06, 10.24, 6.35],
[10.09, 10.27, 6.32],
[10.05, 10.38, 6.43],
[9.97, 10.43, 6.36],
[9.96, 10.39, 5.79],
[9.86, 10.65, 5.47],
[9.77, 10.84, 5.65],
[9.92, 10.65, 6.04],
[9.94, 10.73, 6.14],
[9.97, 10.63, 6.23],
[9.97, 10.51, 5.83],
[9.92, 10.35, 6.25],
[9.92, 10.46, 6.27],
[9.92, 10.63, 7.12],
[9.93, 10.74, 7.82],
[9.64, 10.76, 8.14],
[9.58, 10.54, 8.27],
[9.60, 11.02, 8.92],
[9.58, 11.12, 8.76],
[9.62, 11.17, 9.15],
[9.62, np.nan, 8.90],
[9.64, np.nan, 9.01],
[9.59, 10.92, 9.16],
[9.62, 11.15, 9.00],
[9.63, 11.11, 8.27],
[9.70, 11.55, 7.99],
[9.66, 11.95, 8.33],
[9.64, 11.93, 8.25]])
self.multi_prices_close = np.array([[10.04, 9.68, 6.64],
[10.00, 9.87, 7.26],
[10.00, 9.86, 7.03],
[9.99, 9.87, 6.87],
[9.97, 9.79, np.nan],
[9.99, 9.82, 6.64],
[10.03, 9.80, 6.85],
[10.03, 9.66, 6.70],
[10.06, 9.62, 6.39],
[10.06, 9.58, 6.22],
[10.11, 9.69, 5.92],
[10.09, 9.78, 5.91],
[10.07, 9.75, 6.11],
[10.06, 9.96, 5.91],
[10.09, 9.90, 6.23],
[10.03, 10.04, 6.28],
[10.03, 10.06, 6.28],
[10.06, 10.08, 6.27],
[10.08, 10.24, 5.70],
[10.00, 10.24, 5.56],
[9.99, 10.24, 5.67],
[10.03, 9.86, 5.16],
[10.03, 10.13, 5.69],
[10.06, 10.12, 6.32],
[10.03, 10.10, 6.14],
[9.97, 10.25, 6.25],
[9.94, 10.24, 5.79],
[9.83, 10.22, 5.26],
[9.77, 10.75, 5.05],
[9.84, 10.64, 5.45],
[9.91, 10.56, 6.06],
[9.93, 10.60, 6.21],
[9.96, 10.42, 5.69],
[9.91, 10.25, 5.46],
[9.91, 10.24, 6.02],
[9.88, 10.49, 6.69],
[9.91, 10.57, 7.43],
[9.64, 10.63, 7.72],
[9.56, 10.48, 8.16],
[9.57, 10.37, 7.83],
[9.55, 10.96, 8.70],
[9.57, 11.02, 8.71],
[9.61, np.nan, 8.88],
[9.61, np.nan, 8.54],
[9.55, 10.88, 8.87],
[9.57, 10.87, 8.87],
[9.63, 11.01, 8.18],
[9.64, 11.01, 7.80],
[9.65, 11.58, 7.97],
[9.62, 11.80, 8.25]])
        # the trading signals come in three groups, applied to the open, high, and close prices respectively
        # the key point here is the handling of the share delivery period: when it is non-zero, delivery settles in units of trading days
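        # (e.g. with a 1-day delivery period, shares bought against day T's
        # price are presumably only available for sale from day T+1, and the
        # cash proceeds of a sale likewise only become available one trading
        # day later)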
self.multi_signals = []
        # the first group of multi_signals applies to the open price
self.multi_signals.append(
pd.DataFrame(np.array([[0.000, 0.000, 0.000],
[0.000, -0.500, 0.000],
[0.000, -0.500, 0.000],
[0.000, 0.000, 0.000],
[0.150, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.300, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.300],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.350, 0.250],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.100, 0.000, 0.350],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.200, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.050, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000]]),
columns=self.multi_shares,
index=self.multi_dates
)
)
        # the second group applies to the high price
self.multi_signals.append(
pd.DataFrame(np.array([[0.000, 0.150, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, -0.200, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.200],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000]]),
columns=self.multi_shares,
index=self.multi_dates
)
)
        # the third group applies to the close price
self.multi_signals.append(
pd.DataFrame(np.array([[0.000, 0.200, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[-0.500, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, -0.800],
[0.000, 0.000, 0.000],
[0.000, -1.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[-0.750, 0.000, 0.000],
[0.000, 0.000, -0.850],
[0.000, 0.000, 0.000],
[0.000, -0.700, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, -1.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[-1.000, 0.000, 0.000],
[0.000, -1.000, 0.000],
[0.000, 0.000, 0.000]]),
columns=self.multi_shares,
index=self.multi_dates
)
)
        # the backtest also needs three groups of prices: open, high, and close
self.multi_histories = []
        # the first group of multi_histories holds the open prices
self.multi_histories.append(
pd.DataFrame(self.multi_prices_open,
columns=self.multi_shares,
index=self.multi_dates
)
)
        # the second group holds the high prices
self.multi_histories.append(
pd.DataFrame(self.multi_prices_high,
columns=self.multi_shares,
index=self.multi_dates
)
)
        # the third group holds the close prices
self.multi_histories.append(
pd.DataFrame(self.multi_prices_close,
columns=self.multi_shares,
index=self.multi_dates
)
)
        # set up backtest parameters
self.cash = qt.CashPlan(['2016/07/01', '2016/08/12', '2016/09/23'], [10000, 10000, 10000])
self.rate = qt.Cost(buy_fix=0,
sell_fix=0,
buy_rate=0,
sell_rate=0,
buy_min=0,
sell_min=0,
slipage=0)
self.rate2 = qt.Cost(buy_fix=0,
sell_fix=0,
buy_rate=0,
sell_rate=0,
buy_min=10,
sell_min=5,
slipage=0)
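        # rate is a zero-cost baseline, while rate2 presumably exercises the
        # minimum-fee branch: with all rates at zero, buy_min=10 and
        # sell_min=5 should put a fixed floor on the fee charged per
        # transaction (e.g. a 100-share buy at 10.00 would still incur a fee
        # of 10 under rate2)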
self.pt_signal_hp = dataframe_to_hp(
pd.DataFrame(self.pt_signals, index=self.dates, columns=self.shares),
htypes='close'
)
self.ps_signal_hp = dataframe_to_hp(
pd.DataFrame(self.ps_signals, index=self.dates, columns=self.shares),
htypes='close'
)
self.vs_signal_hp = dataframe_to_hp(
pd.DataFrame(self.vs_signals, index=self.dates, columns=self.shares),
htypes='close'
)
self.multi_signal_hp = stack_dataframes(
self.multi_signals,
stack_along='htypes',
htypes='open, high, close'
)
self.history_list = dataframe_to_hp(
pd.DataFrame(self.prices, index=self.dates, columns=self.shares),
htypes='close'
)
self.multi_history_list = stack_dataframes(
self.multi_histories,
stack_along='htypes',
htypes='open, high, close'
)
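        # dataframe_to_hp presumably wraps a single-htype DataFrame into a
        # HistoryPanel, while stack_dataframes stacks one DataFrame per
        # htype along the htypes axis, so that signals and prices line up as
        # (shares, dates, htypes) panels for the loop backtest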
        # expected backtest results for the mock PT signals
        # PT signals, sell before buy, delivery period of 0
self.pt_res_sb00 = np.array(
[[0.0000, 0.0000, 0.0000, 0.0000, 555.5556, 0.0000, 0.0000, 7500.0000, 0.0000, 10000.0000],
[0.0000, 0.0000, 0.0000, 0.0000, 555.5556, 0.0000, 0.0000, 7500.0000, 0.0000, 9916.6667],
[0.0000, 0.0000, 0.0000, 0.0000, 555.5556, 0.0000, 321.0892, 6035.8333, 0.0000, 9761.1111],
[348.0151, 417.9188, 0.0000, 0.0000, 555.5556, 0.0000, 321.0892, 2165.9050, 0.0000, 9674.8209],
[348.0151, 417.9188, 0.0000, 0.0000, 555.5556, 0.0000, 321.0892, 2165.9050, 0.0000, 9712.5872],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9910.7240],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9919.3782],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9793.0692],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9513.8217],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9123.5935],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9000.5995],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9053.4865],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9248.7142],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9161.1372],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9197.3369],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9504.6981],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9875.2461],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 10241.5400],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 10449.2398],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 10628.3269],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 10500.7893],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 0.0000, 5233.1396, 0.0000, 10449.2776],
[348.0151, 417.9188, 0.0000, 0.0000, 459.8694, 0.0000, 0.0000, 3433.8551, 0.0000, 10338.2857],
[348.0151, 417.9188, 0.0000, 0.0000, 459.8694, 0.0000, 0.0000, 3433.8551, 0.0000, 10194.3474],
[348.0151, 417.9188, 0.0000, 0.0000, 459.8694, 0.0000, 0.0000, 3433.8551, 0.0000, 10471.0008],
[101.4983, 417.9188, 0.0000, 288.6672, 459.8694, 0.0000, 0.0000, 3541.0848, 0.0000, 10411.2629],
[101.4983, 417.9188, 0.0000, 288.6672, 459.8694, 0.0000, 0.0000, 3541.0848, 0.0000, 10670.0618],
[101.4983, 417.9188, 0.0000, 288.6672, 459.8694, 0.0000, 0.0000, 3541.0848, 0.0000, 10652.4799],
[101.4983, 417.9188, 0.0000, 288.6672, 459.8694, 0.0000, 0.0000, 3541.0848, 0.0000, 10526.1488],
[101.4983, 417.9188, 0.0000, 288.6672, 459.8694, 0.0000, 0.0000, 3541.0848, 0.0000, 10458.6614],
[101.4983, 417.9188, 821.7315, 288.6672, 0.0000, 2576.1284, 0.0000, 4487.0722, 0.0000, 20609.0270],
[1216.3282, 417.9188, 821.7315, 288.6672, 0.0000, 1607.1030, 0.0000, 0.0000, 0.0000, 21979.4972],
[1216.3282, 417.9188, 821.7315, 288.6672, 0.0000, 1607.1030, 0.0000, 0.0000, 0.0000, 21880.9628],
[1216.3282, 417.9188, 821.7315, 288.6672, 0.0000, 1607.1030, 0.0000, 0.0000, 0.0000, 21630.0454],
[1216.3282, 417.9188, 821.7315, 288.6672, 0.0000, 1607.1030, 0.0000, 0.0000, 0.0000, 20968.0007],
[1216.3282, 417.9188, 821.7315, 288.6672, 0.0000, 1607.1030, 0.0000, 0.0000, 0.0000, 21729.9339],
[1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 1607.1030, 0.0000, 2172.0393, 0.0000, 21107.6400],
[1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 1607.1030, 0.0000, 2172.0393, 0.0000, 21561.1745],
[1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 1607.1030, 0.0000, 2172.0393, 0.0000, 21553.0916],
[1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 1607.1030, 0.0000, 2172.0393, 0.0000, 22316.9366],
[1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 1607.1030, 0.0000, 2172.0393, 0.0000, 22084.2862],
[1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 1607.1030, 0.0000, 2172.0393, 0.0000, 21777.3543],
[1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 1607.1030, 0.0000, 2172.0393, 0.0000, 22756.8225],
[1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 1607.1030, 0.0000, 2172.0393, 0.0000, 22843.4697],
[1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 1607.1030, 0.0000, 2172.0393, 0.0000, 22762.1766],
[1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 1607.1030, 1448.0262, 0.0000, 0.0000, 22257.0973],
[1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 1607.1030, 1448.0262, 0.0000, 0.0000, 23136.5259],
[1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 1607.1030, 1448.0262, 0.0000, 0.0000, 21813.7852],
[1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 1607.1030, 1448.0262, 0.0000, 0.0000, 22395.3204],
[1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 1607.1030, 1448.0262, 0.0000, 0.0000, 23717.6858],
[1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 1607.1030, 1448.0262, 0.0000, 0.0000, 22715.4263],
[1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 669.7975, 1448.0262, 2455.7405, 0.0000, 22498.3254],
[1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 669.7975, 1448.0262, 2455.7405, 0.0000, 23341.1733],
[1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 669.7975, 1448.0262, 2455.7405, 0.0000, 24162.3941],
[1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 669.7975, 1448.0262, 2455.7405, 0.0000, 24847.1508],
[1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 669.7975, 1448.0262, 2455.7405, 0.0000, 23515.9755],
[1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 669.7975, 1448.0262, 2455.7405, 0.0000, 24555.8997],
[1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 669.7975, 1448.0262, 2455.7405, 0.0000, 24390.6372],
[1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 669.7975, 1448.0262, 2455.7405, 0.0000, 24073.3309],
[1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 669.7975, 1448.0262, 2455.7405, 0.0000, 24394.6500],
[2076.3314, 903.0334, 511.8829, 288.6672, 0.0000, 669.7975, 1448.0262, 3487.5655, 0.0000, 34904.8150],
[0.0000, 903.0334, 511.8829, 897.4061, 0.0000, 3514.8404, 1448.0262, 4608.8037, 0.0000, 34198.4475],
[0.0000, 903.0334, 511.8829, 897.4061, 0.0000, 3514.8404, 1448.0262, 4608.8037, 0.0000, 33753.0190],
[644.7274, 903.0334, 511.8829, 897.4061, 0.0000, 3514.8404, 1448.0262, 379.3918, 0.0000, 34953.8178],
[644.7274, 903.0334, 511.8829, 897.4061, 0.0000, 3514.8404, 1448.0262, 379.3918, 0.0000, 33230.2498],
[644.7274, 903.0334, 511.8829, 897.4061, 0.0000, 3514.8404, 1448.0262, 379.3918, 0.0000, 35026.7819],
[644.7274, 903.0334, 511.8829, 897.4061, 0.0000, 3514.8404, 1448.0262, 379.3918, 0.0000, 36976.2649],
[644.7274, 903.0334, 511.8829, 897.4061, 0.0000, 3514.8404, 1448.0262, 379.3918, 0.0000, 38673.8147],
[644.7274, 903.0334, 511.8829, 897.4061, 0.0000, 3514.8404, 1448.0262, 379.3918, 0.0000, 38717.3429],
[644.7274, 903.0334, 511.8829, 897.4061, 0.0000, 3514.8404, 1448.0262, 379.3918, 0.0000, 36659.0854],
[644.7274, 903.0334, 511.8829, 897.4061, 0.0000, 3514.8404, 1448.0262, 379.3918, 0.0000, 35877.9607],
[644.7274, 1337.8498, 1071.9327, 0.0000, 1229.1495, 0.0000, 1448.0262, 2853.5665, 0.0000, 36874.4840],
[644.7274, 1337.8498, 1071.9327, 0.0000, 1229.1495, 0.0000, 1448.0262, 2853.5665, 0.0000, 37010.2695],
[644.7274, 1337.8498, 1071.9327, 0.0000, 1229.1495, 0.0000, 1448.0262, 2853.5665, 0.0000, 38062.3510],
[644.7274, 1337.8498, 1071.9327, 0.0000, 1229.1495, 0.0000, 1448.0262, 2853.5665, 0.0000, 36471.1357],
[644.7274, 1337.8498, 1071.9327, 0.0000, 1229.1495, 0.0000, 1448.0262, 2853.5665, 0.0000, 37534.9927],
[644.7274, 1337.8498, 1071.9327, 0.0000, 1229.1495, 0.0000, 1448.0262, 2853.5665, 0.0000, 37520.2569],
[644.7274, 1337.8498, 1071.9327, 0.0000, 1229.1495, 0.0000, 1448.0262, 2853.5665, 0.0000, 36747.7952],
[644.7274, 1337.8498, 1071.9327, 0.0000, 1229.1495, 0.0000, 1448.0262, 2853.5665, 0.0000, 36387.9409],
[644.7274, 1337.8498, 1071.9327, 0.0000, 1229.1495, 0.0000, 1448.0262, 2853.5665, 0.0000, 35925.9715],
[644.7274, 1337.8498, 1071.9327, 0.0000, 1229.1495, 0.0000, 1448.0262, 2853.5665, 0.0000, 36950.7028],
[644.7274, 1657.3981, 1071.9327, 0.0000, 1229.1495, 0.0000, 1448.0262, 0.0000, 0.0000, 37383.2463],
[644.7274, 1657.3981, 1071.9327, 0.0000, 1229.1495, 0.0000, 1448.0262, 0.0000, 0.0000, 37761.2724],
[644.7274, 1657.3981, 1071.9327, 0.0000, 1229.1495, 0.0000, 1448.0262, 0.0000, 0.0000, 39548.2653],
[644.7274, 1657.3981, 1071.9327, 0.0000, 1229.1495, 0.0000, 1448.0262, 0.0000, 0.0000, 41435.1291],
[644.7274, 1657.3981, 1071.9327, 0.0000, 1229.1495, 0.0000, 1448.0262, 0.0000, 0.0000, 41651.6261],
[644.7274, 1657.3981, 1071.9327, 0.0000, 1229.1495, 0.0000, 1448.0262, 0.0000, 0.0000, 41131.9920],
[644.7274, 1657.3981, 1071.9327, 0.0000, 0.0000, 0.0000, 3760.7116, 0.0000, 0.0000, 41286.4702],
[644.7274, 1657.3981, 1071.9327, 0.0000, 0.0000, 0.0000, 3760.7116, 0.0000, 0.0000, 40978.7259],
[644.7274, 0.0000, 1071.9327, 0.0000, 0.0000, 0.0000, 3760.7116, 17485.5497, 0.0000, 40334.5453],
[644.7274, 0.0000, 1071.9327, 0.0000, 0.0000, 0.0000, 3760.7116, 17485.5497, 0.0000, 41387.9172],
[644.7274, 0.0000, 1071.9327, 0.0000, 0.0000, 0.0000, 3760.7116, 17485.5497, 0.0000, 42492.6707],
[644.7274, 0.0000, 1071.9327, 0.0000, 0.0000, 0.0000, 3760.7116, 17485.5497, 0.0000, 42953.7188],
[644.7274, 0.0000, 1071.9327, 0.0000, 0.0000, 0.0000, 3760.7116, 17485.5497, 0.0000, 42005.1092],
[644.7274, 0.0000, 1071.9327, 0.0000, 0.0000, 0.0000, 3760.7116, 17485.5497, 0.0000, 42017.9106],
[644.7274, 0.0000, 1071.9327, 0.0000, 0.0000, 0.0000, 3760.7116, 17485.5497, 0.0000, 43750.2824],
[644.7274, 0.0000, 1071.9327, 0.0000, 0.0000, 0.0000, 3760.7116, 17485.5497, 0.0000, 41766.8679],
[0.0000, 0.0000, 2461.8404, 0.0000, 0.0000, 0.0000, 3760.7116, 12161.6930, 0.0000, 42959.1150],
[0.0000, 0.0000, 2461.8404, 0.0000, 0.0000, 0.0000, 3760.7116, 12161.6930, 0.0000, 41337.9320],
[0.0000, 0.0000, 2461.8404, 0.0000, 0.0000, 0.0000, 3760.7116, 12161.6930, 0.0000, 40290.3688]])
        # PT signals, buy first then sell, settlement period 0
self.pt_res_bs00 = np.array(
[[0.0000, 0.0000, 0.0000, 0.0000, 555.5556, 0.0000, 0.0000, 7500.0000, 0.0000, 10000.0000],
[0.0000, 0.0000, 0.0000, 0.0000, 555.5556, 0.0000, 0.0000, 7500.0000, 0.0000, 9916.6667],
[0.0000, 0.0000, 0.0000, 0.0000, 555.5556, 0.0000, 321.0892, 6035.8333, 0.0000, 9761.1111],
[348.0151, 417.9188, 0.0000, 0.0000, 555.5556, 0.0000, 321.0892, 2165.9050, 0.0000, 9674.8209],
[348.0151, 417.9188, 0.0000, 0.0000, 555.5556, 0.0000, 321.0892, 2165.9050, 0.0000, 9712.5872],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9910.7240],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9919.3782],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9793.0692],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9513.8217],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9123.5935],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9000.5995],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9053.4865],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9248.7142],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9161.1372],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9197.3369],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9504.6981],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9875.2461],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 10241.5400],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 10449.2398],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 10628.3269],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 10500.7893],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 0.0000, 5233.1396, 0.0000, 10449.2776],
[348.0151, 417.9188, 0.0000, 0.0000, 459.8694, 0.0000, 0.0000, 3433.8551, 0.0000, 10338.2857],
[348.0151, 417.9188, 0.0000, 0.0000, 459.8694, 0.0000, 0.0000, 3433.8551, 0.0000, 10194.3474],
[348.0151, 417.9188, 0.0000, 0.0000, 459.8694, 0.0000, 0.0000, 3433.8551, 0.0000, 10471.0008],
[101.4983, 417.9188, 0.0000, 288.6672, 459.8694, 0.0000, 0.0000, 3541.0848, 0.0000, 10411.2629],
[101.4983, 417.9188, 0.0000, 288.6672, 459.8694, 0.0000, 0.0000, 3541.0848, 0.0000, 10670.0618],
[101.4983, 417.9188, 0.0000, 288.6672, 459.8694, 0.0000, 0.0000, 3541.0848, 0.0000, 10652.4799],
[101.4983, 417.9188, 0.0000, 288.6672, 459.8694, 0.0000, 0.0000, 3541.0848, 0.0000, 10526.1488],
[101.4983, 417.9188, 0.0000, 288.6672, 459.8694, 0.0000, 0.0000, 3541.0848, 0.0000, 10458.6614],
[101.4983, 417.9188, 821.7315, 288.6672, 0.0000, 2576.1284, 0.0000, 4487.0722, 0.0000, 20609.0270],
[797.1684, 417.9188, 821.7315, 288.6672, 0.0000, 1607.1030, 0.0000, 2703.5808, 0.0000, 21979.4972],
[1190.1307, 417.9188, 821.7315, 288.6672, 0.0000, 1607.1030, 0.0000, 0.0000, 0.0000, 21700.7241],
[1190.1307, 417.9188, 821.7315, 288.6672, 0.0000, 1607.1030, 0.0000, 0.0000, 0.0000, 21446.6630],
[1190.1307, 417.9188, 821.7315, 288.6672, 0.0000, 1607.1030, 0.0000, 0.0000, 0.0000, 20795.3593],
[1190.1307, 417.9188, 821.7315, 288.6672, 0.0000, 1607.1030, 0.0000, 0.0000, 0.0000, 21557.2924],
[1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 1607.1030, 0.0000, 2201.6110, 0.0000, 20933.6887],
[1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 1607.1030, 0.0000, 2201.6110, 0.0000, 21392.5581],
[1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 1607.1030, 0.0000, 2201.6110, 0.0000, 21390.2918],
[1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 1607.1030, 0.0000, 2201.6110, 0.0000, 22147.7562],
[1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 1607.1030, 0.0000, 2201.6110, 0.0000, 21910.9053],
[1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 1607.1030, 0.0000, 2201.6110, 0.0000, 21594.2980],
[1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 1607.1030, 0.0000, 2201.6110, 0.0000, 22575.4380],
[1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 1607.1030, 0.0000, 2201.6110, 0.0000, 22655.8312],
[1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 1607.1030, 0.0000, 2201.6110, 0.0000, 22578.4365],
[1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 1607.1030, 1467.7407, 0.0000, 0.0000, 22073.2661],
[1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 1607.1030, 1467.7407, 0.0000, 0.0000, 22955.2367],
[1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 1607.1030, 1467.7407, 0.0000, 0.0000, 21628.1647],
[1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 1607.1030, 1467.7407, 0.0000, 0.0000, 22203.4237],
[1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 1607.1030, 1467.7407, 0.0000, 0.0000, 23516.2598],
[1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 699.3848, 1467.7407, 2278.3728, 0.0000, 22505.8428],
[1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 699.3848, 1467.7407, 2278.3728, 0.0000, 22199.1042],
[1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 699.3848, 1467.7407, 2278.3728, 0.0000, 23027.9302],
[1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 699.3848, 1467.7407, 2278.3728, 0.0000, 23848.5806],
[1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 699.3848, 1467.7407, 2278.3728, 0.0000, 24540.8871],
[1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 699.3848, 1467.7407, 2278.3728, 0.0000, 23205.6838],
[1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 699.3848, 1467.7407, 2278.3728, 0.0000, 24267.6685],
[1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 699.3848, 1467.7407, 2278.3728, 0.0000, 24115.3796],
[1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 699.3848, 1467.7407, 2278.3728, 0.0000, 23814.3667],
[1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 699.3848, 1467.7407, 2278.3728, 0.0000, 24133.6611],
[2061.6837, 896.6628, 507.6643, 288.6672, 0.0000, 699.3848, 1467.7407, 3285.8830, 0.0000, 34658.5742],
[0.0000, 896.6628, 507.6643, 466.6033, 0.0000, 1523.7106, 1467.7407, 12328.8684, 0.0000, 33950.7917],
[0.0000, 896.6628, 507.6643, 936.6623, 0.0000, 3464.7832, 1467.7407, 4380.3797, 0.0000, 33711.4045],
[644.1423, 896.6628, 507.6643, 936.6623, 0.0000, 3464.7832, 1467.7407, 154.8061, 0.0000, 34922.0959],
[644.1423, 896.6628, 507.6643, 936.6623, 0.0000, 3464.7832, 1467.7407, 154.8061, 0.0000, 33237.1081],
[644.1423, 896.6628, 507.6643, 936.6623, 0.0000, 3464.7832, 1467.7407, 154.8061, 0.0000, 35031.8071],
[644.1423, 896.6628, 507.6643, 936.6623, 0.0000, 3464.7832, 1467.7407, 154.8061, 0.0000, 36976.3376],
[644.1423, 896.6628, 507.6643, 936.6623, 0.0000, 3464.7832, 1467.7407, 154.8061, 0.0000, 38658.5245],
[644.1423, 896.6628, 507.6643, 936.6623, 0.0000, 3464.7832, 1467.7407, 154.8061, 0.0000, 38712.2854],
[644.1423, 896.6628, 507.6643, 936.6623, 0.0000, 3464.7832, 1467.7407, 154.8061, 0.0000, 36655.3125],
[644.1423, 896.6628, 507.6643, 936.6623, 0.0000, 3464.7832, 1467.7407, 154.8061, 0.0000, 35904.3692],
[644.1423, 902.2617, 514.8253, 0.0000, 15.5990, 0.0000, 1467.7407, 14821.9004, 0.0000, 36873.9080],
[644.1423, 902.2617, 514.8253, 0.0000, 1220.8683, 0.0000, 1467.7407, 10470.8781, 0.0000, 36727.7895],
[644.1423, 1338.1812, 1033.4242, 0.0000, 1220.8683, 0.0000, 1467.7407, 2753.1120, 0.0000, 37719.9840],
[644.1423, 1338.1812, 1033.4242, 0.0000, 1220.8683, 0.0000, 1467.7407, 2753.1120, 0.0000, 36138.1277],
[644.1423, 1338.1812, 1033.4242, 0.0000, 1220.8683, 0.0000, 1467.7407, 2753.1120, 0.0000, 37204.0760],
[644.1423, 1338.1812, 1033.4242, 0.0000, 1220.8683, 0.0000, 1467.7407, 2753.1120, 0.0000, 37173.1201],
[644.1423, 1338.1812, 1033.4242, 0.0000, 1220.8683, 0.0000, 1467.7407, 2753.1120, 0.0000, 36398.2298],
[644.1423, 1338.1812, 1033.4242, 0.0000, 1220.8683, 0.0000, 1467.7407, 2753.1120, 0.0000, 36034.2178],
[644.1423, 1338.1812, 1033.4242, 0.0000, 1220.8683, 0.0000, 1467.7407, 2753.1120, 0.0000, 35583.6399],
[644.1423, 1338.1812, 1033.4242, 0.0000, 1220.8683, 0.0000, 1467.7407, 2753.1120, 0.0000, 36599.2645],
[644.1423, 1646.4805, 1033.4242, 0.0000, 1220.8683, 0.0000, 1467.7407, 0.0000, 0.0000, 37013.3408],
[644.1423, 1646.4805, 1033.4242, 0.0000, 1220.8683, 0.0000, 1467.7407, 0.0000, 0.0000, 37367.7449],
[644.1423, 1646.4805, 1033.4242, 0.0000, 1220.8683, 0.0000, 1467.7407, 0.0000, 0.0000, 39143.8273],
[644.1423, 1646.4805, 1033.4242, 0.0000, 1220.8683, 0.0000, 1467.7407, 0.0000, 0.0000, 41007.3074],
[644.1423, 1646.4805, 1033.4242, 0.0000, 1220.8683, 0.0000, 1467.7407, 0.0000, 0.0000, 41225.4657],
[644.1423, 1646.4805, 1033.4242, 0.0000, 1220.8683, 0.0000, 1467.7407, 0.0000, 0.0000, 40685.9525],
[644.1423, 1646.4805, 1033.4242, 0.0000, 0.0000, 0.0000, 1467.7407, 6592.6891, 0.0000, 40851.5435],
[644.1423, 1646.4805, 1033.4242, 0.0000, 0.0000, 0.0000, 3974.4666, 0.0000, 0.0000, 41082.1210],
[644.1423, 0.0000, 1033.4242, 0.0000, 0.0000, 0.0000, 3974.4666, 17370.3689, 0.0000, 40385.0135],
[644.1423, 0.0000, 1033.4242, 0.0000, 0.0000, 0.0000, 3974.4666, 17370.3689, 0.0000, 41455.1513],
[644.1423, 0.0000, 1033.4242, 0.0000, 0.0000, 0.0000, 3974.4666, 17370.3689, 0.0000, 42670.6769],
[644.1423, 0.0000, 1033.4242, 0.0000, 0.0000, 0.0000, 3974.4666, 17370.3689, 0.0000, 43213.7233],
[644.1423, 0.0000, 1033.4242, 0.0000, 0.0000, 0.0000, 3974.4666, 17370.3689, 0.0000, 42205.2480],
[644.1423, 0.0000, 1033.4242, 0.0000, 0.0000, 0.0000, 3974.4666, 17370.3689, 0.0000, 42273.9386],
[644.1423, 0.0000, 1033.4242, 0.0000, 0.0000, 0.0000, 3974.4666, 17370.3689, 0.0000, 44100.0777],
[644.1423, 0.0000, 1033.4242, 0.0000, 0.0000, 0.0000, 3974.4666, 17370.3689, 0.0000, 42059.7208],
[0.0000, 0.0000, 2483.9522, 0.0000, 0.0000, 0.0000, 3974.4666, 11619.4102, 0.0000, 43344.9653],
[0.0000, 0.0000, 2483.9522, 0.0000, 0.0000, 0.0000, 3974.4666, 11619.4102, 0.0000, 41621.0324],
[0.0000, 0.0000, 2483.9522, 0.0000, 0.0000, 0.0000, 3974.4666, 11619.4102, 0.0000, 40528.0648]])
        # PT signals, sell first then buy, settlement period 2 days for stock and 0 days for cash,
        # so the cash freed by selling first can be used immediately for further buying
self.pt_res_sb20 = np.array(
[[0.000, 0.000, 0.000, 0.000, 555.556, 0.000, 0.000, 7500.000, 0.000, 10000.000],
[0.000, 0.000, 0.000, 0.000, 555.556, 0.000, 0.000, 7500.000, 0.000, 9916.667],
[0.000, 0.000, 0.000, 0.000, 555.556, 0.000, 321.089, 6035.833, 0.000, 9761.111],
[348.015, 417.919, 0.000, 0.000, 555.556, 0.000, 321.089, 2165.905, 0.000, 9674.821],
[348.015, 417.919, 0.000, 0.000, 555.556, 0.000, 321.089, 2165.905, 0.000, 9712.587],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9910.724],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9919.378],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9793.069],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9513.822],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9123.593],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9000.600],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9053.487],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9248.714],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9161.137],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9197.337],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9504.698],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9875.246],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 10241.540],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 10449.240],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 10628.327],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 10500.789],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 0.000, 5233.140, 0.000, 10449.278],
[348.015, 417.919, 0.000, 0.000, 459.869, 0.000, 0.000, 3433.855, 0.000, 10338.286],
[348.015, 417.919, 0.000, 0.000, 459.869, 0.000, 0.000, 3433.855, 0.000, 10194.347],
[348.015, 417.919, 0.000, 0.000, 459.869, 0.000, 0.000, 3433.855, 0.000, 10471.001],
[101.498, 417.919, 0.000, 288.667, 459.869, 0.000, 0.000, 3541.085, 0.000, 10411.263],
[101.498, 417.919, 0.000, 288.667, 459.869, 0.000, 0.000, 3541.085, 0.000, 10670.062],
[101.498, 417.919, 0.000, 288.667, 459.869, 0.000, 0.000, 3541.085, 0.000, 10652.480],
[101.498, 417.919, 0.000, 288.667, 459.869, 0.000, 0.000, 3541.085, 0.000, 10526.149],
[101.498, 417.919, 0.000, 288.667, 459.869, 0.000, 0.000, 3541.085, 0.000, 10458.661],
[101.498, 417.919, 821.732, 288.667, 0.000, 2576.128, 0.000, 4487.072, 0.000, 20609.027],
[797.168, 417.919, 821.732, 288.667, 0.000, 2576.128, 0.000, 0.000, 0.000, 21979.497],
[1156.912, 417.919, 821.732, 288.667, 0.000, 1649.148, 0.000, 0.000, 0.000, 21584.441],
[1156.912, 417.919, 821.732, 288.667, 0.000, 1649.148, 0.000, 0.000, 0.000, 21309.576],
[1156.912, 417.919, 821.732, 288.667, 0.000, 1649.148, 0.000, 0.000, 0.000, 20664.323],
[1156.912, 417.919, 821.732, 288.667, 0.000, 1649.148, 0.000, 0.000, 0.000, 21445.597],
[1156.912, 417.919, 504.579, 288.667, 0.000, 1649.148, 0.000, 2223.240, 0.000, 20806.458],
[1156.912, 417.919, 504.579, 288.667, 0.000, 1649.148, 0.000, 2223.240, 0.000, 21288.441],
[1156.912, 417.919, 504.579, 288.667, 0.000, 1649.148, 0.000, 2223.240, 0.000, 21294.365],
[1156.912, 417.919, 504.579, 288.667, 0.000, 1649.148, 0.000, 2223.240, 0.000, 22058.784],
[1156.912, 417.919, 504.579, 288.667, 0.000, 1649.148, 0.000, 2223.240, 0.000, 21805.540],
[1156.912, 417.919, 504.579, 288.667, 0.000, 1649.148, 0.000, 2223.240, 0.000, 21456.333],
[1481.947, 417.919, 504.579, 288.667, 0.000, 1649.148, 0.000, 0.000, 0.000, 22459.720],
[1481.947, 417.919, 504.579, 288.667, 0.000, 1649.148, 0.000, 0.000, 0.000, 22611.602],
[1481.947, 417.919, 504.579, 288.667, 0.000, 1649.148, 0.000, 0.000, 0.000, 22470.912],
[1481.947, 417.919, 504.579, 288.667, 0.000, 1649.148, 0.000, 0.000, 0.000, 21932.634],
[1481.947, 417.919, 504.579, 288.667, 0.000, 1649.148, 0.000, 0.000, 0.000, 22425.864],
[1481.947, 417.919, 504.579, 288.667, 0.000, 1649.148, 0.000, 0.000, 0.000, 21460.103],
[1481.947, 417.919, 504.579, 288.667, 0.000, 1649.148, 0.000, 0.000, 0.000, 22376.968],
[1481.947, 417.919, 504.579, 288.667, 0.000, 763.410, 1577.904, 0.000, 0.000, 23604.295],
[1481.947, 417.919, 504.579, 288.667, 0.000, 763.410, 1577.904, 0.000, 0.000, 22704.826],
[1481.947, 417.919, 504.579, 288.667, 0.000, 763.410, 1577.904, 0.000, 0.000, 22286.293],
[1481.947, 417.919, 504.579, 288.667, 0.000, 763.410, 1577.904, 0.000, 0.000, 23204.755],
[1481.947, 417.919, 504.579, 288.667, 0.000, 763.410, 1577.904, 0.000, 0.000, 24089.017],
[1481.947, 417.919, 504.579, 288.667, 0.000, 763.410, 1577.904, 0.000, 0.000, 24768.185],
[1481.947, 417.919, 504.579, 288.667, 0.000, 763.410, 1577.904, 0.000, 0.000, 23265.196],
[1481.947, 417.919, 504.579, 288.667, 0.000, 763.410, 1577.904, 0.000, 0.000, 24350.540],
[1481.947, 417.919, 504.579, 288.667, 0.000, 763.410, 1577.904, 0.000, 0.000, 24112.706],
[1481.947, 417.919, 504.579, 288.667, 0.000, 763.410, 1577.904, 0.000, 0.000, 23709.076],
[1481.947, 417.919, 504.579, 288.667, 0.000, 763.410, 1577.904, 0.000, 0.000, 24093.545],
[2060.275, 896.050, 504.579, 288.667, 0.000, 763.410, 1577.904, 2835.944, 0.000, 34634.888],
[578.327, 896.050, 504.579, 889.896, 0.000, 3485.427, 1577.904, 732.036, 0.000, 33912.261],
[0.000, 896.050, 504.579, 889.896, 0.000, 3485.427, 1577.904, 4415.981, 0.000, 33711.951],
[644.683, 896.050, 504.579, 889.896, 0.000, 3485.427, 1577.904, 186.858, 0.000, 34951.433],
[644.683, 896.050, 504.579, 889.896, 0.000, 3485.427, 1577.904, 186.858, 0.000, 33224.596],
[644.683, 896.050, 504.579, 889.896, 0.000, 3485.427, 1577.904, 186.858, 0.000, 35065.209],
[644.683, 896.050, 504.579, 889.896, 0.000, 3485.427, 1577.904, 186.858, 0.000, 37018.699],
[644.683, 896.050, 504.579, 889.896, 0.000, 3485.427, 1577.904, 186.858, 0.000, 38706.035],
[644.683, 896.050, 504.579, 889.896, 0.000, 3485.427, 1577.904, 186.858, 0.000, 38724.569],
[644.683, 896.050, 504.579, 889.896, 0.000, 3485.427, 1577.904, 186.858, 0.000, 36647.268],
[644.683, 896.050, 504.579, 889.896, 0.000, 3485.427, 1577.904, 186.858, 0.000, 35928.930],
[644.683, 1341.215, 1074.629, 0.000, 1232.241, 0.000, 1577.904, 2367.759, 0.000, 36967.229],
[644.683, 1341.215, 1074.629, 0.000, 1232.241, 0.000, 1577.904, 2367.759, 0.000, 37056.598],
[644.683, 1341.215, 1074.629, 0.000, 1232.241, 0.000, 1577.904, 2367.759, 0.000, 38129.862],
[644.683, 1341.215, 1074.629, 0.000, 1232.241, 0.000, 1577.904, 2367.759, 0.000, 36489.333],
[644.683, 1341.215, 1074.629, 0.000, 1232.241, 0.000, 1577.904, 2367.759, 0.000, 37599.602],
[644.683, 1341.215, 1074.629, 0.000, 1232.241, 0.000, 1577.904, 2367.759, 0.000, 37566.823],
[644.683, 1341.215, 1074.629, 0.000, 1232.241, 0.000, 1577.904, 2367.759, 0.000, 36799.280],
[644.683, 1341.215, 1074.629, 0.000, 1232.241, 0.000, 1577.904, 2367.759, 0.000, 36431.196],
[644.683, 1341.215, 1074.629, 0.000, 1232.241, 0.000, 1577.904, 2367.759, 0.000, 35940.942],
[644.683, 1341.215, 1074.629, 0.000, 1232.241, 0.000, 1577.904, 2367.759, 0.000, 36973.050],
[644.683, 1606.361, 1074.629, 0.000, 1232.241, 0.000, 1577.904, 0.000, 0.000, 37393.292],
[644.683, 1606.361, 1074.629, 0.000, 1232.241, 0.000, 1577.904, 0.000, 0.000, 37711.276],
[644.683, 1606.361, 1074.629, 0.000, 1232.241, 0.000, 1577.904, 0.000, 0.000, 39515.991],
[644.683, 1606.361, 1074.629, 0.000, 1232.241, 0.000, 1577.904, 0.000, 0.000, 41404.440],
[644.683, 1606.361, 1074.629, 0.000, 1232.241, 0.000, 1577.904, 0.000, 0.000, 41573.523],
[644.683, 1606.361, 1074.629, 0.000, 1232.241, 0.000, 1577.904, 0.000, 0.000, 41011.613],
[644.683, 1606.361, 1074.629, 0.000, 0.000, 0.000, 3896.406, 0.000, 0.000, 41160.181],
[644.683, 1606.361, 1074.629, 0.000, 0.000, 0.000, 3896.406, 0.000, 0.000, 40815.512],
[644.683, 0.000, 1074.629, 0.000, 0.000, 0.000, 3896.406, 16947.110, 0.000, 40145.531],
[644.683, 0.000, 1074.629, 0.000, 0.000, 0.000, 3896.406, 16947.110, 0.000, 41217.281],
[644.683, 0.000, 1074.629, 0.000, 0.000, 0.000, 3896.406, 16947.110, 0.000, 42379.061],
[644.683, 0.000, 1074.629, 0.000, 0.000, 0.000, 3896.406, 16947.110, 0.000, 42879.589],
[644.683, 0.000, 1074.629, 0.000, 0.000, 0.000, 3896.406, 16947.110, 0.000, 41891.452],
[644.683, 0.000, 1074.629, 0.000, 0.000, 0.000, 3896.406, 16947.110, 0.000, 41929.003],
[644.683, 0.000, 1074.629, 0.000, 0.000, 0.000, 3896.406, 16947.110, 0.000, 43718.052],
[644.683, 0.000, 1074.629, 0.000, 0.000, 0.000, 3896.406, 16947.110, 0.000, 41685.916],
[0.000, 0.000, 2460.195, 0.000, 0.000, 0.000, 3896.406, 11653.255, 0.000, 42930.410],
[0.000, 0.000, 2460.195, 0.000, 0.000, 0.000, 3896.406, 11653.255, 0.000, 41242.589],
[0.000, 0.000, 2460.195, 0.000, 0.000, 0.000, 3896.406, 11653.255, 0.000, 40168.084]])
        # PT signals, buy first then sell, settlement period 2 days for stock and 1 day for cash
self.pt_res_bs21 = np.array([
[0.000, 0.000, 0.000, 0.000, 555.556, 0.000, 0.000, 7500.000, 0.000, 10000.000],
[0.000, 0.000, 0.000, 0.000, 555.556, 0.000, 0.000, 7500.000, 0.000, 9916.667],
[0.000, 0.000, 0.000, 0.000, 555.556, 0.000, 321.089, 6035.833, 0.000, 9761.111],
[348.015, 417.919, 0.000, 0.000, 555.556, 0.000, 321.089, 2165.905, 0.000, 9674.821],
[348.015, 417.919, 0.000, 0.000, 555.556, 0.000, 321.089, 2165.905, 0.000, 9712.587],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9910.724],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9919.378],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9793.069],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9513.822],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9123.593],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9000.600],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9053.487],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9248.714],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9161.137],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9197.337],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9504.698],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9875.246],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 10241.540],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 10449.240],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 10628.327],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 10500.789],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 0.000, 5233.140, 0.000, 10449.278],
[348.015, 417.919, 0.000, 0.000, 459.869, 0.000, 0.000, 3433.855, 0.000, 10338.286],
[348.015, 417.919, 0.000, 0.000, 459.869, 0.000, 0.000, 3433.855, 0.000, 10194.347],
[348.015, 417.919, 0.000, 0.000, 459.869, 0.000, 0.000, 3433.855, 0.000, 10471.001],
[101.498, 417.919, 0.000, 288.667, 459.869, 0.000, 0.000, 3541.085, 0.000, 10411.263],
[101.498, 417.919, 0.000, 288.667, 459.869, 0.000, 0.000, 3541.085, 0.000, 10670.062],
[101.498, 417.919, 0.000, 288.667, 459.869, 0.000, 0.000, 3541.085, 0.000, 10652.480],
[101.498, 417.919, 0.000, 288.667, 459.869, 0.000, 0.000, 3541.085, 0.000, 10526.149],
[101.498, 417.919, 0.000, 288.667, 459.869, 0.000, 0.000, 3541.085, 0.000, 10458.661],
[101.498, 417.919, 821.732, 288.667, 0.000, 2576.128, 0.000, 4487.072, 0.000, 20609.027],
[797.168, 417.919, 821.732, 288.667, 0.000, 2576.128, 0.000, 0.000, 0.000, 21979.497],
[797.168, 417.919, 821.732, 288.667, 0.000, 1649.148, 0.000, 2475.037, 0.000, 21584.441],
[1150.745, 417.919, 821.732, 288.667, 0.000, 1649.148, 0.000, 0.000, 0.000, 21266.406],
[1150.745, 417.919, 821.732, 288.667, 0.000, 1649.148, 0.000, 0.000, 0.000, 20623.683],
[1150.745, 417.919, 821.732, 288.667, 0.000, 1649.148, 0.000, 0.000, 0.000, 21404.957],
[1150.745, 417.919, 503.586, 288.667, 0.000, 1649.148, 0.000, 2230.202, 0.000, 20765.509],
[1150.745, 417.919, 503.586, 288.667, 0.000, 1649.148, 0.000, 2230.202, 0.000, 21248.748],
[1150.745, 417.919, 503.586, 288.667, 0.000, 1649.148, 0.000, 2230.202, 0.000, 21256.041],
[1150.745, 417.919, 503.586, 288.667, 0.000, 1649.148, 0.000, 2230.202, 0.000, 22018.958],
[1150.745, 417.919, 503.586, 288.667, 0.000, 1649.148, 0.000, 2230.202, 0.000, 21764.725],
[1150.745, 417.919, 503.586, 288.667, 0.000, 1649.148, 0.000, 2230.202, 0.000, 21413.241],
[1476.798, 417.919, 503.586, 288.667, 0.000, 1649.148, 0.000, 0.000, 0.000, 22417.021],
[1476.798, 417.919, 503.586, 288.667, 0.000, 1649.148, 0.000, 0.000, 0.000, 22567.685],
[1476.798, 417.919, 503.586, 288.667, 0.000, 1649.148, 0.000, 0.000, 0.000, 22427.699],
[1476.798, 417.919, 503.586, 288.667, 0.000, 1649.148, 0.000, 0.000, 0.000, 21889.359],
[1476.798, 417.919, 503.586, 288.667, 0.000, 1649.148, 0.000, 0.000, 0.000, 22381.938],
[1476.798, 417.919, 503.586, 288.667, 0.000, 1649.148, 0.000, 0.000, 0.000, 21416.358],
[1476.798, 417.919, 503.586, 288.667, 0.000, 1649.148, 0.000, 0.000, 0.000, 22332.786],
[1476.798, 417.919, 503.586, 288.667, 0.000, 761.900, 0.000, 2386.698, 0.000, 23557.595],
[1476.798, 417.919, 503.586, 288.667, 0.000, 761.900, 2209.906, 0.000, 0.000, 23336.992],
[1476.798, 417.919, 503.586, 288.667, 0.000, 761.900, 2209.906, 0.000, 0.000, 22907.742],
[1476.798, 417.919, 503.586, 288.667, 0.000, 761.900, 2209.906, 0.000, 0.000, 24059.201],
[1476.798, 417.919, 503.586, 288.667, 0.000, 761.900, 2209.906, 0.000, 0.000, 24941.902],
[1476.798, 417.919, 503.586, 288.667, 0.000, 761.900, 2209.906, 0.000, 0.000, 25817.514],
[1476.798, 417.919, 503.586, 288.667, 0.000, 761.900, 2209.906, 0.000, 0.000, 24127.939],
[1476.798, 417.919, 503.586, 288.667, 0.000, 761.900, 2209.906, 0.000, 0.000, 25459.688],
[1476.798, 417.919, 503.586, 288.667, 0.000, 761.900, 2209.906, 0.000, 0.000, 25147.370],
[1476.798, 417.919, 503.586, 288.667, 0.000, 761.900, 2209.906, 0.000, 0.000, 25005.842],
[1476.798, 417.919, 503.586, 288.667, 0.000, 761.900, 1086.639, 2752.004, 0.000, 25598.700],
[2138.154, 929.921, 503.586, 288.667, 0.000, 761.900, 1086.639, 4818.835, 0.000, 35944.098],
[661.356, 929.921, 503.586, 553.843, 0.000, 1954.237, 1086.639, 8831.252, 0.000, 35237.243],
[0.000, 929.921, 503.586, 553.843, 0.000, 3613.095, 1086.639, 9460.955, 0.000, 35154.442],
[667.098, 929.921, 503.586, 553.843, 0.000, 3613.095, 1086.639, 5084.792, 0.000, 36166.632],
[667.098, 929.921, 503.586, 553.843, 0.000, 3613.095, 1086.639, 5084.792, 0.000, 34293.883],
[667.098, 929.921, 503.586, 553.843, 0.000, 3613.095, 1086.639, 5084.792, 0.000, 35976.901],
[667.098, 929.921, 503.586, 553.843, 0.000, 3613.095, 1086.639, 5084.792, 0.000, 37848.552],
[667.098, 929.921, 503.586, 553.843, 0.000, 3613.095, 1086.639, 5084.792, 0.000, 39512.574],
[667.098, 929.921, 503.586, 553.843, 0.000, 3613.095, 1086.639, 5084.792, 0.000, 39538.024],
[667.098, 929.921, 503.586, 553.843, 0.000, 3613.095, 1086.639, 5084.792, 0.000, 37652.984],
[667.098, 929.921, 503.586, 553.843, 0.000, 3613.095, 1086.639, 5084.792, 0.000, 36687.909],
[667.098, 1108.871, 745.260, 0.000, 512.148, 0.000, 1086.639, 11861.593, 0.000, 37749.277],
[667.098, 1108.871, 745.260, 0.000, 512.148, 0.000, 1086.639, 11861.593, 0.000, 37865.518],
[667.098, 1108.871, 745.260, 0.000, 512.148, 0.000, 1086.639, 11861.593, 0.000, 38481.190],
[667.098, 1108.871, 745.260, 0.000, 512.148, 0.000, 1086.639, 11861.593, 0.000, 37425.087],
[667.098, 1108.871, 745.260, 0.000, 512.148, 0.000, 1086.639, 11861.593, 0.000, 38051.341],
[667.098, 1108.871, 745.260, 0.000, 512.148, 0.000, 1086.639, 11861.593, 0.000, 38065.478],
[667.098, 1108.871, 745.260, 0.000, 512.148, 0.000, 1086.639, 11861.593, 0.000, 37429.495],
[667.098, 1108.871, 745.260, 0.000, 512.148, 0.000, 1086.639, 11861.593, 0.000, 37154.479],
[667.098, 1600.830, 745.260, 0.000, 512.148, 0.000, 1086.639, 7576.628, 0.000, 36692.717],
[667.098, 1600.830, 745.260, 0.000, 512.148, 0.000, 1086.639, 7576.628, 0.000, 37327.055],
[667.098, 1600.830, 745.260, 0.000, 512.148, 0.000, 1086.639, 7576.628, 0.000, 37937.630],
[667.098, 1600.830, 745.260, 0.000, 512.148, 0.000, 1086.639, 7576.628, 0.000, 38298.645],
[667.098, 1600.830, 745.260, 0.000, 512.148, 0.000, 1086.639, 7576.628, 0.000, 39689.369],
[667.098, 1600.830, 745.260, 0.000, 512.148, 0.000, 1086.639, 7576.628, 0.000, 40992.397],
[667.098, 1600.830, 745.260, 0.000, 512.148, 0.000, 1086.639, 7576.628, 0.000, 41092.265],
[667.098, 1600.830, 745.260, 0.000, 512.148, 0.000, 1086.639, 7576.628, 0.000, 40733.622],
[667.098, 1600.830, 745.260, 0.000, 512.148, 0.000, 3726.579, 0.000, 0.000, 40708.515],
[667.098, 1600.830, 745.260, 0.000, 512.148, 0.000, 3726.579, 0.000, 0.000, 40485.321],
[667.098, 0.000, 745.260, 0.000, 512.148, 0.000, 3726.579, 16888.760, 0.000, 39768.059],
[667.098, 0.000, 745.260, 0.000, 512.148, 0.000, 3726.579, 16888.760, 0.000, 40519.595],
[667.098, 0.000, 745.260, 0.000, 512.148, 0.000, 3726.579, 16888.760, 0.000, 41590.937],
[667.098, 0.000, 1283.484, 0.000, 512.148, 0.000, 3726.579, 12448.413, 0.000, 42354.983],
[667.098, 0.000, 1283.484, 0.000, 512.148, 0.000, 3726.579, 12448.413, 0.000, 41175.149],
[667.098, 0.000, 1283.484, 0.000, 512.148, 0.000, 3726.579, 12448.413, 0.000, 41037.902],
[667.098, 0.000, 1283.484, 0.000, 512.148, 0.000, 3726.579, 12448.413, 0.000, 42706.213],
[667.098, 0.000, 1283.484, 0.000, 512.148, 0.000, 3726.579, 12448.413, 0.000, 40539.205],
[0.000, 0.000, 2384.452, 0.000, 512.148, 0.000, 3726.579, 9293.252, 0.000, 41608.692],
[0.000, 0.000, 2384.452, 0.000, 512.148, 0.000, 3726.579, 9293.252, 0.000, 39992.148],
[0.000, 0.000, 2384.452, 0.000, 512.148, 0.000, 3726.579, 9293.252, 0.000, 39134.828]])
        # expected back-test results for simulated PS signals
        # PS signals, sell first then buy, settlement period 0
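        # PS is understood here as a proportion signal: each value is a
        # fraction of assets to trade rather than a target position, so the
        # same price data yields different trade sequences than the PT runs above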
self.ps_res_sb00 = np.array(
[[0.0000, 0.0000, 0.0000, 0.0000, 555.5556, 0.0000, 0.0000, 7500.0000, 0.0000, 10000.0000],
[0.0000, 0.0000, 0.0000, 0.0000, 555.5556, 0.0000, 0.0000, 7500.0000, 0.0000, 9916.6667],
[0.0000, 0.0000, 0.0000, 0.0000, 555.5556, 205.0654, 321.0892, 5059.7222, 0.0000, 9761.1111],
[346.9824, 416.6787, 0.0000, 0.0000, 555.5556, 205.0654, 321.0892, 1201.2775, 0.0000, 9646.1118],
[346.9824, 416.6787, 191.0372, 0.0000, 555.5556, 205.0654, 321.0892, 232.7189, 0.0000, 9685.5858],
[346.9824, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 1891.0523, 0.0000, 9813.2184],
[346.9824, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 1891.0523, 0.0000, 9803.1288],
[346.9824, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 1891.0523, 0.0000, 9608.0198],
[346.9824, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 1891.0523, 0.0000, 9311.5727],
[346.9824, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 1891.0523, 0.0000, 8883.6246],
[346.9824, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 1891.0523, 0.0000, 8751.3900],
[346.9824, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 1891.0523, 0.0000, 8794.1811],
[346.9824, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 1891.0523, 0.0000, 9136.5704],
[231.4373, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 2472.2444, 0.0000, 9209.3588],
[231.4373, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 2472.2444, 0.0000, 9093.8294],
[231.4373, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 2472.2444, 0.0000, 9387.5537],
[231.4373, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 2472.2444, 0.0000, 9585.9589],
[231.4373, 416.6787, 95.5186, 0.0000, 138.8889, 205.0654, 321.0892, 3035.8041, 0.0000, 9928.7771],
[231.4373, 416.6787, 95.5186, 0.0000, 138.8889, 205.0654, 321.0892, 3035.8041, 0.0000, 10060.3806],
[231.4373, 416.6787, 95.5186, 0.0000, 138.8889, 205.0654, 321.0892, 3035.8041, 0.0000, 10281.0021],
[231.4373, 416.6787, 95.5186, 0.0000, 138.8889, 205.0654, 321.0892, 3035.8041, 0.0000, 10095.5613],
[231.4373, 416.6787, 95.5186, 0.0000, 138.8889, 205.0654, 0.0000, 4506.3926, 0.0000, 10029.9571],
[231.4373, 416.6787, 95.5186, 0.0000, 474.2238, 205.0654, 0.0000, 2531.2699, 0.0000, 9875.6133],
[231.4373, 416.6787, 95.5186, 0.0000, 474.2238, 205.0654, 0.0000, 2531.2699, 0.0000, 9614.9463],
[231.4373, 416.6787, 95.5186, 0.0000, 474.2238, 205.0654, 0.0000, 2531.2699, 0.0000, 9824.1722],
[115.7186, 416.6787, 95.5186, 269.8496, 474.2238, 205.0654, 0.0000, 1854.7990, 0.0000, 9732.5743],
[115.7186, 416.6787, 95.5186, 269.8496, 474.2238, 205.0654, 0.0000, 1854.7990, 0.0000, 9968.3391],
[115.7186, 416.6787, 95.5186, 269.8496, 474.2238, 205.0654, 0.0000, 1854.7990, 0.0000, 10056.1579],
[115.7186, 416.6787, 95.5186, 269.8496, 474.2238, 205.0654, 0.0000, 1854.7990, 0.0000, 9921.4925],
[115.7186, 416.6787, 95.5186, 269.8496, 474.2238, 205.0654, 0.0000, 1854.7990, 0.0000, 9894.1621],
[115.7186, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 6179.7742, 0.0000, 20067.9370],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 21133.5080],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 20988.8485],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 20596.7429],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 19910.7730],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 20776.7070],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 20051.7969],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 20725.3884],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 20828.8795],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 21647.1811],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 21310.1687],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 20852.0993],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 21912.3952],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 21937.8282],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 21962.4576],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 938.6967, 1339.2073, 0.0000, 0.0000, 21389.4018],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 938.6967, 1339.2073, 0.0000, 0.0000, 22027.4535],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 938.6967, 1339.2073, 0.0000, 0.0000, 20939.9992],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 938.6967, 1339.2073, 0.0000, 0.0000, 21250.0636],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 938.6967, 1339.2073, 0.0000, 0.0000, 22282.7812],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 938.6967, 1339.2073, 0.0000, 0.0000, 21407.0658],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 938.6967, 1339.2073, 0.0000, 0.0000, 21160.2373],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 938.6967, 1339.2073, 0.0000, 0.0000, 21826.7682],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 938.6967, 1339.2073, 0.0000, 0.0000, 22744.9403],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 938.6967, 1339.2073, 0.0000, 0.0000, 23466.1185],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 938.6967, 1339.2073, 0.0000, 0.0000, 22017.8821],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 938.6967, 1339.2073, 0.0000, 0.0000, 23191.4662],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 938.6967, 1339.2073, 0.0000, 0.0000, 23099.0822],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 938.6967, 1339.2073, 0.0000, 0.0000, 22684.7671],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 938.6967, 1339.2073, 0.0000, 0.0000, 22842.1346],
            [1073.8232, 416.6787, 735.6442, 269.8496, 1785.2055, 938.6967, 1339.2073, 5001.4246, 0.0000, 33323.8359],
[0.0000, 416.6787, 735.6442, 944.9611, 1785.2055, 3582.8836, 1339.2073, 0.0000, 0.0000, 32820.2901],
[0.0000, 416.6787, 735.6442, 944.9611, 1785.2055, 3582.8836, 1339.2073, 0.0000, 0.0000, 32891.2308],
[0.0000, 416.6787, 735.6442, 944.9611, 1785.2055, 3582.8836, 1339.2073, 0.0000, 0.0000, 34776.5296],
[0.0000, 416.6787, 735.6442, 944.9611, 1785.2055, 3582.8836, 1339.2073, 0.0000, 0.0000, 33909.0325],
[0.0000, 416.6787, 735.6442, 944.9611, 1785.2055, 3582.8836, 1339.2073, 0.0000, 0.0000, 34560.1906],
[0.0000, 416.6787, 735.6442, 944.9611, 1785.2055, 3582.8836, 1339.2073, 0.0000, 0.0000, 36080.4552],
[0.0000, 416.6787, 735.6442, 944.9611, 1785.2055, 3582.8836, 1339.2073, 0.0000, 0.0000, 38618.4454],
[0.0000, 416.6787, 735.6442, 944.9611, 1785.2055, 3582.8836, 1339.2073, 0.0000, 0.0000, 38497.9230],
[0.0000, 416.6787, 735.6442, 944.9611, 1785.2055, 3582.8836, 1339.2073, 0.0000, 0.0000, 37110.0991],
[0.0000, 416.6787, 735.6442, 944.9611, 1785.2055, 3582.8836, 1339.2073, 0.0000, 0.0000, 35455.2467],
[0.0000, 416.6787, 735.6442, 0.0000, 1785.2055, 0.0000, 1339.2073, 15126.2788, 0.0000, 35646.1860],
[0.0000, 416.6787, 735.6442, 0.0000, 1785.2055, 0.0000, 1339.2073, 15126.2788, 0.0000, 35472.3020],
[0.0000, 416.6787, 735.6442, 0.0000, 1785.2055, 0.0000, 1339.2073, 15126.2788, 0.0000, 36636.4694],
[0.0000, 416.6787, 735.6442, 0.0000, 1785.2055, 0.0000, 1339.2073, 15126.2788, 0.0000, 35191.7035],
[0.0000, 416.6787, 735.6442, 0.0000, 1785.2055, 0.0000, 1339.2073, 15126.2788, 0.0000, 36344.2242],
[0.0000, 416.6787, 735.6442, 0.0000, 1785.2055, 0.0000, 1339.2073, 15126.2788, 0.0000, 36221.6005],
[0.0000, 416.6787, 735.6442, 0.0000, 1785.2055, 0.0000, 1339.2073, 15126.2788, 0.0000, 35943.5708],
[0.0000, 416.6787, 735.6442, 0.0000, 1785.2055, 0.0000, 1339.2073, 15126.2788, 0.0000, 35708.2608],
[0.0000, 416.6787, 735.6442, 0.0000, 1785.2055, 0.0000, 1339.2073, 15126.2788, 0.0000, 35589.0286],
[0.0000, 416.6787, 735.6442, 0.0000, 1785.2055, 0.0000, 1339.2073, 15126.2788, 0.0000, 36661.0285],
[0.0000, 823.2923, 735.6442, 0.0000, 1785.2055, 0.0000, 1339.2073, 11495.2197, 0.0000, 36310.5909],
[0.0000, 823.2923, 735.6442, 0.0000, 1785.2055, 0.0000, 1339.2073, 11495.2197, 0.0000, 36466.7637],
[0.0000, 823.2923, 735.6442, 0.0000, 1785.2055, 0.0000, 1339.2073, 11495.2197, 0.0000, 37784.4918],
[0.0000, 823.2923, 735.6442, 0.0000, 1785.2055, 0.0000, 1339.2073, 11495.2197, 0.0000, 39587.6766],
[0.0000, 823.2923, 735.6442, 0.0000, 1785.2055, 0.0000, 1339.2073, 11495.2197, 0.0000, 40064.0191],
[0.0000, 823.2923, 735.6442, 0.0000, 1785.2055, 0.0000, 1339.2073, 11495.2197, 0.0000, 39521.6439],
[0.0000, 823.2923, 735.6442, 0.0000, 0.0000, 0.0000, 2730.5758, 17142.1018, 0.0000, 39932.2761],
[0.0000, 823.2923, 735.6442, 0.0000, 0.0000, 0.0000, 2730.5758, 17142.1018, 0.0000, 39565.2475],
[0.0000, 0.0000, 735.6442, 0.0000, 0.0000, 0.0000, 2730.5758, 25827.8351, 0.0000, 38943.1632],
[0.0000, 0.0000, 735.6442, 0.0000, 0.0000, 0.0000, 2730.5758, 25827.8351, 0.0000, 39504.1184],
[0.0000, 0.0000, 735.6442, 0.0000, 0.0000, 0.0000, 2730.5758, 25827.8351, 0.0000, 40317.8004],
[0.0000, 0.0000, 735.6442, 0.0000, 0.0000, 0.0000, 2730.5758, 25827.8351, 0.0000, 40798.5768],
[0.0000, 0.0000, 735.6442, 0.0000, 0.0000, 0.0000, 2730.5758, 25827.8351, 0.0000, 39962.5711],
[0.0000, 0.0000, 735.6442, 0.0000, 0.0000, 0.0000, 2730.5758, 25827.8351, 0.0000, 40194.4793],
[0.0000, 0.0000, 735.6442, 0.0000, 0.0000, 0.0000, 2730.5758, 25827.8351, 0.0000, 41260.4003],
[0.0000, 0.0000, 735.6442, 0.0000, 0.0000, 0.0000, 2730.5758, 25827.8351, 0.0000, 39966.3024],
[0.0000, 0.0000, 1613.4518, 0.0000, 0.0000, 0.0000, 2730.5758, 19700.7377, 0.0000, 40847.3160],
[0.0000, 0.0000, 1613.4518, 0.0000, 0.0000, 0.0000, 2730.5758, 19700.7377, 0.0000, 39654.5445],
[0.0000, 0.0000, 1613.4518, 0.0000, 0.0000, 0.0000, 2730.5758, 19700.7377, 0.0000, 38914.8151]])
        # PS signals, buy first then sell, settlement period 0
self.ps_res_bs00 = np.array(
[[0.0000, 0.0000, 0.0000, 0.0000, 555.5556, 0.0000, 0.0000, 7500.0000, 0.0000, 10000.0000],
[0.0000, 0.0000, 0.0000, 0.0000, 555.5556, 0.0000, 0.0000, 7500.0000, 0.0000, 9916.6667],
[0.0000, 0.0000, 0.0000, 0.0000, 555.5556, 205.0654, 321.0892, 5059.7222, 0.0000, 9761.1111],
[346.9824, 416.6787, 0.0000, 0.0000, 555.5556, 205.0654, 321.0892, 1201.2775, 0.0000, 9646.1118],
[346.9824, 416.6787, 191.0372, 0.0000, 555.5556, 205.0654, 321.0892, 232.7189, 0.0000, 9685.5858],
[346.9824, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 1891.0523, 0.0000, 9813.2184],
[346.9824, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 1891.0523, 0.0000, 9803.1288],
[346.9824, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 1891.0523, 0.0000, 9608.0198],
[346.9824, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 1891.0523, 0.0000, 9311.5727],
[346.9824, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 1891.0523, 0.0000, 8883.6246],
[346.9824, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 1891.0523, 0.0000, 8751.3900],
[346.9824, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 1891.0523, 0.0000, 8794.1811],
[346.9824, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 1891.0523, 0.0000, 9136.5704],
[231.4373, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 2472.2444, 0.0000, 9209.3588],
[231.4373, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 2472.2444, 0.0000, 9093.8294],
[231.4373, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 2472.2444, 0.0000, 9387.5537],
[231.4373, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 2472.2444, 0.0000, 9585.9589],
[231.4373, 416.6787, 95.5186, 0.0000, 138.8889, 205.0654, 321.0892, 3035.8041, 0.0000, 9928.7771],
[231.4373, 416.6787, 95.5186, 0.0000, 138.8889, 205.0654, 321.0892, 3035.8041, 0.0000, 10060.3806],
[231.4373, 416.6787, 95.5186, 0.0000, 138.8889, 205.0654, 321.0892, 3035.8041, 0.0000, 10281.0021],
[231.4373, 416.6787, 95.5186, 0.0000, 138.8889, 205.0654, 321.0892, 3035.8041, 0.0000, 10095.5613],
[231.4373, 416.6787, 95.5186, 0.0000, 138.8889, 205.0654, 0.0000, 4506.3926, 0.0000, 10029.9571],
[231.4373, 416.6787, 95.5186, 0.0000, 474.2238, 205.0654, 0.0000, 2531.2699, 0.0000, 9875.6133],
[231.4373, 416.6787, 95.5186, 0.0000, 474.2238, 205.0654, 0.0000, 2531.2699, 0.0000, 9614.9463],
[231.4373, 416.6787, 95.5186, 0.0000, 474.2238, 205.0654, 0.0000, 2531.2699, 0.0000, 9824.1722],
[115.7186, 416.6787, 95.5186, 269.8496, 474.2238, 205.0654, 0.0000, 1854.7990, 0.0000, 9732.5743],
[115.7186, 416.6787, 95.5186, 269.8496, 474.2238, 205.0654, 0.0000, 1854.7990, 0.0000, 9968.3391],
[115.7186, 416.6787, 95.5186, 269.8496, 474.2238, 205.0654, 0.0000, 1854.7990, 0.0000, 10056.1579],
[115.7186, 416.6787, 95.5186, 269.8496, 474.2238, 205.0654, 0.0000, 1854.7990, 0.0000, 9921.4925],
[115.7186, 416.6787, 95.5186, 269.8496, 474.2238, 205.0654, 0.0000, 1854.7990, 0.0000, 9894.1621],
[115.7186, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 6179.7742, 0.0000, 20067.9370],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 21133.5080],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 20988.8485],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 20596.7429],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 19910.7730],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 20776.7070],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 20051.7969],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 20725.3884],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 20828.8795],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 21647.1811],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 21310.1687],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 20852.0993],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 21912.3952],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 21937.8282],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 21962.4576],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 938.6967, 0.0000, 2008.8110, 0.0000, 21389.4018],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 938.6967, 0.0000, 2008.8110, 0.0000, 21625.6913],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 938.6967, 0.0000, 2008.8110, 0.0000, 20873.0389],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 938.6967, 0.0000, 2008.8110, 0.0000, 21450.9447],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 938.6967, 0.0000, 2008.8110, 0.0000, 22269.3892],
[1073.8232, 737.0632, 735.6442, 269.8496, 0.0000, 938.6967, 0.0000, 0.0000, 0.0000, 21969.5329],
[1073.8232, 737.0632, 735.6442, 269.8496, 0.0000, 938.6967, 0.0000, 0.0000, 0.0000, 21752.6924],
[1073.8232, 737.0632, 735.6442, 269.8496, 0.0000, 938.6967, 0.0000, 0.0000, 0.0000, 22000.6088],
[1073.8232, 737.0632, 735.6442, 269.8496, 0.0000, 938.6967, 0.0000, 0.0000, 0.0000, 23072.5655],
[1073.8232, 737.0632, 735.6442, 269.8496, 0.0000, 938.6967, 0.0000, 0.0000, 0.0000, 23487.5201],
[1073.8232, 737.0632, 735.6442, 269.8496, 0.0000, 938.6967, 0.0000, 0.0000, 0.0000, 22441.0460],
[1073.8232, 737.0632, 735.6442, 269.8496, 0.0000, 938.6967, 0.0000, 0.0000, 0.0000, 23201.2700],
[1073.8232, 737.0632, 735.6442, 269.8496, 0.0000, 938.6967, 0.0000, 0.0000, 0.0000, 23400.9485],
[1073.8232, 737.0632, 735.6442, 269.8496, 0.0000, 938.6967, 0.0000, 0.0000, 0.0000, 22306.2008],
[1073.8232, 737.0632, 735.6442, 269.8496, 0.0000, 938.6967, 0.0000, 0.0000, 0.0000, 21989.5913],
[1073.8232, 737.0632, 735.6442, 269.8496, 1708.7766, 938.6967, 0.0000, 5215.4255, 0.0000, 31897.1636],
[0.0000, 737.0632, 735.6442, 578.0898, 1708.7766, 2145.9711, 0.0000, 6421.4626, 0.0000, 31509.5059],
[0.0000, 737.0632, 735.6442, 578.0898, 1708.7766, 2145.9711, 0.0000, 6421.4626, 0.0000, 31451.7888],
[978.8815, 737.0632, 735.6442, 578.0898, 1708.7766, 2145.9711, 0.0000, 0.0000, 0.0000, 32773.4592],
[978.8815, 737.0632, 735.6442, 578.0898, 1708.7766, 2145.9711, 0.0000, 0.0000, 0.0000, 32287.0318],
[978.8815, 737.0632, 735.6442, 578.0898, 1708.7766, 2145.9711, 0.0000, 0.0000, 0.0000, 32698.1938],
[978.8815, 737.0632, 735.6442, 578.0898, 1708.7766, 2145.9711, 0.0000, 0.0000, 0.0000, 34031.5183],
[978.8815, 737.0632, 735.6442, 578.0898, 1708.7766, 2145.9711, 0.0000, 0.0000, 0.0000, 35537.8336],
[978.8815, 737.0632, 735.6442, 578.0898, 1708.7766, 2145.9711, 0.0000, 0.0000, 0.0000, 36212.6487],
[978.8815, 737.0632, 735.6442, 578.0898, 1708.7766, 2145.9711, 0.0000, 0.0000, 0.0000, 36007.5294],
[978.8815, 737.0632, 735.6442, 578.0898, 1708.7766, 2145.9711, 0.0000, 0.0000, 0.0000, 34691.3797],
[978.8815, 737.0632, 735.6442, 0.0000, 1708.7766, 0.0000, 0.0000, 9162.7865, 0.0000, 33904.8810],
[978.8815, 737.0632, 735.6442, 0.0000, 1708.7766, 0.0000, 0.0000, 9162.7865, 0.0000, 34341.6098],
[978.8815, 737.0632, 735.6442, 0.0000, 1708.7766, 0.0000, 0.0000, 9162.7865, 0.0000, 35479.9505],
[978.8815, 737.0632, 735.6442, 0.0000, 1708.7766, 0.0000, 0.0000, 9162.7865, 0.0000, 34418.4455],
[978.8815, 737.0632, 735.6442, 0.0000, 1708.7766, 0.0000, 0.0000, 9162.7865, 0.0000, 34726.7182],
[978.8815, 737.0632, 735.6442, 0.0000, 1708.7766, 0.0000, 0.0000, 9162.7865, 0.0000, 34935.0407],
[978.8815, 737.0632, 735.6442, 0.0000, 1708.7766, 0.0000, 0.0000, 9162.7865, 0.0000, 34136.7505],
[978.8815, 737.0632, 735.6442, 0.0000, 1708.7766, 0.0000, 0.0000, 9162.7865, 0.0000, 33804.1575],
[195.7763, 737.0632, 735.6442, 0.0000, 1708.7766, 0.0000, 0.0000, 14025.8697, 0.0000, 33653.8970],
[195.7763, 737.0632, 735.6442, 0.0000, 1708.7766, 0.0000, 0.0000, 14025.8697, 0.0000, 34689.8757],
[195.7763, 1124.9219, 735.6442, 0.0000, 1708.7766, 0.0000, 0.0000, 10562.2913, 0.0000, 34635.7841],
[195.7763, 1124.9219, 735.6442, 0.0000, 1708.7766, 0.0000, 0.0000, 10562.2913, 0.0000, 35253.2755],
[195.7763, 1124.9219, 735.6442, 0.0000, 1708.7766, 0.0000, 0.0000, 10562.2913, 0.0000, 36388.1051],
[195.7763, 1124.9219, 735.6442, 0.0000, 1708.7766, 0.0000, 0.0000, 10562.2913, 0.0000, 37987.4204],
[195.7763, 1124.9219, 735.6442, 0.0000, 1708.7766, 0.0000, 0.0000, 10562.2913, 0.0000, 38762.2103],
[195.7763, 1124.9219, 735.6442, 0.0000, 1708.7766, 0.0000, 0.0000, 10562.2913, 0.0000, 38574.0544],
[195.7763, 1124.9219, 735.6442, 0.0000, 0.0000, 0.0000, 1362.4361, 15879.4935, 0.0000, 39101.9156],
[195.7763, 1124.9219, 735.6442, 0.0000, 0.0000, 0.0000, 1362.4361, 15879.4935, 0.0000, 39132.5587],
[195.7763, 0.0000, 735.6442, 0.0000, 0.0000, 0.0000, 1362.4361, 27747.4200, 0.0000, 38873.2941],
[195.7763, 0.0000, 735.6442, 0.0000, 0.0000, 0.0000, 1362.4361, 27747.4200, 0.0000, 39336.6594],
[195.7763, 0.0000, 735.6442, 0.0000, 0.0000, 0.0000, 1362.4361, 27747.4200, 0.0000, 39565.9568],
[195.7763, 0.0000, 735.6442, 0.0000, 0.0000, 0.0000, 1362.4361, 27747.4200, 0.0000, 39583.4317],
[195.7763, 0.0000, 735.6442, 0.0000, 0.0000, 0.0000, 1362.4361, 27747.4200, 0.0000, 39206.8350],
[195.7763, 0.0000, 735.6442, 0.0000, 0.0000, 0.0000, 1362.4361, 27747.4200, 0.0000, 39092.6551],
[195.7763, 0.0000, 735.6442, 0.0000, 0.0000, 0.0000, 1362.4361, 27747.4200, 0.0000, 39666.1834],
[195.7763, 0.0000, 735.6442, 0.0000, 0.0000, 0.0000, 1362.4361, 27747.4200, 0.0000, 38798.0749],
[0.0000, 0.0000, 1576.8381, 0.0000, 0.0000, 0.0000, 1362.4361, 23205.2077, 0.0000, 39143.5561],
[0.0000, 0.0000, 1576.8381, 0.0000, 0.0000, 0.0000, 1362.4361, 23205.2077, 0.0000, 38617.8779],
[0.0000, 0.0000, 1576.8381, 0.0000, 0.0000, 0.0000, 1362.4361, 23205.2077, 0.0000, 38156.1701]])
        # PS signals, sell first then buy, settlement period 2 days for stock and 1 day for cash
self.ps_res_sb20 = np.array(
[[0.000, 0.000, 0.000, 0.000, 555.556, 0.000, 0.000, 7500.000, 0.000, 10000.000],
[0.000, 0.000, 0.000, 0.000, 555.556, 0.000, 0.000, 7500.000, 0.000, 9916.667],
[0.000, 0.000, 0.000, 0.000, 555.556, 205.065, 321.089, 5059.722, 0.000, 9761.111],
[346.982, 416.679, 0.000, 0.000, 555.556, 205.065, 321.089, 1201.278, 0.000, 9646.112],
[346.982, 416.679, 191.037, 0.000, 555.556, 205.065, 321.089, 232.719, 0.000, 9685.586],
[346.982, 416.679, 191.037, 0.000, 138.889, 205.065, 321.089, 1891.052, 0.000, 9813.218],
[346.982, 416.679, 191.037, 0.000, 138.889, 205.065, 321.089, 1891.052, 0.000, 9803.129],
[346.982, 416.679, 191.037, 0.000, 138.889, 205.065, 321.089, 1891.052, 0.000, 9608.020],
[346.982, 416.679, 191.037, 0.000, 138.889, 205.065, 321.089, 1891.052, 0.000, 9311.573],
[346.982, 416.679, 191.037, 0.000, 138.889, 205.065, 321.089, 1891.052, 0.000, 8883.625],
[346.982, 416.679, 191.037, 0.000, 138.889, 205.065, 321.089, 1891.052, 0.000, 8751.390],
[346.982, 416.679, 191.037, 0.000, 138.889, 205.065, 321.089, 1891.052, 0.000, 8794.181],
[346.982, 416.679, 191.037, 0.000, 138.889, 205.065, 321.089, 1891.052, 0.000, 9136.570],
[231.437, 416.679, 191.037, 0.000, 138.889, 205.065, 321.089, 2472.244, 0.000, 9209.359],
[231.437, 416.679, 191.037, 0.000, 138.889, 205.065, 321.089, 2472.244, 0.000, 9093.829],
[231.437, 416.679, 191.037, 0.000, 138.889, 205.065, 321.089, 2472.244, 0.000, 9387.554],
[231.437, 416.679, 191.037, 0.000, 138.889, 205.065, 321.089, 2472.244, 0.000, 9585.959],
[231.437, 416.679, 95.519, 0.000, 138.889, 205.065, 321.089, 3035.804, 0.000, 9928.777],
[231.437, 416.679, 95.519, 0.000, 138.889, 205.065, 321.089, 3035.804, 0.000, 10060.381],
[231.437, 416.679, 95.519, 0.000, 138.889, 205.065, 321.089, 3035.804, 0.000, 10281.002],
[231.437, 416.679, 95.519, 0.000, 138.889, 205.065, 321.089, 3035.804, 0.000, 10095.561],
[231.437, 416.679, 95.519, 0.000, 138.889, 205.065, 0.000, 4506.393, 0.000, 10029.957],
[231.437, 416.679, 95.519, 0.000, 474.224, 205.065, 0.000, 2531.270, 0.000, 9875.613],
[231.437, 416.679, 95.519, 0.000, 474.224, 205.065, 0.000, 2531.270, 0.000, 9614.946],
[231.437, 416.679, 95.519, 0.000, 474.224, 205.065, 0.000, 2531.270, 0.000, 9824.172],
[115.719, 416.679, 95.519, 269.850, 474.224, 205.065, 0.000, 1854.799, 0.000, 9732.574],
[115.719, 416.679, 95.519, 269.850, 474.224, 205.065, 0.000, 1854.799, 0.000, 9968.339],
[115.719, 416.679, 95.519, 269.850, 474.224, 205.065, 0.000, 1854.799, 0.000, 10056.158],
[115.719, 416.679, 95.519, 269.850, 474.224, 205.065, 0.000, 1854.799, 0.000, 9921.492],
[115.719, 416.679, 95.519, 269.850, 474.224, 205.065, 0.000, 1854.799, 0.000, 9894.162],
[115.719, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 6179.774, 0.000, 20067.937],
[1073.823, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 0.000, 0.000, 21133.508],
[1073.823, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 0.000, 0.000, 20988.848],
[1073.823, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 0.000, 0.000, 20596.743],
[1073.823, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 0.000, 0.000, 19910.773],
[1073.823, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 0.000, 0.000, 20776.707],
[1073.823, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 0.000, 0.000, 20051.797],
[1073.823, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 0.000, 0.000, 20725.388],
[1073.823, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 0.000, 0.000, 20828.880],
[1073.823, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 0.000, 0.000, 21647.181],
[1073.823, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 0.000, 0.000, 21310.169],
[1073.823, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 0.000, 0.000, 20852.099],
[1073.823, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 0.000, 0.000, 21912.395],
[1073.823, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 0.000, 0.000, 21937.828],
[1073.823, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 0.000, 0.000, 21962.458],
[1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 21389.402],
[1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 22027.453],
[1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 20939.999],
[1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 21250.064],
[1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 22282.781],
[1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 21407.066],
[1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 21160.237],
[1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 21826.768],
[1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 22744.940],
[1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 23466.118],
[1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 22017.882],
[1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 23191.466],
[1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 23099.082],
[1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 22684.767],
[1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 22842.135],
[1073.823, 416.679, 735.644, 269.850, 1785.205, 938.697, 1339.207, 5001.425, 0.000, 33323.836],
[0.000, 416.679, 735.644, 944.961, 1785.205, 3582.884, 1339.207, 0.000, 0.000, 32820.290],
[0.000, 416.679, 735.644, 944.961, 1785.205, 3582.884, 1339.207, 0.000, 0.000, 32891.231],
[0.000, 416.679, 735.644, 944.961, 1785.205, 3582.884, 1339.207, 0.000, 0.000, 34776.530],
[0.000, 416.679, 735.644, 944.961, 1785.205, 3582.884, 1339.207, 0.000, 0.000, 33909.032],
[0.000, 416.679, 735.644, 944.961, 1785.205, 3582.884, 1339.207, 0.000, 0.000, 34560.191],
[0.000, 416.679, 735.644, 944.961, 1785.205, 3582.884, 1339.207, 0.000, 0.000, 36080.455],
[0.000, 416.679, 735.644, 944.961, 1785.205, 3582.884, 1339.207, 0.000, 0.000, 38618.445],
[0.000, 416.679, 735.644, 944.961, 1785.205, 3582.884, 1339.207, 0.000, 0.000, 38497.923],
[0.000, 416.679, 735.644, 944.961, 1785.205, 3582.884, 1339.207, 0.000, 0.000, 37110.099],
[0.000, 416.679, 735.644, 944.961, 1785.205, 3582.884, 1339.207, 0.000, 0.000, 35455.247],
[0.000, 416.679, 735.644, 0.000, 1785.205, 0.000, 1339.207, 15126.279, 0.000, 35646.186],
[0.000, 416.679, 735.644, 0.000, 1785.205, 0.000, 1339.207, 15126.279, 0.000, 35472.302],
[0.000, 416.679, 735.644, 0.000, 1785.205, 0.000, 1339.207, 15126.279, 0.000, 36636.469],
[0.000, 416.679, 735.644, 0.000, 1785.205, 0.000, 1339.207, 15126.279, 0.000, 35191.704],
[0.000, 416.679, 735.644, 0.000, 1785.205, 0.000, 1339.207, 15126.279, 0.000, 36344.224],
[0.000, 416.679, 735.644, 0.000, 1785.205, 0.000, 1339.207, 15126.279, 0.000, 36221.601],
[0.000, 416.679, 735.644, 0.000, 1785.205, 0.000, 1339.207, 15126.279, 0.000, 35943.571],
[0.000, 416.679, 735.644, 0.000, 1785.205, 0.000, 1339.207, 15126.279, 0.000, 35708.261],
[0.000, 416.679, 735.644, 0.000, 1785.205, 0.000, 1339.207, 15126.279, 0.000, 35589.029],
[0.000, 416.679, 735.644, 0.000, 1785.205, 0.000, 1339.207, 15126.279, 0.000, 36661.029],
[0.000, 823.292, 735.644, 0.000, 1785.205, 0.000, 1339.207, 11495.220, 0.000, 36310.591],
[0.000, 823.292, 735.644, 0.000, 1785.205, 0.000, 1339.207, 11495.220, 0.000, 36466.764],
[0.000, 823.292, 735.644, 0.000, 1785.205, 0.000, 1339.207, 11495.220, 0.000, 37784.492],
[0.000, 823.292, 735.644, 0.000, 1785.205, 0.000, 1339.207, 11495.220, 0.000, 39587.677],
[0.000, 823.292, 735.644, 0.000, 1785.205, 0.000, 1339.207, 11495.220, 0.000, 40064.019],
[0.000, 823.292, 735.644, 0.000, 1785.205, 0.000, 1339.207, 11495.220, 0.000, 39521.644],
[0.000, 823.292, 735.644, 0.000, 0.000, 0.000, 2730.576, 17142.102, 0.000, 39932.276],
[0.000, 823.292, 735.644, 0.000, 0.000, 0.000, 2730.576, 17142.102, 0.000, 39565.248],
[0.000, 0.000, 735.644, 0.000, 0.000, 0.000, 2730.576, 25827.835, 0.000, 38943.163],
[0.000, 0.000, 735.644, 0.000, 0.000, 0.000, 2730.576, 25827.835, 0.000, 39504.118],
[0.000, 0.000, 735.644, 0.000, 0.000, 0.000, 2730.576, 25827.835, 0.000, 40317.800],
[0.000, 0.000, 735.644, 0.000, 0.000, 0.000, 2730.576, 25827.835, 0.000, 40798.577],
[0.000, 0.000, 735.644, 0.000, 0.000, 0.000, 2730.576, 25827.835, 0.000, 39962.571],
[0.000, 0.000, 735.644, 0.000, 0.000, 0.000, 2730.576, 25827.835, 0.000, 40194.479],
[0.000, 0.000, 735.644, 0.000, 0.000, 0.000, 2730.576, 25827.835, 0.000, 41260.400],
[0.000, 0.000, 735.644, 0.000, 0.000, 0.000, 2730.576, 25827.835, 0.000, 39966.302],
[0.000, 0.000, 1613.452, 0.000, 0.000, 0.000, 2730.576, 19700.738, 0.000, 40847.316],
[0.000, 0.000, 1613.452, 0.000, 0.000, 0.000, 2730.576, 19700.738, 0.000, 39654.544],
[0.000, 0.000, 1613.452, 0.000, 0.000, 0.000, 2730.576, 19700.738, 0.000, 38914.815]])
# PS signal, buy first then sell, delivery period: 2 days (stock), 1 day (cash)
self.ps_res_bs21 = np.array(
[[0.000, 0.000, 0.000, 0.000, 555.556, 0.000, 0.000, 7500.000, 0.000, 10000.000],
[0.000, 0.000, 0.000, 0.000, 555.556, 0.000, 0.000, 7500.000, 0.000, 9916.667],
[0.000, 0.000, 0.000, 0.000, 555.556, 208.333, 326.206, 5020.833, 0.000, 9761.111],
[351.119, 421.646, 0.000, 0.000, 555.556, 208.333, 326.206, 1116.389, 0.000, 9645.961],
[351.119, 421.646, 190.256, 0.000, 555.556, 208.333, 326.206, 151.793, 0.000, 9686.841],
[351.119, 421.646, 190.256, 0.000, 138.889, 208.333, 326.206, 1810.126, 0.000, 9813.932],
[351.119, 421.646, 190.256, 0.000, 138.889, 208.333, 326.206, 1810.126, 0.000, 9803.000],
[351.119, 421.646, 190.256, 0.000, 138.889, 208.333, 326.206, 1810.126, 0.000, 9605.334],
[351.119, 421.646, 190.256, 0.000, 138.889, 208.333, 326.206, 1810.126, 0.000, 9304.001],
[351.119, 421.646, 190.256, 0.000, 138.889, 208.333, 326.206, 1810.126, 0.000, 8870.741],
[351.119, 421.646, 190.256, 0.000, 138.889, 208.333, 326.206, 1810.126, 0.000, 8738.282],
[351.119, 421.646, 190.256, 0.000, 138.889, 208.333, 326.206, 1810.126, 0.000, 8780.664],
[351.119, 421.646, 190.256, 0.000, 138.889, 208.333, 326.206, 1810.126, 0.000, 9126.199],
[234.196, 421.646, 190.256, 0.000, 138.889, 208.333, 326.206, 2398.247, 0.000, 9199.746],
[234.196, 421.646, 190.256, 0.000, 138.889, 208.333, 326.206, 2398.247, 0.000, 9083.518],
[234.196, 421.646, 190.256, 0.000, 138.889, 208.333, 326.206, 2398.247, 0.000, 9380.932],
[234.196, 421.646, 190.256, 0.000, 138.889, 208.333, 326.206, 2398.247, 0.000, 9581.266],
[234.196, 421.646, 95.128, 0.000, 138.889, 208.333, 326.206, 2959.501, 0.000, 9927.154],
[234.196, 421.646, 95.128, 0.000, 138.889, 208.333, 326.206, 2959.501, 0.000, 10059.283],
[234.196, 421.646, 95.128, 0.000, 138.889, 208.333, 326.206, 2959.501, 0.000, 10281.669],
[234.196, 421.646, 95.128, 0.000, 138.889, 208.333, 326.206, 2959.501, 0.000, 10093.263],
[234.196, 421.646, 95.128, 0.000, 138.889, 208.333, 0.000, 4453.525, 0.000, 10026.289],
[234.196, 421.646, 95.128, 0.000, 479.340, 208.333, 0.000, 2448.268, 0.000, 9870.523],
[234.196, 421.646, 95.128, 0.000, 479.340, 208.333, 0.000, 2448.268, 0.000, 9606.437],
[234.196, 421.646, 95.128, 0.000, 479.340, 208.333, 0.000, 2448.268, 0.000, 9818.691],
[117.098, 421.646, 95.128, 272.237, 479.340, 208.333, 0.000, 1768.219, 0.000, 9726.556],
[117.098, 421.646, 95.128, 272.237, 479.340, 208.333, 0.000, 1768.219, 0.000, 9964.547],
[117.098, 421.646, 95.128, 272.237, 479.340, 208.333, 0.000, 1768.219, 0.000, 10053.449],
[117.098, 421.646, 95.128, 272.237, 479.340, 208.333, 0.000, 1768.219, 0.000, 9917.440],
[117.098, 421.646, 95.128, 272.237, 479.340, 208.333, 0.000, 1768.219, 0.000, 9889.495],
[117.098, 421.646, 729.561, 272.237, 0.000, 1865.791, 0.000, 6189.948, 0.000, 20064.523],
[708.171, 421.646, 729.561, 272.237, 0.000, 1865.791, 0.000, 2377.527, 0.000, 21124.484],
[708.171, 421.646, 729.561, 272.237, 0.000, 1865.791, 0.000, 2377.527, 0.000, 20827.077],
[708.171, 421.646, 729.561, 272.237, 0.000, 1865.791, 0.000, 2377.527, 0.000, 20396.124],
[708.171, 421.646, 729.561, 272.237, 0.000, 1865.791, 0.000, 2377.527, 0.000, 19856.445],
[708.171, 421.646, 729.561, 272.237, 0.000, 1865.791, 0.000, 2377.527, 0.000, 20714.156],
[708.171, 421.646, 729.561, 272.237, 0.000, 1865.791, 0.000, 2377.527, 0.000, 19971.485],
[708.171, 421.646, 729.561, 272.237, 0.000, 1865.791, 0.000, 2377.527, 0.000, 20733.948],
[708.171, 421.646, 729.561, 272.237, 0.000, 1865.791, 0.000, 2377.527, 0.000, 20938.903],
[708.171, 421.646, 729.561, 272.237, 0.000, 1865.791, 0.000, 2377.527, 0.000, 21660.772],
[708.171, 421.646, 729.561, 272.237, 0.000, 1865.791, 0.000, 2377.527, 0.000, 21265.298],
[708.171, 421.646, 729.561, 272.237, 0.000, 1865.791, 0.000, 2377.527, 0.000, 20684.378],
[1055.763, 421.646, 729.561, 272.237, 0.000, 1865.791, 0.000, 0.000, 0.000, 21754.770],
[1055.763, 421.646, 729.561, 272.237, 0.000, 1865.791, 0.000, 0.000, 0.000, 21775.215],
[1055.763, 421.646, 729.561, 272.237, 0.000, 1865.791, 0.000, 0.000, 0.000, 21801.488],
[1055.763, 421.646, 729.561, 272.237, 0.000, 932.896, 0.000, 1996.397, 0.000, 21235.427],
[1055.763, 421.646, 729.561, 272.237, 0.000, 932.896, 0.000, 1996.397, 0.000, 21466.714],
[1055.763, 421.646, 729.561, 272.237, 0.000, 932.896, 0.000, 1996.397, 0.000, 20717.431],
[1055.763, 421.646, 729.561, 272.237, 0.000, 932.896, 0.000, 1996.397, 0.000, 21294.450],
[1055.763, 421.646, 729.561, 272.237, 0.000, 932.896, 0.000, 1996.397, 0.000, 22100.247],
[1055.763, 740.051, 729.561, 272.237, 0.000, 932.896, 0.000, 0.000, 0.000, 21802.552],
[1055.763, 740.051, 729.561, 272.237, 0.000, 932.896, 0.000, 0.000, 0.000, 21593.608],
[1055.763, 740.051, 729.561, 272.237, 0.000, 932.896, 0.000, 0.000, 0.000, 21840.028],
[1055.763, 740.051, 729.561, 272.237, 0.000, 932.896, 0.000, 0.000, 0.000, 22907.725],
[1055.763, 740.051, 729.561, 272.237, 0.000, 932.896, 0.000, 0.000, 0.000, 23325.945],
[1055.763, 740.051, 729.561, 272.237, 0.000, 932.896, 0.000, 0.000, 0.000, 22291.942],
[1055.763, 740.051, 729.561, 272.237, 0.000, 932.896, 0.000, 0.000, 0.000, 23053.050],
[1055.763, 740.051, 729.561, 272.237, 0.000, 932.896, 0.000, 0.000, 0.000, 23260.084],
[1055.763, 740.051, 729.561, 272.237, 0.000, 932.896, 0.000, 0.000, 0.000, 22176.244],
[1055.763, 740.051, 729.561, 272.237, 0.000, 932.896, 0.000, 0.000, 0.000, 21859.297],
[1055.763, 740.051, 729.561, 272.237, 1706.748, 932.896, 0.000, 5221.105, 0.000, 31769.617],
[0.000, 740.051, 729.561, 580.813, 1706.748, 2141.485, 0.000, 6313.462, 0.000, 31389.961],
[0.000, 740.051, 729.561, 580.813, 1706.748, 2141.485, 0.000, 6313.462, 0.000, 31327.498],
[962.418, 740.051, 729.561, 580.813, 1706.748, 2141.485, 0.000, 0.000, 0.000, 32647.140],
[962.418, 740.051, 729.561, 580.813, 1706.748, 2141.485, 0.000, 0.000, 0.000, 32170.095],
[962.418, 740.051, 729.561, 580.813, 1706.748, 2141.485, 0.000, 0.000, 0.000, 32577.742],
[962.418, 740.051, 729.561, 580.813, 1706.748, 2141.485, 0.000, 0.000, 0.000, 33905.444],
[962.418, 740.051, 729.561, 580.813, 1706.748, 2141.485, 0.000, 0.000, 0.000, 35414.492],
[962.418, 740.051, 729.561, 580.813, 1706.748, 2141.485, 0.000, 0.000, 0.000, 36082.120],
[962.418, 740.051, 729.561, 580.813, 1706.748, 2141.485, 0.000, 0.000, 0.000, 35872.293],
[962.418, 740.051, 729.561, 580.813, 1706.748, 2141.485, 0.000, 0.000, 0.000, 34558.132],
[962.418, 740.051, 729.561, 0.000, 1706.748, 0.000, 0.000, 9177.053, 0.000, 33778.138],
[962.418, 740.051, 729.561, 0.000, 1706.748, 0.000, 0.000, 9177.053, 0.000, 34213.578],
[962.418, 740.051, 729.561, 0.000, 1706.748, 0.000, 0.000, 9177.053, 0.000, 35345.791],
[962.418, 740.051, 729.561, 0.000, 1706.748, 0.000, 0.000, 9177.053, 0.000, 34288.014],
[962.418, 740.051, 729.561, 0.000, 1706.748, 0.000, 0.000, 9177.053, 0.000, 34604.406],
[962.418, 740.051, 729.561, 0.000, 1706.748, 0.000, 0.000, 9177.053, 0.000, 34806.850],
[962.418, 740.051, 729.561, 0.000, 1706.748, 0.000, 0.000, 9177.053, 0.000, 34012.232],
[962.418, 740.051, 729.561, 0.000, 1706.748, 0.000, 0.000, 9177.053, 0.000, 33681.345],
[192.484, 740.051, 729.561, 0.000, 1706.748, 0.000, 0.000, 13958.345, 0.000, 33540.463],
[192.484, 740.051, 729.561, 0.000, 1706.748, 0.000, 0.000, 13958.345, 0.000, 34574.280],
[192.484, 1127.221, 729.561, 0.000, 1706.748, 0.000, 0.000, 10500.917, 0.000, 34516.781],
[192.484, 1127.221, 729.561, 0.000, 1706.748, 0.000, 0.000, 10500.917, 0.000, 35134.412],
[192.484, 1127.221, 729.561, 0.000, 1706.748, 0.000, 0.000, 10500.917, 0.000, 36266.530],
[192.484, 1127.221, 729.561, 0.000, 1706.748, 0.000, 0.000, 10500.917, 0.000, 37864.376],
[192.484, 1127.221, 729.561, 0.000, 1706.748, 0.000, 0.000, 10500.917, 0.000, 38642.633],
[192.484, 1127.221, 729.561, 0.000, 1706.748, 0.000, 0.000, 10500.917, 0.000, 38454.227],
[192.484, 1127.221, 729.561, 0.000, 0.000, 0.000, 1339.869, 15871.934, 0.000, 38982.227],
[192.484, 1127.221, 729.561, 0.000, 0.000, 0.000, 1339.869, 15871.934, 0.000, 39016.154],
[192.484, 0.000, 729.561, 0.000, 0.000, 0.000, 1339.869, 27764.114, 0.000, 38759.803],
[192.484, 0.000, 729.561, 0.000, 0.000, 0.000, 1339.869, 27764.114, 0.000, 39217.182],
[192.484, 0.000, 729.561, 0.000, 0.000, 0.000, 1339.869, 27764.114, 0.000, 39439.690],
[192.484, 0.000, 729.561, 0.000, 0.000, 0.000, 1339.869, 27764.114, 0.000, 39454.081],
[192.484, 0.000, 729.561, 0.000, 0.000, 0.000, 1339.869, 27764.114, 0.000, 39083.341],
[192.484, 0.000, 729.561, 0.000, 0.000, 0.000, 1339.869, 27764.114, 0.000, 38968.694],
[192.484, 0.000, 729.561, 0.000, 0.000, 0.000, 1339.869, 27764.114, 0.000, 39532.030],
[192.484, 0.000, 729.561, 0.000, 0.000, 0.000, 1339.869, 27764.114, 0.000, 38675.507],
[0.000, 0.000, 1560.697, 0.000, 0.000, 0.000, 1339.869, 23269.751, 0.000, 39013.741],
[0.000, 0.000, 1560.697, 0.000, 0.000, 0.000, 1339.869, 23269.751, 0.000, 38497.668],
[0.000, 0.000, 1560.697, 0.000, 0.000, 0.000, 1339.869, 23269.751, 0.000, 38042.410]])
# Simulated VS-signal backtest results
# VS signal, sell first then buy, delivery period: 0 days
self.vs_res_sb00 = np.array(
[[0.0000, 0.0000, 0.0000, 0.0000, 500.0000, 0.0000, 0.0000, 7750.0000, 0.0000, 10000.0000],
[0.0000, 0.0000, 0.0000, 0.0000, 500.0000, 0.0000, 0.0000, 7750.0000, 0.0000, 9925.0000],
[0.0000, 0.0000, 0.0000, 0.0000, 500.0000, 300.0000, 300.0000, 4954.0000, 0.0000, 9785.0000],
[400.0000, 400.0000, 0.0000, 0.0000, 500.0000, 300.0000, 300.0000, 878.0000, 0.0000, 9666.0000],
[400.0000, 400.0000, 173.1755, 0.0000, 500.0000, 300.0000, 300.0000, 0.0000, 0.0000, 9731.0000],
[400.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 1592.0000, 0.0000, 9830.9270],
[400.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 1592.0000, 0.0000, 9785.8540],
[400.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 1592.0000, 0.0000, 9614.3412],
[400.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 1592.0000, 0.0000, 9303.1953],
[400.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 1592.0000, 0.0000, 8834.4398],
[400.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 1592.0000, 0.0000, 8712.7554],
[400.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 1592.0000, 0.0000, 8717.9507],
[400.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 1592.0000, 0.0000, 9079.1479],
[200.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 2598.0000, 0.0000, 9166.0276],
[200.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 2598.0000, 0.0000, 9023.6607],
[200.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 2598.0000, 0.0000, 9291.6864],
[200.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 2598.0000, 0.0000, 9411.6371],
[200.0000, 400.0000, 0.0000, 0.0000, 100.0000, 300.0000, 300.0000, 3619.7357, 0.0000, 9706.7357],
[200.0000, 400.0000, 0.0000, 0.0000, 100.0000, 300.0000, 300.0000, 3619.7357, 0.0000, 9822.7357],
[200.0000, 400.0000, 0.0000, 0.0000, 100.0000, 300.0000, 300.0000, 3619.7357, 0.0000, 9986.7357],
[200.0000, 400.0000, 0.0000, 0.0000, 100.0000, 300.0000, 300.0000, 3619.7357, 0.0000, 9805.7357],
[200.0000, 400.0000, 0.0000, 0.0000, 100.0000, 300.0000, 0.0000, 4993.7357, 0.0000, 9704.7357],
[200.0000, 400.0000, 0.0000, 0.0000, 600.0000, 300.0000, 0.0000, 2048.7357, 0.0000, 9567.7357],
[200.0000, 400.0000, 0.0000, 0.0000, 600.0000, 300.0000, 0.0000, 2048.7357, 0.0000, 9209.7357],
[200.0000, 400.0000, 0.0000, 0.0000, 600.0000, 300.0000, 0.0000, 2048.7357, 0.0000, 9407.7357],
[0.0000, 400.0000, 0.0000, 300.0000, 600.0000, 300.0000, 0.0000, 1779.7357, 0.0000, 9329.7357],
[0.0000, 400.0000, 0.0000, 300.0000, 600.0000, 300.0000, 0.0000, 1779.7357, 0.0000, 9545.7357],
[0.0000, 400.0000, 0.0000, 300.0000, 600.0000, 300.0000, 0.0000, 1779.7357, 0.0000, 9652.7357],
[0.0000, 400.0000, 0.0000, 300.0000, 600.0000, 300.0000, 0.0000, 1779.7357, 0.0000, 9414.7357],
[0.0000, 400.0000, 0.0000, 300.0000, 600.0000, 300.0000, 0.0000, 1779.7357, 0.0000, 9367.7357],
[0.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 9319.7357, 0.0000, 19556.7357],
[500.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 6094.7357, 0.0000, 20094.7357],
[500.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 6094.7357, 0.0000, 19849.7357],
[500.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 6094.7357, 0.0000, 19802.7357],
[500.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 6094.7357, 0.0000, 19487.7357],
[500.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 6094.7357, 0.0000, 19749.7357],
[500.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 6094.7357, 0.0000, 19392.7357],
[500.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 6094.7357, 0.0000, 19671.7357],
[500.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 6094.7357, 0.0000, 19756.7357],
[500.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 6094.7357, 0.0000, 20111.7357],
[500.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 6094.7357, 0.0000, 19867.7357],
[500.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 6094.7357, 0.0000, 19775.7357],
[1100.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 1990.7357, 0.0000, 20314.7357],
[1100.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 1990.7357, 0.0000, 20310.7357],
[1100.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 1990.7357, 0.0000, 20253.7357],
[1100.0000, 400.0000, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 1946.7357, 0.0000, 20044.7357],
[1100.0000, 400.0000, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 1946.7357, 0.0000, 20495.7357],
[1100.0000, 400.0000, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 1946.7357, 0.0000, 19798.7357],
[1100.0000, 400.0000, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 1946.7357, 0.0000, 20103.7357],
[1100.0000, 400.0000, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 1946.7357, 0.0000, 20864.7357],
[1100.0000, 710.4842, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 0.0000, 0.0000, 20425.7357],
[1100.0000, 710.4842, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 0.0000, 0.0000, 20137.8405],
[1100.0000, 710.4842, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 0.0000, 0.0000, 20711.3567],
[1100.0000, 710.4842, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 0.0000, 0.0000, 21470.3891],
[1100.0000, 710.4842, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 0.0000, 0.0000, 21902.9538],
[1100.0000, 710.4842, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 0.0000, 0.0000, 20962.9538],
[1100.0000, 710.4842, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 0.0000, 0.0000, 21833.5184],
[1100.0000, 710.4842, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 0.0000, 0.0000, 21941.8169],
[1100.0000, 710.4842, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 0.0000, 0.0000, 21278.5184],
[1100.0000, 710.4842, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 0.0000, 0.0000, 21224.4700],
[1100.0000, 710.4842, 400.0000, 300.0000, 600.0000, 500.0000, 600.0000, 9160.0000, 0.0000, 31225.2119],
[600.0000, 710.4842, 400.0000, 800.0000, 600.0000, 700.0000, 600.0000, 7488.0000, 0.0000, 30894.5748],
[600.0000, 710.4842, 400.0000, 800.0000, 600.0000, 700.0000, 600.0000, 7488.0000, 0.0000, 30764.3811],
[1100.0000, 710.4842, 400.0000, 800.0000, 600.0000, 700.0000, 600.0000, 4208.0000, 0.0000, 31815.5828],
[1100.0000, 710.4842, 400.0000, 800.0000, 600.0000, 700.0000, 600.0000, 4208.0000, 0.0000, 31615.4215],
[1100.0000, 710.4842, 400.0000, 800.0000, 600.0000, 700.0000, 600.0000, 4208.0000, 0.0000, 32486.1394],
[1100.0000, 710.4842, 400.0000, 800.0000, 600.0000, 700.0000, 600.0000, 4208.0000, 0.0000, 33591.2847],
[1100.0000, 710.4842, 400.0000, 800.0000, 600.0000, 700.0000, 600.0000, 4208.0000, 0.0000, 34056.5428],
[1100.0000, 710.4842, 400.0000, 800.0000, 600.0000, 700.0000, 600.0000, 4208.0000, 0.0000, 34756.4863],
[1100.0000, 710.4842, 400.0000, 800.0000, 600.0000, 700.0000, 600.0000, 4208.0000, 0.0000, 34445.5428],
[1100.0000, 710.4842, 400.0000, 800.0000, 600.0000, 700.0000, 600.0000, 4208.0000, 0.0000, 34433.9541],
[1100.0000, 710.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11346.0000, 0.0000, 33870.4703],
[1100.0000, 710.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11346.0000, 0.0000, 34014.3010],
[1100.0000, 710.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11346.0000, 0.0000, 34680.5671],
[1100.0000, 710.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11346.0000, 0.0000, 33890.9945],
[1100.0000, 710.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11346.0000, 0.0000, 34004.6640],
[1100.0000, 710.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11346.0000, 0.0000, 34127.7768],
[1100.0000, 710.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11346.0000, 0.0000, 33421.1638],
[1100.0000, 710.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11346.0000, 0.0000, 33120.9057],
[700.0000, 710.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 13830.0000, 0.0000, 32613.3171],
[700.0000, 710.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 13830.0000, 0.0000, 33168.1558],
[700.0000, 1010.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11151.0000, 0.0000, 33504.6236],
[700.0000, 1010.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11151.0000, 0.0000, 33652.1318],
[700.0000, 1010.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11151.0000, 0.0000, 34680.4867],
[700.0000, 1010.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11151.0000, 0.0000, 35557.5191],
[700.0000, 1010.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11151.0000, 0.0000, 35669.7128],
[700.0000, 1010.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11151.0000, 0.0000, 35211.4466],
[700.0000, 1010.4842, 400.0000, 100.0000, 0.0000, 100.0000, 900.0000, 13530.0000, 0.0000, 35550.6079],
[700.0000, 1010.4842, 400.0000, 100.0000, 0.0000, 100.0000, 900.0000, 13530.0000, 0.0000, 35711.6563],
[700.0000, 710.4842, 400.0000, 100.0000, 0.0000, 100.0000, 900.0000, 16695.0000, 0.0000, 35682.6079],
[700.0000, 710.4842, 400.0000, 100.0000, 0.0000, 100.0000, 900.0000, 16695.0000, 0.0000, 35880.8336],
[700.0000, 710.4842, 400.0000, 100.0000, 0.0000, 100.0000, 900.0000, 16695.0000, 0.0000, 36249.8740],
[700.0000, 710.4842, 400.0000, 100.0000, 0.0000, 100.0000, 900.0000, 16695.0000, 0.0000, 36071.6159],
[700.0000, 710.4842, 400.0000, 100.0000, 0.0000, 100.0000, 900.0000, 16695.0000, 0.0000, 35846.1562],
[700.0000, 710.4842, 400.0000, 100.0000, 0.0000, 100.0000, 900.0000, 16695.0000, 0.0000, 35773.3578],
[700.0000, 710.4842, 400.0000, 100.0000, 0.0000, 100.0000, 900.0000, 16695.0000, 0.0000, 36274.9465],
[700.0000, 710.4842, 400.0000, 100.0000, 0.0000, 100.0000, 900.0000, 16695.0000, 0.0000, 35739.3094],
[500.0000, 710.4842, 1100.0000, 100.0000, 0.0000, 100.0000, 900.0000, 13167.0000, 0.0000, 36135.0917],
[500.0000, 710.4842, 1100.0000, 100.0000, 0.0000, 100.0000, 900.0000, 13167.0000, 0.0000, 35286.5835],
[500.0000, 710.4842, 1100.0000, 100.0000, 0.0000, 100.0000, 900.0000, 13167.0000, 0.0000, 35081.3658]])
# VS signal, buy first then sell, delivery period: 0 days
self.vs_res_bs00 = np.array(
[[0.0000, 0.0000, 0.0000, 0.0000, 500.0000, 0.0000, 0.0000, 7750, 0.0000, 10000],
[0.0000, 0.0000, 0.0000, 0.0000, 500.0000, 0.0000, 0.0000, 7750, 0.0000, 9925],
[0.0000, 0.0000, 0.0000, 0.0000, 500.0000, 300.0000, 300.0000, 4954, 0.0000, 9785],
[400.0000, 400.0000, 0.0000, 0.0000, 500.0000, 300.0000, 300.0000, 878, 0.0000, 9666],
[400.0000, 400.0000, 173.1755, 0.0000, 500.0000, 300.0000, 300.0000, 0, 0.0000, 9731],
[400.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 1592, 0.0000, 9830.927022],
[400.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 1592, 0.0000, 9785.854043],
[400.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 1592, 0.0000, 9614.341223],
[400.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 1592, 0.0000, 9303.195266],
[400.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 1592, 0.0000, 8834.439842],
[400.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 1592, 0.0000, 8712.755424],
[400.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 1592, 0.0000, 8717.95069],
[400.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 1592, 0.0000, 9079.147929],
[200.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 2598, 0.0000, 9166.027613],
[200.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 2598, 0.0000, 9023.66075],
[200.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 2598, 0.0000, 9291.686391],
[200.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 2598, 0.0000, 9411.637081],
[200.0000, 400.0000, 0.0000, 0.0000, 100.0000, 300.0000, 300.0000, 3619.7357, 0.0000, 9706.7357],
[200.0000, 400.0000, 0.0000, 0.0000, 100.0000, 300.0000, 300.0000, 3619.7357, 0.0000, 9822.7357],
[200.0000, 400.0000, 0.0000, 0.0000, 100.0000, 300.0000, 300.0000, 3619.7357, 0.0000, 9986.7357],
[200.0000, 400.0000, 0.0000, 0.0000, 100.0000, 300.0000, 300.0000, 3619.7357, 0.0000, 9805.7357],
[200.0000, 400.0000, 0.0000, 0.0000, 100.0000, 300.0000, 0.0000, 4993.7357, 0.0000, 9704.7357],
[200.0000, 400.0000, 0.0000, 0.0000, 600.0000, 300.0000, 0.0000, 2048.7357, 0.0000, 9567.7357],
[200.0000, 400.0000, 0.0000, 0.0000, 600.0000, 300.0000, 0.0000, 2048.7357, 0.0000, 9209.7357],
[200.0000, 400.0000, 0.0000, 0.0000, 600.0000, 300.0000, 0.0000, 2048.7357, 0.0000, 9407.7357],
[0.0000, 400.0000, 0.0000, 300.0000, 600.0000, 300.0000, 0.0000, 1779.7357, 0.0000, 9329.7357],
[0.0000, 400.0000, 0.0000, 300.0000, 600.0000, 300.0000, 0.0000, 1779.7357, 0.0000, 9545.7357],
[0.0000, 400.0000, 0.0000, 300.0000, 600.0000, 300.0000, 0.0000, 1779.7357, 0.0000, 9652.7357],
[0.0000, 400.0000, 0.0000, 300.0000, 600.0000, 300.0000, 0.0000, 1779.7357, 0.0000, 9414.7357],
[0.0000, 400.0000, 0.0000, 300.0000, 600.0000, 300.0000, 0.0000, 1779.7357, 0.0000, 9367.7357],
[0.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 9319.7357, 0.0000, 19556.7357],
[500.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 6094.7357, 0.0000, 20094.7357],
[500.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 6094.7357, 0.0000, 19849.7357],
[500.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 6094.7357, 0.0000, 19802.7357],
[500.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 6094.7357, 0.0000, 19487.7357],
[500.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 6094.7357, 0.0000, 19749.7357],
[500.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 6094.7357, 0.0000, 19392.7357],
[500.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 6094.7357, 0.0000, 19671.7357],
[500.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 6094.7357, 0.0000, 19756.7357],
[500.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 6094.7357, 0.0000, 20111.7357],
[500.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 6094.7357, 0.0000, 19867.7357],
[500.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 6094.7357, 0.0000, 19775.7357],
[1100.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 1990.7357, 0.0000, 20314.7357],
[1100.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 1990.7357, 0.0000, 20310.7357],
[1100.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 1990.7357, 0.0000, 20253.7357],
[1100.0000, 400.0000, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 1946.7357, 0.0000, 20044.7357],
[1100.0000, 400.0000, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 1946.7357, 0.0000, 20495.7357],
[1100.0000, 400.0000, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 1946.7357, 0.0000, 19798.7357],
[1100.0000, 400.0000, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 1946.7357, 0.0000, 20103.7357],
[1100.0000, 400.0000, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 1946.7357, 0.0000, 20864.7357],
[1100.0000, 710.4842, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 0, 0.0000, 20425.7357],
[1100.0000, 710.4842, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 0, 0.0000, 20137.84054],
[1100.0000, 710.4842, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 0, 0.0000, 20711.35674],
[1100.0000, 710.4842, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 0, 0.0000, 21470.38914],
[1100.0000, 710.4842, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 0, 0.0000, 21902.95375],
[1100.0000, 710.4842, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 0, 0.0000, 20962.95375],
[1100.0000, 710.4842, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 0, 0.0000, 21833.51837],
[1100.0000, 710.4842, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 0, 0.0000, 21941.81688],
[1100.0000, 710.4842, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 0, 0.0000, 21278.51837],
[1100.0000, 710.4842, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 0, 0.0000, 21224.46995],
[1100.0000, 710.4842, 400.0000, 300.0000, 600.0000, 500.0000, 600.0000, 9160, 0.0000, 31225.21185],
[600.0000, 710.4842, 400.0000, 800.0000, 600.0000, 700.0000, 600.0000, 7488, 0.0000, 30894.57479],
[600.0000, 710.4842, 400.0000, 800.0000, 600.0000, 700.0000, 600.0000, 7488, 0.0000, 30764.38113],
[1100.0000, 710.4842, 400.0000, 800.0000, 600.0000, 700.0000, 600.0000, 4208, 0.0000, 31815.5828],
[1100.0000, 710.4842, 400.0000, 800.0000, 600.0000, 700.0000, 600.0000, 4208, 0.0000, 31615.42154],
[1100.0000, 710.4842, 400.0000, 800.0000, 600.0000, 700.0000, 600.0000, 4208, 0.0000, 32486.13941],
[1100.0000, 710.4842, 400.0000, 800.0000, 600.0000, 700.0000, 600.0000, 4208, 0.0000, 33591.28466],
[1100.0000, 710.4842, 400.0000, 800.0000, 600.0000, 700.0000, 600.0000, 4208, 0.0000, 34056.54276],
[1100.0000, 710.4842, 400.0000, 800.0000, 600.0000, 700.0000, 600.0000, 4208, 0.0000, 34756.48633],
[1100.0000, 710.4842, 400.0000, 800.0000, 600.0000, 700.0000, 600.0000, 4208, 0.0000, 34445.54276],
[1100.0000, 710.4842, 400.0000, 800.0000, 600.0000, 700.0000, 600.0000, 4208, 0.0000, 34433.95412],
[1100.0000, 710.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11346, 0.0000, 33870.47032],
[1100.0000, 710.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11346, 0.0000, 34014.30104],
[1100.0000, 710.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11346, 0.0000, 34680.56715],
[1100.0000, 710.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11346, 0.0000, 33890.99452],
[1100.0000, 710.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11346, 0.0000, 34004.66398],
[1100.0000, 710.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11346, 0.0000, 34127.77683],
[1100.0000, 710.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11346, 0.0000, 33421.1638],
[1100.0000, 710.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11346, 0.0000, 33120.9057],
[700.0000, 710.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 13830, 0.0000, 32613.31706],
[700.0000, 710.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 13830, 0.0000, 33168.15579],
[700.0000, 1010.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11151, 0.0000, 33504.62357],
[700.0000, 1010.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11151, 0.0000, 33652.13176],
[700.0000, 1010.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11151, 0.0000, 34680.4867],
[700.0000, 1010.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11151, 0.0000, 35557.51909],
[700.0000, 1010.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11151, 0.0000, 35669.71276],
[700.0000, 1010.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11151, 0.0000, 35211.44665],
[700.0000, 1010.4842, 400.0000, 100.0000, 0.0000, 100.0000, 900.0000, 13530, 0.0000, 35550.60792],
[700.0000, 1010.4842, 400.0000, 100.0000, 0.0000, 100.0000, 900.0000, 13530, 0.0000, 35711.65633],
[700.0000, 710.4842, 400.0000, 100.0000, 0.0000, 100.0000, 900.0000, 16695, 0.0000, 35682.60792],
[700.0000, 710.4842, 400.0000, 100.0000, 0.0000, 100.0000, 900.0000, 16695, 0.0000, 35880.83362],
[700.0000, 710.4842, 400.0000, 100.0000, 0.0000, 100.0000, 900.0000, 16695, 0.0000, 36249.87403],
[700.0000, 710.4842, 400.0000, 100.0000, 0.0000, 100.0000, 900.0000, 16695, 0.0000, 36071.61593],
[700.0000, 710.4842, 400.0000, 100.0000, 0.0000, 100.0000, 900.0000, 16695, 0.0000, 35846.15615],
[700.0000, 710.4842, 400.0000, 100.0000, 0.0000, 100.0000, 900.0000, 16695, 0.0000, 35773.35783],
[700.0000, 710.4842, 400.0000, 100.0000, 0.0000, 100.0000, 900.0000, 16695, 0.0000, 36274.94647],
[700.0000, 710.4842, 400.0000, 100.0000, 0.0000, 100.0000, 900.0000, 16695, 0.0000, 35739.30941],
[500.0000, 710.4842, 1100.0000, 100.0000, 0.0000, 100.0000, 900.0000, 13167, 0.0000, 36135.09172],
[500.0000, 710.4842, 1100.0000, 100.0000, 0.0000, 100.0000, 900.0000, 13167, 0.0000, 35286.58353],
[500.0000, 710.4842, 1100.0000, 100.0000, 0.0000, 100.0000, 900.0000, 13167, 0.0000, 35081.36584]])
# VS signal, sell first then buy, delivery period: 2 days (stock), 1 day (cash)
self.vs_res_sb20 = np.array(
[[0.000, 0.000, 0.000, 0.000, 500.000, 0.000, 0.000, 7750.000, 0.000, 10000.000],
[0.000, 0.000, 0.000, 0.000, 500.000, 0.000, 0.000, 7750.000, 0.000, 9925.000],
[0.000, 0.000, 0.000, 0.000, 500.000, 300.000, 300.000, 4954.000, 0.000, 9785.000],
[400.000, 400.000, 0.000, 0.000, 500.000, 300.000, 300.000, 878.000, 0.000, 9666.000],
[400.000, 400.000, 173.176, 0.000, 500.000, 300.000, 300.000, 0.000, 0.000, 9731.000],
[400.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 1592.000, 0.000, 9830.927],
[400.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 1592.000, 0.000, 9785.854],
[400.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 1592.000, 0.000, 9614.341],
[400.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 1592.000, 0.000, 9303.195],
[400.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 1592.000, 0.000, 8834.440],
[400.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 1592.000, 0.000, 8712.755],
[400.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 1592.000, 0.000, 8717.951],
[400.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 1592.000, 0.000, 9079.148],
[200.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 2598.000, 0.000, 9166.028],
[200.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 2598.000, 0.000, 9023.661],
[200.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 2598.000, 0.000, 9291.686],
[200.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 2598.000, 0.000, 9411.637],
[200.000, 400.000, 0.000, 0.000, 100.000, 300.000, 300.000, 3619.736, 0.000, 9706.736],
[200.000, 400.000, 0.000, 0.000, 100.000, 300.000, 300.000, 3619.736, 0.000, 9822.736],
[200.000, 400.000, 0.000, 0.000, 100.000, 300.000, 300.000, 3619.736, 0.000, 9986.736],
[200.000, 400.000, 0.000, 0.000, 100.000, 300.000, 300.000, 3619.736, 0.000, 9805.736],
[200.000, 400.000, 0.000, 0.000, 100.000, 300.000, 0.000, 4993.736, 0.000, 9704.736],
[200.000, 400.000, 0.000, 0.000, 600.000, 300.000, 0.000, 2048.736, 0.000, 9567.736],
[200.000, 400.000, 0.000, 0.000, 600.000, 300.000, 0.000, 2048.736, 0.000, 9209.736],
[200.000, 400.000, 0.000, 0.000, 600.000, 300.000, 0.000, 2048.736, 0.000, 9407.736],
[0.000, 400.000, 0.000, 300.000, 600.000, 300.000, 0.000, 1779.736, 0.000, 9329.736],
[0.000, 400.000, 0.000, 300.000, 600.000, 300.000, 0.000, 1779.736, 0.000, 9545.736],
[0.000, 400.000, 0.000, 300.000, 600.000, 300.000, 0.000, 1779.736, 0.000, 9652.736],
[0.000, 400.000, 0.000, 300.000, 600.000, 300.000, 0.000, 1779.736, 0.000, 9414.736],
[0.000, 400.000, 0.000, 300.000, 600.000, 300.000, 0.000, 1779.736, 0.000, 9367.736],
[0.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 9319.736, 0.000, 19556.736],
[500.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 6094.736, 0.000, 20094.736],
[500.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 6094.736, 0.000, 19849.736],
[500.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 6094.736, 0.000, 19802.736],
[500.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 6094.736, 0.000, 19487.736],
[500.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 6094.736, 0.000, 19749.736],
[500.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 6094.736, 0.000, 19392.736],
[500.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 6094.736, 0.000, 19671.736],
[500.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 6094.736, 0.000, 19756.736],
[500.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 6094.736, 0.000, 20111.736],
[500.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 6094.736, 0.000, 19867.736],
[500.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 6094.736, 0.000, 19775.736],
[1100.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 1990.736, 0.000, 20314.736],
[1100.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 1990.736, 0.000, 20310.736],
[1100.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 1990.736, 0.000, 20253.736],
[1100.000, 400.000, 400.000, 300.000, 300.000, 500.000, 600.000, 1946.736, 0.000, 20044.736],
[1100.000, 400.000, 400.000, 300.000, 300.000, 500.000, 600.000, 1946.736, 0.000, 20495.736],
[1100.000, 400.000, 400.000, 300.000, 300.000, 500.000, 600.000, 1946.736, 0.000, 19798.736],
[1100.000, 400.000, 400.000, 300.000, 300.000, 500.000, 600.000, 1946.736, 0.000, 20103.736],
[1100.000, 400.000, 400.000, 300.000, 300.000, 500.000, 600.000, 1946.736, 0.000, 20864.736],
[1100.000, 710.484, 400.000, 300.000, 300.000, 500.000, 600.000, 0.000, 0.000, 20425.736],
[1100.000, 710.484, 400.000, 300.000, 300.000, 500.000, 600.000, 0.000, 0.000, 20137.841],
[1100.000, 710.484, 400.000, 300.000, 300.000, 500.000, 600.000, 0.000, 0.000, 20711.357],
[1100.000, 710.484, 400.000, 300.000, 300.000, 500.000, 600.000, 0.000, 0.000, 21470.389],
[1100.000, 710.484, 400.000, 300.000, 300.000, 500.000, 600.000, 0.000, 0.000, 21902.954],
[1100.000, 710.484, 400.000, 300.000, 300.000, 500.000, 600.000, 0.000, 0.000, 20962.954],
[1100.000, 710.484, 400.000, 300.000, 300.000, 500.000, 600.000, 0.000, 0.000, 21833.518],
[1100.000, 710.484, 400.000, 300.000, 300.000, 500.000, 600.000, 0.000, 0.000, 21941.817],
[1100.000, 710.484, 400.000, 300.000, 300.000, 500.000, 600.000, 0.000, 0.000, 21278.518],
[1100.000, 710.484, 400.000, 300.000, 300.000, 500.000, 600.000, 0.000, 0.000, 21224.470],
[1100.000, 710.484, 400.000, 300.000, 600.000, 500.000, 600.000, 9160.000, 0.000, 31225.212],
[600.000, 710.484, 400.000, 800.000, 600.000, 700.000, 600.000, 7488.000, 0.000, 30894.575],
[600.000, 710.484, 400.000, 800.000, 600.000, 700.000, 600.000, 7488.000, 0.000, 30764.381],
[1100.000, 710.484, 400.000, 800.000, 600.000, 700.000, 600.000, 4208.000, 0.000, 31815.583],
[1100.000, 710.484, 400.000, 800.000, 600.000, 700.000, 600.000, 4208.000, 0.000, 31615.422],
[1100.000, 710.484, 400.000, 800.000, 600.000, 700.000, 600.000, 4208.000, 0.000, 32486.139],
[1100.000, 710.484, 400.000, 800.000, 600.000, 700.000, 600.000, 4208.000, 0.000, 33591.285],
[1100.000, 710.484, 400.000, 800.000, 600.000, 700.000, 600.000, 4208.000, 0.000, 34056.543],
[1100.000, 710.484, 400.000, 800.000, 600.000, 700.000, 600.000, 4208.000, 0.000, 34756.486],
[1100.000, 710.484, 400.000, 800.000, 600.000, 700.000, 600.000, 4208.000, 0.000, 34445.543],
[1100.000, 710.484, 400.000, 800.000, 600.000, 700.000, 600.000, 4208.000, 0.000, 34433.954],
[1100.000, 710.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11346.000, 0.000, 33870.470],
[1100.000, 710.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11346.000, 0.000, 34014.301],
[1100.000, 710.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11346.000, 0.000, 34680.567],
[1100.000, 710.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11346.000, 0.000, 33890.995],
[1100.000, 710.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11346.000, 0.000, 34004.664],
[1100.000, 710.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11346.000, 0.000, 34127.777],
[1100.000, 710.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11346.000, 0.000, 33421.164],
[1100.000, 710.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11346.000, 0.000, 33120.906],
[700.000, 710.484, 400.000, 100.000, 600.000, 100.000, 600.000, 13830.000, 0.000, 32613.317],
[700.000, 710.484, 400.000, 100.000, 600.000, 100.000, 600.000, 13830.000, 0.000, 33168.156],
[700.000, 1010.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11151.000, 0.000, 33504.624],
[700.000, 1010.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11151.000, 0.000, 33652.132],
[700.000, 1010.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11151.000, 0.000, 34680.487],
[700.000, 1010.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11151.000, 0.000, 35557.519],
[700.000, 1010.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11151.000, 0.000, 35669.713],
[700.000, 1010.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11151.000, 0.000, 35211.447],
[700.000, 1010.484, 400.000, 100.000, 0.000, 100.000, 900.000, 13530.000, 0.000, 35550.608],
[700.000, 1010.484, 400.000, 100.000, 0.000, 100.000, 900.000, 13530.000, 0.000, 35711.656],
[700.000, 710.484, 400.000, 100.000, 0.000, 100.000, 900.000, 16695.000, 0.000, 35682.608],
[700.000, 710.484, 400.000, 100.000, 0.000, 100.000, 900.000, 16695.000, 0.000, 35880.834],
[700.000, 710.484, 400.000, 100.000, 0.000, 100.000, 900.000, 16695.000, 0.000, 36249.874],
[700.000, 710.484, 400.000, 100.000, 0.000, 100.000, 900.000, 16695.000, 0.000, 36071.616],
[700.000, 710.484, 400.000, 100.000, 0.000, 100.000, 900.000, 16695.000, 0.000, 35846.156],
[700.000, 710.484, 400.000, 100.000, 0.000, 100.000, 900.000, 16695.000, 0.000, 35773.358],
[700.000, 710.484, 400.000, 100.000, 0.000, 100.000, 900.000, 16695.000, 0.000, 36274.946],
[700.000, 710.484, 400.000, 100.000, 0.000, 100.000, 900.000, 16695.000, 0.000, 35739.309],
[500.000, 710.484, 1100.000, 100.000, 0.000, 100.000, 900.000, 13167.000, 0.000, 36135.092],
[500.000, 710.484, 1100.000, 100.000, 0.000, 100.000, 900.000, 13167.000, 0.000, 35286.584],
[500.000, 710.484, 1100.000, 100.000, 0.000, 100.000, 900.000, 13167.000, 0.000, 35081.366]])
# VS signal, buy first then sell, delivery period: 2 days (stock), 1 day (cash)
self.vs_res_bs21 = np.array(
[[0.000, 0.000, 0.000, 0.000, 500.000, 0.000, 0.000, 7750.000, 0.000, 10000.000],
[0.000, 0.000, 0.000, 0.000, 500.000, 0.000, 0.000, 7750.000, 0.000, 9925.000],
[0.000, 0.000, 0.000, 0.000, 500.000, 300.000, 300.000, 4954.000, 0.000, 9785.000],
[400.000, 400.000, 0.000, 0.000, 500.000, 300.000, 300.000, 878.000, 0.000, 9666.000],
[400.000, 400.000, 173.176, 0.000, 500.000, 300.000, 300.000, 0.000, 0.000, 9731.000],
[400.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 1592.000, 0.000, 9830.927],
[400.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 1592.000, 0.000, 9785.854],
[400.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 1592.000, 0.000, 9614.341],
[400.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 1592.000, 0.000, 9303.195],
[400.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 1592.000, 0.000, 8834.440],
[400.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 1592.000, 0.000, 8712.755],
[400.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 1592.000, 0.000, 8717.951],
[400.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 1592.000, 0.000, 9079.148],
[200.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 2598.000, 0.000, 9166.028],
[200.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 2598.000, 0.000, 9023.661],
[200.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 2598.000, 0.000, 9291.686],
[200.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 2598.000, 0.000, 9411.637],
[200.000, 400.000, 0.000, 0.000, 100.000, 300.000, 300.000, 3619.736, 0.000, 9706.736],
[200.000, 400.000, 0.000, 0.000, 100.000, 300.000, 300.000, 3619.736, 0.000, 9822.736],
[200.000, 400.000, 0.000, 0.000, 100.000, 300.000, 300.000, 3619.736, 0.000, 9986.736],
[200.000, 400.000, 0.000, 0.000, 100.000, 300.000, 300.000, 3619.736, 0.000, 9805.736],
[200.000, 400.000, 0.000, 0.000, 100.000, 300.000, 0.000, 4993.736, 0.000, 9704.736],
[200.000, 400.000, 0.000, 0.000, 600.000, 300.000, 0.000, 2048.736, 0.000, 9567.736],
[200.000, 400.000, 0.000, 0.000, 600.000, 300.000, 0.000, 2048.736, 0.000, 9209.736],
[200.000, 400.000, 0.000, 0.000, 600.000, 300.000, 0.000, 2048.736, 0.000, 9407.736],
[0.000, 400.000, 0.000, 300.000, 600.000, 300.000, 0.000, 1779.736, 0.000, 9329.736],
[0.000, 400.000, 0.000, 300.000, 600.000, 300.000, 0.000, 1779.736, 0.000, 9545.736],
[0.000, 400.000, 0.000, 300.000, 600.000, 300.000, 0.000, 1779.736, 0.000, 9652.736],
[0.000, 400.000, 0.000, 300.000, 600.000, 300.000, 0.000, 1779.736, 0.000, 9414.736],
[0.000, 400.000, 0.000, 300.000, 600.000, 300.000, 0.000, 1779.736, 0.000, 9367.736],
[0.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 9319.736, 0.000, 19556.736],
[500.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 6094.736, 0.000, 20094.736],
[500.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 6094.736, 0.000, 19849.736],
[500.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 6094.736, 0.000, 19802.736],
[500.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 6094.736, 0.000, 19487.736],
[500.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 6094.736, 0.000, 19749.736],
[500.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 6094.736, 0.000, 19392.736],
[500.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 6094.736, 0.000, 19671.736],
[500.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 6094.736, 0.000, 19756.736],
[500.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 6094.736, 0.000, 20111.736],
[500.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 6094.736, 0.000, 19867.736],
[500.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 6094.736, 0.000, 19775.736],
[1100.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 1990.736, 0.000, 20314.736],
[1100.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 1990.736, 0.000, 20310.736],
[1100.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 1990.736, 0.000, 20253.736],
[1100.000, 400.000, 400.000, 300.000, 300.000, 500.000, 600.000, 1946.736, 0.000, 20044.736],
[1100.000, 400.000, 400.000, 300.000, 300.000, 500.000, 600.000, 1946.736, 0.000, 20495.736],
[1100.000, 400.000, 400.000, 300.000, 300.000, 500.000, 600.000, 1946.736, 0.000, 19798.736],
[1100.000, 400.000, 400.000, 300.000, 300.000, 500.000, 600.000, 1946.736, 0.000, 20103.736],
[1100.000, 400.000, 400.000, 300.000, 300.000, 500.000, 600.000, 1946.736, 0.000, 20864.736],
[1100.000, 710.484, 400.000, 300.000, 300.000, 500.000, 600.000, 0.000, 0.000, 20425.736],
[1100.000, 710.484, 400.000, 300.000, 300.000, 500.000, 600.000, 0.000, 0.000, 20137.841],
[1100.000, 710.484, 400.000, 300.000, 300.000, 500.000, 600.000, 0.000, 0.000, 20711.357],
[1100.000, 710.484, 400.000, 300.000, 300.000, 500.000, 600.000, 0.000, 0.000, 21470.389],
[1100.000, 710.484, 400.000, 300.000, 300.000, 500.000, 600.000, 0.000, 0.000, 21902.954],
[1100.000, 710.484, 400.000, 300.000, 300.000, 500.000, 600.000, 0.000, 0.000, 20962.954],
[1100.000, 710.484, 400.000, 300.000, 300.000, 500.000, 600.000, 0.000, 0.000, 21833.518],
[1100.000, 710.484, 400.000, 300.000, 300.000, 500.000, 600.000, 0.000, 0.000, 21941.817],
[1100.000, 710.484, 400.000, 300.000, 300.000, 500.000, 600.000, 0.000, 0.000, 21278.518],
[1100.000, 710.484, 400.000, 300.000, 300.000, 500.000, 600.000, 0.000, 0.000, 21224.470],
[1100.000, 710.484, 400.000, 300.000, 600.000, 500.000, 600.000, 9160.000, 0.000, 31225.212],
[600.000, 710.484, 400.000, 800.000, 600.000, 700.000, 600.000, 7488.000, 0.000, 30894.575],
[600.000, 710.484, 400.000, 800.000, 600.000, 700.000, 600.000, 7488.000, 0.000, 30764.381],
[1100.000, 710.484, 400.000, 800.000, 600.000, 700.000, 600.000, 4208.000, 0.000, 31815.583],
[1100.000, 710.484, 400.000, 800.000, 600.000, 700.000, 600.000, 4208.000, 0.000, 31615.422],
[1100.000, 710.484, 400.000, 800.000, 600.000, 700.000, 600.000, 4208.000, 0.000, 32486.139],
[1100.000, 710.484, 400.000, 800.000, 600.000, 700.000, 600.000, 4208.000, 0.000, 33591.285],
[1100.000, 710.484, 400.000, 800.000, 600.000, 700.000, 600.000, 4208.000, 0.000, 34056.543],
[1100.000, 710.484, 400.000, 800.000, 600.000, 700.000, 600.000, 4208.000, 0.000, 34756.486],
[1100.000, 710.484, 400.000, 800.000, 600.000, 700.000, 600.000, 4208.000, 0.000, 34445.543],
[1100.000, 710.484, 400.000, 800.000, 600.000, 700.000, 600.000, 4208.000, 0.000, 34433.954],
[1100.000, 710.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11346.000, 0.000, 33870.470],
[1100.000, 710.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11346.000, 0.000, 34014.301],
[1100.000, 710.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11346.000, 0.000, 34680.567],
[1100.000, 710.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11346.000, 0.000, 33890.995],
[1100.000, 710.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11346.000, 0.000, 34004.664],
[1100.000, 710.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11346.000, 0.000, 34127.777],
[1100.000, 710.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11346.000, 0.000, 33421.164],
[1100.000, 710.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11346.000, 0.000, 33120.906],
[700.000, 710.484, 400.000, 100.000, 600.000, 100.000, 600.000, 13830.000, 0.000, 32613.317],
[700.000, 710.484, 400.000, 100.000, 600.000, 100.000, 600.000, 13830.000, 0.000, 33168.156],
[700.000, 1010.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11151.000, 0.000, 33504.624],
[700.000, 1010.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11151.000, 0.000, 33652.132],
[700.000, 1010.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11151.000, 0.000, 34680.487],
[700.000, 1010.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11151.000, 0.000, 35557.519],
[700.000, 1010.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11151.000, 0.000, 35669.713],
[700.000, 1010.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11151.000, 0.000, 35211.447],
[700.000, 1010.484, 400.000, 100.000, 0.000, 100.000, 900.000, 13530.000, 0.000, 35550.608],
[700.000, 1010.484, 400.000, 100.000, 0.000, 100.000, 900.000, 13530.000, 0.000, 35711.656],
[700.000, 710.484, 400.000, 100.000, 0.000, 100.000, 900.000, 16695.000, 0.000, 35682.608],
[700.000, 710.484, 400.000, 100.000, 0.000, 100.000, 900.000, 16695.000, 0.000, 35880.834],
[700.000, 710.484, 400.000, 100.000, 0.000, 100.000, 900.000, 16695.000, 0.000, 36249.874],
[700.000, 710.484, 400.000, 100.000, 0.000, 100.000, 900.000, 16695.000, 0.000, 36071.616],
[700.000, 710.484, 400.000, 100.000, 0.000, 100.000, 900.000, 16695.000, 0.000, 35846.156],
[700.000, 710.484, 400.000, 100.000, 0.000, 100.000, 900.000, 16695.000, 0.000, 35773.358],
[700.000, 710.484, 400.000, 100.000, 0.000, 100.000, 900.000, 16695.000, 0.000, 36274.946],
[700.000, 710.484, 400.000, 100.000, 0.000, 100.000, 900.000, 16695.000, 0.000, 35739.309],
[500.000, 710.484, 1100.000, 100.000, 0.000, 100.000, 900.000, 13167.000, 0.000, 36135.092],
[500.000, 710.484, 1100.000, 100.000, 0.000, 100.000, 900.000, 13167.000, 0.000, 35286.584],
[500.000, 710.484, 1100.000, 100.000, 0.000, 100.000, 900.000, 13167.000, 0.000, 35081.366]])
# Multi-signal processing results, sell first then buy, purchases funded with the cash from sales, delivery period: 2 days (stock), 0 days (cash)
self.multi_res = np.array(
[[0.0000, 357.2545, 0.0000, 6506.9627, 0.0000, 9965.1867],
[0.0000, 357.2545, 0.0000, 6506.9627, 0.0000, 10033.0650],
[0.0000, 178.6273, 0.0000, 8273.5864, 0.0000, 10034.8513],
[0.0000, 178.6273, 0.0000, 8273.5864, 0.0000, 10036.6376],
[150.3516, 178.6273, 0.0000, 6771.5740, 0.0000, 10019.3404],
[150.3516, 178.6273, 0.0000, 6771.5740, 0.0000, 10027.7062],
[150.3516, 178.6273, 0.0000, 6771.5740, 0.0000, 10030.1477],
[150.3516, 178.6273, 0.0000, 6771.5740, 0.0000, 10005.1399],
[150.3516, 178.6273, 0.0000, 6771.5740, 0.0000, 10002.5054],
[150.3516, 489.4532, 0.0000, 3765.8877, 0.0000, 9967.3860],
[75.1758, 391.5625, 0.0000, 5490.1377, 0.0000, 10044.4059],
[75.1758, 391.5625, 0.0000, 5490.1377, 0.0000, 10078.1430],
[75.1758, 391.5625, 846.3525, 392.3025, 0.0000, 10138.2709],
[75.1758, 391.5625, 846.3525, 392.3025, 0.0000, 10050.4768],
[75.1758, 391.5625, 846.3525, 392.3025, 0.0000, 10300.0711],
[75.1758, 391.5625, 846.3525, 392.3025, 0.0000, 10392.6970],
[75.1758, 391.5625, 169.2705, 4644.3773, 0.0000, 10400.5282],
[75.1758, 391.5625, 169.2705, 4644.3773, 0.0000, 10408.9220],
[75.1758, 0.0000, 169.2705, 8653.9776, 0.0000, 10376.5914],
[75.1758, 0.0000, 169.2705, 8653.9776, 0.0000, 10346.8794],
[75.1758, 0.0000, 169.2705, 8653.9776, 0.0000, 10364.7474],
[75.1758, 381.1856, 645.5014, 2459.1665, 0.0000, 10302.4570],
[18.7939, 381.1856, 645.5014, 3024.6764, 0.0000, 10747.4929],
[18.7939, 381.1856, 96.8252, 6492.3097, 0.0000, 11150.9107],
[18.7939, 381.1856, 96.8252, 6492.3097, 0.0000, 11125.2946],
[18.7939, 114.3557, 96.8252, 9227.3166, 0.0000, 11191.9956],
[18.7939, 114.3557, 96.8252, 9227.3166, 0.0000, 11145.7486],
[18.7939, 114.3557, 96.8252, 9227.3166, 0.0000, 11090.0768],
[132.5972, 114.3557, 864.3802, 4223.9548, 0.0000, 11113.8733],
[132.5972, 114.3557, 864.3802, 4223.9548, 0.0000, 11456.3281],
[132.5972, 114.3557, 864.3802, 14223.9548, 0.0000, 21983.7333],
[132.5972, 114.3557, 864.3802, 14223.9548, 0.0000, 22120.6165],
[132.5972, 114.3557, 864.3802, 14223.9548, 0.0000, 21654.5327],
[132.5972, 114.3557, 864.3802, 14223.9548, 0.0000, 21429.6550],
[132.5972, 114.3557, 864.3802, 14223.9548, 0.0000, 21912.5643],
[132.5972, 114.3557, 864.3802, 14223.9548, 0.0000, 22516.3100],
[132.5972, 114.3557, 864.3802, 14223.9548, 0.0000, 23169.0777],
[132.5972, 114.3557, 864.3802, 14223.9548, 0.0000, 23390.8080],
[132.5972, 114.3557, 864.3802, 14223.9548, 0.0000, 23743.3742],
[132.5972, 559.9112, 864.3802, 9367.3999, 0.0000, 23210.7311],
[132.5972, 559.9112, 864.3802, 9367.3999, 0.0000, 24290.4375],
[132.5972, 559.9112, 864.3802, 9367.3999, 0.0000, 24335.3279],
[132.5972, 559.9112, 864.3802, 9367.3999, 0.0000, 18317.3553],
[132.5972, 559.9112, 864.3802, 9367.3999, 0.0000, 18023.4660],
[259.4270, 559.9112, 0.0000, 15820.6915, 0.0000, 24390.0527],
[259.4270, 559.9112, 0.0000, 15820.6915, 0.0000, 24389.6421],
[259.4270, 559.9112, 0.0000, 15820.6915, 0.0000, 24483.5953],
[0.0000, 559.9112, 0.0000, 18321.5674, 0.0000, 24486.1895],
[0.0000, 0.0000, 0.0000, 24805.3389, 0.0000, 24805.3389],
[0.0000, 0.0000, 0.0000, 24805.3389, 0.0000, 24805.3389]])
def test_loop_step_pt_sb00(self):
""" test loop step PT-signal, sell first"""
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=0,
own_cash=10000,
own_amounts=np.zeros(7, dtype='float'),
available_cash=10000,
available_amounts=np.zeros(7, dtype='float'),
op=self.pt_signals[0],
prices=self.prices[0],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=True,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 1 result in complete looping: \n'
f'cash_change: +{c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = 10000 + c_g + c_s
amounts = np.zeros(7, dtype='float') + a_p + a_s
self.assertAlmostEqual(cash, 7500)
self.assertTrue(np.allclose(amounts, np.array([0, 0, 0, 0, 555.5555556, 0, 0])))
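# The accounting identity exercised throughout these step tests (our reading
# of the _loop_step contract, inferred from its usage here rather than from
# library docs): c_g is cash gained from sells, c_s is cash spent on buys
# (non-positive), a_p is amounts purchased and a_s amounts sold (non-positive),
# so the next position always follows
#     cash_next    = cash_prev + c_g + c_s
#     amounts_next = amounts_prev + a_p + a_s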
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=0,
own_cash=self.pt_res_sb00[2][7],
own_amounts=self.pt_res_sb00[2][0:7],
available_cash=self.pt_res_sb00[2][7],
available_amounts=self.pt_res_sb00[2][0:7],
op=self.pt_signals[3],
prices=self.prices[3],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=True,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 4 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = self.pt_res_sb00[2][7] + c_g + c_s
amounts = self.pt_res_sb00[2][0:7] + a_p + a_s
self.assertAlmostEqual(cash, self.pt_res_sb00[3][7], 2)
self.assertTrue(np.allclose(amounts, self.pt_res_sb00[3][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=0,
own_cash=self.pt_res_sb00[30][7],
own_amounts=self.pt_res_sb00[30][0:7],
available_cash=self.pt_res_sb00[30][7],
available_amounts=self.pt_res_sb00[30][0:7],
op=self.pt_signals[31],
prices=self.prices[31],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=True,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 32 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = self.pt_res_sb00[30][7] + c_g + c_s
amounts = self.pt_res_sb00[30][0:7] + a_p + a_s
self.assertAlmostEqual(cash, self.pt_res_sb00[31][7], 2)
self.assertTrue(np.allclose(amounts, self.pt_res_sb00[31][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=0,
own_cash=self.pt_res_sb00[59][7] + 10000,
own_amounts=self.pt_res_sb00[59][0:7],
available_cash=self.pt_res_sb00[59][7] + 10000,
available_amounts=self.pt_res_sb00[59][0:7],
op=self.pt_signals[60],
prices=self.prices[60],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=True,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 61 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = self.pt_res_sb00[59][7] + c_g + c_s + 10000
amounts = self.pt_res_sb00[59][0:7] + a_p + a_s
self.assertAlmostEqual(cash, self.pt_res_sb00[60][7], 2)
self.assertTrue(np.allclose(amounts, self.pt_res_sb00[60][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=0,
own_cash=cash,
own_amounts=amounts,
available_cash=cash,
available_amounts=amounts,
op=self.pt_signals[61],
prices=self.prices[61],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=True,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 62 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = cash + c_g + c_s
amounts = amounts + a_p + a_s
self.assertAlmostEqual(cash, self.pt_res_sb00[61][7], 2)
self.assertTrue(np.allclose(amounts, self.pt_res_sb00[61][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=0,
own_cash=self.pt_res_sb00[95][7],
own_amounts=self.pt_res_sb00[95][0:7],
available_cash=self.pt_res_sb00[95][7],
available_amounts=self.pt_res_sb00[95][0:7],
op=self.pt_signals[96],
prices=self.prices[96],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=True,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 97 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = self.pt_res_sb00[95][7] + c_g + c_s
amounts = self.pt_res_sb00[95][0:7] + a_p + a_s
self.assertAlmostEqual(cash, self.pt_res_sb00[96][7], 2)
self.assertTrue(np.allclose(amounts, self.pt_res_sb00[96][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=0,
own_cash=cash,
own_amounts=amounts,
available_cash=cash,
available_amounts=amounts,
op=self.pt_signals[97],
prices=self.prices[97],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=True,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 98 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = cash + c_g + c_s
amounts = amounts + a_p + a_s
self.assertAlmostEqual(cash, self.pt_res_sb00[97][7], 2)
self.assertTrue(np.allclose(amounts, self.pt_res_sb00[97][0:7]))
def test_loop_step_pt_bs00(self):
""" test loop step PT-signal, buy first"""
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=0,
own_cash=10000,
own_amounts=np.zeros(7, dtype='float'),
available_cash=10000,
available_amounts=np.zeros(7, dtype='float'),
op=self.pt_signals[0],
prices=self.prices[0],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=False,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 1 result in complete looping: \n'
f'cash_change: +{c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = 10000 + c_g + c_s
amounts = np.zeros(7, dtype='float') + a_p + a_s
self.assertAlmostEqual(cash, 7500)
self.assertTrue(np.allclose(amounts, np.array([0, 0, 0, 0, 555.5555556, 0, 0])))
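# Note: this buy-first variant differs from test_loop_step_pt_sb00 above only
# in maximize_cash_usage=False; on day 1, starting from an all-cash position
# with nothing to sell, the two orderings coincide, hence identical expectations.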
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=0,
own_cash=self.pt_res_bs00[2][7],
own_amounts=self.pt_res_bs00[2][0:7],
available_cash=self.pt_res_bs00[2][7],
available_amounts=self.pt_res_bs00[2][0:7],
op=self.pt_signals[3],
prices=self.prices[3],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=False,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 4 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = self.pt_res_bs00[2][7] + c_g + c_s
amounts = self.pt_res_bs00[2][0:7] + a_p + a_s
self.assertAlmostEqual(cash, self.pt_res_bs00[3][7], 2)
self.assertTrue(np.allclose(amounts, self.pt_res_bs00[3][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=0,
own_cash=self.pt_res_bs00[30][7],
own_amounts=self.pt_res_bs00[30][0:7],
available_cash=self.pt_res_bs00[30][7],
available_amounts=self.pt_res_bs00[30][0:7],
op=self.pt_signals[31],
prices=self.prices[31],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=False,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 32 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = self.pt_res_bs00[30][7] + c_g + c_s
amounts = self.pt_res_bs00[30][0:7] + a_p + a_s
self.assertAlmostEqual(cash, self.pt_res_bs00[31][7], 2)
self.assertTrue(np.allclose(amounts, self.pt_res_bs00[31][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=0,
own_cash=self.pt_res_bs00[59][7] + 10000,
own_amounts=self.pt_res_bs00[59][0:7],
available_cash=self.pt_res_bs00[59][7] + 10000,
available_amounts=self.pt_res_bs00[59][0:7],
op=self.pt_signals[60],
prices=self.prices[60],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=False,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 61 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = self.pt_res_bs00[59][7] + c_g + c_s + 10000
amounts = self.pt_res_bs00[59][0:7] + a_p + a_s
self.assertAlmostEqual(cash, self.pt_res_bs00[60][7], 2)
self.assertTrue(np.allclose(amounts, self.pt_res_bs00[60][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=0,
own_cash=cash,
own_amounts=amounts,
available_cash=cash,
available_amounts=amounts,
op=self.pt_signals[61],
prices=self.prices[61],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=False,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 62 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = cash + c_g + c_s
amounts = amounts + a_p + a_s
self.assertAlmostEqual(cash, self.pt_res_bs00[61][7], 2)
self.assertTrue(np.allclose(amounts, self.pt_res_bs00[61][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=0,
own_cash=self.pt_res_bs00[95][7],
own_amounts=self.pt_res_bs00[95][0:7],
available_cash=self.pt_res_bs00[95][7],
available_amounts=self.pt_res_bs00[95][0:7],
op=self.pt_signals[96],
prices=self.prices[96],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=False,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 97 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = self.pt_res_bs00[95][7] + c_g + c_s
amounts = self.pt_res_bs00[95][0:7] + a_p + a_s
self.assertAlmostEqual(cash, self.pt_res_bs00[96][7], 2)
self.assertTrue(np.allclose(amounts, self.pt_res_bs00[96][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=0,
own_cash=cash,
own_amounts=amounts,
available_cash=cash,
available_amounts=amounts,
op=self.pt_signals[97],
prices=self.prices[97],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=False,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 98 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = cash + c_g + c_s
amounts = amounts + a_p + a_s
self.assertAlmostEqual(cash, self.pt_res_bs00[97][7], 2)
self.assertTrue(np.allclose(amounts, self.pt_res_bs00[97][0:7]))
def test_loop_step_ps_sb00(self):
""" test loop step PS-signal, sell first"""
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=1,
own_cash=10000,
own_amounts=np.zeros(7, dtype='float'),
available_cash=10000,
available_amounts=np.zeros(7, dtype='float'),
op=self.ps_signals[0],
prices=self.prices[0],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=True,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 1 result in complete looping: \n'
f'cash_change: +{c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = 10000 + c_g + c_s
amounts = np.zeros(7, dtype='float') + a_p + a_s
self.assertAlmostEqual(cash, 7500)
self.assertTrue(np.allclose(amounts, np.array([0, 0, 0, 0, 555.5555556, 0, 0])))
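# PS (proportion signal, signal_type=1): as we read the signal typing, each op
# value is a proportion of assets to trade, in contrast to PT above (a target
# position to converge to) and VS below (absolute share volumes).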
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=1,
own_cash=self.ps_res_sb00[2][7],
own_amounts=self.ps_res_sb00[2][0:7],
available_cash=self.ps_res_sb00[2][7],
available_amounts=self.ps_res_sb00[2][0:7],
op=self.ps_signals[3],
prices=self.prices[3],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=True,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 4 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = self.ps_res_sb00[2][7] + c_g + c_s
amounts = self.ps_res_sb00[2][0:7] + a_p + a_s
self.assertAlmostEqual(cash, self.ps_res_sb00[3][7], 2)
self.assertTrue(np.allclose(amounts, self.ps_res_sb00[3][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=1,
own_cash=self.ps_res_sb00[30][7],
own_amounts=self.ps_res_sb00[30][0:7],
available_cash=self.ps_res_sb00[30][7],
available_amounts=self.ps_res_sb00[30][0:7],
op=self.ps_signals[31],
prices=self.prices[31],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=True,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 32 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = self.ps_res_sb00[30][7] + c_g + c_s
amounts = self.ps_res_sb00[30][0:7] + a_p + a_s
self.assertAlmostEqual(cash, self.ps_res_sb00[31][7], 2)
self.assertTrue(np.allclose(amounts, self.ps_res_sb00[31][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=1,
own_cash=self.ps_res_sb00[59][7] + 10000,
own_amounts=self.ps_res_sb00[59][0:7],
available_cash=self.ps_res_sb00[59][7] + 10000,
available_amounts=self.ps_res_sb00[59][0:7],
op=self.ps_signals[60],
prices=self.prices[60],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=True,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 61 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = self.ps_res_sb00[59][7] + c_g + c_s + 10000
amounts = self.ps_res_sb00[59][0:7] + a_p + a_s
self.assertAlmostEqual(cash, self.ps_res_sb00[60][7], 2)
self.assertTrue(np.allclose(amounts, self.ps_res_sb00[60][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=1,
own_cash=cash,
own_amounts=amounts,
available_cash=cash,
available_amounts=amounts,
op=self.ps_signals[61],
prices=self.prices[61],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=True,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 62 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = cash + c_g + c_s
amounts = amounts + a_p + a_s
self.assertAlmostEqual(cash, self.ps_res_sb00[61][7], 2)
self.assertTrue(np.allclose(amounts, self.ps_res_sb00[61][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=1,
own_cash=self.ps_res_sb00[95][7],
own_amounts=self.ps_res_sb00[95][0:7],
available_cash=self.ps_res_sb00[95][7],
available_amounts=self.ps_res_sb00[95][0:7],
op=self.ps_signals[96],
prices=self.prices[96],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=True,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 97 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = self.ps_res_sb00[95][7] + c_g + c_s
amounts = self.ps_res_sb00[95][0:7] + a_p + a_s
self.assertAlmostEqual(cash, self.ps_res_sb00[96][7], 2)
self.assertTrue(np.allclose(amounts, self.ps_res_sb00[96][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=1,
own_cash=cash,
own_amounts=amounts,
available_cash=cash,
available_amounts=amounts,
op=self.ps_signals[97],
prices=self.prices[97],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=True,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 98 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = cash + c_g + c_s
amounts = amounts + a_p + a_s
self.assertAlmostEqual(cash, self.ps_res_sb00[97][7], 2)
self.assertTrue(np.allclose(amounts, self.ps_res_sb00[97][0:7]))
def test_loop_step_ps_bs00(self):
""" test loop step PS-signal, buy first"""
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=1,
own_cash=10000,
own_amounts=np.zeros(7, dtype='float'),
available_cash=10000,
available_amounts=np.zeros(7, dtype='float'),
op=self.ps_signals[0],
prices=self.prices[0],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=False,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 1 result in complete looping: \n'
f'cash_change: +{c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = 10000 + c_g + c_s
amounts = np.zeros(7, dtype='float') + a_p + a_s
self.assertAlmostEqual(cash, 7500)
self.assertTrue(np.allclose(amounts, np.array([0, 0, 0, 0, 555.5555556, 0, 0])))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=1,
own_cash=self.ps_res_bs00[2][7],
                                             own_amounts=self.ps_res_bs00[2][0:7],
available_cash=self.ps_res_bs00[2][7],
available_amounts=self.ps_res_bs00[2][0:7],
op=self.ps_signals[3],
prices=self.prices[3],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=False,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 4 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = self.ps_res_bs00[2][7] + c_g + c_s
amounts = self.ps_res_bs00[2][0:7] + a_p + a_s
self.assertAlmostEqual(cash, self.ps_res_bs00[3][7], 2)
self.assertTrue(np.allclose(amounts, self.ps_res_bs00[3][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=1,
own_cash=self.ps_res_bs00[30][7],
                                             own_amounts=self.ps_res_bs00[30][0:7],
available_cash=self.ps_res_bs00[30][7],
available_amounts=self.ps_res_bs00[30][0:7],
op=self.ps_signals[31],
prices=self.prices[31],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=False,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 32 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = self.ps_res_bs00[30][7] + c_g + c_s
amounts = self.ps_res_bs00[30][0:7] + a_p + a_s
self.assertAlmostEqual(cash, self.ps_res_bs00[31][7], 2)
self.assertTrue(np.allclose(amounts, self.ps_res_bs00[31][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=1,
own_cash=self.ps_res_bs00[59][7] + 10000,
own_amounts=self.ps_res_bs00[59][0:7],
available_cash=self.ps_res_bs00[59][7] + 10000,
available_amounts=self.ps_res_bs00[59][0:7],
op=self.ps_signals[60],
prices=self.prices[60],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=False,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 61 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = self.ps_res_bs00[59][7] + c_g + c_s + 10000
amounts = self.ps_res_bs00[59][0:7] + a_p + a_s
self.assertAlmostEqual(cash, self.ps_res_bs00[60][7], 2)
self.assertTrue(np.allclose(amounts, self.ps_res_bs00[60][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=1,
own_cash=cash,
own_amounts=amounts,
available_cash=cash,
available_amounts=amounts,
op=self.ps_signals[61],
prices=self.prices[61],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=False,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 62 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = cash + c_g + c_s
amounts = amounts + a_p + a_s
self.assertAlmostEqual(cash, self.ps_res_bs00[61][7], 2)
self.assertTrue(np.allclose(amounts, self.ps_res_bs00[61][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=1,
own_cash=self.ps_res_bs00[95][7],
own_amounts=self.ps_res_bs00[95][0:7],
available_cash=self.ps_res_bs00[95][7],
available_amounts=self.ps_res_bs00[95][0:7],
op=self.ps_signals[96],
prices=self.prices[96],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=False,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 97 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = self.ps_res_bs00[95][7] + c_g + c_s
amounts = self.ps_res_bs00[95][0:7] + a_p + a_s
self.assertAlmostEqual(cash, self.ps_res_bs00[96][7], 2)
self.assertTrue(np.allclose(amounts, self.ps_res_bs00[96][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=1,
own_cash=cash,
own_amounts=amounts,
available_cash=cash,
available_amounts=amounts,
op=self.ps_signals[97],
prices=self.prices[97],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=False,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 98 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = cash + c_g + c_s
amounts = amounts + a_p + a_s
self.assertAlmostEqual(cash, self.ps_res_bs00[97][7], 2)
self.assertTrue(np.allclose(amounts, self.ps_res_bs00[97][0:7]))
def test_loop_step_vs_sb00(self):
"""test loop step of Volume Signal type of signals"""
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=2,
own_cash=10000,
own_amounts=np.zeros(7, dtype='float'),
available_cash=10000,
available_amounts=np.zeros(7, dtype='float'),
op=self.vs_signals[0],
prices=self.prices[0],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=True,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 1 result in complete looping: \n'
f'cash_change: +{c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = 10000 + c_g + c_s
amounts = np.zeros(7, dtype='float') + a_p + a_s
self.assertAlmostEqual(cash, 7750)
self.assertTrue(np.allclose(amounts, np.array([0, 0, 0, 0, 500., 0, 0])))
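# VS (volume signal, signal_type=2): op values are share volumes to trade
# directly, which is why day 1 buys exactly 500 shares here rather than the
# proportion-derived 555.56 shares of the PT/PS tests.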
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=2,
own_cash=self.vs_res_sb00[2][7],
own_amounts=self.vs_res_sb00[2][0:7],
available_cash=self.vs_res_sb00[2][7],
available_amounts=self.vs_res_sb00[2][0:7],
op=self.vs_signals[3],
prices=self.prices[3],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=True,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 4 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = self.vs_res_sb00[2][7] + c_g + c_s
amounts = self.vs_res_sb00[2][0:7] + a_p + a_s
self.assertAlmostEqual(cash, self.vs_res_sb00[3][7], 2)
self.assertTrue(np.allclose(amounts, self.vs_res_sb00[3][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=2,
own_cash=self.vs_res_sb00[30][7],
own_amounts=self.vs_res_sb00[30][0:7],
available_cash=self.vs_res_sb00[30][7],
available_amounts=self.vs_res_sb00[30][0:7],
op=self.vs_signals[31],
prices=self.prices[31],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=True,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 32 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = self.vs_res_sb00[30][7] + c_g + c_s
amounts = self.vs_res_sb00[30][0:7] + a_p + a_s
self.assertAlmostEqual(cash, self.vs_res_sb00[31][7], 2)
self.assertTrue(np.allclose(amounts, self.vs_res_sb00[31][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=2,
own_cash=self.vs_res_sb00[59][7] + 10000,
own_amounts=self.vs_res_sb00[59][0:7],
available_cash=self.vs_res_sb00[59][7] + 10000,
available_amounts=self.vs_res_sb00[59][0:7],
op=self.vs_signals[60],
prices=self.prices[60],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=True,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 61 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = self.vs_res_sb00[59][7] + c_g + c_s + 10000
amounts = self.vs_res_sb00[59][0:7] + a_p + a_s
self.assertAlmostEqual(cash, self.vs_res_sb00[60][7], 2)
self.assertTrue(np.allclose(amounts, self.vs_res_sb00[60][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=2,
own_cash=cash,
own_amounts=amounts,
available_cash=cash,
available_amounts=amounts,
op=self.vs_signals[61],
prices=self.prices[61],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=True,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 62 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = cash + c_g + c_s
amounts = amounts + a_p + a_s
self.assertAlmostEqual(cash, self.vs_res_sb00[61][7], 2)
self.assertTrue(np.allclose(amounts, self.vs_res_sb00[61][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=2,
own_cash=self.vs_res_sb00[95][7],
own_amounts=self.vs_res_sb00[95][0:7],
available_cash=self.vs_res_sb00[95][7],
available_amounts=self.vs_res_sb00[95][0:7],
op=self.vs_signals[96],
prices=self.prices[96],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=True,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 97 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = self.vs_res_sb00[95][7] + c_g + c_s
amounts = self.vs_res_sb00[95][0:7] + a_p + a_s
self.assertAlmostEqual(cash, self.vs_res_sb00[96][7], 2)
self.assertTrue(np.allclose(amounts, self.vs_res_sb00[96][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=2,
own_cash=cash,
own_amounts=amounts,
available_cash=cash,
available_amounts=amounts,
op=self.vs_signals[97],
prices=self.prices[97],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=True,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 98 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = cash + c_g + c_s
amounts = amounts + a_p + a_s
self.assertAlmostEqual(cash, self.vs_res_sb00[97][7], 2)
self.assertTrue(np.allclose(amounts, self.vs_res_sb00[97][0:7]))
def test_loop_step_vs_bs00(self):
"""test loop step of Volume Signal type of signals"""
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=2,
own_cash=10000,
own_amounts=np.zeros(7, dtype='float'),
available_cash=10000,
available_amounts=np.zeros(7, dtype='float'),
op=self.vs_signals[0],
prices=self.prices[0],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=False,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 1 result in complete looping: \n'
f'cash_change: +{c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = 10000 + c_g + c_s
amounts = np.zeros(7, dtype='float') + a_p + a_s
self.assertAlmostEqual(cash, 7750)
self.assertTrue(np.allclose(amounts, np.array([0, 0, 0, 0, 500., 0, 0])))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=2,
own_cash=self.vs_res_bs00[2][7],
own_amounts=self.vs_res_bs00[2][0:7],
available_cash=self.vs_res_bs00[2][7],
available_amounts=self.vs_res_bs00[2][0:7],
op=self.vs_signals[3],
prices=self.prices[3],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=False,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 4 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = self.vs_res_bs00[2][7] + c_g + c_s
amounts = self.vs_res_bs00[2][0:7] + a_p + a_s
self.assertAlmostEqual(cash, self.vs_res_bs00[3][7], 2)
self.assertTrue(np.allclose(amounts, self.vs_res_bs00[3][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=2,
own_cash=self.vs_res_bs00[30][7],
own_amounts=self.vs_res_bs00[30][0:7],
available_cash=self.vs_res_bs00[30][7],
available_amounts=self.vs_res_bs00[30][0:7],
op=self.vs_signals[31],
prices=self.prices[31],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=False,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 32 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = self.vs_res_bs00[30][7] + c_g + c_s
amounts = self.vs_res_bs00[30][0:7] + a_p + a_s
self.assertAlmostEqual(cash, self.vs_res_bs00[31][7], 2)
self.assertTrue(np.allclose(amounts, self.vs_res_bs00[31][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=2,
own_cash=self.vs_res_bs00[59][7] + 10000,
own_amounts=self.vs_res_bs00[59][0:7],
available_cash=self.vs_res_bs00[59][7] + 10000,
available_amounts=self.vs_res_bs00[59][0:7],
op=self.vs_signals[60],
prices=self.prices[60],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=False,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 61 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = self.vs_res_bs00[59][7] + c_g + c_s + 10000
amounts = self.vs_res_bs00[59][0:7] + a_p + a_s
self.assertAlmostEqual(cash, self.vs_res_bs00[60][7], 2)
self.assertTrue(np.allclose(amounts, self.vs_res_bs00[60][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=2,
own_cash=cash,
own_amounts=amounts,
available_cash=cash,
available_amounts=amounts,
op=self.vs_signals[61],
prices=self.prices[61],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=False,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 62 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = cash + c_g + c_s
amounts = amounts + a_p + a_s
self.assertAlmostEqual(cash, self.vs_res_bs00[61][7], 2)
self.assertTrue(np.allclose(amounts, self.vs_res_bs00[61][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=2,
own_cash=self.vs_res_bs00[95][7],
own_amounts=self.vs_res_bs00[95][0:7],
available_cash=self.vs_res_bs00[95][7],
available_amounts=self.vs_res_bs00[95][0:7],
op=self.vs_signals[96],
prices=self.prices[96],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=False,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 97 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = self.vs_res_bs00[95][7] + c_g + c_s
amounts = self.vs_res_bs00[95][0:7] + a_p + a_s
self.assertAlmostEqual(cash, self.vs_res_bs00[96][7], 2)
self.assertTrue(np.allclose(amounts, self.vs_res_bs00[96][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=2,
own_cash=cash,
own_amounts=amounts,
available_cash=cash,
available_amounts=amounts,
op=self.vs_signals[97],
prices=self.prices[97],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=False,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 98 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = cash + c_g + c_s
amounts = amounts + a_p + a_s
self.assertAlmostEqual(cash, self.vs_res_bs00[97][7], 2)
self.assertTrue(np.allclose(amounts, self.vs_res_bs00[97][0:7]))
def test_loop_pt(self):
""" Test looping of PT proportion target signals, with
stock delivery delay = 0 days
cash delivery delay = 0 day
buy-sell sequence = sell first
"""
print('Test looping of PT proportion target signals, with:\n'
'stock delivery delay = 0 days \n'
'cash delivery delay = 0 day \n'
'buy-sell sequence = sell first')
res = apply_loop(op_type=0,
op_list=self.pt_signal_hp,
history_list=self.history_list,
cash_plan=self.cash,
cost_rate=self.rate,
moq_buy=0,
moq_sell=0,
inflation_rate=0,
print_log=False)
self.assertIsInstance(res, pd.DataFrame)
# print(f'in test_loop:\nresult of loop test is \n{res}')
self.assertTrue(np.allclose(res, self.pt_res_bs00, 2))
print(f'test assertion errors in apply_loop: detect moqs that are not compatible')
self.assertRaises(AssertionError,
apply_loop,
0,
self.ps_signal_hp,
self.history_list,
self.cash,
self.rate,
0, 1,
0,
False)
self.assertRaises(AssertionError,
apply_loop,
0,
self.ps_signal_hp,
self.history_list,
self.cash,
self.rate,
1, 5,
0,
False)
print(f'test loop results with moq equal to 100')
res = apply_loop(op_type=0,
op_list=self.ps_signal_hp,
history_list=self.history_list,
cash_plan=self.cash,
cost_rate=self.rate2,
moq_buy=100,
moq_sell=1,
inflation_rate=0,
print_log=False)
self.assertIsInstance(res, pd.DataFrame)
# print(f'in test_loop:\nresult of loop test is \n{res}')
def test_loop_pt_with_delay(self):
""" Test looping of PT proportion target signals, with:
stock delivery delay = 2 days
cash delivery delay = 1 day
use_sell_cash = False
"""
print('Test looping of PT proportion target signals, with:\n'
'stock delivery delay = 2 days \n'
'cash delivery delay = 1 day \n'
'maximize_cash = False (buy and sell at the same time)')
res = apply_loop(
op_type=0,
op_list=self.pt_signal_hp,
history_list=self.history_list,
cash_plan=self.cash,
cost_rate=self.rate,
moq_buy=0,
moq_sell=0,
inflation_rate=0,
cash_delivery_period=1,
stock_delivery_period=2,
print_log=False)
self.assertIsInstance(res, pd.DataFrame)
print(f'in test_loop:\nresult of loop test is \n{res}\n'
f'result comparison line by line:')
for i in range(len(res)):
print(np.around(res.values[i]))
print(np.around(self.pt_res_bs21[i]))
print()
self.assertTrue(np.allclose(res, self.pt_res_bs21, 3))
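# Our reading of the delivery parameters: with stock_delivery_period=2, shares
# bought today only become sellable two trading days later, and with
# cash_delivery_period=1 sale proceeds only become spendable the next day --
# which is what distinguishes the *_bs21 fixtures from the zero-delay *_bs00 ones.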
print(f'test assertion errors in apply_loop: detect moqs that are not compatible')
self.assertRaises(AssertionError,
apply_loop,
0,
self.ps_signal_hp,
self.history_list,
self.cash,
self.rate,
0, 1,
0,
False)
self.assertRaises(AssertionError,
apply_loop,
0,
self.ps_signal_hp,
self.history_list,
self.cash,
self.rate,
1, 5,
0,
False)
print(f'test loop results with moq equal to 100')
res = apply_loop(
op_type=1,
op_list=self.ps_signal_hp,
history_list=self.history_list,
cash_plan=self.cash,
cost_rate=self.rate2,
moq_buy=100,
moq_sell=1,
inflation_rate=0,
print_log=False)
self.assertIsInstance(res, pd.DataFrame)
print(f'in test_loop:\nresult of loop test is \n{res}')
def test_loop_pt_with_delay_use_cash(self):
""" Test looping of PT proportion target signals, with:
stock delivery delay = 2 days
cash delivery delay = 0 day
use sell cash = True (sell stock first to use cash when possible
(not possible when cash delivery period != 0))
"""
print('Test looping of PT proportion target signals, with:\n'
'stock delivery delay = 2 days \n'
'cash delivery delay = 1 day \n'
'maximize cash usage = True \n'
'but not applicable because cash delivery period == 1')
res = apply_loop(
op_type=0,
op_list=self.pt_signal_hp,
history_list=self.history_list,
cash_plan=self.cash,
cost_rate=self.rate,
moq_buy=0,
moq_sell=0,
cash_delivery_period=0,
stock_delivery_period=2,
inflation_rate=0,
max_cash_usage=True,
print_log=True)
self.assertIsInstance(res, pd.DataFrame)
print(f'in test_loop:\nresult of loop test is \n{res}\n'
f'result comparison line by line:')
for i in range(len(res)):
print(np.around(res.values[i]))
print(np.around(self.pt_res_sb20[i]))
print()
self.assertTrue(np.allclose(res, self.pt_res_sb20, 3))
print(f'test assertion errors in apply_loop: detect moqs that are not compatible')
self.assertRaises(AssertionError,
apply_loop,
0,
self.ps_signal_hp,
self.history_list,
self.cash,
self.rate,
0, 1,
0,
False)
self.assertRaises(AssertionError,
apply_loop,
0,
self.ps_signal_hp,
self.history_list,
self.cash,
self.rate,
1, 5,
0,
False)
print(f'test loop results with moq equal to 100')
res = apply_loop(
op_type=1,
op_list=self.ps_signal_hp,
history_list=self.history_list,
cash_plan=self.cash,
cost_rate=self.rate2,
moq_buy=100,
moq_sell=1,
cash_delivery_period=1,
stock_delivery_period=2,
inflation_rate=0,
print_log=True)
self.assertIsInstance(res, pd.DataFrame)
print(f'in test_loop:\nresult of loop test is \n{res}')
def test_loop_ps(self):
""" Test looping of PS Proportion Signal type of signals
"""
res = apply_loop(op_type=1,
op_list=self.ps_signal_hp,
history_list=self.history_list,
cash_plan=self.cash,
cost_rate=self.rate,
moq_buy=0,
moq_sell=0,
inflation_rate=0,
print_log=False)
self.assertIsInstance(res, pd.DataFrame)
print(f'in test_loop:\nresult of loop test is \n{res}')
self.assertTrue(np.allclose(res, self.ps_res_bs00, 5))
print(f'test assertion errors in apply_loop: detect moqs that are not compatible')
self.assertRaises(AssertionError,
apply_loop,
0,
self.ps_signal_hp,
self.history_list,
self.cash,
self.rate,
0, 1,
0,
False)
self.assertRaises(AssertionError,
apply_loop,
0,
self.ps_signal_hp,
self.history_list,
self.cash,
self.rate,
1, 5,
0,
False)
print(f'test loop results with moq equal to 100')
res = apply_loop(op_type=1,
op_list=self.ps_signal_hp,
history_list=self.history_list,
cash_plan=self.cash,
cost_rate=self.rate2,
moq_buy=100,
moq_sell=1,
inflation_rate=0,
print_log=False)
self.assertIsInstance(res, pd.DataFrame)
print(f'in test_loop:\nresult of loop test is \n{res}')
def test_loop_ps_with_delay(self):
""" Test looping of PT proportion target signals, with:
stock delivery delay = 2 days
cash delivery delay = 1 day
use_sell_cash = False
"""
print('Test looping of PT proportion target signals, with:\n'
'stock delivery delay = 2 days \n'
'cash delivery delay = 1 day \n'
'maximize_cash = False (buy and sell at the same time)')
res = apply_loop(
op_type=1,
op_list=self.ps_signal_hp,
history_list=self.history_list,
cash_plan=self.cash,
cost_rate=self.rate,
moq_buy=0,
moq_sell=0,
inflation_rate=0,
cash_delivery_period=1,
stock_delivery_period=2,
print_log=False)
self.assertIsInstance(res, pd.DataFrame)
print(f'in test_loop:\nresult of loop test is \n{res}\n'
f'result comparison line by line:')
for i in range(len(res)):
print(np.around(res.values[i]))
print(np.around(self.ps_res_bs21[i]))
print()
self.assertTrue(np.allclose(res, self.ps_res_bs21, 3))
print(f'test assertion errors in apply_loop: detect moqs that are not compatible')
self.assertRaises(AssertionError,
apply_loop,
0,
self.ps_signal_hp,
self.history_list,
self.cash,
self.rate,
0, 1,
0,
False)
self.assertRaises(AssertionError,
apply_loop,
0,
self.ps_signal_hp,
self.history_list,
self.cash,
self.rate,
1, 5,
0,
False)
print(f'test loop results with moq equal to 100')
res = apply_loop(
op_type=1,
op_list=self.ps_signal_hp,
history_list=self.history_list,
cash_plan=self.cash,
cost_rate=self.rate2,
moq_buy=100,
moq_sell=1,
inflation_rate=0,
print_log=False)
self.assertIsInstance(res, pd.DataFrame)
print(f'in test_loop:\nresult of loop test is \n{res}')
def test_loop_ps_with_delay_use_cash(self):
""" Test looping of PT proportion target signals, with:
stock delivery delay = 2 days
cash delivery delay = 0 day
use sell cash = True (sell stock first to use cash when possible
(not possible when cash delivery period != 0))
"""
print('Test looping of PT proportion target signals, with:\n'
'stock delivery delay = 2 days \n'
'cash delivery delay = 1 day \n'
'maximize cash usage = True \n'
'but not applicable because cash delivery period == 1')
res = apply_loop(
op_type=1,
op_list=self.ps_signal_hp,
history_list=self.history_list,
cash_plan=self.cash,
cost_rate=self.rate,
moq_buy=0,
moq_sell=0,
cash_delivery_period=0,
stock_delivery_period=2,
inflation_rate=0,
max_cash_usage=True,
print_log=True)
self.assertIsInstance(res, pd.DataFrame)
print(f'in test_loop:\nresult of loop test is \n{res}\n'
f'result comparison line by line:')
for i in range(len(res)):
print(np.around(res.values[i]))
print(np.around(self.ps_res_sb20[i]))
print()
self.assertTrue(np.allclose(res, self.ps_res_sb20, 3))
print(f'test assertion errors in apply_loop: detect moqs that are not compatible')
self.assertRaises(AssertionError,
apply_loop,
0,
self.ps_signal_hp,
self.history_list,
self.cash,
self.rate,
0, 1,
0,
False)
self.assertRaises(AssertionError,
apply_loop,
0,
self.ps_signal_hp,
self.history_list,
self.cash,
self.rate,
1, 5,
0,
False)
print(f'test loop results with moq equal to 100')
res = apply_loop(
op_type=1,
op_list=self.ps_signal_hp,
history_list=self.history_list,
cash_plan=self.cash,
cost_rate=self.rate2,
moq_buy=100,
moq_sell=1,
cash_delivery_period=1,
stock_delivery_period=2,
inflation_rate=0,
print_log=True)
self.assertIsInstance(res, pd.DataFrame)
print(f'in test_loop:\nresult of loop test is \n{res}')
def test_loop_vs(self):
""" Test looping of VS Volume Signal type of signals
"""
res = apply_loop(op_type=2,
op_list=self.vs_signal_hp,
history_list=self.history_list,
cash_plan=self.cash,
cost_rate=self.rate,
moq_buy=0,
moq_sell=0,
inflation_rate=0,
print_log=False)
self.assertIsInstance(res, pd.DataFrame)
print(f'in test_loop:\nresult of loop test is \n{res}')
self.assertTrue(np.allclose(res, self.vs_res_bs00, 5))
print(f'test assertion errors in apply_loop: detect moqs that are not compatible')
self.assertRaises(AssertionError,
apply_loop,
0,
self.ps_signal_hp,
self.history_list,
self.cash,
self.rate,
0, 1,
0,
False)
self.assertRaises(AssertionError,
apply_loop,
0,
self.ps_signal_hp,
self.history_list,
self.cash,
self.rate,
1, 5,
0,
False)
print(f'test loop results with moq equal to 100')
res = apply_loop(op_type=2,
op_list=self.vs_signal_hp,
history_list=self.history_list,
cash_plan=self.cash,
cost_rate=self.rate2,
moq_buy=100,
moq_sell=1,
inflation_rate=0,
print_log=False)
self.assertIsInstance(res, pd.DataFrame)
print(f'in test_loop:\nresult of loop test is \n{res}')
def test_loop_vs_with_delay(self):
""" Test looping of PT proportion target signals, with:
stock delivery delay = 2 days
cash delivery delay = 1 day
use_sell_cash = False
"""
print('Test looping of PT proportion target signals, with:\n'
'stock delivery delay = 2 days \n'
'cash delivery delay = 1 day \n'
'maximize_cash = False (buy and sell at the same time)')
res = apply_loop(
op_type=2,
op_list=self.vs_signal_hp,
history_list=self.history_list,
cash_plan=self.cash,
cost_rate=self.rate,
moq_buy=0,
moq_sell=0,
inflation_rate=0,
cash_delivery_period=1,
stock_delivery_period=2,
print_log=True)
self.assertIsInstance(res, pd.DataFrame)
print(f'in test_loop:\nresult of loop test is \n{res}\n'
f'result comparison line by line:')
for i in range(len(res)):
print(np.around(res.values[i]))
print(np.around(self.vs_res_bs21[i]))
print()
self.assertTrue(np.allclose(res, self.vs_res_bs21, 3))
print(f'test assertion errors in apply_loop: detect moqs that are not compatible')
self.assertRaises(AssertionError,
apply_loop,
0,
self.vs_signal_hp,
self.history_list,
self.cash,
self.rate,
0, 1,
0,
False)
self.assertRaises(AssertionError,
apply_loop,
0,
self.vs_signal_hp,
self.history_list,
self.cash,
self.rate,
1, 5,
0,
False)
print(f'test loop results with moq equal to 100')
res = apply_loop(
            op_type=2,
op_list=self.vs_signal_hp,
history_list=self.history_list,
cash_plan=self.cash,
cost_rate=self.rate2,
moq_buy=100,
moq_sell=1,
inflation_rate=0,
print_log=False)
self.assertIsInstance(res, pd.DataFrame)
print(f'in test_loop:\nresult of loop test is \n{res}')
def test_loop_vs_with_delay_use_cash(self):
""" Test looping of PT proportion target signals, with:
stock delivery delay = 2 days
cash delivery delay = 0 day
use sell cash = True (sell stock first to use cash when possible
(not possible when cash delivery period != 0))
"""
print('Test looping of PT proportion target signals, with:\n'
'stock delivery delay = 2 days \n'
'cash delivery delay = 1 day \n'
'maximize cash usage = True \n'
'but not applicable because cash delivery period == 1')
res = apply_loop(
op_type=2,
op_list=self.vs_signal_hp,
history_list=self.history_list,
cash_plan=self.cash,
cost_rate=self.rate,
moq_buy=0,
moq_sell=0,
cash_delivery_period=0,
stock_delivery_period=2,
inflation_rate=0,
max_cash_usage=True,
print_log=False)
self.assertIsInstance(res, pd.DataFrame)
print(f'in test_loop:\nresult of loop test is \n{res}\n'
f'result comparison line by line:')
for i in range(len(res)):
print(np.around(res.values[i]))
print(np.around(self.vs_res_sb20[i]))
print()
self.assertTrue(np.allclose(res, self.vs_res_sb20, 3))
print(f'test assertion errors in apply_loop: detect moqs that are not compatible')
self.assertRaises(AssertionError,
apply_loop,
0,
self.vs_signal_hp,
self.history_list,
self.cash,
self.rate,
0, 1,
0,
False)
self.assertRaises(AssertionError,
apply_loop,
0,
self.vs_signal_hp,
self.history_list,
self.cash,
self.rate,
1, 5,
0,
False)
print(f'test loop results with moq equal to 100')
res = apply_loop(
            op_type=2,
op_list=self.vs_signal_hp,
history_list=self.history_list,
cash_plan=self.cash,
cost_rate=self.rate2,
moq_buy=100,
moq_sell=1,
cash_delivery_period=1,
stock_delivery_period=2,
inflation_rate=0,
print_log=False)
self.assertIsInstance(res, pd.DataFrame)
print(f'in test_loop:\nresult of loop test is \n{res}')
def test_loop_multiple_signal(self):
""" Test looping of PS Proportion Signal type of signals
"""
res = apply_loop(op_type=1,
op_list=self.multi_signal_hp,
history_list=self.multi_history_list,
cash_plan=self.cash,
cost_rate=self.rate,
moq_buy=0,
moq_sell=0,
cash_delivery_period=0,
stock_delivery_period=2,
max_cash_usage=True,
inflation_rate=0,
print_log=False)
self.assertIsInstance(res, pd.DataFrame)
print(f'in test_loop:\nresult of loop test is \n{res}\n'
f'result comparison line by line:')
for i in range(len(res)):
print(np.around(res.values[i]))
print(np.around(self.multi_res[i]))
print()
self.assertTrue(np.allclose(res, self.multi_res, 5))
print(f'test assertion errors in apply_loop: detect moqs that are not compatible')
self.assertRaises(AssertionError,
apply_loop,
0,
self.ps_signal_hp,
self.history_list,
self.cash,
self.rate,
0, 1,
0,
False)
self.assertRaises(AssertionError,
apply_loop,
0,
self.ps_signal_hp,
self.history_list,
self.cash,
self.rate,
1, 5,
0,
False)
print(f'test loop results with moq equal to 100')
res = apply_loop(op_type=1,
op_list=self.multi_signal_hp,
history_list=self.multi_history_list,
cash_plan=self.cash,
cost_rate=self.rate2,
moq_buy=100,
moq_sell=1,
cash_delivery_period=0,
stock_delivery_period=2,
max_cash_usage=False,
inflation_rate=0,
print_log=True)
self.assertIsInstance(res, pd.DataFrame)
print(f'in test_loop:\nresult of loop test is \n{res}')
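# A minimal sketch of how apply_loop is assumed to compose the per-day
# _loop_step results verified above: each day's cash and holding deltas are
# accumulated into a running position and the trajectory is recorded. This
# illustrates only the accounting; the real apply_loop additionally handles
# delivery periods, inflation and moq rounding. step_fn and its signature
# are assumptions for illustration, mirroring qt.core._loop_step's return
# order (cash_gained, cash_spent, amounts_purchased, amounts_sold, fee).
import numpy as np

def sketch_apply_loop(step_fn, signals, prices, init_cash):
    """Accumulate per-day step results into a (days, shares + 1) trajectory."""
    cash = init_cash
    amounts = np.zeros(signals.shape[1], dtype='float')
    history = []
    for op, price in zip(signals, prices):
        c_g, c_s, a_p, a_s, fee = step_fn(op, price, cash, amounts)
        cash = cash + c_g + c_s        # add sale proceeds, subtract purchase cost
        amounts = amounts + a_p + a_s  # add purchased shares, subtract sold shares
        history.append(np.append(amounts, cash))
    return np.array(history)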
class TestStrategy(unittest.TestCase):
""" test all properties and methods of strategy base class"""
def setUp(self) -> None:
pass
class TestLSStrategy(RollingTiming):
"""用于test测试的简单多空蒙板生成策略。基于RollingTiming滚动择时方法生成
该策略有两个参数,N与Price
N用于计算OHLC价格平均值的N日简单移动平均,判断,当移动平均值大于等于Price时,状态为看多,否则为看空
"""
def __init__(self):
super().__init__(stg_name='test_LS',
stg_text='test long/short strategy',
par_count=2,
par_types='discr, conti',
par_bounds_or_enums=([1, 5], [2, 10]),
data_types='close, open, high, low',
data_freq='d',
window_length=5)
pass
def _realize(self, hist_data: np.ndarray, params: tuple):
n, price = params
h = hist_data.T
avg = (h[0] + h[1] + h[2] + h[3]) / 4
ma = sma(avg, n)
if ma[-1] < price:
return 0
else:
return 1
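# Illustration of the rule above with hypothetical numbers: with params
# n=3, price=10, an OHLC-mean series whose 3-day SMA ends at 10.02 yields
# ma[-1] >= price and hence a long (1) signal; an SMA ending at 9.97 yields 0.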
class TestSelStrategy(SimpleSelecting):
"""用于Test测试的简单选股策略,基于Selecting策略生成
策略没有参数,选股周期为5D
在每个选股周期内,从股票池的三只股票中选出今日变化率 = (今收-昨收)/平均股价(OHLC平均股价)最高的两支,放入中选池,否则落选。
选股比例为平均分配
"""
def __init__(self):
super().__init__(stg_name='test_SEL',
stg_text='test portfolio selection strategy',
par_count=0,
par_types='',
par_bounds_or_enums=(),
data_types='high, low, close',
data_freq='d',
sample_freq='10d',
window_length=5)
pass
def _realize(self, hist_data: np.ndarray, params: tuple):
avg = np.nanmean(hist_data, axis=(1, 2))
dif = (hist_data[:, :, 2] - np.roll(hist_data[:, :, 2], 1, 1))
dif_no_nan = np.array([arr[~np.isnan(arr)][-1] for arr in dif])
difper = dif_no_nan / avg
large2 = difper.argsort()[1:]
chosen = np.zeros_like(avg)
chosen[large2] = 0.5
return chosen
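# Note on the selection above: with three shares, difper.argsort()[1:] keeps
# the indices of the two largest change rates (argsort is ascending), and each
# chosen share receives an equal 0.5 weight.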
class TestSelStrategyDiffTime(SimpleSelecting):
"""用于Test测试的简单选股策略,基于Selecting策略生成
策略没有参数,选股周期为5D
在每个选股周期内,从股票池的三只股票中选出今日变化率 = (今收-昨收)/平均股价(OHLC平均股价)最高的两支,放入中选池,否则落选。
选股比例为平均分配
"""
# TODO: This strategy is not working, find out why and improve
def __init__(self):
super().__init__(stg_name='test_SEL',
stg_text='test portfolio selection strategy',
par_count=0,
par_types='',
par_bounds_or_enums=(),
data_types='close, low, open',
data_freq='d',
sample_freq='w',
window_length=2)
pass
def _realize(self, hist_data: np.ndarray, params: tuple):
avg = hist_data.mean(axis=1).squeeze()
difper = (hist_data[:, :, 0] - np.roll(hist_data[:, :, 0], 1))[:, -1] / avg
large2 = difper.argsort()[0:2]
chosen = np.zeros_like(avg)
chosen[large2] = 0.5
return chosen
class TestSigStrategy(SimpleTiming):
"""用于Test测试的简单信号生成策略,基于SimpleTiming策略生成
策略有三个参数,第一个参数为ratio,另外两个参数为price1以及price2
ratio是k线形状比例的阈值,定义为abs((C-O)/(H-L))。当这个比值小于ratio阈值时,判断该K线为十字交叉(其实还有丁字等多种情形,但这里做了
简化处理。
信号生成的规则如下:
1,当某个K线出现十字交叉,且昨收与今收之差大于price1时,买入信号
2,当某个K线出现十字交叉,且昨收与今收之差小于price2时,卖出信号
"""
def __init__(self):
super().__init__(stg_name='test_SIG',
stg_text='test signal creation strategy',
par_count=3,
par_types='conti, conti, conti',
par_bounds_or_enums=([2, 10], [0, 3], [0, 3]),
data_types='close, open, high, low',
window_length=2)
pass
def _realize(self, hist_data: np.ndarray, params: tuple):
r, price1, price2 = params
h = hist_data.T
ratio = np.abs((h[0] - h[1]) / (h[3] - h[2]))
diff = h[0] - np.roll(h[0], 1)
sig = np.where((ratio < r) & (diff > price1),
1,
np.where((ratio < r) & (diff < price2), -1, 0))
return sig
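# Worked example of the doji rule above (hypothetical bar): with C=10.00,
# O=10.01, H=10.20, L=9.90, ratio = |(10.00 - 10.01) / (9.90 - 10.20)| ~= 0.033,
# so any r > 0.033 marks the bar as a cross; the close-to-close diff then
# selects buy (diff > price1), sell (diff < price2) or no signal (0).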
class MyStg(qt.RollingTiming):
"""自定义双均线择时策略策略"""
def __init__(self):
"""这个均线择时策略只有三个参数:
- SMA 慢速均线,所选择的股票
- FMA 快速均线
- M 边界值
策略的其他说明
"""
"""
必须初始化的关键策略参数清单:
"""
super().__init__(
pars=(20, 100, 0.01),
par_count=3,
par_types=['discr', 'discr', 'conti'],
par_bounds_or_enums=[(10, 250), (10, 250), (0.0, 0.5)],
stg_name='CUSTOM ROLLING TIMING STRATEGY',
stg_text='Customized Rolling Timing Strategy for Testing',
data_types='close',
window_length=100,
)
print(f'=====================\n====================\n'
f'custom strategy initialized, \npars: {self.pars}\npar_count:{self.par_count}\npar_types:'
f'{self.par_types}\n'
f'{self.info()}')
# The concrete implementation of a strategy lives in its _realize() function.
# _realize() always takes two arguments: hist_price, the historical data of
# the given portfolio, and params, the concrete strategy parameters.
def _realize(self, hist_price, params):
"""策略的具体实现代码:
s:短均线计算日期;l:长均线计算日期;m:均线边界宽度;hesitate:均线跨越类型"""
f, s, m = params
# Temporary workaround: slice the incoming data here in the strategy
# implementation; eventually the data should be sliced beforehand, outside
# the implementation, so that what comes in already matches data_types.
h = hist_price.T
# Compute the current values of the fast and slow moving averages
s_ma = qt.sma(h[0], s)[-1]
f_ma = qt.sma(h[0], f)[-1]
# Compute the stop band around the slow MA; while the fast MA is inside
# the band, stay flat and emit no buy/sell signal
s_ma_u = s_ma * (1 + m)
s_ma_l = s_ma * (1 - m)
# Emit long/short/empty marks depending on where the fast MA sits
if f_ma > s_ma_u:  # fast MA above the slow-MA stop band: hold a long position
    return 1
elif s_ma_l < f_ma < s_ma_u:  # fast MA inside the stop band: stay flat
    return 0
else:  # f_ma < s_ma_l, fast MA below the slow-MA stop band: hold a short position
    return -1
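# Worked example of the stop band above (hypothetical values): with s_ma = 10.0
# and m = 0.01 the band is [9.9, 10.1]; f_ma = 10.2 returns 1 (long),
# f_ma = 10.05 returns 0 (flat) and f_ma = 9.8 returns -1 (short).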
class TestOperator(unittest.TestCase):
"""全面测试Operator对象的所有功能。包括:
1, Strategy 参数的设置
2, 历史数据的获取与分配提取
3, 策略优化参数的批量设置和优化空间的获取
4, 策略输出值的正确性验证
5, 策略结果的混合结果确认
"""
def setUp(self):
"""prepare data for Operator test"""
print('start testing Operator object\n')
# build up test data: a 4-type, 3-share, 50-day matrix of prices that contains nan values in some days
# for some share_pool
# for share1:
data_rows = 50
share1_close = [10.04, 10, 10, 9.99, 9.97, 9.99, 10.03, 10.03, 10.06, 10.06, 10.11,
10.09, 10.07, 10.06, 10.09, 10.03, 10.03, 10.06, 10.08, 10, 9.99,
10.03, 10.03, 10.06, 10.03, 9.97, 9.94, 9.83, 9.77, 9.84, 9.91, 9.93,
9.96, 9.91, 9.91, 9.88, 9.91, 9.64, 9.56, 9.57, 9.55, 9.57, 9.61, 9.61,
9.55, 9.57, 9.63, 9.64, 9.65, 9.62]
share1_open = [10.02, 10, 9.98, 9.97, 9.99, 10.01, 10.04, 10.06, 10.06, 10.11,
10.11, 10.07, 10.06, 10.09, 10.03, 10.02, 10.06, 10.08, 9.99, 10,
10.03, 10.02, 10.06, 10.03, 9.97, 9.94, 9.83, 9.78, 9.77, 9.91, 9.92,
9.97, 9.91, 9.9, 9.88, 9.91, 9.63, 9.64, 9.57, 9.55, 9.58, 9.61, 9.62,
9.55, 9.57, 9.61, 9.63, 9.64, 9.61, 9.56]
share1_high = [10.07, 10, 10, 10, 10.03, 10.03, 10.04, 10.09, 10.1, 10.14, 10.11, 10.1,
10.09, 10.09, 10.1, 10.05, 10.07, 10.09, 10.1, 10, 10.04, 10.04, 10.06,
10.09, 10.05, 9.97, 9.96, 9.86, 9.77, 9.92, 9.94, 9.97, 9.97, 9.92, 9.92,
9.92, 9.93, 9.64, 9.58, 9.6, 9.58, 9.62, 9.62, 9.64, 9.59, 9.62, 9.63,
9.7, 9.66, 9.64]
share1_low = [9.99, 10, 9.97, 9.97, 9.97, 9.98, 9.99, 10.03, 10.03, 10.04, 10.11, 10.07,
10.05, 10.03, 10.03, 10.01, 9.99, 10.03, 9.95, 10, 9.95, 10, 10.01, 9.99,
9.96, 9.89, 9.83, 9.77, 9.77, 9.8, 9.9, 9.91, 9.89, 9.89, 9.87, 9.85, 9.6,
9.64, 9.53, 9.55, 9.54, 9.55, 9.58, 9.54, 9.53, 9.53, 9.63, 9.64, 9.59, 9.56]
# for share2:
share2_close = [9.68, 9.87, 9.86, 9.87, 9.79, 9.82, 9.8, 9.66, 9.62, 9.58, 9.69, 9.78, 9.75,
9.96, 9.9, 10.04, 10.06, 10.08, 10.24, 10.24, 10.24, 9.86, 10.13, 10.12,
10.1, 10.25, 10.24, 10.22, 10.75, 10.64, 10.56, 10.6, 10.42, 10.25, 10.24,
10.49, 10.57, 10.63, 10.48, 10.37, 10.96, 11.02, np.nan, np.nan, 10.88, 10.87, 11.01,
11.01, 11.58, 11.8]
share2_open = [9.88, 9.88, 9.89, 9.75, 9.74, 9.8, 9.62, 9.65, 9.58, 9.67, 9.81, 9.8, 10,
9.95, 10.1, 10.06, 10.14, 9.9, 10.2, 10.29, 9.86, 9.48, 10.01, 10.24, 10.26,
10.24, 10.12, 10.65, 10.64, 10.56, 10.42, 10.43, 10.29, 10.3, 10.44, 10.6,
10.67, 10.46, 10.39, 10.9, 11.01, 11.01, np.nan, np.nan, 10.82, 11.02, 10.96,
11.55, 11.74, 11.8]
share2_high = [9.91, 10.04, 9.93, 10.04, 9.84, 9.88, 9.99, 9.7, 9.67, 9.71, 9.85, 9.9, 10,
10.2, 10.11, 10.18, 10.21, 10.26, 10.38, 10.47, 10.42, 10.07, 10.24, 10.27,
10.38, 10.43, 10.39, 10.65, 10.84, 10.65, 10.73, 10.63, 10.51, 10.35, 10.46,
10.63, 10.74, 10.76, 10.54, 11.02, 11.12, 11.17, np.nan, np.nan, 10.92, 11.15,
11.11, 11.55, 11.95, 11.93]
share2_low = [9.63, 9.84, 9.81, 9.74, 9.67, 9.72, 9.57, 9.54, 9.51, 9.47, 9.68, 9.63, 9.75,
9.65, 9.9, 9.93, 10.03, 9.8, 10.14, 10.09, 9.78, 9.21, 9.11, 9.68, 10.05,
10.12, 9.89, 9.89, 10.59, 10.43, 10.34, 10.32, 10.21, 10.2, 10.18, 10.36,
10.51, 10.41, 10.32, 10.37, 10.87, 10.95, np.nan, np.nan, 10.65, 10.71, 10.75,
10.91, 11.31, 11.58]
# for share3:
share3_close = [6.64, 7.26, 7.03, 6.87, np.nan, 6.64, 6.85, 6.7, 6.39, 6.22, 5.92, 5.91, 6.11,
5.91, 6.23, 6.28, 6.28, 6.27, np.nan, 5.56, 5.67, 5.16, 5.69, 6.32, 6.14, 6.25,
5.79, 5.26, 5.05, 5.45, 6.06, 6.21, 5.69, 5.46, 6.02, 6.69, 7.43, 7.72, 8.16,
7.83, 8.7, 8.71, 8.88, 8.54, 8.87, 8.87, 8.18, 7.8, 7.97, 8.25]
share3_open = [7.26, 7, 6.88, 6.91, np.nan, 6.81, 6.63, 6.45, 6.16, 6.24, 5.96, 5.97, 5.96,
6.2, 6.35, 6.11, 6.37, 5.58, np.nan, 5.65, 5.19, 5.42, 6.3, 6.15, 6.05, 5.89,
5.22, 5.2, 5.07, 6.04, 6.12, 5.85, 5.67, 6.02, 6.04, 7.07, 7.64, 7.99, 7.59,
8.73, 8.72, 8.97, 8.58, 8.71, 8.77, 8.4, 7.95, 7.76, 8.25, 7.51]
share3_high = [7.41, 7.31, 7.14, 7, np.nan, 6.82, 6.96, 6.85, 6.5, 6.34, 6.04, 6.02, 6.12, 6.38,
6.43, 6.46, 6.43, 6.27, np.nan, 6.01, 5.67, 5.67, 6.35, 6.32, 6.43, 6.36, 5.79,
5.47, 5.65, 6.04, 6.14, 6.23, 5.83, 6.25, 6.27, 7.12, 7.82, 8.14, 8.27, 8.92,
8.76, 9.15, 8.9, 9.01, 9.16, 9, 8.27, 7.99, 8.33, 8.25]
share3_low = [6.53, 6.87, 6.83, 6.7, np.nan, 6.63, 6.57, 6.41, 6.15, 6.07, 5.89, 5.82, 5.73, 5.81,
6.1, 6.06, 6.16, 5.57, np.nan, 5.51, 5.19, 5.12, 5.69, 6.01, 5.97, 5.86, 5.18, 5.19,
4.96, 5.45, 5.84, 5.85, 5.28, 5.42, 6.02, 6.69, 7.28, 7.64, 7.25, 7.83, 8.41, 8.66,
8.53, 8.54, 8.73, 8.27, 7.95, 7.67, 7.8, 7.51]
# for sel_finance test
shares_eps = np.array([[np.nan, np.nan, np.nan],
[0.1, np.nan, np.nan],
[np.nan, 0.2, np.nan],
[np.nan, np.nan, 0.3],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, 0.2],
[0.1, np.nan, np.nan],
[np.nan, 0.3, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[0.3, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, 0.3, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, 0.3],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, 0, 0.2],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[0.1, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, 0.2],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[0.15, np.nan, np.nan],
[np.nan, 0.1, np.nan],
[np.nan, np.nan, np.nan],
[0.1, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, 0.3],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[0.2, np.nan, np.nan],
[np.nan, 0.5, np.nan],
[0.4, np.nan, 0.3],
[np.nan, np.nan, np.nan],
[np.nan, 0.3, np.nan],
[0.9, np.nan, np.nan],
[np.nan, np.nan, 0.1]])
self.date_indices = ['2016-07-01', '2016-07-04', '2016-07-05', '2016-07-06',
'2016-07-07', '2016-07-08', '2016-07-11', '2016-07-12',
'2016-07-13', '2016-07-14', '2016-07-15', '2016-07-18',
'2016-07-19', '2016-07-20', '2016-07-21', '2016-07-22',
'2016-07-25', '2016-07-26', '2016-07-27', '2016-07-28',
'2016-07-29', '2016-08-01', '2016-08-02', '2016-08-03',
'2016-08-04', '2016-08-05', '2016-08-08', '2016-08-09',
'2016-08-10', '2016-08-11', '2016-08-12', '2016-08-15',
'2016-08-16', '2016-08-17', '2016-08-18', '2016-08-19',
'2016-08-22', '2016-08-23', '2016-08-24', '2016-08-25',
'2016-08-26', '2016-08-29', '2016-08-30', '2016-08-31',
'2016-09-01', '2016-09-02', '2016-09-05', '2016-09-06',
'2016-09-07', '2016-09-08']
self.shares = ['000010', '000030', '000039']
self.types = ['close', 'open', 'high', 'low']
self.sel_finance_tyeps = ['eps']
self.test_data_3D = np.zeros((3, data_rows, 4))
        self.test_data_2D = np.zeros((data_rows, 3))
import numpy as np
import matplotlib.pyplot as plt
N_samples = 10000
mu1 = 100.
mu2 = 100.
sigma1 = 20.
sigma2 = 10.
h = 2
def volume(a1, a2, h):
return h*(a1 + a2 + np.sqrt(a1*a2))/3
def compute_mu(mu1, mu2, h):
return volume(mu1, mu2, h)
def compute_sigma(sigma1, sigma2, h):
return h*np.sqrt(sigma1**2+sigma2**2)/3
def compute_sigma_orig(sigma1, sigma2, h):
return h*(sigma1+sigma2)/3
# sample the two rim radii a1 and a2
s1 = np.random.normal(mu1, sigma1, N_samples)
s2 = np.random.normal(mu2, sigma2, N_samples)
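# Added check (not in the original script): compare the sampled spread of the
# frustum volume with the two analytic error estimates defined above. With
# mu >> sigma, negative radius samples are vanishingly rare here.
v_samples = volume(s1, s2, h)
print('sample mean:', np.mean(v_samples), ' predicted mu:', compute_mu(mu1, mu2, h))
print('sample std :', np.std(v_samples),
      ' quadrature estimate:', compute_sigma(sigma1, sigma2, h),
      ' linear-sum estimate:', compute_sigma_orig(sigma1, sigma2, h))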
from math import sqrt
import numpy as np
from logger import logger
base_logger = logger.getChild('cancer')
base_logger.info('Inside the neighborlist.py module')
from helper import norm
class NeighborList:
"""Neighbor list object.
cutoffs: list of float
List of cutoff radii - one for each atom.
skin: float
If no atom has moved more than the skin-distance since the
last call to the ``update()`` method, then the neighbor list
can be reused. This will save some expensive rebuilds of
the list, but extra neighbors outside the cutoff will be
returned.
self_interaction: bool
Should an atom return itself as a neighbor?
bothways: bool
Return all neighbors. Default is to return only "half" of
the neighbors.
Example::
nl = NeighborList([2.3, 1.7])
nl.update(atoms)
indices, offsets = nl.get_neighbors(0)
"""
def __init__(self, cutoffs, skin=0.3, sorted=False, self_interaction=False,
bothways=False):
self.cutoffs = np.asarray(cutoffs) + skin
self.cutoff = self.cutoffs[0]
self.skin = skin
self.sorted = sorted
self.self_interaction = self_interaction
self.bothways = bothways
self.nupdates = 0
self.num_atoms = 0
self.logger = base_logger.getChild('NeighborList')
self.logger.info('Initializing NeighborList')
def newatom(self,atoms):
""" See if we can just add one new atom """
temp = list(self.cutoffs)
temp.append(self.cutoff)
self.cutoffs = np.asarray( temp )
newpos = atoms.get_positions()[-1]
offset = self.cell[0]
neighs = []
for i,pos in enumerate(self.positions):
if norm(newpos-pos) < self.cutoff:
neighs.append(i)
elif norm(newpos+offset-pos) < self.cutoff:
neighs.append(i)
elif norm(newpos-offset-pos) < self.cutoff:
neighs.append(i)
temppos = self.positions.tolist()
temppos.append(newpos)
self.positions = np.asarray(temppos)
self.neighbors.append(np.asarray(neighs))
self.displacements.append(np.zeros((len(neighs),3)))
self.num_atoms += 1
self.logger.info("Added an atom to the neighborlist...")
return self.update(atoms)
def update(self, atoms):
"""Make sure the list is up to date."""
if self.nupdates == 0:
self.build(atoms)
return True
if len(atoms) > self.num_atoms:
if len(atoms) == self.num_atoms + 1:
return self.newatom(atoms)
else:
self.build(atoms)
self.logger.info("Rebuilding NeighborList")
return True
elif ((self.pbc != atoms.get_pbc()).any() or
(self.cell != atoms.get_cell()).any() or
((self.positions - atoms.get_positions())**2).sum(1).max() >
self.skin**2):
self.build(atoms)
self.logger.info("Rebuilding NeighborList...")
return True
return False
def build(self, atoms):
"""Build the list."""
self.positions = atoms.get_positions()
self.pbc = atoms.get_pbc()
self.cell = atoms.get_cell()
if len(self.cutoffs) > 0:
rcmax = self.cutoffs.max()
else:
rcmax = 0.0
icell = np.linalg.inv(self.cell)
        scaled = np.dot(self.positions, icell)
import numpy as np
import matplotlib.pyplot as plt
import statsmodels.api as sm
import gen_data
import qp_solver
import util
import parametric_si
def compute_c_d(X, a, b, p, lamda):
dim_beta = p
dim_z = p - 1
no_vars = p + 2 * dim_z
    e_1 = lamda * np.hstack((np.zeros(dim_beta)
import matplotlib.pyplot as plt
import numpy as np
from time import time
import pandas as pd
# Underlying informations
S0 = 100
mu = 0.1
sigma = 0.1
# European option informations
T = 1.0
K = 100.0
r = 0.05
# Simulation parameters
nbr_steps = 100
dt = T/nbr_steps
t = np.linspace(0, T, nbr_steps)
min_nbr_sim, max_nbr_sim = 100, 100000
nbr_steps_sims = 100
nbr_sims = np.linspace(min_nbr_sim, max_nbr_sim, nbr_steps_sims)
# Global variables for results storage
prices_standard = []
prices_antithetic = []
# Classic Monte-Carlo simulation
time_begin_classic = time()
for i,nbr_sim in enumerate(nbr_sims):
print(i)
nbr_sim = int(nbr_sim)
price = 0.0
for _ in range(nbr_sim):
W = np.random.standard_normal(size = nbr_steps)
W = np.cumsum(W)*np.sqrt(dt)
X = (mu-0.5*sigma**2)*t + sigma*W
        S = S0*np.exp(X)
import sys
import unittest
from itertools import product
import numpy as np
import torch
from metal.label_model.class_balance import ClassBalanceModel
sys.path.append("../synthetic")
class ClassBalanceModelTest(unittest.TestCase):
def _set_seed(self, seed):
torch.manual_seed(seed)
        np.random.seed(seed)
#-*-coding:utf-8-*-
from netCDF4 import Dataset
import numpy as np
import os
import scipy.interpolate as scp
import matplotlib.pyplot as plt
import scipy
from time import time
from ctypes import *
from numpy.ctypeslib import ndpointer
from params import *
from write2nc import *
#
def interpolate(lat,lon,Z,B,Bath_fine):
'''
    Interpolates the coarse grid onto a fine grid at one point by selecting the nearest
    deeper points in the coarse grid, setting up a mesh on these points and then
    interpolating on the deepest point in the fine grid.
    lat, lon : coordinates of the points in the coarse grid
    Z : gridded eta values from the coarse grid at the selected (lat, lon)
    B : gridded bathymetry values from the coarse grid at the selected (lat, lon)
    Bath_fine : bathymetry on the fine grid, used to locate the deepest fine-grid point
'''
Z = np.nan_to_num(Z)
# Intepolate eta and bathy on fine grid
interp_Z = scp.RectBivariateSpline(lat,lon, Z,kx=3,ky=3)
interp_B = scp.RectBivariateSpline(lat,lon, B,kx=3,ky=3)
# lat_int = np.linspace(lat_fine[imin]-2,lat_fine[imin]+2,100)
# lon_int = np.linspace(lon_fine[jmin]-2,lon_fine[jmin]+2,100)
lat_int = np.linspace(lat[0],lat[-1],100)
    lon_int = np.linspace(lon[0],lon[-1],100)
from __future__ import annotations
import numpy as np
import pytest
from manim.utils.space_ops import *
from manim.utils.space_ops import shoelace, shoelace_direction
def test_rotate_vector():
vec = np.array([0, 1, 0])
rotated = rotate_vector(vec, np.pi / 2)
    assert np.round(rotated[0], 5) == -1
#!/usr/bin/env python
# coding: utf-8
# In[1]:
import pandas as pd
train = pd.read_csv('task1_train.csv')
test = pd.read_csv('task1_test.csv')
# In[2]:
import jieba
import numpy as np
train['sen_cut'] = train['joke'].apply(jieba.lcut)  # tokenize each joke into a new column
# In[3]:
X_train = train['sen_cut'].apply(lambda x: ' '.join(x)).tolist()  # join tokens into whitespace-separated strings, collected in one list
y_train = pd.get_dummies((np.asarray(train["label"])))  # one-hot encode the discrete labels
text = np.array(X_train)
"""PSF module
This module provides a class implementing a spatially varying PSF.
"""
import pdb
import numpy
def shift(im, offset, **kw):
"""Wrapper for scipy.ndimage.interpolation.shift"""
from scipy.ndimage.interpolation import shift
if 'order' not in kw:
kw['order'] = 4
# 1" Gaussian: 60 umag; 0.75": 0.4 mmag; 0.5": 4 mmag
# order=3 roughly 5x worse.
if 'mode' not in kw:
kw['mode'] = 'nearest'
if 'output' not in kw:
kw['output'] = im.dtype
return shift(im, offset, **kw)
def central_stamp(stamp, censize=19):
if censize is None:
censize = 19
stampsz = stamp.shape[-1]
if ((stampsz % 2) == 0) | ((censize % 2) == 0):
pdb.set_trace()
if stampsz == censize:
return stamp
elif stampsz > censize:
trim = (stamp.shape[-1] - censize)//2
f = trim
l = stampsz - trim
return stamp[..., f:l, f:l]
else:
ret = numpy.zeros(stamp.shape[:-2]+(censize, censize), dtype='f4')
central_stamp(ret, censize=stampsz)[..., :, :] = stamp
return ret
def neff_fwhm(stamp):
"""FWHM-like quantity derived from N_eff = numpy.sum(PSF**2.)**-1"""
norm = numpy.sum(stamp, axis=(-1, -2), keepdims=True)
return 1.18 * (numpy.pi*numpy.sum((stamp/norm)**2., axis=(-1, -2)))**(-0.5)
def fwhm_neff(fwhm):
return (fwhm/1.18)**2*numpy.pi
def gaussian_psf(fwhm, stampsz=19, deriv=True, shift=[0, 0]):
"""Create Gaussian psf & derivatives for a given fwhm and stamp size.
Args:
fwhm (float): the full width at half maximum
stampsz (int): the return psf stamps are [stampsz, stampsz] in size
deriv (bool): return derivatives?
shift (float, float): shift centroid by this amount in x, y
Returns:
(psf, dpsfdx, dpsfdy)
psf (ndarray[stampsz, stampsz]): the psf stamp
dpsfdx (ndarray[stampsz, stampsz]): the x-derivative of the PSF
dpsfdy (ndarray[stampsz, stampsz]): the y-derivative of the PSF
"""
sigma = fwhm / numpy.sqrt(8*numpy.log(2))
stampszo2 = stampsz // 2
parshape = numpy.broadcast(fwhm, shift[0], shift[1]).shape
    if len(parshape) > 0:
        shift = list(shift)  # avoid mutating the shared default argument
        sigma, shift[0], shift[1] = (numpy.atleast_1d(q).reshape(-1, 1, 1)
                                     for q in (sigma, shift[0], shift[1]))
xc = numpy.arange(stampsz, dtype='f4')-stampszo2
yc = xc.copy()
xc = xc.reshape(-1, 1)-shift[0]
yc = yc.reshape(1, -1)-shift[1]
psf = numpy.exp(-(xc**2. + yc**2.) /
2./sigma**2.).astype('f4')
psf /= numpy.sum(psf[..., :, :])
dpsfdx = xc/sigma**2.*psf
dpsfdy = yc/sigma**2.*psf
ret = psf
if deriv:
ret = (ret,) + (dpsfdx, dpsfdy)
return ret
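# A minimal usage sketch (added; `_demo_gaussian_psf` is illustrative and not
# part of the original module): the rendered stamp is unit-normalized and
# neff_fwhm approximately recovers the input FWHM.
def _demo_gaussian_psf():
    psf = gaussian_psf(3.0, stampsz=19, deriv=False)
    assert abs(numpy.sum(psf) - 1.0) < 1e-5
    assert abs(neff_fwhm(psf) - 3.0) < 0.5
    return psf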
def moffat_psf(fwhm, beta=3., xy=0., yy=1., stampsz=19, deriv=True,
shift=[0, 0]):
"""Create Moffat psf & derivatives for a given fwhm and stamp size.
Args:
fwhm (float): the full width at half maximum
beta (float): beta parameter for Moffat distribution
xy (float): xy coefficient (0 for uncorrelated)
yy (float): yy coefficient (1 for FWHM_x == FWHM_y)
stampsz (int): the returned psf stamps are [stampsz, stampsz] in size
deriv (bool): return derivatives?
shift (float, float): shift centroid by this amount in x, y
Returns:
(psf, dpsfdx, dpsfdy)
psf (ndarray[stampsz, stampsz]): the psf stamp
dpsfdx (ndarray[stampsz, stampsz]): the x-derivative of the PSF
dpsfdy (ndarray[stampsz, stampsz]): the y-derivative of the PSF
"""
if numpy.any(beta <= 1e-3):
print('Warning: crazy values for beta in moffat_psf')
beta = numpy.clip(beta, 1e-3, numpy.inf)
alpha = fwhm/(2*numpy.sqrt(2**(1./beta)-1))
stampszo2 = stampsz // 2
xc = numpy.arange(stampsz, dtype='f4')-stampszo2
parshape = numpy.broadcast(fwhm, beta, xy, yy, shift[0], shift[1]).shape
xc = xc.reshape(-1, 1)
yc = xc.copy().reshape(1, -1)
if len(parshape) > 0:
alpha, beta, xy, yy = (numpy.atleast_1d(q).reshape(-1, 1, 1)
for q in (alpha, beta, xy, yy))
shift = list(shift)
shift[0], shift[1] = (numpy.atleast_1d(q).reshape(-1, 1, 1)
for q in (shift[0], shift[1]))
xc = xc - shift[0]
yc = yc - shift[1]
yy = numpy.abs(yy)
rc2 = yy**(-0.5)*xc**2. + xy*xc*yc + yy**(0.5)*yc**2.
# for bad xy, this can screw up and generate negative values.
if numpy.any(rc2 < 0.):
print('Warning: crazy xy and yy values to moffat_psf')
rc2 = numpy.clip(rc2, 0., numpy.inf)
rc = numpy.sqrt(rc2)
psf = (beta - 1)/(numpy.pi * alpha**2.)*(1.+(rc**2./alpha**2.))**(-beta)
ret = psf
if deriv:
dpsffac = (beta-1)/(numpy.pi*alpha**2.)*(beta)*(
(1+(rc**2./alpha**2.))**(-beta-1))
dpsfdx = dpsffac*2*xc/alpha
dpsfdy = dpsffac*2*yc/alpha
ret = (psf, dpsfdx, dpsfdy)
return ret
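# Added sketch (`_demo_moffat_psf` is illustrative only): a round Moffat stamp
# and its centroid derivatives; by symmetry each derivative stamp sums to ~0.
def _demo_moffat_psf():
    psf, dpsfdx, dpsfdy = moffat_psf(4.0, beta=3.0, stampsz=19)
    assert abs(numpy.sum(dpsfdx)) < 1e-6
    assert abs(numpy.sum(dpsfdy)) < 1e-6
    return psf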
def simple_centroid(psf, norm=True):
stampsz = psf.shape[-1]
stampszo2 = stampsz // 2
xc = numpy.arange(stampsz, dtype='f4')-stampszo2
xc = xc.reshape(-1, 1)
yc = xc.copy().reshape(1, -1)
denom = 1.
if norm:
denom = numpy.sum(psf, axis=(-1, -2))
return (numpy.sum(xc*psf, axis=(-1, -2))/denom,
numpy.sum(yc*psf, axis=(-1, -2))/denom)
def center_psf(psf, censize=None):
"""Center and normalize a psf; centroid is placed at center."""
psf = psf.copy()
cpsf = central_stamp(psf, censize=censize)
for _ in range(3):
xcen, ycen = simple_centroid(cpsf)
psf[:, :] = shift(psf, [-xcen, -ycen],
output=numpy.dtype('f4'))
psf /= numpy.sum(psf)
psf = psf.astype('f4')
return psf
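# Added sketch (`_demo_center_psf` is illustrative only): an off-center
# Gaussian is recentred so that its centroid afterwards is close to zero.
def _demo_center_psf():
    stamp = gaussian_psf(3.0, stampsz=19, deriv=False, shift=[0.7, -0.4])
    xcen, ycen = simple_centroid(center_psf(stamp))
    assert abs(xcen) < 0.05 and abs(ycen) < 0.05
    return stamp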
class SimplePSF:
def __init__(self, stamp, normalize=19):
self.stamp = stamp
if normalize > 0:
norm = numpy.sum(central_stamp(stamp, censize=normalize))
self.stamp /= norm
self.deriv = numpy.gradient(-stamp)
def render_model(self, x, y, stampsz=None):
if stampsz is None:
return self.stamp
else:
return central_stamp(self.stamp, censize=stampsz)
def __call__(self, x, y, stampsz=None, deriv=False):
parshape = numpy.broadcast(x, y).shape
tparshape = parshape if len(parshape) > 0 else (1,)
if stampsz is None:
stampsz = self.stamp.shape[0]
shiftx, shifty = (numpy.atleast_1d(q) - numpy.round(q) for q in (x, y))
stamp = central_stamp(self.stamp, censize=stampsz)
ret = numpy.zeros(tparshape+(stampsz, stampsz), dtype='f4')
for i in range(ret.shape[0]):
ret[i, :, :] = shift(stamp, (shiftx[i], shifty[i]))
if deriv:
dpsfdx = numpy.zeros_like(ret)
dpsfdy = numpy.zeros_like(ret)
dxstamp = central_stamp(self.deriv[0], censize=stampsz)
dystamp = central_stamp(self.deriv[1], censize=stampsz)
for i in range(ret.shape[0]):
dpsfdx[i, :, :] = shift(dxstamp, (shiftx[i], shifty[i]))
dpsfdy[i, :, :] = shift(dystamp, (shiftx[i], shifty[i]))
if parshape != tparshape:
ret = ret.reshape(stampsz, stampsz)
if deriv:
dpsfdx = dpsfdx.reshape(stampsz, stampsz)
dpsfdy = dpsfdy.reshape(stampsz, stampsz)
if deriv:
ret = (ret, dpsfdx, dpsfdy)
return ret
def serialize(self, stampsz=None):
stamp = self.stamp
if stampsz is not None:
stamp = central_stamp(self.stamp, stampsz)
dtype = [('offset', '2f4'),
('stamp', stamp.dtype, stamp.shape)]
extrapar = getattr(self, 'extraparam', None)
if extrapar is not None:
dtype += extrapar.dtype.descr
res = numpy.zeros(1, dtype=dtype)
res['offset'][0, :] = getattr(self, 'offset', (0, 0))
res['stamp'][0, ...] = stamp
if getattr(self, 'extraparam', None) is not None:
for name in extrapar.dtype.names:
res[name][0, ...] = extrapar[name]
return res
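# Added usage sketch (`_demo_simple_psf` is illustrative only): wrap a rendered
# Gaussian stamp in SimplePSF and evaluate it at a fractional-pixel position.
def _demo_simple_psf():
    psf = SimplePSF(gaussian_psf(3.0, stampsz=19, deriv=False))
    stamp = psf(10.3, 5.7, stampsz=19)
    assert stamp.shape == (19, 19)
    return stamp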
class MoffatPSF:
def __init__(self, fwhm, beta, xy=0., yy=1., normalize=19):
self.fwhm = fwhm
self.beta = beta
self.xy = xy
self.yy = yy
if normalize > 0:
self.norm = numpy.sum(self.render_model(0, 0, stampsz=19))
else:
self.norm = 1
def render_model(self, x, y, stampsz=59):
res = moffat_psf(self.fwhm, beta=self.beta, xy=self.xy,
yy=self.yy, stampsz=stampsz)
return res
def __call__(self, x, y, stampsz=None, deriv=False):
shiftx, shifty = (q - numpy.round(q) for q in (x, y))
res = moffat_psf(self.fwhm, beta=self.beta, xy=self.xy,
yy=self.yy, stampsz=stampsz, deriv=deriv,
shift=(shiftx, shifty))
if deriv:
res = [r / self.norm for r in res]
else:
res = res / self.norm
return res
class VariableMoffatPSF:
def __init__(self, fwhm, beta, xy=0., yy=1., normalize=19):
self.fwhm = numpy.atleast_2d(fwhm)
self.beta = numpy.atleast_2d(beta)
self.xy = numpy.atleast_2d(xy)
self.yy = numpy.atleast_2d(yy)
self.normalize = normalize
def render_model(self, x, y, stampsz=59, deriv=False):
from numpy.polynomial.polynomial import polyval2d
x = x / 1000.
y = y / 1000.
fwhm = polyval2d(x, y, self.fwhm)
beta = polyval2d(x, y, self.beta)
xy = polyval2d(x, y, self.xy)
yy = polyval2d(x, y, self.yy)
return moffat_psf(fwhm, beta=beta, xy=xy,
yy=yy, stampsz=stampsz, deriv=deriv)
def __call__(self, x, y, stampsz=59, deriv=False):
from numpy.polynomial.polynomial import polyval2d
shiftx, shifty = (q - numpy.round(q) for q in (x, y))
x = x / 1000.
y = y / 1000.
fwhm = polyval2d(x, y, self.fwhm)
beta = polyval2d(x, y, self.beta)
xy = polyval2d(x, y, self.xy)
yy = polyval2d(x, y, self.yy)
tstampsz = max(stampsz, self.normalize)
psf = moffat_psf(fwhm, beta=beta, xy=xy,
yy=yy, stampsz=tstampsz, deriv=deriv,
shift=(shiftx, shifty))
if not deriv:
psf = [psf]
if self.normalize > 0:
norms = numpy.sum(central_stamp(psf[0], censize=self.normalize),
axis=(-1, -2)).reshape(-1, 1, 1)
else:
norms = 1
psf = [central_stamp(p, censize=stampsz) / norms
for p in psf]
if not deriv:
psf = psf[0]
return psf
class VariablePixelizedPSF:
def __init__(self, stamp, normalize=19):
stampsz = stamp.shape[-1]
if (stampsz % 2) == 0:
            raise ValueError('stamp size must be odd')
self.stamp = stamp
self.normalize = normalize
self.deriv = numpy.gradient(-self.stamp, axis=(2, 3))
if normalize > 0:
cstamp = central_stamp(stamp, normalize)
else:
cstamp = stamp
self.normstamp = numpy.sum(cstamp, axis=(2, 3))
stampsz = cstamp.shape[-1]
stampszo2 = stampsz // 2
xc = numpy.arange(stampsz, dtype='f4')-stampszo2
xc = xc.reshape(1, 1, -1, 1)
yc = xc.copy().reshape(1, 1, 1, -1)
self.xstamp = numpy.sum(xc*cstamp, axis=(2, 3))
self.ystamp = numpy.sum(yc*cstamp, axis=(2, 3))
def norm(self, x, y):
from numpy.polynomial.polynomial import polyval2d
x, y = (x/1000., y/1000.)
return polyval2d(x, y, self.normstamp)
def centroid(self, x, y):
from numpy.polynomial.polynomial import polyval2d
x, y = (x/1000., y/1000.)
if self.normalize < 0:
norm = 1
else:
norm = self.norm(x, y)
xc = polyval2d(x, y, self.xstamp)
yc = polyval2d(x, y, self.ystamp)
return xc/norm, yc/norm
def render_model(self, x, y, stampsz=59, deriv=False):
from numpy.polynomial.polynomial import polyval2d
x = x / 1000.
y = y / 1000.
tstamps = polyval2d(x, y, central_stamp(self.stamp, stampsz))
if len(tstamps.shape) == 3:
tstamps = tstamps.transpose(2, 0, 1)
if deriv:
dpsfdx = polyval2d(x, y, central_stamp(self.deriv[0], stampsz))
dpsfdy = polyval2d(x, y, central_stamp(self.deriv[1], stampsz))
if len(tstamps.shape) == 3:
dpsfdx = dpsfdx.transpose(2, 0, 1)
dpsfdy = dpsfdy.transpose(2, 0, 1)
tstamps = (tstamps, dpsfdx, dpsfdy)
return tstamps
def serialize(self, stampsz=None):
stamp = self.stamp
if stampsz is not None:
stamp = central_stamp(self.stamp, stampsz)
dtype = [('offset', '2f4'),
('stamp', stamp.dtype, stamp.shape)]
extrapar = getattr(self, 'extraparam', None)
if extrapar is not None:
dtype += extrapar.dtype.descr
res = numpy.zeros(1, dtype=dtype)
res['offset'][0, :] = getattr(self, 'offset', (0, 0))
res['stamp'][0, ...] = stamp
if getattr(self, 'extraparam', None) is not None:
for name in extrapar.dtype.names:
res[name][0, ...] = extrapar[name]
return res
def __call__(self, x, y, stampsz=None, deriv=False):
if stampsz is None:
stampsz = self.stamp.shape[-1]
parshape = numpy.broadcast(x, y).shape
tparshape = parshape if len(parshape) > 0 else (1,)
shiftx, shifty = (q - numpy.round(q) for q in (x, y))
stamps = self.render_model(x, y, stampsz=stampsz, deriv=deriv)
if deriv:
stamps, dpsfdx, dpsfdy = stamps
dpsfdx = dpsfdx.reshape(tparshape+(stampsz, stampsz))
dpsfdy = dpsfdy.reshape(tparshape+(stampsz, stampsz))
stamps = stamps.reshape(tparshape+(stampsz, stampsz))
norm = numpy.atleast_1d(self.norm(x, y))
shiftx = numpy.atleast_1d(shiftx)
shifty = numpy.atleast_1d(shifty)
for i in range(stamps.shape[0]):
stamps[i, :, :] = shift(stamps[i, :, :], (shiftx[i], shifty[i]))
stamps /= norm.reshape(-1, 1, 1)
if tparshape != parshape:
stamps = stamps.reshape(stamps.shape[1:])
if deriv:
for i in range(stamps.shape[0]):
dpsfdx[i, :, :] = shift(dpsfdx[i, :, :],
(shiftx[i], shifty[i]))
dpsfdy[i, :, :] = shift(dpsfdy[i, :, :],
(shiftx[i], shifty[i]))
dpsfdx /= norm.reshape(-1, 1, 1)
dpsfdy /= norm.reshape(-1, 1, 1)
if tparshape != parshape:
dpsfdx = dpsfdx.reshape(stamps.shape[1:])
dpsfdy = dpsfdy.reshape(stamps.shape[1:])
stamps = (stamps, dpsfdx, dpsfdy)
return stamps
class VariableMoffatPixelizedPSF:
def __init__(self, stamp, fwhm, beta, xy=0., yy=1., normalize=-1):
self.moffat = VariableMoffatPSF(fwhm, beta, xy=xy, yy=yy, normalize=-1)
self.resid = VariablePixelizedPSF(stamp, normalize=-1)
self.normalize = normalize
def render_model(self, x, y, stampsz=59, deriv=False):
mof = self.moffat.render_model(x, y, stampsz=stampsz, deriv=deriv)
res = self.resid.render_model(x, y, stampsz=stampsz, deriv=deriv)
if not deriv:
return mof + res
else:
return [a+b for (a, b) in zip(mof, res)]
def __call__(self, x, y, stampsz=None, deriv=False):
stampsz = (stampsz if stampsz is not None else
self.resid.stamp.shape[-1])
tstampsz = max(stampsz, self.normalize)
modstamp = self.render_model(x, y, stampsz=tstampsz, deriv=deriv)
if not deriv:
modstamp = [modstamp]
shiftx, shifty = (q - numpy.round(q) for q in (x, y))
if len(modstamp[0].shape) == 2:
for modstamp0 in modstamp:
modstamp0[:, :] = shift(modstamp0[:, :], (shiftx, shifty))
else:
for modstamp0 in modstamp:
for i in range(modstamp0.shape[0]):
modstamp0[i, :, :] = shift(modstamp0[i, :, :],
(shiftx[i], shifty[i]))
if self.normalize > 0:
norms = numpy.sum(central_stamp(modstamp[0],
censize=self.normalize),
axis=(-1, -2))
norms = numpy.array(norms)[..., None, None]
else:
norms = 1 + self.resid.norm(x, y)
for modstamp0 in modstamp:
if len(modstamp0.shape) == 2:
modstamp0 /= norms
else:
modstamp0 /= norms.reshape(-1, 1, 1)
if not deriv:
modstamp = modstamp[0]
return modstamp
class GridInterpPSF:
def __init__(self, stamp, x, y, normalize=19):
stampsz = stamp.shape[-1]
if (stampsz % 2) == 0:
            raise ValueError('stamp size must be odd')
if (stamp.shape[0] != len(x)) or (stamp.shape[1] != len(y)):
raise ValueError('mismatch between grid coordinates and stamp.')
self.stamp = stamp
self.normalize = normalize
self.x = x
self.y = y
self.deriv = numpy.gradient(-self.stamp, axis=(2, 3))
if normalize > 0:
cstamp = central_stamp(stamp, normalize)
else:
cstamp = stamp
self.normstamp = numpy.sum(cstamp, axis=(2, 3))
stampsz = cstamp.shape[-1]
stampszo2 = stampsz // 2
xc = numpy.arange(stampsz, dtype='f4')-stampszo2
xc = xc.reshape(1, 1, -1, 1)
yc = xc.copy().reshape(1, 1, 1, -1)
self.xstamp = numpy.sum(xc*cstamp, axis=(2, 3))
self.ystamp = numpy.sum(yc*cstamp, axis=(2, 3))
def interpolator(self, stamp, x, y):
x0 = numpy.atleast_1d(x)
y0 = numpy.atleast_1d(y)
ind = [numpy.interp(z, zgrid, numpy.arange(len(zgrid), dtype='f4'),
left=0, right=len(zgrid)-1)
for (z, zgrid) in ((x0, self.x), (y0, self.y))]
w1 = [numpy.ceil(z) - z for z in ind]
w2 = [1 - z for z in w1]
left = [numpy.floor(z).astype('i4') for z in ind]
right = [numpy.ceil(z).astype('i4') for z in ind]
ret = numpy.zeros((len(x0),)+stamp.shape[2:], dtype=stamp.dtype)
for i in range(len(x0)):
ret[i, ...] = (
w1[0][i]*w1[1][i]*stamp[left[0][i], left[1][i], ...] +
w1[0][i]*w2[1][i]*stamp[left[0][i], right[1][i], ...] +
w2[0][i]*w1[1][i]*stamp[right[0][i], left[1][i], ...] +
w2[0][i]*w2[1][i]*stamp[right[0][i], right[1][i], ...])
if x0 is not x:
ret = ret[0]
return ret
def norm(self, x, y):
return self.interpolator(self.normstamp, x, y)
def centroid(self, x, y):
if self.normalize < 0:
norm = 1
else:
norm = self.norm(x, y)
xc = self.interpolator(self.xstamp, x, y)
yc = self.interpolator(self.ystamp, x, y)
return xc/norm, yc/norm
def render_model(self, x, y, stampsz=59, deriv=False):
tstamps = self.interpolator(central_stamp(self.stamp, stampsz), x, y)
if deriv:
dpsfdx = self.interpolator(central_stamp(self.deriv[0], stampsz),
x, y)
dpsfdy = self.interpolator(central_stamp(self.deriv[1], stampsz),
x, y)
tstamps = (tstamps, dpsfdx, dpsfdy)
return tstamps
def serialize(self, stampsz=None):
stamp = self.stamp
if stampsz is not None:
stamp = central_stamp(self.stamp, stampsz)
dtype = [('stamp', stamp.dtype, stamp.shape),
                 ('x', 'f4', len(self.x)), ('y', 'f4', len(self.y))]  # (name, format, shape)
extrapar = getattr(self, 'extraparam', None)
if extrapar is not None:
dtype += extrapar.dtype.descr
res = numpy.zeros(1, dtype=dtype)
res['stamp'][0, ...] = stamp
res['x'][0, ...] = self.x
res['y'][0, ...] = self.y
if getattr(self, 'extraparam', None) is not None:
for name in extrapar.dtype.names:
res[name][0, ...] = extrapar[name]
return res
def __call__(self, x, y, stampsz=None, deriv=False):
if stampsz is None:
stampsz = self.stamp.shape[-1]
parshape = numpy.broadcast(x, y).shape
tparshape = parshape if len(parshape) > 0 else (1,)
x = numpy.atleast_1d(x)
y = numpy.atleast_1d(y)
shiftx, shifty = (q - numpy.round(q) for q in (x, y))
stamps = self.render_model(x, y, stampsz=stampsz, deriv=deriv)
if deriv:
stamps, dpsfdx, dpsfdy = stamps
dpsfdx = dpsfdx.reshape(tparshape+(stampsz, stampsz))
dpsfdy = dpsfdy.reshape(tparshape+(stampsz, stampsz))
stamps = stamps.reshape(tparshape+(stampsz, stampsz))
norm = numpy.atleast_1d(self.norm(x, y))
shiftx = numpy.atleast_1d(shiftx)
shifty = numpy.atleast_1d(shifty)
for i in range(stamps.shape[0]):
stamps[i, :, :] = shift(stamps[i, :, :], (shiftx[i], shifty[i]))
stamps /= norm.reshape(-1, 1, 1)
if tparshape != parshape:
stamps = stamps.reshape(stamps.shape[1:])
if deriv:
for i in range(stamps.shape[0]):
dpsfdx[i, :, :] = shift(dpsfdx[i, :, :],
(shiftx[i], shifty[i]))
dpsfdy[i, :, :] = shift(dpsfdy[i, :, :],
(shiftx[i], shifty[i]))
dpsfdx /= norm.reshape(-1, 1, 1)
dpsfdy /= norm.reshape(-1, 1, 1)
if tparshape != parshape:
dpsfdx = dpsfdx.reshape(stamps.shape[1:])
dpsfdy = dpsfdy.reshape(stamps.shape[1:])
stamps = (stamps, dpsfdx, dpsfdy)
return stamps
def select_stamps(psfstack, imstack, weightstack, shiftx, shifty):
if psfstack.shape[0] == 0:
return numpy.ones(0, dtype='bool')
tflux = numpy.sum(psfstack, axis=(1, 2))
timflux = numpy.sum(imstack, axis=(1, 2))
tmedflux = numpy.median(psfstack, axis=(1, 2))
npix = psfstack.shape[1]*psfstack.shape[2]
tfracflux = tflux / numpy.clip(timflux, 100, numpy.inf)
tfracflux2 = ((tflux-tmedflux*npix) /
numpy.clip(timflux, 100, numpy.inf))
# tfracflux3 = ((tflux - tmedflux*npix)/
# numpy.clip(timflux-tmedflux*npix, 100, numpy.inf))
cx, cy = (imstack.shape[-2] // 2, imstack.shape[-1] // 2)
cenflux = imstack[:, cx, cy]
psfqf = (numpy.sum(psfstack*(weightstack > 0), axis=(1, 2)) /
(tflux + (tflux == 0)))
okpsf = ((numpy.abs(psfqf - 1) < 0.03) &
(tfracflux > 0.5) & (tfracflux2 > 0.2) &
(weightstack[:, cx, cy] > 0) &
(cenflux*weightstack[:, cx, cy] > 3))
if numpy.sum(okpsf) > 0:
shiftxm = numpy.median(shiftx[okpsf])
shiftym = numpy.median(shifty[okpsf])
okpsf = (okpsf &
(numpy.abs(shiftx-shiftxm) < 1.) &
(numpy.abs(shifty-shiftym) < 1.))
return okpsf
def shift_and_normalize_stamps(psfstack, modstack, weightstack,
shiftx, shifty):
xr = numpy.round(shiftx)
yr = numpy.round(shifty)
psfstack = psfstack.copy()
weightstack = weightstack.copy()
psfstack = (psfstack -
numpy.median(psfstack-modstack, axis=(1, 2)).reshape(-1, 1, 1))
norms = numpy.sum(psfstack, axis=(1, 2))
psfstack /= norms.reshape(-1, 1, 1)
weightstack *= norms.reshape(-1, 1, 1)
for i in range(psfstack.shape[0]):
psfstack[i, :, :] = shift(psfstack[i, :, :], [-shiftx[i], -shifty[i]])
if (numpy.abs(xr[i]) > 0) or (numpy.abs(yr[i]) > 0):
weightstack[i, :, :] = shift(weightstack[i, :, :],
[-xr[i], -yr[i]],
mode='constant', cval=0.)
return psfstack, weightstack
def fill_param_matrix(param, order):
ret = numpy.zeros((order+1, order+1)+param.shape[1:], dtype='f4')
ret[numpy.tril_indices(order+1)] = param
return ret[::-1, ...]
def extract_params(param, order, pixsz):
    nperpar = (order+1)*(order+2)//2  # integer division: used below as a slice index
if (pixsz**2.+3)*nperpar != len(param):
raise ValueError('Bad parameter vector size?')
return [fill_param_matrix(x, order) for x in
(param[0:nperpar], param[nperpar:nperpar*2],
param[nperpar*2:nperpar*3],
param[nperpar*3:nperpar*(3+pixsz**2)].reshape(nperpar, pixsz,
pixsz))]
def extract_params_moffat(param, order):
    nperpar = (order+1)*(order+2)//2  # integer division: used below as a slice index
if 3*nperpar != len(param):
raise ValueError('Bad parameter vector size?')
return [fill_param_matrix(x, order) for x in
(param[0:nperpar], param[nperpar:nperpar*2],
param[nperpar*2:nperpar*3])]
def plot_psf_fits(stamp, x, y, model, isig, name=None, save=False):
from matplotlib import pyplot as p
datim = numpy.zeros((stamp.shape[1]*10, stamp.shape[1]*10), dtype='f4')
modim = numpy.zeros((stamp.shape[1]*10, stamp.shape[1]*10), dtype='f4')
xbd = numpy.linspace(numpy.min(x)-0.01, numpy.max(x)+0.01, 11)
ybd = numpy.linspace(numpy.min(y)-0.01, numpy.max(y)+0.01, 11)
medmodel = numpy.median(model, axis=0)
sz = stamp.shape[-1]
for i in range(10):
for j in range(10):
m = numpy.flatnonzero((x > xbd[i]) & (x <= xbd[i+1]) &
(y > ybd[j]) & (y <= ybd[j+1]))
if len(m) == 0:
continue
ind = m[numpy.argmax(numpy.median(isig[m, :, :], axis=(1, 2)))]
datim0 = stamp[ind, :, :]
modim0 = model[ind, :, :]
datim[i*sz:(i+1)*sz, j*sz:(j+1)*sz] = datim0-medmodel
modim[i*sz:(i+1)*sz, j*sz:(j+1)*sz] = modim0-medmodel
p.figure(figsize=(24, 8), dpi=150)
p.subplot(1, 3, 1)
p.imshow(datim, aspect='equal', vmin=-0.005, vmax=0.005, cmap='binary')
p.title('Stamps')
p.subplot(1, 3, 2)
p.imshow(modim, aspect='equal', vmin=-0.005, vmax=0.005, cmap='binary')
p.title('Model')
p.subplot(1, 3, 3)
p.imshow(datim-modim, aspect='equal', vmin=-0.001, vmax=0.001,
cmap='binary')
p.title('Residuals')
if save:
import matplotlib
matplotlib.use('Agg')
p.style.use('dark_background')
p.savefig('psf_'+name[1]+'_'+str(name[0])+'.png', dpi=150,
bbox_inches='tight', pad_inches=0.1)
def plot_psf_fits_brightness(stamp, x, y, model, isig):
from matplotlib import pyplot as p
import util_efs
nx, ny = 10, 10
datim = numpy.zeros((stamp.shape[1]*nx, stamp.shape[1]*ny), dtype='f4')
modim = numpy.zeros((stamp.shape[1]*nx, stamp.shape[1]*ny), dtype='f4')
medmodel = numpy.median(model, axis=0)
s = numpy.argsort(-numpy.median(isig, axis=(1, 2)))
sz = stamp.shape[-1]
for i in range(nx):
for j in range(ny):
if i*ny+j >= len(s):
continue
ind = s[i*ny+j]
datim0 = stamp[ind, :, :]
modim0 = model[ind, :, :]
datim[i*sz:(i+1)*sz, j*sz:(j+1)*sz] = datim0-medmodel
modim[i*sz:(i+1)*sz, j*sz:(j+1)*sz] = modim0-medmodel
p.figure('psfs')
p.subplot(1, 3, 1)
util_efs.imshow(datim, aspect='equal', vmin=-0.005, vmax=0.005)
p.title('Stamps')
p.subplot(1, 3, 2)
util_efs.imshow(modim, aspect='equal', vmin=-0.005, vmax=0.005)
p.title('Model')
p.subplot(1, 3, 3)
util_efs.imshow(datim-modim, aspect='equal', vmin=-0.001, vmax=0.001)
p.title('Residuals')
p.draw()
def damper(chi, damp):
return 2*damp*numpy.sign(chi)*(numpy.sqrt(1+numpy.abs(chi)/damp)-1)
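# `damper` soft-clips the chi values fed to the least-squares fits below: for
# |chi| << damp it is ~chi, while for large residuals it grows only like
# sqrt(|chi|), limiting the pull of outliers. A small added check follows
# (`_demo_damper` is illustrative and not part of the original module):
def _demo_damper():
    chi = numpy.array([0.1, 1.0, 100.0])
    damped = damper(chi, 3.)
    assert numpy.all(numpy.abs(damped) <= numpy.abs(chi))
    return damped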
def fit_variable_moffat_psf(x, y, xcen, ycen, stamp, imstamp, modstamp,
isig, order=1, pixsz=9, nkeep=200, plot=False, name=None):
# clean and shift the PSFs first.
shiftx = xcen + x - numpy.round(x)
shifty = ycen + y - numpy.round(y)
okpsf = select_stamps(stamp, imstamp, isig, shiftx, shifty)
x, y, xcen, ycen = (q[okpsf] for q in (x, y, xcen, ycen))
stamp, modstamp, isig, imstamp, shiftx, shifty = (
q[okpsf] for q in (stamp, modstamp, isig, imstamp, shiftx, shifty))
if len(x) > nkeep:
fluxes = numpy.sum(stamp, axis=(1, 2))
s = numpy.argsort(-fluxes)
okpsf = (fluxes >= fluxes[s][nkeep-1])
x, y, xcen, ycen = (q[okpsf] for q in (x, y, xcen, ycen))
stamp, modstamp, isig, imstamp, shiftx, shifty = (
q[okpsf] for q in (stamp, modstamp, isig, imstamp, shiftx, shifty))
stamp, isig = shift_and_normalize_stamps(stamp, modstamp, isig,
shiftx, shifty)
isig = numpy.clip(isig, 0., 1./(0.1*0.001))
isig_nocen = isig.copy()
if stamp.shape[0] > 50:
central_stamp(isig_nocen, censize=pixsz)[:, :, :] = 0.
def make_full_psf_model(param, order, pixsz):
fwhm, xy, yy, resid = extract_params(param, order, pixsz)
return VariableMoffatPixelizedPSF(resid, fwhm, 3., xy=xy, yy=yy)
def make_moffat_psf_model(param, order):
fwhm, xy, yy = extract_params_moffat(param, order)
return VariableMoffatPSF(fwhm, 3., xy=xy, yy=yy)
def chimoff(param, isig):
norm = param[-1]
psf = make_moffat_psf_model(param[:-1], order)
tresid = (stamp -
norm*psf.render_model(x, y, stampsz=stamp.shape[-1]))
tchi = damper(tresid*isig, 3.).reshape(-1).astype('f4')
return tchi
def chipix(param, resid, isig):
from numpy.polynomial.polynomial import polyval2d
mat = fill_param_matrix(param, order)
tchi = (resid - polyval2d(x/1000., y/1000., mat))*isig
return damper(tchi, 3.).reshape(-1)
    nperpar = (order+1)*(order+2)//2
guess = numpy.zeros(3*nperpar+1, dtype='f4')
constanttermindex = nperpar - order - 1
guess[0+constanttermindex] = 4. # 1" PSF
# guess[nperpar+constanttermindex] = 3. # beta
guess[nperpar*2+constanttermindex] = 1. # yy
guess[-1] = 1. # overall normalization
# all others can be zero.
from scipy import optimize
resmoff = optimize.leastsq(chimoff, guess, args=(isig_nocen,),
full_output=True)
residfitdict = {}
residguess = numpy.zeros(nperpar, dtype='f4')
moffpsf = make_moffat_psf_model(resmoff[0][:-1], order)
resid = (stamp - resmoff[0][-1] *
moffpsf.render_model(x, y, stampsz=stamp.shape[-1])).astype('f4')
resid_cen = central_stamp(resid, censize=pixsz)
isig_cen = central_stamp(isig, censize=pixsz)
for i in range(pixsz):
for j in range(pixsz):
args = (resid_cen[:, i, j], isig_cen[:, i, j])
residfitdict[i, j] = optimize.leastsq(chipix, residguess,
args=args, full_output=True)
fullparam = numpy.zeros((3+pixsz**2)*nperpar+1, dtype='f4')
fullparam[0:3*nperpar] = resmoff[0][0:3*nperpar]
fullparam[-1] = resmoff[0][-1]
resparam = numpy.array([[residfitdict[i, j][0]/fullparam[-1]
for j in range(pixsz)]
for i in range(pixsz)])
resparam = resparam.transpose(2, 0, 1)
fullparam[3*nperpar:(3+pixsz**2)*nperpar] = resparam.reshape(-1)
psf = make_full_psf_model(fullparam[:-1], order, pixsz)
if plot != 0:
norm = fullparam[-1]
modstamps = norm*psf.render_model(x, y, stampsz=stamp.shape[-1])
if plot == 1:
plot_psf_fits(stamp, x, y, modstamps, isig, name=name)
else:
plot_psf_fits(stamp, x, y, modstamps, isig, name=name, save=True)
return psf
def fit_moffat(stamp, isig=None):
if isig is None:
isig = numpy.ones_like(stamp, dtype='f4')
def chimoff(param):
model = param[0]*moffat_psf(param[1], beta=param[2], xy=param[3],
yy=param[4], stampsz=stamp.shape[0],
deriv=False)
chi = (stamp-model)*isig
return damper(chi, 5).reshape(-1).astype('f4')
from scipy import optimize
guess = numpy.array([1., 4., 3., 0., 1.]).astype('f4')
res = optimize.leastsq(chimoff, guess, full_output=True, epsfcn=1e-2)
return res
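# Added sketch (`_demo_fit_moffat` is illustrative only): fit_moffat should
# recover the parameters of a noiseless Moffat stamp; the initial guess in
# fit_moffat happens to match this truth, so convergence is immediate.
def _demo_fit_moffat():
    stamp = moffat_psf(4.0, beta=3.0, stampsz=19, deriv=False)
    res = fit_moffat(stamp)
    norm, fwhm, beta, xy, yy = res[0]
    assert abs(fwhm - 4.0) < 0.1
    return res[0]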
def sum_prof(param, stampsz=59, prof='moffat'):
res = numpy.zeros((stampsz, stampsz), dtype='f4')
npar = 3 if prof == 'moffat' else 2
    ncomp = len(param) // npar
for i in range(ncomp):
if prof == 'moffat':
tres = moffat_psf(param[i*npar+1], beta=param[i*npar+2], xy=0.,
yy=1, stampsz=stampsz,
deriv=False)
elif prof == 'gaussian':
tres = gaussian_psf(param[i*npar+1], stampsz=stampsz,
deriv=False)
res += tres*param[i*npar]
return res*param[0]
def fit_sum_prof(stamp, ncomp=3, isig=None, prof='moffat'):
if isig is None:
isig = numpy.ones_like(stamp, dtype='f4')
def chiprof(param):
chi = (stamp-sum_prof(param, stampsz=stamp.shape[-1], prof=prof))*isig
return damper(chi, 5).reshape(-1).astype('f4')
guessnorm = numpy.ones(ncomp)/1.0/ncomp
guessfwhm = 4*numpy.exp(numpy.linspace(0, numpy.log(stamp.shape[-1]/10),
ncomp))
guessbeta = 3.5-1*numpy.linspace(0, 1, ncomp)
guess = []
if prof == 'moffat':
for n, b, f in zip(guessnorm, guessfwhm, guessbeta):
guess += [n, b, f]
else:
for n, f in zip(guessnorm, guessfwhm):
guess += [n, f]
from scipy import optimize
guess = numpy.array(guess).astype('f4')
res = optimize.leastsq(chiprof, guess, full_output=True)
return res
def gaussian(major, minor, rotation, stampsz):
sigmafac = 1 / numpy.sqrt(8*numpy.log(2))
major = major * sigmafac
minor = minor * sigmafac
stampszo2 = stampsz // 2
dx = numpy.arange(stampsz).reshape(1, -1, 1)-stampszo2
dy = dx.copy().reshape(1, 1, -1)
major = numpy.abs(major).reshape(-1, 1, 1)
minor = numpy.abs(minor).reshape(-1, 1, 1)
rotation = rotation.reshape(-1, 1, 1)
    r2 = ((dx*numpy.cos(rotation)
import datetime,os
import argparse
import glob
import sys
import numpy as np
from bunch import Bunch
import io_routines as io
version="1.0"
def set_bounds(info):
atm_file=info.atmdir+info.atmfile
cesm_file=atm_file.replace("_Y_",str(info.start_year)).replace("_VAR_","Q").replace("_ENS_",info.ensemble).replace("_EXP_",info.experiment)
try:
cesm_file=glob.glob(cesm_file)[0]
    except IndexError:  # glob matched no files
print("ERROR searching for: "+cesm_file)
sys.exit(1)
varlist=["lat","lon"]
lat=io.read_nc(cesm_file,varlist[0]).data
lon=io.read_nc(cesm_file,varlist[1]).data#-360
try:
info.xmin=np.where(lon>=info.lon[0])[0][0]
    info.xmax=np.where(lon<=info.lon[1])[0][-1]  # last index inside the bounds, mirroring xmin above
# Copyright (c) <NAME>. All rights reserved.
import wave
from os import remove
from time import sleep
import nltk
import numpy as np
import pyaudio
from aip import AipSpeech
class VoiceRecognizer(object):
def __init__(self):
self.APP_ID = '11615546'
self.API_KEY = 'Agl9OnFc63ssaEXQGLvkop7c'
self.SECRET_KEY = '<KEY>'
self.client = AipSpeech(self.APP_ID, self.API_KEY, self.SECRET_KEY)
self.CHUNK = 1024
self.FORMAT = pyaudio.paInt16
self.RATE = 16000
self.CHANNELS = 1
self.RECORD_SECONDS = 1
self.WAVE_OUTPUT_FILENAME = 'output.wav'
self.NN_IGNORE_LIST = [
'piece', 'cup', 'bottle', 'bar', 'spoon', 'bowl', 'oh'
]
def get_file_content(self, filePath):
with open(filePath, 'rb') as fp:
return fp.read()
# Return keywords of the speech
def recognize(self):
# Recognize voice via Baidu API
res = self.client.asr(
self.get_file_content(self.WAVE_OUTPUT_FILENAME), 'wav', 16000, {
'dev_pid': 1737,
})
# Remove temp wav file
remove(self.WAVE_OUTPUT_FILENAME)
if res['err_no'] == 0:
print('Result:', res['result'][0])
words = nltk.word_tokenize(str(res['result'][0]))
tagged_words = nltk.pos_tag(words)
# print('Tagged:', tagged_words)
for item in tagged_words:
if item[1] == 'NN' and item[0] in [
'bread', 'breath', 'crap', 'crab'
]:
print('Keyword: bread\n')
return 'bread'
for item in tagged_words:
if item[1] == 'NN' and item[0] not in self.NN_IGNORE_LIST:
print('Keyword:', item[0], '\n')
return item[0]
print('No keyword found\n')
return False
else:
print('Error:', res['err_msg'], '\n')
return False
def monitor(self):
print('* testing noise')
sleep(1)
p = pyaudio.PyAudio()
stream = p.open(
format=self.FORMAT,
channels=self.CHANNELS,
rate=self.RATE,
input=True,
frames_per_buffer=self.CHUNK)
while True:
test_data = stream.read(self.CHUNK)
            noise_data = np.frombuffer(test_data, dtype=np.short)  # frombuffer replaces the deprecated np.fromstring for binary data
"""
With thanks to 2019 SF2 Group 7 (<NAME> - <EMAIL>, <NAME> -
<EMAIL>), who did the bulk of the porting from matlab to Python.
"""
import warnings
import numpy as np
from .laplacian_pyramid import quant1, quant2, quantise, bpp  # quantise/bpp assumed to live alongside quant1/quant2
from .dct import dct_ii, colxfm, regroup
from .lbt import pot_ii
from cued_sf2_lab.dwt import idwt
from cued_sf2_lab.dwt import dwt
def nlevdwt(X, n):
m = X.shape[0]
Q = X.copy()
for i in range(n):
Q[:m,:m]=dwt(Q[:m,:m])
m=m//2
return Q
def nlevidwt(Y, n):
m = Y.shape[0]//(2**(n-1))
Q = Y.copy()
for i in range(n):
Q[:m,:m] = idwt(Q[:m,:m])
m*=2
return Q
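# Added sketch (`_demo_nlevdwt_roundtrip` is illustrative only): nlevidwt
# should invert nlevdwt up to floating-point error, assuming the default
# analysis/synthesis filters in cued_sf2_lab.dwt form a perfect-reconstruction pair.
def _demo_nlevdwt_roundtrip(n=3):
    X = np.random.default_rng(0).standard_normal((256, 256))
    Z = nlevidwt(nlevdwt(X, n), n)
    assert np.max(np.abs(Z - X)) < 1e-6
    return Z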
def quantdwt(Y, dwtstep,rise_ratio = None):
"""
Parameters:
Y: the output of `dwt(X, n)`
dwtstep: an array of shape `(3, n+1)`
Returns:
Yq: the quantized version of `Y`
dwtenc: an array of shape `(3, n+1)` containing the entropies
"""
n = dwtstep.shape[1]-1
m = Y.shape[0]
    dwtent = np.zeros((3, n+1))
Q = Y.copy()
if rise_ratio is None:
rise_ratio = 0.5*np.ones(dwtstep.shape)
# print(rise_ratio)
for i in range(n):
m=m//2
Q[:m,m:2*m] = quantise(Y[:m,m:2*m], dwtstep[0,i],rise_ratio[0,i]*dwtstep[0,i])
dwtent[0,i]=bpp(Q[:m,m:2*m])
Q[m:2*m,:m] = quantise(Y[m:2*m,:m], dwtstep[1,i],rise_ratio[1,i]*dwtstep[1,i])
dwtent[1,i]=bpp(Q[m:2*m,:m])
Q[m:2*m,m:2*m] = quantise(Y[m:2*m,m:2*m], dwtstep[2,i],rise_ratio[2,i]*dwtstep[2,i])
dwtent[2,i]=bpp(Q[m:2*m,m:2*m])
Q[:m,:m] = quantise(Y[:m,:m], dwtstep[0,n],rise_ratio[0,n]*dwtstep[0,n])
dwtent[0,n] = bpp(Q[:m,:m])
return Q, dwtent
def get_quantisation_step_ratio(n):
    ''' Returns the quantisation-step ratios between the DWT sub-images needed
    to equalise each sub-image's MSE contribution to the reconstruction.'''
test_image = np.zeros((256,256))
Y = nlevdwt(test_image,n)
energies = np.zeros((3,n+1))
dwtstep = np.zeros((3,n+1))
m,x = Y.shape[0],Y.shape[1]
for i in range(n):
Y = nlevdwt(test_image,n)
# top right -> k = 0
Y[:m//2,x//2:x][Y[:m//2,x//2:x].shape[0]//2-1,Y[:m//2,x//2:x].shape[1]//2-1] = 100
Z = nlevidwt(Y,n)
energy_tr = np.sum(Z**2)
energies[0,i] = energy_tr
Y[:m//2,x//2:x][Y[:m//2,x//2:x].shape[0]//2-1,Y[:m//2,x//2:x].shape[1]//2-1] = 0
# bottom right -> k = 1
Y = nlevdwt(test_image,n)
Y[m//2:m,x//2:x][Y[m//2:m,x//2:x].shape[0]//2-1,Y[m//2:m,x//2:x].shape[1]//2-1] = 100
Z = nlevidwt(Y,n)
energy_br = np.sum(Z**2)
energies[1,i] = energy_br
Y[m//2:m,x//2:x][Y[m//2:m,x//2:x].shape[0]//2-1,Y[m//2:m,x//2:x].shape[1]//2-1] = 0
# bottom left -> k = 2
Y = nlevdwt(test_image,n)
Y[m//2:m,:x//2][Y[m//2:m,:x//2].shape[0]//2-1,Y[m//2:m,:x//2].shape[1]//2-1] = 100
Z = nlevidwt(Y,n)
energy_br = np.sum(Z**2)
energies[2,i] = energy_br
Y[m//2:m,:x//2][Y[m//2:m,:x//2].shape[0]//2-1,Y[m//2:m,:x//2].shape[1]//2-1] = 0
m //= 2
x //= 2
Y = nlevdwt(test_image,n)
Y[:m,:x][Y[:m,:x].shape[0]//2-1,Y[:m,:x].shape[1]//2-1] = 100
Z = nlevidwt(Y,n)
energy_tr = np.sum(Z**2)
energies[0,n] = energy_tr
    ratios = np.sqrt(energies[0,0]/energies)
Y[:m,:x][Y[:m,:x].shape[0]//2-1,Y[:m,:x].shape[1]//2-1] = 0
return ratios
def diagscan(N):
'''
Generate diagonal scanning pattern
Return: scan: a diagonal scanning index for
an NxN matrix
The first entry in the matrix is assumed to be the DC coefficient
and is therefore not included in the scan
'''
# Copied from matlab without accounting for indexing.
slast = N + 1
scan = [slast]
while slast != N * N:
while slast > N and slast % N != 0:
slast = slast - (N - 1)
scan.append(slast)
if slast < N:
slast = slast + 1
else:
slast = slast + N
scan.append(slast)
if slast == N * N:
break
while slast < (N * N - N + 1) and slast % N != 1:
slast = slast + (N - 1)
scan.append(slast)
if slast == N * N:
break
if slast < (N * N - N + 1):
slast = slast + N
else:
slast = slast + 1
scan.append(slast)
# Python indexing
return np.array(scan) - 1
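# Added sketch (`_demo_diagscan` is illustrative only): the scan visits every
# AC index of an NxN block exactly once, starting just after the DC term.
def _demo_diagscan(N=8):
    scan = diagscan(N)
    assert scan[0] == N
    assert np.array_equal(np.sort(scan), np.arange(1, N * N))
    return scan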
def runampl(a):
'''
RUNAMPL Create a run-amplitude encoding from input stream
[ra] = RUNAMPL(a) Converts the stream of integers in 'a' to a
    run-amplitude encoding in 'ra'
Column 1 of ra gives the runs of zeros between each non-zero value.
Column 2 gives the JPEG sizes of the non-zero values (no of
bits excluding leading zeros).
Column 3 of ra gives the values of the JPEG remainder, which
is normally coded in offset binary.
Parameters:
        a: an integer stream (array)
Returns:
ra: (,3) nparray
'''
# Check for non integer values in a
if sum(abs(np.remainder(a, 1))):
raise ValueError("Warning! RUNAMPL.M: Attempting to create" +
" run-amplitude from non-integer values")
b = np.where(a != 0)[0]
if len(b) == 0:
ra = np.array([[0, 0, 0]])
return ra
# List non-zero elements as a column vector
c = np.reshape(a[b], (b.shape[0], 1)).astype('int')
# Generate JPEG size vector ca = floor(log2(abs(c)) + 1)
ca = np.zeros(c.shape).astype('int')
k = 1
cb = np.abs(c)
maxc = np.max(cb)
ka = np.array([[1]])
while k <= maxc:
ca = ca + (cb >= k)
k = k * 2
ka = np.concatenate((ka, np.array([[k]])))
cneg = np.where(c < 0)[0]
# Changes expression for python indexing
c[cneg] = c[cneg] + ka[ca[cneg].flatten()] - 1
bcol = np.reshape(b, (len(b), 1))
# appended -1 instead of 0.
col1 = np.diff(np.concatenate((np.array([[-1]]), bcol)).flatten()) - 1
col1 = np.reshape(col1, (col1.shape[0], 1))
ra = np.concatenate((col1, ca, c), axis=1)
ra = np.concatenate((ra, np.array([[0, 0, 0]])))
return ra
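# Added worked example (`_demo_runampl` is illustrative only): two zeros, then
# +3 (JPEG size 2), one zero, then -1 (size 1, offset-binary remainder 0),
# followed by the (0, 0, 0) end-of-block marker.
def _demo_runampl():
    ra = runampl(np.array([0, 0, 3, 0, -1]))
    assert np.array_equal(ra, np.array([[2, 2, 3], [1, 1, 0], [0, 0, 0]]))
    return ra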
def huffdflt(typ):
"""
HUFFDFLT Generates default JPEG huffman table
[bits, huffval] = HUFFDFLT(type) Produces the luminance (type=1) or
chrominance (type=2) tables.
The number of values per bit level is stored in 'bits', with the
corresponding codes in 'huffval'.
Parameters:
typ: Integer
Returns:
bits: (16, ) nparray
huffval: (162, ) nparray
"""
if typ == 1:
bits = np.array([0, 2, 1, 3, 3, 2, 4, 3, 5, 5, 4, 4, 0, 0, 1, 125])
huffval = np.concatenate((
[], # 1-bit
[1, 2], # 2-bit
[3], # 3-bit
[0, 4, 17], # 4-bit
[5, 18, 33], # 5-bit
[49, 65], # 6-bit
[6, 19, 81, 97], # 7-bit
[7, 34, 113], # 8-bit
[20, 50, 129, 145, 161], # 9-bit
[8, 35, 66, 177, 193], # 10-bit
[21, 82, 209, 240], # 11-bit
[36, 51, 98, 114], # 12-bit
[], # 13-bit
[], # 14-bit
[130], # 15-bit
[9, 10, 22, 23, 24, 25, 26, 37, 38, 39,
40, 41, 42, 52, 53, 54, 55], # 16-bit
[56, 57, 58, 67, 68, 69, 70, 71, 72,
73, 74, 83, 84, 85, 86, 87, 88, 89],
[90, 99, 100, 101, 102, 103, 104, 105, 106,
115, 116, 117, 118, 119, 120, 121, 122, 131],
[132, 133, 134, 135, 136, 137, 138, 146, 147,
148, 149, 150, 151, 152, 153, 154, 162, 163],
[164, 165, 166, 167, 168, 169, 170, 178, 179,
180, 181, 182, 183, 184, 185, 186, 194, 195],
[196, 197, 198, 199, 200, 201, 202, 210, 211,
212, 213, 214, 215, 216, 217, 218, 225, 226],
[227, 228, 229, 230, 231, 232, 233, 234, 241,
242, 243, 244, 245, 246, 247, 248, 249, 250]
)).astype('int')
else:
bits = np.array([0, 2, 1, 2, 4, 4, 3, 4, 7, 5, 4, 4, 0, 1, 2, 119])
huffval = np.concatenate((
[], # 1-bit
[0, 1], # 2-bit
[2], # 3-bit
[3, 17], # 4-bit
[4, 5, 33, 49], # 5-bit
[6, 18, 65, 81], # 6-bit
[7, 97, 113], # 7-bit
[19, 34, 50, 129], # 8-bit
[8, 20, 66, 145, 161, 177, 193], # 9-bit
[9, 35, 51, 82, 240], # 10-bit
[21, 98, 114, 209], # 11-bit
[10, 22, 36, 52], # 12-bit
[], # 13-bit
[225], # 14-bit
[37, 241], # 15-bit
[23, 24, 25, 26, 38, 39, 40, 41, 42, 53, 54], # 16-bit
[55, 56, 57, 58, 67, 68, 69, 70, 71,
72, 73, 74, 83, 84, 85, 86, 87, 88],
[89, 90, 99, 100, 101, 102, 103, 104, 105,
106, 115, 116, 117, 118, 119, 120, 121, 122],
[130, 131, 132, 133, 134, 135, 136, 137, 138,
146, 147, 148, 149, 150, 151, 152, 153, 154],
[162, 163, 164, 165, 166, 167, 168, 169, 170,
178, 179, 180, 181, 182, 183, 184, 185, 186],
[194, 195, 196, 197, 198, 199, 200, 201, 202,
210, 211, 212, 213, 214, 215, 216, 217, 218],
[226, 227, 228, 229, 230, 231, 232, 233, 234,
242, 243, 244, 245, 246, 247, 248, 249, 250]
)).astype('int')
return [bits, huffval]
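# Added sketch (`_demo_huffdflt` is illustrative only): each default table
# lists 162 run/size values, matching the total of the per-length counts.
def _demo_huffdflt():
    bits, huffval = huffdflt(1)
    assert sum(bits) == len(huffval) == 162
    return bits, huffval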
def huffgen(bits, huffval):
"""
HUFFGEN Generate huffman codes
[huffcode, ehuf] = HUFFGEN(bits, huffval) Translates the number
of codes at each bit (in bits) and the valid values (in huffval).
huffcode lists the valid codes in ascending order. ehuf is a
two-column vector, with one entry per possible 8-bit value. The
first column lists the code for that value, and the second lists
the length in bits.
Parameters:
bits: 1D Numpy array.
huffval: 1D Numpy array.
Returns:
huffcode: nparray (ncodes, 1)
ehuf: nparray (256, 2)
"""
# Generate huffman size table (JPEG fig C1, p78):
nb = bits.shape[0]
k = 1 # Max value of k is 162
j = 1
# sum on nparray sums columns.
ncodes = sum(bits)
# Check every where 1_D array of zeros/ones defined like this.
huffsize = np.zeros((ncodes, 1), dtype=int)
for i in range(nb):
while j <= bits[i]:
huffsize[k - 1, 0] = i + 1
k += 1
j += 1
j = 1
huffcode = np.zeros((ncodes, 1), dtype=int)
code = 0
si = huffsize[0, 0]
# Generate huffman code table (JPEG fig C2, p79)
for k in range(ncodes):
while huffsize[k, 0] > si:
code = code * 2
si += 1
huffcode[k, 0] = code
code += 1
# Reorder the code tables according to the data in
# huffval to yield the encoder look-up tables.
    ehuf = np.zeros((256, 2), dtype=int)
# coding: utf-8
"""
hists.py
Copyright (c) 2014 <NAME> <<EMAIL>>
An easy, quick, lightweight histogram class based on ndarray
Initialise with bin indices:
>>> a = Hist([0, 1, 2, 3])
>>> len(a)
3
>>> a.bins
array([0, 1, 2, 3])
Optionally include data:
>>> Hist([0, 1, 2, 3], data=[1, 0.2, 3])
Hist([0, 1, 2, 3], data=[ 1. , 0.2, 3. ])
Or just specify the blank data type:
>>> a = Hist([0, 1, 2, 3], dtype=int)
>>> a
Hist([0, 1, 2, 3], data=[0, 0, 0])
You can do any normal numpy arithmetic operations:
>>> a = Hist([0, 1, 2, 3], data=[1, 0.2, 3])
>>> b = a + a
>>> b -= a
>>> all(a == b)
True
And you can fill bins from values:
>>> a = Hist([0,1,2,3])
>>> a.fill(1.4, 3)
>>> a
Hist([0, 1, 2, 3], data=[ 0., 3., 0.])
Or from arrays:
>>> a = Hist([0,1,2,3])
>>> a.fill([1.4, 2.4], weights=[1, 2])
>>> a
Hist([0, 1, 2, 3], data=[ 0., 1., 2.])
If you use pyROOT, you can convert from 1D histograms:
>>> type(source)
<class 'ROOT.TH1D'>
>>> convert = ashist(source)
>>> type(convert)
<class 'simplehist.hists.Hist'>
Or conversion from custom types - see simplehist.converter for
implementation details.
You can also draw histograms, using any of the options
that can be passed to matplotlib.pyplot.plot:
>>> hist_object.draw_hist(lw=2)
"""
import sys
import numpy
# A numpy array with bins, and constraints on those bins
class Hist(numpy.ndarray):
def __new__(cls, bins, data=None, **kwargs):
# If bins contains items that are list-like then it is probably multidim
if isinstance(bins[0], (tuple, list)):
# It must be multi-dimension...
bins = tuple(numpy.asarray(x) for x in bins)
ndims = len(bins)
shape = tuple(len(x)-1 for x in bins)
else:
# Just a single dimension
bins = numpy.asarray(bins)
assert bins.ndim == 1
ndims = 1
shape = (len(bins)-1,)
# Create or validate the data shape
if data is None:
# data = numpy.zeros(tuple(x-1 for x in bins.shape), **kwargs)
data = numpy.zeros(shape, **kwargs)
else:
data = numpy.asarray(data, **kwargs)
# Same dimensions and shape-1
assert ndims == data.ndim
if ndims == 1:
assert all(x == len(y)-1 for x, y in zip(data.shape, [bins]))
else:
assert all(x == len(y)-1 for x, y in zip(data.shape, bins))
# Cast from our data array
obj = data.view(cls)
obj._bins = bins
return obj
def __array_finalize__(self, obj):
# Since always creating as an alternate, this should never happen
assert obj is not None
# Other should always have a _bins object
self._bins = getattr(obj,"_bins",None)
def __array_wrap__(self,obj,context=None):
# if obj.ndim == 0 and obj.size == 1:
# return obj.item()
# Don't wrap as a hist if the shape changed - we have no idea how it did so
if not obj.shape == self.shape:
return obj
return super(Hist,self).__array_wrap__(obj,context)
@property
def bins(self):
return self._bins
@bins.setter
def bins(self, value):
value = numpy.asarray(value)
assert value.ndim == self.ndim
assert all(x == y-1 for x, y in zip(self.shape, value.shape))
self._bins = value
def __getitem__(self, index):
"""Return a value, or a subhist from a slice.
Getting singular indices just returns the values, whilst slices return
subhists, with applicable bins."""
if isinstance(index, tuple) and self.ndim == 1:
binSel = []
# Build a new tuple for each of the entries
for selection in index:
if selection is Ellipsis:
binSel.append(Ellipsis)
elif isinstance(selection, slice):
# Stepping really doesn't make much sense with bins
assert selection.step is None or selection.step == 1
if selection.stop is not None:
                    binSel.append(slice(selection.start, min(sys.maxsize, selection.stop+1)))
else:
binSel.append(slice(selection.start, None))
elif isinstance(selection, int):
binSel.append(slice(selection, selection+1))
else:
# Throw away the hist information as we don't understand the request
return super(Hist, self).__getitem__(index).view(numpy.ndarray)
#assert False
# Build a new histogram with these bins
ret = super(Hist,self).__getitem__(index).view(Hist)
# If this gave us a hist..
if hasattr(ret, "_bins"):
ret._bins = self._bins.__getitem__(tuple(binSel))
return ret
else:
return super(Hist, self).__getitem__(index)
def __getslice__(self, i, j):
return self.__getitem__((slice(i,j),))
def __repr__(self):
# if numpy.all(self == 0):
# # Bin-only output
# return "{}(bins={})".format(type(self).__name__, numpy.array_repr(self._bins))
# else:
if self.ndim == 1:
return "{}({}, data={})".format(type(self).__name__,
numpy.array_repr(self._bins)[len("array("):-1],
                                            numpy.array_repr(self.view(numpy.ndarray))[len("array("):-1])
#================================LabFuncs.py===================================#
# Created by <NAME> 2019
# Description:
# Contains an assortment of functions that are all related to the 'Lab' somehow
# e.g. the nuclear form factor, lab velocity etc.
# Contains:
#####
# FormFactorHelm: Only Form factor being used atm
#####
##### Resolutions
# Smear: Applies angular resolution to a recoil map as a function of direction
# SmearE: Applies energy resolution to a recoil spectrum as a function of energy
#####
##### Lab velocity
# LabVelocity: Full lab velocity in (N,W,Z) with Earth rotation
# LabVelocitySimple: Simplified Lab velocity in galactic coordinates
# JulianDay: JulianDay at dd-mm-yyyy hh:hh
# EarthVelocity: Earth velocity to second order in eccentricity
# EarthVector: Earth radius vector to second order in eccentricity
# v_infinity: transforms velocity to the value outside the Sun's potential
# v_infinity_alt: same as v_infinity but with a different velocity discretisation
#####
##### Solar direction:
# EarthSunDistance: Distance between Earth and Sun as a function of time
# SolarDirection: Direction of the sun at a given time
#####
##### Co-ordinate transformations
# eqt2lab: Equatorial system to laboratory system
# gal2eqt: Galactic system to equatorial system
# gal2lab: Galactic system to lab system
#####
#==============================================================================#
import numpy as np
from numpy import cos, sin, pi, floor, exp, sqrt, size, zeros, shape, arccos, ones
from numpy import array, trapz
import Params
#==============================Form Factors====================================#
def FormFactorHelm(E_r,A):
q = sqrt(2*A*931.5*1000*E_r)*1.0e-12/1.97e-7
c1 = 1.23*A**(1.0/3.0)-0.6
s = 0.9
R_1 = sqrt(c1**2 + (7.0/3.0)*pi**2.0*(0.52**2.0) - 5*s**2.0)
F = (3*(sin(q*R_1) - q*R_1*cos(q*R_1))*exp(-q*q*s*s/2.0)/(q*R_1)**3)
return F
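# --- Illustrative check (not part of the original file) ---
# The Helm form factor tends to 1 as E_r -> 0 and falls off with recoil
# energy. Recoil energies below are in keV, following the conventions used
# above; A = 131 is a xenon-like target.
def _example_helm():
    for E_r in (1.0, 10.0, 50.0):
        print(E_r, FormFactorHelm(E_r, A=131))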
#------------------------------------------------------------------------------#
#=======================Apply Angular Resolution===============================#
def Smear(x,dR,sig_gamma):
# x = cartesian vectors
# dR = value of rate at directions in x
# sig_gamma = Gaussian width to smear dR by
npix = size(dR)
dR_smeared = zeros(shape=shape(dR))
for i in range(0,npix):
x0 = x[i,:]
gamma = x0[0]*x[:,0] + x0[1]*x[:,1] + x0[2]*x[:,2]
gamma[i] = 1.0
gamma = arccos(gamma)
dR_smeared[i] = sum(dR*exp(-gamma**2.0/(2*sig_gamma**2.0)))
# Make sure it's normalised to what it was before the smearing
dR_smeared = dR_smeared*sum(dR)/sum(dR_smeared)
return dR_smeared
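# --- Illustrative usage (not part of the original file) ---
# Smear a rate map that is a delta function in a single pixel. `x` must be
# an (npix, 3) array of unit direction vectors (e.g. from a HEALPix grid).
def _example_smear():
    x = np.random.randn(192, 3)
    x /= np.sqrt((x**2).sum(axis=1, keepdims=True))  # normalise to unit vectors
    dR = np.zeros(192)
    dR[0] = 1.0
    return Smear(x, dR, sig_gamma=0.2)  # ~0.2 rad angular resolution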
#------------------------------------------------------------------------------#
#===========================Apply Energy Res===================================#
def SmearE(E,dR,sig_E):
# E = energies
# dR = value of rate at energies in E
# sig_E = Gaussian width to smear dR by
nE = size(dR)
dR_smeared = zeros(shape=shape(dR))
if size(sig_E)==1:
sig_E = sig_E*np.ones(shape=shape(dR))
for i in range(0,nE):
Ediff = abs(E-E[i])
dR_smeared[i] = trapz(dR*exp(-Ediff**2.0/(2*sig_E**2.0)),E)
# Make sure it's normalised to what it was before the smearing
dR_smeared = dR_smeared*trapz(dR,E)/trapz(dR_smeared,E)
return dR_smeared
#------------------------------------------------------------------------------#
#==============================Lab Velocity====================================#
# Peculiar velocity
v_pec = array([11.1,12.2,7.3])
# Earth orbital params
vv_earthrev = 29.79
eccentricity = 0.016722
eccentricity_deg = 0.9574
orb_long_ecliptic = 13.0+1.0
lat_ecl_gal = np.array([-5.5303,59.575,29.812])
long_ecl_gal = np.array([266.141,-13.3485,179.3212])
e1 = array([0.9941,0.1088,0.0042])
e2 = array([-0.0504,0.4946,-0.8677])
w_p = 2*pi/365 # orbital freq.
t1 = 79
ve = 29.79 # Earth's revolution
vrot = 0.47 # Earth's rotation
# Other constants
AstronomicalUnit = 1.49597892e11 # Astronomical Unit
EarthRadius = 6371.01*1000.0 # Earth Radius
Msun = 2.0e30 # Solar mass (kg)
bigG = 6.67e-11*(1.0e3)**(-3) # gravitational constant in km^3 kg^-1 s^-2
Jan1 = 2458849.5 # Julian date of January 1 2020 (00:00 UT)
#------------------------------------------------------------------------------#
def LabVelocity(day, Loc=Params.Boulby, v_LSR=233.0):
JD = day+Jan1
lat = Loc.Latitude
lon = Loc.Longitude
# Convert day into phase of Earth rotation t_lab
UT = 24*(JD+0.5-floor(JD+0.5)) #Universal time
MJD = JD - 2400000.5 #Modified Julian Day
T_0 = (floor(MJD)-55197.5)/36525.0
t_GAST = (101.0308 + 36000.770*T_0 + 15.04107*UT)/15.0
t_lab = t_GAST + lon/15
t_lab = 15*t_lab #Lab time in degrees
# Galactic (LSR) Rotation
vtemp = np.array([0.0,v_LSR,0.0])
v_galrot = gal2lab(vtemp,t_lab, lat) #transform to lab co-ords
# Peculiar solar Motion
vtemp1 = v_pec
v_solar = gal2lab(vtemp1,t_lab, lat) # transform to lab co-ords
#Earth's revolution (first calculate in galactic frame then transform)
e = eccentricity
lambda_0 = orb_long_ecliptic
L = 281.0298 + 36000.77*T_0 + 0.04107*UT
g = 357.9258 + 35999.05*T_0 + 0.04107*UT
lambda_sun = L + (1.915 - 0.0048*T_0)*sin(g*pi/180.0)\
+ 0.020*sin(2*g*pi/180.0)
beta = lat_ecl_gal
lambda_i = long_ecl_gal
v_earthrev1 = vv_earthrev*(1-e*sin(pi/180.0*(lambda_sun-lambda_0)))*\
(cos(beta*pi/180.0)*sin(pi/180.0*(lambda_sun-lambda_i)))
v_earthrev = gal2lab(v_earthrev1,t_lab, lat) #transform to lab co-ords
# Earth's rotation (already in lab co-ords)
v_earthrot = 0.465102*cos(lat*pi/180)*np.array([0.0,-1.0,0.0])
# Add them all together (delete as needed)
v_lab = np.array([0.,0.,0.])
v_lab += v_earthrot
v_lab += v_earthrev
v_lab += v_solar
v_lab += v_galrot
return v_lab
def JulianDay(month, day, year, hour): # Calculates time in JD for a given date
year_r = year+4800-floor((14-month)/12.0)
month_r = month+12*floor((14-month)/12.0)-3
JulianDay = day + floor((153*month_r+2)/5.0) + 365*year_r\
+ floor(year_r/4.0) - floor(year_r/100.0)\
+ floor(year_r/400.0) - 32045 + (hour-12.0)/24.0
return JulianDay
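# --- Sanity check (not part of the original file) ---
# J2000.0, i.e. 2000-01-01 12:00 UT, is JD 2451545.0 by definition, and the
# routine above reproduces it exactly:
def _example_julianday():
    assert JulianDay(1, 1, 2000, 12.0) == 2451545.0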
def LabVelocitySimple(day,v_LSR=233.0):
# day measured from Jan1
vsun = array([0.0,v_LSR,0.0])+v_pec
v_lab = vsun + EarthVelocity(day)
return v_lab
def EarthVelocity(day):
# Second order in eccentricity
# day measured from Jan1
lambda_p = 102.93*pi/180.0
th = w_p*(day-t1)
v_E = cos(th)*(e1-2*eccentricity*sin(lambda_p)*e2) \
+sin(th)*(e2+2*eccentricity*sin(lambda_p)*e1) \
-eccentricity*(cos(2*th)*(cos(lambda_p)*e1-sin(lambda_p)*e2) \
+sin(2*th)*(sin(lambda_p)*e1+cos(lambda_p)*e2))
return vv_earthrev*v_E
def EarthVector(day):
# Earth's orbital radius vectors
# day measured from Jan1
# Second order in Earth's eccentricity
a_earth = AstronomicalUnit/1.0e3
tp = 3
lamb_p = 102*pi/180
g = w_p*(day-tp)
nu = g + 2.*eccentricity*sin(g)*(5.0/4.0)+eccentricity**2.0*sin(2*g)
r = a_earth*(1-eccentricity**2.0)/(1+eccentricity*cos(nu))
r_earth = r*(-sin(lamb_p+nu)*e1 + cos(lamb_p+nu)*e2)
return r_earth
def v_infinity(v,costh,phi,day):
# v_infinity used for Gravitational focusing, this version uses a set of
# angles costh and phi
# day measured from Jan1
x_earth = EarthVector(day)
r_earth = sqrt(sum(x_earth**2.0))
x_earth /= r_earth # unit vector towards Earth
v_earth = EarthVelocity(day)
uu_esc = 2*bigG*Msun/r_earth # escape speed
vx = v*sqrt(1.0-costh**2.0)*cos(phi)+v_earth[0]
vy = v*sqrt(1.0-costh**2.0)*sin(phi)+v_earth[1]
vz = v*costh+v_earth[2]
vv_inf = (vx**2.0+vy**2.0+vz**2.0)-uu_esc
vv_inf = (vv_inf+abs(vv_inf))/2.0
vdotr = vx*x_earth[0]+vy*x_earth[1]+vz*x_earth[2]
v_inf = sqrt(vv_inf)
denom = vv_inf + 0.5*uu_esc - v_inf*vdotr
v_infx = (vv_inf*vx + 0.5*v_inf*uu_esc*x_earth[0] - v_inf*vx*vdotr)/denom
v_infy = (vv_inf*vy + 0.5*v_inf*uu_esc*x_earth[1] - v_inf*vy*vdotr)/denom
v_infz = (vv_inf*vz + 0.5*v_inf*uu_esc*x_earth[2] - v_inf*vz*vdotr)/denom
return v_infx,v_infy,v_infz
def v_infinity_alt(v3,day):
# v_infinity used for Gravitational focusing, this version uses a set of
# angles cartesian velocity vectors in v3 which defines a Healpix
# discretisation. Tends to be a bit faster and more accurate.
# day measured from Jan1
x_earth = EarthVector(day)
r_earth = sqrt(sum(x_earth**2.0))
x_earth /= r_earth # unit vector towards Earth
v_earth = EarthVelocity(day)
uu_esc = 2*bigG*Msun/r_earth # escape speed
vx = v3[:,0]+v_earth[0] # galactic x-component
vy = v3[:,1]+v_earth[1] # galactic y-component
vz = v3[:,2]+v_earth[2] # galactic z-component
vv_inf = (vx**2.0+vy**2.0+vz**2.0)-uu_esc
vv_inf = (vv_inf+abs(vv_inf))/2.0
#vv_inf[vv_inf<0.0] = 0.0
vdotr = vx*x_earth[0]+vy*x_earth[1]+vz*x_earth[2] # (v.x_earth)
v_inf = sqrt(vv_inf)
denom = vv_inf + 0.5*uu_esc - v_inf*vdotr
v_infx = (vv_inf*vx + 0.5*v_inf*uu_esc*x_earth[0] - v_inf*vx*vdotr)/denom
v_infy = (vv_inf*vy + 0.5*v_inf*uu_esc*x_earth[1] - v_inf*vy*vdotr)/denom
v_infz = (vv_inf*vz + 0.5*v_inf*uu_esc*x_earth[2] - v_inf*vz*vdotr)/denom
return v_infx,v_infy,v_infz
#==========================Solar direction=====================================#
def EarthSunDistance(JD): # Earth-sun distance at Julian Day (JD)
D = JD-2451545.0
g = 357.529 + 0.98560028*D
g = g*pi/180.0
r_es = 1.00014 - 0.01671*cos(g) - 0.00014*cos(2*g)
r_es = r_es*AstronomicalUnit
return r_es
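# --- Illustrative check (not part of the original file) ---
# Near perihelion (early January) the Earth-Sun distance is ~0.983 AU and
# near aphelion ~1.017 AU; J2000.0 falls a few days after perihelion:
def _example_earthsun():
    print(EarthSunDistance(2451545.0) / AstronomicalUnit)  # ~0.983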
#------------------------------------------------------------------------------#
def SolarDirection(JD,Loc): # Solar direction in lab coords at Julian Day (JD)
lat = Loc.Latitude
lon = Loc.Longitude
# Compute RA and dec of Sun
#JD = day+Jan1
n = JD - 2451545.0
Omega = 2.1429-0.0010394594*n
L = 4.8950630 + 0.017202791698*n
g = 6.2400600 + 0.0172019699*n
ll = L+0.03341607*sin(g) + 0.00034894*sin(2*g)\
- 0.0001134 - 0.0000203*sin(Omega)
ep = 0.4090928 - 6.214e-9*n + 0.0000396*cos(Omega)
ra = np.arctan2((cos(ep)*sin(ll)),cos(ll)) # Right ascension of Sun
dec = np.arcsin(sin(ep)*sin(ll)) # Declination of sun
# Solar vector
x_sun1 = np.array([0.,0.,0.])
x_sun1[0] = cos(dec)*cos(ra)
x_sun1[1] = cos(dec)*sin(ra)
x_sun1[2] = sin(dec)
# Lab time conversion
UT = 24*(JD+0.5-floor(JD+0.5))
MJD = JD - 2400000.5
T_0 = (floor(MJD)-55197.5)/36525.0
t_GAST = (101.0308 + 36000.770*T_0 + 15.04107*UT)/15.0
t_lab = t_GAST + lon/15.0
t_lab = 15*t_lab # DEGREES
# Convert vector from equatorial system into lab system
x_sun = eqt2lab(x_sun1,t_lab,lat)
return x_sun
def EarthSunDistanceMod(JD):
# Solar neutrinos:
# Flux is scaled by 1/EarthSunDistance^2 but since Flux is already averaged
# We need to also divide by Integral(1/R^2) over one year
# Integral_inv_EarthSun_sq is defined in params.f95
Integral_inv_EarthSun_sq = 4.468864372000642e-23 # integral(1/R^2) over 1 year
f = (1.0/Integral_inv_EarthSun_sq)*(1.0/EarthSunDistance(JD)**2.0)
return f
#------------------------------------------------------------------------------#
#==============================================================================#
#---------------------------Coordinate trans.----------------------------------#
def eqt2lab(vp,t_lab,lat): # Equatorial (x_e,y_e,z_e) to Laboratory (N,W,Z)
t = t_lab*pi/180.0
latr = lat*pi/180.0
v = vp*0.0
v[0] = -cos(t)* | sin(latr) | numpy.sin |
# -*- coding: utf-8 -*-
"""
Created on Thu Nov 22 23:18:15 2018
@author: Tian
"""
import numpy as np
def sigmoid(z):
return 1./(1.+np.exp(-z))
def predict(X, w):
return sigmoid(np.dot(X,w))
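# Illustrative usage (not part of the original file): `predict` returns
# probabilities in (0, 1), which the vectorised classifier defined next
# thresholds at 0.5. E.g.:
#   X = np.array([[0.5, 1.0], [2.0, -1.0]]); w = np.array([1.0, -1.0])
#   predict(X, w)  # -> array of two probabilities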
__classify= | np.vectorize(lambda pred: 1 if pred>=0.5 else 0) | numpy.vectorize |
from EVT_fitting import*
import scipy as sp
from openmax_utils import compute_distance
import sys
import numpy as np
def computeOpenMaxProbability(openmax_fc8, openmax_score_u, classes=10, channels=1):
""" Convert the scores in probability value using openmax
Input:
---------------
openmax_fc8 : modified FC8 layer from Weibull based computation
openmax_score_u : degree
Output:
---------------
modified_scores : probability values modified using OpenMax framework,
by incorporating degree of uncertainity/openness for a given class
"""
prob_scores, prob_unknowns = [], []
for channel in range(channels):
channel_scores, channel_unknowns = [], []
for category in range(classes):
channel_scores += [np.exp(openmax_fc8[channel, category])]
total_denominator = np.sum(np.exp(openmax_fc8[channel, :])) + np.exp(np.sum(openmax_score_u[channel, :]))
if np.isinf(total_denominator):
total_denominator = sys.float_info.max
prob_scores += [channel_scores / total_denominator]
prob_unknowns += [np.exp(np.sum(openmax_score_u[channel, :])) / total_denominator]
prob_scores = np.asarray(prob_scores)
prob_unknowns = np.asarray(prob_unknowns)
scores = np.mean(prob_scores, axis=0)
unknowns = np.mean(prob_unknowns, axis=0)
modified_scores = scores.tolist() + [unknowns]
assert len(modified_scores) == (classes + 1)
return modified_scores
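# --- Illustrative call (not part of the original file) ---
# Shapes follow the function above: one channel, `classes` activations and
# the per-class degrees of uncertainty. The inputs here are random.
def _example_openmax():
    fc8 = np.random.randn(1, 10)
    score_u = 0.1 * np.abs(np.random.randn(1, 10))
    return computeOpenMaxProbability(fc8, score_u, classes=10, channels=1)  # length 11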
def recalibrate_scores(weibull_model, labellist, imgarr,
layer='fc8', alpharank=10, distance_type='eucos', classes=10, channels=1):
"""
Given FC8 features for an image, list of weibull model for each class,
re-calibrate scores
Input:
---------------
weibull_model : pre-computed weibull_model obtained from weibull_tailfitting() function
labellist : ImageNet 2012 labellist
imgarr : features for a particular image extracted using caffe architecture
Output:
---------------
openmax_probab: Probability values for a given class computed using OpenMax
softmax_probab: Probability values for a given class computed using SoftMax (these
were precomputed from caffe architecture. Function returns them for the sake
of convienence)
"""
if alpharank > len(labellist):
alpharank = len(labellist)
imglayer = imgarr[layer]
ranked_list = np.argsort(imgarr['scores']).ravel()[::-1]
alpha_weights = [((alpharank + 1) - i) / float(alpharank) for i in range(1, alpharank + 1)]
ranked_alpha = np.zeros(1000)
for i in range(len(alpha_weights)):
ranked_alpha[ranked_list[i]] = alpha_weights[i]
# Now recalibrate each fc8 score for each channel and for each class
# to include probability of unknown
openmax_fc8, openmax_score_u = [], []
for channel in range(channels):
channel_scores = imglayer[channel, :]
openmax_fc8_channel = []
openmax_fc8_unknown = []
count = 0
for categoryid in range(classes):
# get distance between current channel and mean vector
category_weibull = query_weibull(labellist[categoryid], weibull_model, distance_type=distance_type)
channel_distance = compute_distance(channel_scores, channel, category_weibull[0],
distance_type=distance_type)
# obtain w_score for the distance and compute probability of the distance
# being unknown wrt to mean training vector and channel distances for
# category and channel under consideration
wscore = category_weibull[2][channel].w_score(channel_distance)
modified_fc8_score = | np.ravel(channel_scores) | numpy.ravel |
import numpy as np
from nn.initializers import *
from nn.operators import *
class Layer(object):
"""
Layer abstraction
"""
def __init__(self, name):
"""Initialization"""
self.name = name
self.training = True # The phase: True during training, False during testing
self.trainable = False # Whether there are parameters in this layer that can be trained
def forward(self, input):
"""Forward pass, reture output"""
raise NotImplementedError
def backward(self, out_grad, input):
"""Backward pass, return gradient to input"""
raise NotImplementedError
def update(self, optimizer):
"""Update parameters in this layer"""
pass
def set_mode(self, training):
"""Set the phrase/mode into training (True) or tesing (False)"""
self.training = training
def set_trainable(self, trainable):
"""Set the layer can be trainable (True) or not (False)"""
self.trainable = trainable
def get_params(self, prefix):
"""Reture parameters and gradient of this layer"""
return None
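# Minimal illustrative subclass (not part of the original file) showing the
# contract a concrete layer must satisfy: `forward` maps input to output and
# `backward` maps the output gradient back to an input gradient.
class _Identity(Layer):
    def __init__(self, name='identity'):
        super(_Identity, self).__init__(name=name)
    def forward(self, input):
        return input
    def backward(self, out_grad, input):
        return out_grad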
####################################################################################
# following layers are mainly for RNN
class Linear(Layer):
def __init__(self, in_features, out_features, name='linear', initializer=Gaussian()):
"""Initialization
# Arguments
in_features: int, the number of input features
out_features: int, the number of required output features
initializer: Initializer class, to initialize weights
"""
super(Linear, self).__init__(name=name)
self.linear = linear()
self.trainable = True
self.weights = initializer.initialize((in_features, out_features))
self.bias = np.zeros(out_features)
self.w_grad = np.zeros(self.weights.shape)
self.b_grad = | np.zeros(self.bias.shape) | numpy.zeros |
import numpy as np
from math import pi
import os
from src import RDModes, Config, list_tl_files
import matplotlib.pyplot as plt
plt.style.use('elr')
plt.ion()
fc = 400
z_int = 150.
cf = Config(fc=fc)
tl_files = list_tl_files(fc)
tl_data = np.load(tl_files[23])
r_a = tl_data['rplot']
rd_modes = RDModes(tl_data['c_bg'], tl_data['x_a'], tl_data['z_a'],
cf.fc, cf.z_src, s=None, c_bounds=cf.c_bounds)
xs = tl_data['xs']
dr = (rd_modes.r_plot[-1] - rd_modes.r_plot[0]) / (rd_modes.r_plot.size - 1)
r_max = 60e3
num_r = int(np.ceil(r_max / dr))
r_a_modes = (np.arange(num_r) + 1) * dr
l_len = -2 * pi / (np.diff(np.real(rd_modes.k_bg)) - np.spacing(1))
# reference energy
psi_s = np.exp(1j * pi / 4) / (rd_modes.rho0 * np.sqrt(8 * pi)) \
* rd_modes.psi_ier(rd_modes.z_src)
psi_s /= np.sqrt(rd_modes.k_bg)
psi_s *= 4 * pi
z_a = tl_data['zplot']
dz = (z_a[-1] - z_a[0]) / (z_a.size - 1)
dom_modes = (rd_modes.mode_number == 0) | (rd_modes.mode_number == 1)
# either 3 or 4 selected modes
dom_modes = np.zeros_like(dom_modes)
am = | np.argmax(l_len) | numpy.argmax |
from aux_oampnet2 import get_complete_tensor_model
from sklearn.model_selection import train_test_split
from keras.optimizers import Adam
from keras.callbacks import TerminateOnNaN, ModelCheckpoint
import numpy as np
import tensorflow as tf
import hdf5storage
import os
from keras import backend as K
# GPU allocation
K.clear_session()
tf.reset_default_graph()
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID";
os.environ["CUDA_VISIBLE_DEVICES"] = "2";
# Set global seed
np.random.seed(2020)
# Tensorflow memory allocation
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
config.gpu_options.per_process_gpu_memory_fraction = 1.
K.tensorflow_backend.set_session(tf.Session(config=config))
# System parameters
num_tx, num_rx = 4, 4
mod_size = 4
# Architecture parameters
num_iterations = 4
# Training parameters
batch_size = 100
num_epochs = 10
learning_rate = 1e-4
# Load bitmaps
contents = hdf5storage.loadmat('constellation%d.mat' % mod_size)
constellation = contents['constellation'] # !!! Has to be swapped for 64-QAM
# Load training data
train_file = 'matlab/data/extended_rayleigh_ml_mimo%dby%d_mod%d_seed1234.mat' % (num_rx, num_tx, mod_size)
contents = hdf5storage.loadmat(train_file)
ref_x = np.squeeze(np.asarray(contents['ref_x']))
ref_y = np.squeeze(np.asarray(contents['ref_y']))
ref_h = np.squeeze(np.asarray(contents['ref_h']))
ref_labels = np.squeeze(np.asarray(contents['ref_labels']))
train_snr_array = np.squeeze(np.asarray(contents['snr_range']))
# Load test data
# test_file = 'matlab/data/extended_rayleigh_zf-sic_mimo%dby%d_mod%d_seed9999.mat' % (num_rx, num_tx, mod_size)
test_file = 'matlab/data/extended_rayleigh_ml_mimo%dby%d_mod%d_seed4321.mat' % (num_rx, num_tx, mod_size)
contents = hdf5storage.loadmat(test_file)
ref_x_test = np.squeeze(np.asarray(contents['ref_x']))
ref_y_test = np.squeeze(np.asarray(contents['ref_y']))
ref_h_test = np.squeeze(np.asarray(contents['ref_h']))
ref_labels_test = np.squeeze(np.asarray(contents['ref_labels']))
test_snr_array = np.squeeze(np.asarray(contents['snr_range']))
# For each SNR point
for train_snr_idx, train_snr_value in enumerate(train_snr_array):
# Clear session
K.clear_session()
# Get noise power
sigma_n = 10 ** (-train_snr_value / 10)
# Reshapes
x_train = np.moveaxis(ref_x[train_snr_idx], -1, -2)
x_train = np.reshape(x_train, (-1, num_tx))
y_train = np.moveaxis(ref_y[train_snr_idx], -1, -2)
y_train = np.reshape(y_train, (-1, num_rx))
h_train = np.moveaxis(ref_h[train_snr_idx], -1, -3)
h_train = np.reshape(h_train, (-1, num_rx, num_tx))
# Construct input-x starting at zeroes
x_input_train = np.zeros((y_train.shape[0], num_tx))
# Construct v starting with zero estimate
v_train = (np.square(np.linalg.norm(y_train, axis=-1, keepdims=True)) - num_rx * sigma_n) / np.trace(np.matmul(
np.conj(np.transpose(h_train, axes=(0, 2, 1))), h_train), axis1=-1, axis2=-2)[..., None]
v_train = np.real(v_train)
v_train = np.maximum(v_train, 5e-13)
# Construct tau starting at ones
tau_train = np.ones((y_train.shape[0], 1))
# Split into real/imaginary
x_input_real_train, x_input_imag_train = np.real(x_input_train), np.imag(x_input_train)
x_real_train, x_imag_train = np.real(x_train), np.imag(x_train)
y_real_train, y_imag_train = np.real(y_train), np.imag(y_train)
h_real_train, h_imag_train = np.real(h_train), | np.imag(h_train) | numpy.imag |
#! /usr/bin/python3
# -*- coding:utf-8 -*-
import numpy as np
class Network:
def __init__(self, layers, normalisation):
self.layers = layers
self.do_normalisation = normalisation # true or false
def normalisation(self, Input):
# N = np.amax(Input)
N = 255
if N == 0:
return Input
else:
return Input / N
def feed_forward(self, Input):
self.layers[0].compute(Input)
for i in range(len(self.layers) - 1):
self.layers[i + 1].compute(self.layers[i].output)
return self.layers[-1].output
def compute_F_prim(self, layer_number):
layer = self.layers[layer_number]
F_prim = np.diag(layer.activation.df(layer.activation_level))
return F_prim
def compute_sensibilities(self, error):
sensibilities = []
F_prim = self.compute_F_prim(-1)
sensibilities.append(-2 * F_prim.dot(error))
for k in range(len(self.layers) - 2, -1, -1):
F_prim = self.compute_F_prim(k)
sensibilities.append(F_prim.dot(
self.layers[k+1].weights.transpose()).dot(sensibilities[-1]))
return sensibilities[::-1]
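# Note (added for clarity): the recursion above is standard backpropagation,
#   s^M = -2 F'(n^M) e                     (output layer)
#   s^k = F'(n^k) (W^{k+1})^T s^{k+1}      (hidden layers, k = M-1, ..., 1)
# where F'(n) is the diagonal matrix of activation derivatives.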
def backpropagation(self, Input, expected_output):
error = expected_output - self.layers[-1].output
sensibilities = self.compute_sensibilities(error)
delta_weights = []
delta_weights.append(-self.layers[0].learning_rate *
np.outer(sensibilities[0], Input))
for i in range(1, len(self.layers)):
delta_weights.append(-self.layers[i].learning_rate *
np.outer(sensibilities[i], self.layers[i-1].output))
delta_bias = []
for i in range(len(self.layers)):
delta_bias.append(self.layers[i].learning_rate * sensibilities[i])
for i in range(len(self.layers)):
self.layers[i].update(delta_weights[i], delta_bias[i])
def learning(self, Input, expected_output):
Input = np.array(Input)
expected_output = np.array(expected_output)
if self.do_normalisation:
Input = self.normalisation(Input)
self.feed_forward(Input)
self.backpropagation(Input, expected_output)
def test(self, Input):
Input = | np.array(Input) | numpy.array |
#!/bin/python
import sympy
from scipy.io import wavfile
import numpy as np
from rich import print
import pretty_errors
import random
from matplotlib import pyplot as plt
import math
import soundfile as sf
#####################################################################################
# Important Variables
#####################################################################################
# number of samples that will be decimated and reconsturcted
samples_to_injest = 50000
downsample_level = 2
assert (samples_to_injest%2==0),"Samples to injest must be an even number!"
assert (downsample_level%2==0),"Downsample level must be an even number!"
#####################################################################################
# DEFINE THE INTERPOLATION FUNCTIONS
#####################################################################################
#
# Each of these will take in the .wav segment, as well as where the 0'd out range
# starts and ends
# This -might- need to know if the file is 8, 16, or 24 bit as well, to compensate
# for the extra byte that numpy adds on 24 bit wavs.
# Keep in mind samples_to_injest is the number of samples both before and after, so
# it needs divided by two to look forward and ahead.
# the range that's interpolated will be
# wav[(zstart - samples_to_injest/2:zstart),:] and wav[zend:zend + samples_to_injest/2,:]
# as we don't want to 'learn' on the range we've just 0'd out.
# BUT we do need to keep in mind the x/time value jump, so that the interpolation
# doesn't think these two ranges are contiuous
def LinearInterpolate(samples_to_injest, zstart, zend):
print("Running Linear Spline Interpolation")
div2interp = np.zeros((int(samples_to_injest/downsample_level)))
i = zstart
j = 0
while i < zstart+samples_to_injest:
div2interp[j] = wav[i]
i += downsample_level
j += 1
xp = np.arange(zstart,zend,downsample_level)
yp = div2interp
xn = np.arange(zstart,zend-downsample_level,1)
linearWav = np.copy(wav)
x = sympy.symbols('x')
y = []
for i in range(1,len(xp)):
y.append (((xp[i] - x) / (xp[i] - xp[i-1]))*yp[i-1] + ((x - xp[i-1])/(xp[i] - xp[i-1]))*yp[i])
for i in range(zstart,zend-downsample_level):
linearWav[i] = y[((i-zstart)//downsample_level)].subs(x,(i))
return linearWav
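# Illustrative call (not part of the original file); each interpolator reads
# the module-level `wav` array loaded further below and returns a full copy
# with the zeroed/decimated span reconstructed, e.g.:
#   fixed = LinearInterpolate(samples_to_injest, 10000, 10000 + samples_to_injest)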
def QuadInterpolate(samples_to_injest, zstart, zend):
print("Running Quadratic Spline Interpolation")
div2interp = np.zeros((int(samples_to_injest/downsample_level)))
i = zstart
j = 0
while i < zstart+samples_to_injest:
div2interp[j] = wav[i]
i += downsample_level
j += 1
np.set_printoptions(formatter={'int':str})
xp = np.arange(zstart,zend,downsample_level)
yp = div2interp
xn = np.arange(zstart,zend-downsample_level,1)
quadWav = np.copy(wav)
x = sympy.symbols('x')
y = []
z = []
z.append(0)
for i in range(0,len(xp)-1):
z.append ((-1)*z[i] + 2*((yp[i+1]-yp[i])/(xp[i+1]-xp[i])))
for i in range(0, len(xp)-1):
y.append ((((z[i+1]-z[i])/(2*(xp[i+1]-xp[i])))*(x-xp[i])**2)+z[i]*(x-xp[i])+yp[i])
for i in range(zstart,zend-downsample_level):
quadWav[i] = y[((i-zstart)//downsample_level)].subs(x,(i))
return quadWav
def RCubeInterpolate(samples_to_injest, zstart, zend):
print("Running Cubic Spline Interpolation")
div2interp = np.zeros((int(samples_to_injest/downsample_level)))
i = zstart
j = 0
while i < zstart+samples_to_injest:
div2interp[j] = wav[i]
i += downsample_level
j += 1
np.set_printoptions(formatter={'int':str})
xp = np.arange(zstart,zend,downsample_level)
yp = div2interp
rCubeWav = np.copy(wav)
x = sympy.symbols('x')
y = []
b = []
c = np.zeros(len(xp))
d = []
e = []
alp = []
r = 2+math.sqrt(3)
h = downsample_level
e.append(3*r/(2*(h**2))*(yp[1]-yp[0]))
for i in range(1, len(xp)-1):
e.append((3/(h**2))*(yp[i-1]-2*yp[i]+yp[i+1]))
e.append(0)
alp.append(e[0]/r)
for i in range(1, len(xp)-1):
alp.append((e[i]-alp[i-1])/r)
alp.append(0)
for i in reversed(range(0,len(xp)-1)):
c[i] = alp[i]-(c[i+1]/r)
for i in range(0,len(xp)-1):
b.append ((yp[i+1]-yp[i])/h-((2*c[i]+c[i+1])*h)/3)
for i in range(0,len(xp)-1):
d.append((1/(3*h))*(c[i+1]-c[i]))
for i in range(0,len(xp)-1):
y.append (yp[i]+b[i]*x+c[i]*(x**2)+d[i]*(x**3))
for i in range(zstart,zend-downsample_level):
rCubeWav[i] = y[((i-zstart)//downsample_level)].subs(x,(i % downsample_level))
return rCubeWav
#####################################################################################
# MAIN
#####################################################################################
def PlotWavs(length, start, end, mainWav, linearWav, quadWav, rCubeWav):
#TODO save the image
extra_space = 100 #how many samples to show before and after the 0'd out samples
fig, axs = plt.subplots(4,2)
fig.suptitle("Waveform Interpolation")
x = np.arange(0,length+extra_space*2,1)
#base waveform
axs[0,0].set_title("Input Waveform")
axs[0,0].plot(x, mainWav[start-extra_space:end+extra_space])
axs[0,0].axvspan(extra_space, length+extra_space, color='red', alpha=.1)
#interpolated waveforms
axs[0,1].set_title("Linear Spline Interpolation")
axs[0,1].plot(x, linearWav[start-extra_space:end+extra_space], 'tab:orange')
axs[1,0].set_title("Quadratic Spline Interpolation")
axs[1,0].plot(x, quadWav[start-extra_space:end+extra_space], 'tab:green')
axs[1,1].set_title("R-Cubic Spline Interpolation")
axs[1,1].plot(x, rCubeWav[start-extra_space:end+extra_space], 'tab:red')
# Multi Graph Comparison
axs[2,0].set_title("Compare all splines")
axs[2,0].plot(x, mainWav[start-extra_space:end+extra_space]-quadWav[start-extra_space:end+extra_space], 'tab:green')
axs[2,0].plot(x, mainWav[start-extra_space:end+extra_space]-linearWav[start-extra_space:end+extra_space], 'tab:orange')
axs[2,0].plot(x, mainWav[start-extra_space:end+extra_space]-rCubeWav[start-extra_space:end+extra_space], 'tab:red')
#resulting difference
axs[2,1].set_title("Linear Spline Interpolation Difference")
axs[2,1].plot(x, mainWav[start-extra_space:end+extra_space]-linearWav[start-extra_space:end+extra_space], 'tab:orange')
axs[3,0].set_title("Quadratic Spline Interpolation Difference")
axs[3,0].plot(x, mainWav[start-extra_space:end+extra_space]-quadWav[start-extra_space:end+extra_space], 'tab:green')
axs[3,1].set_title("R-Cubic Spline Interpolation Difference")
axs[3,1].plot(x, mainWav[start-extra_space:end+extra_space]-rCubeWav[start-extra_space:end+extra_space], 'tab:red')
for ax in axs.flat:
ax.set(xlabel='Time', ylabel='Amplitude')
for ax in axs.flat:
ax.label_outer()
plt.show()
def SaveWavs(linearWav,quadWav,rCubeWav):
    for name, w in (('linear.wav', linearWav), ('quad.wav', quadWav), ('rcube.wav', rCubeWav)):
        sf.write(name, w, samplerate)  # illustrative output names; `samplerate` is set at module level below
    print("You can now go listen to the files to determine the quality of interpolation")
# input24.wav is 24bit signed pcm, input16 is signed 16 bit, input8 is unsigned 8bit
input_wav = 'NATEST24.wav'
# Get the wave file data
samplerate, wav = wavfile.read(input_wav)
print(samplerate)
wav = wav[:,0] # keep only the first channel
print(wav)
| np.set_printoptions(formatter={'int':hex}) | numpy.set_printoptions |
"""This module sets up a random walk system then runs a simulation using
the given resampler. This profiles the performance of the resampler
using the data produced in the simulation.
The simulation data is saved using the WepyHDF5 reporter. To create a
WepyHDF5 reporter we create a dummy topology for the random walk
system. This dummy system consists of one atom with a position vector
in an N-dimensional space.
The resmapler quality is measured using the following values:
- P(x): The average predicted probability at position x.
- Accuracy: This is calculated using the target probability and predicted
probabilities of walkers at position x.
- Range: The maximum range that is observed by the given resampler is calculated
by determining the largest x values visited along each dimension,
then averaging them.
You can find detailed information about random walk parameters in the papers:
"WExplore: Hierarchical Exploration of High-Dimensional Spaces
Using the Weighted Ensemble Algorithm" and
"REVO: Resampling of Ensembles by Variation Optimization".
"""
import os
import sys
import json
import numpy as np
import pandas as pd
import h5py
import mdtraj as mdj
from wepy.resampling.resamplers.resampler import NoResampler
from wepy.work_mapper.mapper import Mapper
from wepy.reporter.hdf5 import WepyHDF5Reporter
from wepy.sim_manager import Manager
from wepy.walker import Walker, WalkerState
from wepy.runners.randomwalk import RandomWalkRunner, UNIT_NAMES
from wepy.hdf5 import WepyHDF5
from wepy.util.mdtraj import mdtraj_to_json_topology
PRECISION = 3
SAVE_FIELDS = ('positions',)
UNITS=UNIT_NAMES
np.set_printoptions(precision=PRECISION)
class RandomwalkProfiler(object):
"""A class to implement RandomWalkProfilier."""
RANDOM_WALK_TEMPLATE=\
"""* Random walk simulation:
-Number of runs: {n_runs}
-Number of cycles: {n_cycles}
-Number of walkers:{n_walkers}
-Move-forward probability:{prob}
-Dimension:{dimension}
"""
RUN_RESULTS_TEMPLATE=\
"""* Run {run_idx} results:
-Maximum range: {max_range}
-Maximum range of dimensions:
{max_dim_range}
-Accuracy: {accuracy}
-Average Predicted probability:
{predicted_probabilty}
"""
def __init__(self, resampler=None, dimension=None, probability=0.25,
hdf5_filename='rw_results.wepy.h5',
reporter_filename='randomwalk.org'):
"""Constructor for RandomwalkProfiler.
Parameters
----------
resampler:
    The Wepy resampler object whose performance will be profiled.
dimension: int
    The dimension of the random walk space.
probability: float
    "Probability" is defined here as the forward-move
    probability only. The backward-move probability is
    1-probability.
    (Default = 0.25)
"""
assert resampler is not None, "Resampler object must be given."
self.resampler = resampler
assert dimension is not None, "The dimension of random walk must be given."
self.dimension = dimension
self.probability = probability
self.hdf5_filename = hdf5_filename
self.reporter_filename = reporter_filename
def generate_topology(self):
"""Creates a one-atom, dummy trajectory and topology for
the randomwalk system using the mdtraj package. Then creates a
JSON format for the topology. This JSON string is used in making
the WepyHDF5 reporter.
Returns
-------
topology: str
JSON string representing the topology of system being simulated.
"""
n_atoms = 1
data = []
for i in range(n_atoms):
data.append(dict(serial=i, name="H", element="H",
resSeq=i + 1, resName="UNK", chainID=0))
data = pd.DataFrame(data)
xyz = np.zeros((1, 1, 3))
unitcell_lengths = 0.943 * np.ones((1, 3))
unitcell_angles = 90 * np.ones((1, 3))
top = mdj.Topology.from_dataframe(data, bonds=np.zeros((0, 2), dtype='int'))
json_top_str = mdtraj_to_json_topology(top)
return json_top_str
def run(self, num_runs=1, num_cycles=200, num_walkers=100):
"""Runs a random walk simulation and profiles the resampler
performance.
Parameters
----------
num_runs: int
The number of independent simulations.
num_cycles: int
The number of cycles that will be run in the simulation.
num_walkers: int
The number of walkers.
"""
# set the random walk simulation repreter string
randomwalk_string = self.RANDOM_WALK_TEMPLATE.format(
n_runs=num_runs,
n_cycles=num_cycles,
n_walkers=num_walkers,
prob=self.probability,
dimension=self.dimension
)
# calls the runner
self._run(num_runs, num_cycles, num_walkers)
# calls the profiler
self.analyse(randomwalk_string)
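# Illustrative usage (not part of the original file):
#   profiler = RandomwalkProfiler(resampler=NoResampler(), dimension=5)
#   profiler.run(num_runs=1, num_cycles=200, num_walkers=100)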
def _run(self, num_runs, num_cycles, num_walkers):
"""Runs a random walk simulation.
Parameters
----------
num_runs: int
The number of independent simulations.
num_cycles: int
The number of cycles that will be run in the simulation.
num_walkers: int
The number of walkers.
"""
print("Random walk simulation with: ")
print("Dimension = {}".format(self.dimension))
print("Probability = {}".format(self.probability))
print("Number of Walkers = {}".format(num_walkers))
print("Number of Cycles ={}".format(num_cycles))
# set up initial state for walkers
positions = np.zeros((1, self.dimension))
init_state = WalkerState(positions=positions, time=0.0)
# create list of init_walkers
initial_weight = 1/num_walkers
init_walkers = []
init_walkers = [Walker(init_state, initial_weight)
for i in range(num_walkers)]
# set up runner for system
runner = RandomWalkRunner(probability=self.probability)
units = dict(UNIT_NAMES)
# number of dynamics steps per simulation segment
segment_length = 10
# set up the reporter
randomwalk_system_top_json = self.generate_topology()
hdf5_reporter = WepyHDF5Reporter(file_path=self.hdf5_filename,
mode='w',
save_fields=SAVE_FIELDS,
topology=randomwalk_system_top_json,
resampler=self.resampler,
units=dict(UNITS),
n_dims=self.dimension)
# running the simulation
sim_manager = Manager(init_walkers,
runner=runner,
resampler=self.resampler,
work_mapper=Mapper(),
reporters=[hdf5_reporter])
# run a simulation with the manager for n_steps cycles of length 1000 each
steps = [segment_length for i in range(num_cycles)]
### RUN the simulation
for run_idx in range(num_runs):
print("Starting run: {}".format(run_idx))
sim_manager.run_simulation(num_cycles, steps)
print("Finished run: {}".format(run_idx))
print("Finished Simulation")
def kronecker_delta(self, x):
"""Implements the the Kronecker delta function.
Parameters
----------
x: int
Input value of the function. Here, this is the random walk position.
Returns
-------
y: int
The output of the Kronecker delta function.
"""
if x == 0:
return 1
else:
return 0
# Measure accuracy
def accuracy(self, x, Px):
"""Calculate the accuracy at position x.
Parameters
----------
x: int
    The position.
Px: float
    The predicted probability at position x.
Returns
-------
accuracy: float
    The value that specifies how accurate the resampler is at point x.
    The highest accuracy is achieved when P(x) = Pt(x).
"""
if Px == 0:
return 0
elif | np.log(Px) | numpy.log |
from meta_learner_utils import feature_selection, dnn_model, reset_weights, feature_selection_test_set, task_specific_features, normalize_metafeatures, add_task_specific_metafeatures, preprocess_metalabels, preprocess_metafeatures, preprocess_metafeatures_test_set, regression_feature_selection, largest_indices
from visualization import visualize_features_tSNE, visualize_features_MDS, visualize_confusion_matrix, visualize_features_PCA
from medical_metafeatures.feature_extraction import MetaFeatureExtraction
from tqdm import tqdm
import os
import numpy as np
from sklearn.svm import SVR
import keras.backend as K
import matplotlib.pyplot as plt
from sklearn.metrics import mean_absolute_error
from scipy.stats import spearmanr
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
def main():
tasks_list= ['Task01_BrainTumour','Task02_Heart','Task03_Liver','Task04_Hippocampus', 'Task05_Prostate', 'Task06_Lung', 'Task07_Pancreas', 'Task08_HepaticVessel', 'Task09_Spleen', 'Task10_Colon','Task11_CHAOSLiver', 'Task12_LITSLiver','Task13_ACDCHeart']
participants = ['BCVuniandes', 'beomheep', 'CerebriuDIKU', 'EdwardMa12593', 'ildoo', 'iorism82', 'isarasua', 'Isensee', 'jiafucang', 'lesswire1', 'lupin', 'oldrich.kodym', 'ORippler', 'phil666', 'rzchen_xmu', 'ubilearn', 'whale', '17111010008', 'allan.kim01']
nr_of_methods = 19
feature_extractors = ['STAT','VGG16','ResNet50','MobileNetV1']
regression_methods = ['SVR','DNN']
best_frac = {'STAT': 0.37,'VGG16': 0.23,'ResNet50': 0.49,'MobileNetV1': 0.49}
meta_subset_size = 20
sample_size = 100
visualization = False
do_feature_selection = True
gt_labels = np.load('metadata/decathlon_avgstd_results.npy')
# load the meta_features and load_meta_labels
meta_features = {}
meta_labels = {}
for fe_id, fe in enumerate(feature_extractors):
pred = np.zeros((len(feature_extractors), len(regression_methods), len(tasks_list[:10]),2,19))
results_task = np.zeros((len(feature_extractors), len(regression_methods), len(tasks_list[:10]),3))
results_method = np.zeros((len(feature_extractors), nr_of_methods, len(tasks_list[:10]),3))
print(fe)
for task_id, task in enumerate(tasks_list):
m = MetaFeatureExtraction(task, meta_subset_size, fe)
m.load_meta_labels()
m.load_meta_features()
if fe != 'STAT': m.sum_and_log_meta_features()
if task_id < 10 : meta_labels[task] = m.meta_labels
meta_features[task] = m.meta_features
if fe == 'STAT':
regression_features_raw = np.zeros((len(tasks_list[:10])*sample_size, 38))
regression_labels = np.zeros((len(tasks_list[:10])*sample_size, 19))
for task_id, task in enumerate(tasks_list[:10]):
for nr in range(sample_size):
if meta_subset_size == 20:
regression_features_raw[task_id*sample_size+nr,:33] = meta_features[task][nr,:]
regression_features_raw[task_id*sample_size+nr,33:] = task_specific_features[task_id,:]
regression_labels[task_id*sample_size+nr,:] = meta_labels[task][nr,:]
else:
regression_features_raw[task_id*sample_size+nr,:33] = np.sum(meta_features[task][nr,:],axis=0)/meta_features[task][nr,:].shape[0]
regression_features_raw[task_id*sample_size+nr,33:] = task_specific_features[task_id,:]
regression_labels[task_id*sample_size+nr,:] = np.sum(meta_labels[task][nr,:], axis=0)/meta_features[task][nr,:].shape[0]
else:
# compose the regression features and labels
_, regression_features_raw, usable_filters = preprocess_metafeatures(meta_features, tasks_list[:10], sample_size)
regression_labels = preprocess_metalabels(meta_labels, tasks_list[:10], sample_size)
regression_features_raw = add_task_specific_metafeatures(regression_features_raw, task_specific_features, tasks_list[:10], sample_size)
regression_features = normalize_metafeatures(regression_features_raw)
for task_id, task in tqdm(enumerate(tasks_list[:10])):
nr_of_metafeatures = regression_features.shape[1]
test_set = [task_id]
train_set = list(set(range(10))-set(test_set))
test_set.sort()
train_set.sort()
train_regression_features = np.zeros((9 * sample_size, nr_of_metafeatures))
train_regression_labels = np.zeros((9 * sample_size, nr_of_methods))
test_regression_features = np.zeros((1 * sample_size, nr_of_metafeatures))
test_regression_labels = np.zeros((1 * sample_size, nr_of_methods))
for i, task in enumerate(train_set):
train_regression_features[i*sample_size:(i+1)*sample_size,:] = regression_features[task*sample_size:(task+1)*sample_size,:]
train_regression_labels[i*sample_size:(i+1)*sample_size,:] = regression_labels[task*sample_size:(task+1)*sample_size,:]
for i, task in enumerate(test_set):
test_regression_features[i*sample_size:(i+1)*sample_size,:] = regression_features[task*sample_size:(task+1)*sample_size,:]
test_regression_labels[i*sample_size:(i+1)*sample_size,:] = regression_labels[task*sample_size:(task+1)*sample_size,:]
if do_feature_selection:
train_regression_features, features_to_keep = feature_selection(train_regression_features, train_regression_labels, best_frac[fe])
test_regression_features = feature_selection_test_set(test_regression_features, features_to_keep)
for reg_id, regression_method in enumerate(regression_methods):
for p in range(19):
if regression_method == 'DNN':
K.clear_session()
model = dnn_model(train_regression_features.shape[1])
np.random.seed(42)
np.random.shuffle(train_regression_features)
np.random.seed(42)
| np.random.shuffle(train_regression_labels) | numpy.random.shuffle |
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `tree_util.py`."""
from absl.testing import absltest
import chex
import jax
import jax.numpy as jnp
import numpy as np
from rlax._src import tree_util
NUM_NESTS = 5
class TreeUtilTest(absltest.TestCase):
def test_tree_split_key(self):
rng_key = jax.random.PRNGKey(42)
tree_like = (1, (2, 3), {'a': 4})
_, tree_keys = tree_util.tree_split_key(rng_key, tree_like)
self.assertLen(jax.tree_leaves(tree_keys), 4)
def test_tree_map_zipped(self):
nests = [
dict(a=jnp.zeros((1, 3)), b=jnp.zeros((1, 5)))] * NUM_NESTS
nest_output = tree_util.tree_map_zipped(
lambda *args: jnp.concatenate(args), nests)
self.assertEqual(nest_output['a'].shape, (NUM_NESTS, 3))
self.assertEqual(nest_output['b'].shape, (NUM_NESTS, 5))
def test_tree_map_zipped_wrong_structure(self):
nests = [
dict(a=jnp.zeros((1, 3)), b=jnp.zeros((1, 5)))] * (NUM_NESTS - 1)
nests.append(dict(c=jnp.zeros((1, 3)))) # add a non-matching nest
with self.assertRaisesRegex(ValueError, 'must share the same tree'):
tree_util.tree_map_zipped(
lambda *args: jnp.concatenate(args), nests)
def test_tree_map_zipped_empty(self):
outputs = tree_util.tree_map_zipped(lambda *args: jnp.concatenate(args), [])
self.assertEmpty(outputs)
def test_select_true(self):
on_true = ((jnp.zeros(3,),), jnp.zeros(4,))
on_false = ((jnp.ones(3,),), jnp.ones(4,))
output = tree_util.tree_select(True, on_true, on_false)
chex.assert_tree_all_close(output, on_true)
def test_select_false(self):
on_true = ((jnp.zeros(3,),), jnp.zeros(4,))
on_false = ((jnp.ones(3,),), jnp.ones(4,))
output = tree_util.tree_select(False, on_true, on_false)
chex.assert_tree_all_close(output, on_false)
def test_tree_split_leaves(self):
t = {
'a0': | np.zeros(3) | numpy.zeros |
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
from matplotlib import rc
import matplotlib as mpl
import re
import matplotlib.cm as cm
import matplotlib.colors as mcolors
import numpy as np
import math
import matplotlib.gridspec as gridspec
#print ("MPL version ")
#print (matplotlib.__version__)
mpl.rcParams['text.usetex'] = True
mpl.rcParams['text.latex.unicode']=True
mpl.rcParams["figure.figsize"] = [6.4,6.4]
label_size1=15
RDdata = np.genfromtxt('Results/datToPlotCAGC/QFCstat_all_filenames.dat',dtype=('U50',None,None,None,None,None,None),delimiter="\t");
ml = 102
RDM = np.zeros((ml, ml))
print (RDM)
print (RDdata)
for i in range(len(RDdata)):
RDij = re.findall('(\d+)\s+[A-Z]{3}\s+to\s+(\d+)\s+[A-Z]{3}',RDdata[i][0])
RDM[(int(RDij[0][0])-1)][(int(RDij[0][1])-1)] = RDdata[i][2]
RDM[(int(RDij[0][1])-1)][(int(RDij[0][0])-1)] = RDdata[i][5]
print (RDM[(int(RDij[0][0])-1)][(int(RDij[0][1])-1)])
print (RDij[0][0]+' '+RDij[0][1])
print ('I=%d'%(i)+'\n')
RDM_a = np.array(RDM)
Sum_RDM = | np.sum(RDM_a) | numpy.sum |
import numpy as np
import os
import torch
import torch.nn as nn
from torch.optim import Adam
from torch.utils.data import DataLoader
from tqdm import tqdm
from .networks import BC, Embedding, AutoEncoder
from .training import update, evaluate
from .datasets import BCSet, StateActionEmbeddingSet, AESet
from .utils import entropy, BColors
from hyperloglog import HyperLogLog
import matplotlib.pyplot as plt
from math import sqrt
import copy
from .plotting import plot_histograms, plot_states
from .latex import create_latex_table
class Evaluator():
def __init__(self,
environment: str,
buffer_type: str,
states:np.ndarray,
actions:np.ndarray,
rewards:np.ndarray,
dones:np.ndarray,
workers=0,
seed=42,
num_actions=None):
assert len(states.shape) == 2, f"States must be of dimension (ds_size, feature_size), were ({states.shape})"
if len(actions.shape) == 1:
actions = actions.reshape(-1, 1)
assert len(actions.shape) == 2, f"Actions must be of dimension (ds_size, 1), were ({actions.shape})"
if len(rewards.shape) == 1:
rewards = rewards.reshape(-1, 1)
assert len(rewards.shape) == 2, f"Rewards must be of dimension (ds_size, 1), were ({rewards.shape})"
if len(dones.shape) == 1:
dones = dones.reshape(-1, 1)
assert len(dones.shape) == 2, f"Dones must be of dimension (ds_size, 1), were ({dones.shape})"
# task information
self.environment = environment
self.buffer_type = buffer_type
# Dataset, last state and actions are meaningless
self.states = states
self.actions = actions
self.rewards = rewards
self.dones = dones
# auxiliary parameters
self.workers = workers
self.seed = seed
# could be that dataset contains not every action, then one can pass the correct number of actions
self.num_actions = num_actions if num_actions is not None else np.max(self.actions) + 1
device = "cuda" if torch.cuda.is_available() else "cpu"
# behavioral cloning network
self.behavioral_trained = False
self.behavioral = BC(num_state=self.states.shape[1], num_actions=self.num_actions, seed=self.seed).to(device)
# state embedding network
self.state_embedding_trained = False
self.state_embedding = Embedding(num_state=self.states.shape[1], num_embedding=2, seed=self.seed).to(device)
# state-action embedding network
self.state_action_embedding_trained = False
self.state_action_embedding = Embedding(num_state=self.states.shape[1], num_embedding=2, seed=self.seed).to(device)
# state ae network
self.state_ae_trained = False
self.state_ae = AutoEncoder(num_state=self.states.shape[1], num_embedding=2, seed=self.seed).to(device)
# state-action ae network
self.state_action_ae_trained = False
self.state_action_ae = AutoEncoder(num_state=self.states.shape[1], num_embedding=2, seed=self.seed).to(device)
# copies that stay random
self.random_state_embedding = copy.deepcopy(self.state_embedding)
self.random_state_action_embedding = copy.deepcopy(self.state_action_embedding)
# limits for estimation
self.limits = [None] * 8
def evaluate(self, state_limits=None, action_limits=None,
epochs=10, batch_size=64, lr=1e-3,
subsample=1., verbose=False):
assert 0 <= subsample <= 1, f"subsample must be in [0;1] but is {subsample}."
self.train_behavior_policy(epochs, batch_size, lr, verbose)
returns = self.get_returns()
sparsities = self.get_sparsities()
ep_lengths = self.get_episode_lengths()
entropies = self.get_bc_entropy()
unique_states = self.get_unique_states(limits=state_limits)
unique_state_actions = self.get_unique_state_actions(limits=action_limits)
print("-"*50)
print("Min / Mean / Max Return: \t\t", f"{round(np.min(returns), 2)} / {round(np.mean(returns), 2)} "
f"/ {round(np.max(returns), 2)}")
print("Unique States: \t", f"{unique_states}")
print("Unique State-Actions: \t", f"{unique_state_actions}")
print("Min / Mean / Max Entropy: \t", f"{round(np.min(entropies), 2)} / {round(np.mean(entropies), 2)} "
f"/ {round(np.max(entropies), 2)}")
print("Min / Mean / Max Sparsity: \t", f"{round(np.min(sparsities), 2)} / "
f"{round(np.mean(sparsities), 2)} "
f"/ {round(np.max(sparsities), 2)}")
print("Min / Mean / Max Episode Length: \t", f"{round(np.min(ep_lengths), 2)} / "
f"{round(np.mean(ep_lengths), 2)} "
f"/ {round(np.max(ep_lengths), 2)}")
print("-" * 50)
return returns, unique_states, unique_state_actions, entropies, sparsities, ep_lengths
def get_returns(self):
rewards, ep_reward = list(), 0
for i, done in enumerate(self.dones):
ep_reward += self.rewards[i].item()
if done:
rewards.append(ep_reward)
ep_reward = 0
return rewards
@staticmethod
def get_normalized_rewards(rewards, random_reward, optimal_reward):
normalized_reward = []
for reward in rewards:
normalized_reward.append((reward - random_reward) / (optimal_reward - random_reward))
return normalized_reward
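# Worked example (added for clarity): with random_reward = 0 and
# optimal_reward = 100, rewards [0, 50, 100] map to [0.0, 0.5, 1.0].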
def get_sparsities(self):
sparsity, num_not_obtained = list(), list()
for i, done in enumerate(self.dones):
num_not_obtained.append(self.rewards[i].item() == 0)
if done:
sparsity.append(np.mean(num_not_obtained))
num_not_obtained = list()
return sparsity
def get_episode_lengths(self):
lengths, ep_length = list(), 0
for i, done in enumerate(self.dones):
ep_length += 1
if done:
lengths.append(ep_length)
ep_length = 0
return lengths
def get_bc_entropy(self):
if not self.behavioral_trained:
print(BColors.WARNING + "Attention, behavioral policy was not trained before calling get_bc_entropy!" + BColors.ENDC)
entropies = []
dl = DataLoader(BCSet(states=self.states, actions=self.actions), batch_size=512, drop_last=False,
shuffle=False, num_workers=self.workers)
for x, _ in dl:
x = x.to(next(self.behavioral.parameters()).device)
entropies.extend(entropy(self.behavioral(x)))
# calculate entropy
entropies = np.asarray(entropies)
return entropies
def get_similarity_distance(self):
states = torch.FloatTensor(self.states)[:len(self.dones)]
with torch.no_grad():
states = states.to(next(self.behavioral.parameters()).device)
states = self.state_embedding.embed(states).cpu().numpy()
rng = np.random.default_rng(self.seed)
ep_distances = []
general_distances = []
dones = []
for d, done in enumerate(self.dones):
if done:
dones.append(d + 1)
start = 0
for end in dones:
ep_states = states[start:end]
for s, state in enumerate(ep_states):
idx = rng.integers(len(ep_states))
# in case the same state is sampled by chance
while idx == s or np.allclose(state, ep_states[idx]):
idx = rng.integers(len(ep_states))
if len(ep_states) == 1:
break
if np.allclose(state, ep_states[idx]):
continue
distance = (state - ep_states[idx]).reshape(-1,)
ep_distances.append(np.linalg.norm(distance))
start = end
for s, state in enumerate(states):
idx = rng.integers(len(states))
# in case the same state is sampled by chance
while idx == s:
idx = rng.integers(len(states))
distance = (state - states[idx]).reshape(-1, )
general_distances.append(np.linalg.norm(distance))
return np.mean(general_distances) / np.mean(ep_distances)
def get_state_pseudo_coverage(self, no_cells=100, use_random=False):
states = torch.FloatTensor(self.states)[:len(self.dones)]
with torch.no_grad():
if use_random:
states = states.to(next(self.random_state_embedding.parameters()).device)
states = self.random_state_embedding.embed(states).cpu().numpy()
if self.limits[4] is None:
self.limits[4] = ( | np.min(states[:, 0]) | numpy.min |
"""
===============
emc2.core.Model
===============
This module contains the Model class and example Models for your use.
"""
import xarray as xr
import numpy as np
from act.io.armfiles import read_netcdf
from .instrument import ureg, quantity
from netCDF4 import Dataset
try:
from wrf import tk, getvar, ALL_TIMES
WRF_PYTHON_AVAILABLE = True
except ImportError:
WRF_PYTHON_AVAILABLE = False
class Model():
"""
This class stores the model specific parameters for the radar simulator.
Attributes
----------
Rho_hyd: dict
A dictionary whose keys are the names of the model's hydrometeor classes and
whose values are the density of said hydrometeors in :math:`kg\ m^{-3}`
fluffy: dict
A dictionary whose keys are the names of the model's ice hydrometeor classes and
whose values are the ice fluffiness factor for the fwd calculations using r_e,
where values of 0 - equal volume sphere, 1 - fluffy sphere i.e., diameter = maximum dimension.
lidar_ratio: dict
A dictionary whose keys are the names of the model's hydrometeor classes and
whose values are the lidar_ratio of said hydrometeors.
vel_param_a: dict
A dictionary whose keys are the names of the model's hydrometeor classes and
whose values are the :math:`a` parameters to the equation :math:`V = aD^b` used to
calculate terminal velocity corresponding to each hydrometeor.
vel_param_b: dict
A dictionary whose keys are the names of the model's hydrometeor classes and
whose values are the :math:`b` parameters to the equation :math:`V = aD^b` used to
calculate terminal velocity corresponding to each hydrometeor.
N_field: dict
A dictionary whose keys are the names of the model's hydrometeor classes and
whose values are the number concentrations in :math:`cm^{-3}` corresponding to
each hydrometeor class.
T_field: str
A string containing the name of the temperature field in the model.
q_field: str
A string containing the name of the water vapor mixing ratio field (in kg/kg) in the model.
p_field: str
A string containing the name of the pressure field (in mbar) in the model.
z_field: str
A string containing the name of the height field (in m) in the model.
conv_frac_names: dict
A dictionary containing the names of the convective fraction corresponding to each
hydrometeor class in the model.
strat_frac_names: dict
A dictionary containing the names of the stratiform fraction corresponding to each
hydrometeor class in the model.
conv_frac_names_for_rad: dict
A dictionary containing the names of the convective fraction corresponding to each
hydrometeor class in the model for the radiation scheme.
strat_frac_names_for_rad: dict
A dictionary containing the names of the stratiform fraction corresponding to each
hydrometeor class in the model for the radiation scheme.
conv_re_fields: dict
A dictionary containing the names of the effective radii of each convective
hydrometeor class
strat_re_fields: dict
A dictionary containing the names of the effective radii of each stratiform
hydrometeor class
time_dim: str
The name of the time dimension in the model.
height_dim: str
The name of the height dimension in the model.
model_name: str
The name of the model (used for plotting).
x_dim: str
The name of the x dimension of the model.
y_dim: str
The name of the y dimension of the model.
"""
def __init__(self):
self.Rho_hyd = {}
self.fluffy = {}
self.lidar_ratio = {}
self.LDR_per_hyd = {}
self.vel_param_a = {}
self.vel_param_b = {}
self.q_names_convective = {}
self.q_names_stratiform = {}
self.N_field = {}
self.T_field = ""
self.q_field = ""
self.p_field = ""
self.z_field = ""
self.qp_field = {}
self.conv_frac_names = {}
self.strat_frac_names = {}
self.conv_frac_names_for_rad = {}
self.strat_frac_names_for_rad = {}
self.conv_re_fields = {}
self.strat_re_fields = {}
self.ds = None
self.time_dim = "time"
self.height_dim = "height"
self.model_name = "empty_model"
self.x_dim = None
self.y_dim = None
self.lat_name = None
self.lon_name = None
self.consts = {"c": 299792458.0, # m/s
"R_d": 287.058, # J K^-1 Kg^-1
"g": 9.80665, # m/s^2
"Avogadro_c": 6.022140857e23,
"R": 8.3144598} # J K^-1 mol^-1
def _add_vel_units(self):
for my_keys in self.vel_param_a.keys():
self.vel_param_a[my_keys] = self.vel_param_a[my_keys] * (
ureg.meter ** (1 - self.vel_param_b[my_keys].magnitude) / ureg.second)
def _prepare_variables(self):
for variable in self.ds.variables.keys():
attrs = self.ds[variable].attrs
try:
self.ds[variable] = self.ds[variable].astype('float64')
except TypeError:
continue
self.ds[variable].attrs = attrs
def _crop_time_range(self, time_range):
"""
Crop model output time range.
Can significantly cut subcolumn processing time.
Parameters
----------
time_range: tuple, list, or array, typically in datetime64 format
Two-element array with starting and ending of time range.
"""
time_ind = np.logical_and(self.ds[self.time_dim] >= time_range[0],
self.ds[self.time_dim] < time_range[1])
if np.sum(time_ind) == 0:
self.ds.close()
print("The requested time range: {0} to {1} is out of the \
model output range; Ignoring crop request.".format(time_range[0], time_range[1]))
else:
self.ds = self.ds.isel({self.time_dim: time_ind})
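# Illustrative usage (not part of the original file), with made-up dates:
#   model._crop_time_range(np.array(['2016-08-30T00:00', '2016-08-31T00:00'],
#                                   dtype='datetime64'))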
@property
def hydrometeor_classes(self):
"""
The list of hydrometeor classes.
"""
return list(self.N_field.keys())
@property
def num_hydrometeor_classes(self):
"""
The number of hydrometeor classes
"""
return len(list(self.N_field.keys()))
@property
def num_subcolumns(self):
"""
Gets the number of subcolumns in the model. Will
return 0 if the number of subcolumns has not yet been set.
"""
if 'subcolumn' in self.ds.dims.keys():
return self.ds.dims['subcolumn']
else:
return 0
@num_subcolumns.setter
def num_subcolumns(self, a):
"""
This will set the number of subcolumns in the simulated radar output.
This is a handy shortcut for setting the number of subcolumns if you
do not want to use any of the functions in the simulator module to
do so.
"""
subcolumn = xr.DataArray(np.arange(a), dims='subcolumn')
self.ds['subcolumn'] = subcolumn
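# Illustrative use of the setter above (not part of the original file):
#   model.num_subcolumns = 20   # adds a 'subcolumn' coordinate 0..19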
def subcolumns_to_netcdf(self, file_name):
"""
Saves all of the simulated subcolumn parameters to a netCDF file.
Parameters
----------
file_name: str
The name of the file to save to.
"""
# Set all relevant variables to save:
vars_to_keep = ["sub_col", "subcol", "strat_", "conv_", "_tot", "_ext", "_mask", "_min", "mpr", "fpr"]
var_dict = {}
for my_var in self.ds.variables.keys():
if np.any([x in my_var for x in vars_to_keep]):
var_dict[my_var] = self.ds[my_var]
out_ds = xr.Dataset(var_dict)
out_ds.to_netcdf(file_name)
def load_subcolumns_from_netcdf(self, file_name):
"""
Load all of the subcolumn data from a previously saved netCDF file.
The dataset being loaded must match the current number of subcolumns if there are any
generated.
Parameters
----------
file_name: str
Name of the file to save.
"""
my_file = xr.open_dataset(file_name)
self.ds = xr.merge([self.ds, my_file])
my_file.close()
class ModelE(Model):
def __init__(self, file_path, time_range=None):
"""
This loads a ModelE simulation with all of the necessary parameters for EMC^2 to run.
Parameters
----------
file_path: str
Path to a ModelE simulation.
"""
super().__init__()
self.Rho_hyd = {'cl': 1000. * ureg.kg / (ureg.m**3), 'ci': 500. * ureg.kg / (ureg.m**3),
'pl': 1000. * ureg.kg / (ureg.m**3), 'pi': 250. * ureg.kg / (ureg.m**3)}
self.fluffy = {'ci': 0.5 * ureg.dimensionless, 'pi': 0.5 * ureg.dimensionless}
self.lidar_ratio = {'cl': 18. * ureg.dimensionless,
'ci': 24. * ureg.dimensionless,
'pl': 5.5 * ureg.dimensionless,
'pi': 24.0 * ureg.dimensionless}
self.LDR_per_hyd = {'cl': 0.03 * 1 / (ureg.kg / (ureg.m**3)),
'ci': 0.35 * 1 / (ureg.kg / (ureg.m**3)),
'pl': 0.1 * 1 / (ureg.kg / (ureg.m**3)),
'pi': 0.40 * 1 / (ureg.kg / (ureg.m**3))}
self.vel_param_a = {'cl': 3e-7, 'ci': 700., 'pl': 841.997, 'pi': 11.72}
self.vel_param_b = {'cl': 2. * ureg.dimensionless,
'ci': 1. * ureg.dimensionless,
'pl': 0.8 * ureg.dimensionless,
'pi': 0.41 * ureg.dimensionless}
super()._add_vel_units()
self.q_field = "q"
self.N_field = {'cl': 'ncl', 'ci': 'nci', 'pl': 'npl', 'pi': 'npi'}
self.p_field = "p_3d"
self.z_field = "z"
self.T_field = "t"
self.height_dim = "p"
self.time_dim = "time"
self.conv_frac_names = {'cl': 'cldmccl', 'ci': 'cldmcci', 'pl': 'cldmcpl', 'pi': 'cldmcpi'}
self.strat_frac_names = {'cl': 'cldsscl', 'ci': 'cldssci', 'pl': 'cldsspl', 'pi': 'cldsspi'}
self.conv_frac_names_for_rad = {'cl': 'cldmcr', 'ci': 'cldmcr',
'pl': 'cldmcpl', 'pi': 'cldmcpi'}
self.strat_frac_names_for_rad = {'cl': 'cldssr', 'ci': 'cldssr',
'pl': 'cldssr', 'pi': 'cldssr'}
self.conv_re_fields = {'cl': 're_mccl', 'ci': 're_mcci', 'pi': 're_mcpi', 'pl': 're_mcpl'}
self.strat_re_fields = {'cl': 're_sscl', 'ci': 're_ssci', 'pi': 're_sspi', 'pl': 're_sspl'}
self.q_names_convective = {'cl': 'QCLmc', 'ci': 'QCImc', 'pl': 'QPLmc', 'pi': 'QPImc'}
self.q_names_stratiform = {'cl': 'qcl', 'ci': 'qci', 'pl': 'qpl', 'pi': 'qpi'}
self.ds = read_netcdf(file_path)
# Check to make sure we are loading a single column
if 'lat' in [x for x in self.ds.dims.keys()]:
if self.ds.dims['lat'] != 1 or self.ds.dims['lon'] != 1:
self.ds.close()
raise RuntimeError("%s is not an SCM run. EMC^2 will only work with SCM runs." % file_path)
# No need for lat and lon dimensions
self.ds = self.ds.squeeze(dim=('lat', 'lon'))
# crop specific model output time range (if requested)
if time_range is not None:
            if np.issubdtype(np.asarray(time_range).dtype, np.datetime64):
super()._crop_time_range(time_range)
else:
raise RuntimeError("input time range is not in the required datetime64 data type")
# ModelE has pressure units in mb, but pint only supports hPa
self.ds["p_3d"].attrs["units"] = "hPa"
self.model_name = "ModelE"
class E3SM(Model):
def __init__(self, file_path, time_range=None):
"""
This loads an E3SM simulation output with all of the necessary parameters for EMC^2 to run.
Parameters
----------
file_path: str
Path to an E3SM simulation.
"""
super().__init__()
self.Rho_hyd = {'cl': 1000. * ureg.kg / (ureg.m**3), 'ci': 500. * ureg.kg / (ureg.m**3),
'pl': 1000. * ureg.kg / (ureg.m**3), 'pi': 250. * ureg.kg / (ureg.m**3)}
self.fluffy = {'ci': 0.5 * ureg.dimensionless, 'pi': 0.5 * ureg.dimensionless}
self.lidar_ratio = {'cl': 18. * ureg.dimensionless,
'ci': 24. * ureg.dimensionless,
'pl': 5.5 * ureg.dimensionless,
'pi': 24.0 * ureg.dimensionless}
self.LDR_per_hyd = {'cl': 0.03 * 1 / (ureg.kg / (ureg.m**3)),
'ci': 0.35 * 1 / (ureg.kg / (ureg.m**3)),
'pl': 0.1 * 1 / (ureg.kg / (ureg.m**3)),
'pi': 0.40 * 1 / (ureg.kg / (ureg.m**3))}
self.vel_param_a = {'cl': 3e-7, 'ci': 700., 'pl': 841.997, 'pi': 11.72}
self.vel_param_b = {'cl': 2. * ureg.dimensionless,
'ci': 1. * ureg.dimensionless,
'pl': 0.8 * ureg.dimensionless,
'pi': 0.41 * ureg.dimensionless}
super()._add_vel_units()
self.q_field = "Q"
self.N_field = {'cl': 'NUMLIQ', 'ci': 'NUMICE', 'pl': 'NUMRAI', 'pi': 'NUMSNO'}
self.p_field = "p_3d"
self.z_field = "Z3"
self.T_field = "T"
self.height_dim = "lev"
self.time_dim = "ncol"
self.conv_frac_names = {'cl': 'zeros_cf', 'ci': 'zeros_cf', 'pl': 'zeros_cf', 'pi': 'zeros_cf'}
self.strat_frac_names = {'cl': 'CLOUD', 'ci': 'CLOUD', 'pl': 'CLOUD', 'pi': 'CLOUD'}
self.conv_frac_names_for_rad = {'cl': 'zeros_cf', 'ci': 'zeros_cf',
'pl': 'zeros_cf', 'pi': 'zeros_cf'}
self.strat_frac_names_for_rad = {'cl': 'CLOUD', 'ci': 'CLOUD',
'pl': 'CLOUD', 'pi': 'CLOUD'}
self.conv_re_fields = {'cl': 'zeros_cf', 'ci': 'zeros_cf', 'pi': 'zeros_cf', 'pl': 'zeros_cf'}
self.strat_re_fields = {'cl': 'AREL', 'ci': 'AREI', 'pi': 'ADSNOW', 'pl': 'ADRAIN'}
self.q_names_convective = {'cl': 'zeros_cf', 'ci': 'zeros_cf', 'pl': 'zeros_cf', 'pi': 'zeros_cf'}
self.q_names_stratiform = {'cl': 'CLDLIQ', 'ci': 'CLDICE', 'pl': 'RAINQM', 'pi': 'SNOWQM'}
self.ds = read_netcdf(file_path)
# Check to make sure we are loading a single column
if 'lat' in [x for x in self.ds.dims.keys()]:
if self.ds.dims['lat'] != 1 or self.ds.dims['lon'] != 1:
self.ds.close()
raise RuntimeError("%s is not a column dataset. EMC^2 will currently works with column data." %
file_path)
# No need for lat and lon dimensions
self.ds = self.ds.squeeze(dim=('lat', 'lon'))
# crop specific model output time range (if requested)
if time_range is not None:
            if np.issubdtype(np.asarray(time_range).dtype, np.datetime64):
super()._crop_time_range(time_range)
else:
raise RuntimeError("input time range is not in the required datetime64 data type")
self.ds[self.p_field] = (self.ds["P0"] * self.ds["hyam"] + self.ds["PS"] * self.ds["hybm"]).T / 1e2 # hPa
self.ds[self.p_field].attrs["units"] = "hPa"
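        # Hybrid sigma-pressure vertical coordinate: p(lev) = P0 * hyam(lev) + PS * hybm(lev)
        # gives pressure in Pa, so the result is divided by 1e2 above to store hPa.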
self.ds["zeros_cf"] = xr.DataArray(np.zeros_like(self.ds[self.p_field].values),
dims=self.ds[self.p_field].dims)
self.ds["zeros_cf"].attrs["long_name"] = "An array of zeros as only strat output is used for this model"
for hyd in ["pl", "pi"]:
self.ds[self.strat_re_fields[hyd]].values /= 2 # Assuming effective diameter was provided
self.ds["rho_a"] = self.ds[self.p_field] * 1e2 / (self.consts["R_d"] * self.ds[self.T_field])
self.ds["rho_a"].attrs["units"] = "kg / m ** 3"
for hyd in ["cl", "ci", "pl", "pi"]:
            self.ds[self.N_field[hyd]].values *= self.ds["rho_a"].values  # convert number per unit mass (kg^-1) to number concentration (m^-3)
self.model_name = "E3SM"
class WRF(Model):
def __init__(self, file_path,
z_range=None, time_range=None, w_thresh=1,
t=None):
"""
        This loads a WRF simulation and all of the necessary parameters from
the simulation.
Parameters
----------
file_path: str
Path to WRF simulation.
        z_range: numpy array or None
            The z levels of the vertical grid you want to use. By default,
            the levels are 0 m to 15000 m, increasing by 500 m.
        time_range: tuple or None
            Start and end time to include. If this is None, the entire
            simulation will be included.
w_thresh: float
The threshold of vertical velocity for defining a grid cell
as convective.
t: int or None
The timestep number to subset the WRF data into. Set to None to
load all of the data
"""
if not WRF_PYTHON_AVAILABLE:
raise ModuleNotFoundError("wrf-python must be installed in " +
"order to read WRF data.")
if z_range is None:
z_range = np.arange(0., 15000., 500.)
super().__init__()
self.Rho_hyd = {'cl': 1000. * ureg.kg / (ureg.m**3),
'ci': 500. * ureg.kg / (ureg.m**3),
'pl': 1000. * ureg.kg / (ureg.m**3),
'pi': 100. * ureg.kg / (ureg.m**3)}
self.lidar_ratio = {'cl': 18. * ureg.dimensionless,
'ci': 24. * ureg.dimensionless,
'pl': 5.5 * ureg.dimensionless,
'pi': 24.0 * ureg.dimensionless}
self.LDR_per_hyd = {'cl': 0.03 * 1 / (ureg.kg / (ureg.m**3)),
'ci': 0.35 * 1 / (ureg.kg / (ureg.m**3)),
'pl': 0.1 * 1 / (ureg.kg / (ureg.m**3)),
'pi': 0.40 * 1 / (ureg.kg / (ureg.m**3))}
self.vel_param_a = {'cl': 3e-7, 'ci': 700., 'pl': 841.997, 'pi': 11.72}
self.vel_param_b = {'cl': 2. * ureg.dimensionless,
'ci': 1. * ureg.dimensionless,
'pl': 0.8 * ureg.dimensionless,
'pi': 0.41 * ureg.dimensionless}
self.fluffy = {'ci': 0.5 * ureg.dimensionless, 'pi': 0.5 * ureg.dimensionless}
super()._add_vel_units()
        self.q_names = {'cl': 'QCLOUD', 'ci': 'QICE',
                        'pl': 'QRAIN', 'pi': 'QSNOW'}
self.q_field = "QVAPOR"
self.N_field = {'cl': 'QNCLOUD', 'ci': 'QNICE',
'pl': 'QNRAIN', 'pi': 'QNSNOW'}
self.p_field = "pressure"
self.z_field = "Z"
self.T_field = "T"
self.conv_frac_names = {'cl': 'conv_frac', 'ci': 'conv_frac',
'pl': 'conv_frac', 'pi': 'conv_frac'}
self.strat_frac_names = {'cl': 'strat_frac', 'ci': 'strat_frac',
'pl': 'strat_frac', 'pi': 'strat_frac'}
self.conv_frac_names_for_rad = {
'cl': 'conv_frac', 'ci': 'conv_frac',
'pl': 'conv_frac', 'pi': 'conv_frac'}
self.strat_frac_names_for_rad = {
'cl': 'strat_frac', 'ci': 'strat_frac',
'pl': 'strat_frac', 'pi': 'strat_frac'}
self.re_fields = {'cl': 'strat_cl_frac', 'ci': 'strat_ci_frac',
'pi': 'strat_pi_frac', 'pl': 'strat_pl_frac'}
        self.strat_re_fields = {'cl': 'strat_cl_re', 'ci': 'strat_ci_re',
                                'pi': 'strat_pi_re', 'pl': 'strat_pl_re'}
self.conv_re_fields = {'cl': 'conv_cl_re', 'ci': 'conv_ci_re',
'pi': 'conv_pi_re', 'pl': 'conv_pl_re'}
self.q_names_convective = {'cl': 'qclc', 'ci': 'qcic',
'pl': 'qplc', 'pi': 'qpic'}
self.q_names_stratiform = {'cl': 'qcls', 'ci': 'qcis',
'pl': 'qpls', 'pi': 'qpis'}
ds = xr.open_dataset(file_path)
wrfin = Dataset(file_path)
self.ds = {}
self.ds["pressure"] = ds["P"] + ds["PB"]
self.ds["pressure"].attrs["units"] = "hPa"
self.ds["Z"] = getvar(wrfin, "z", units="m", timeidx=ALL_TIMES)
self.ds["T"] = getvar(wrfin, "tk", timeidx=ALL_TIMES)
self.ds["T"] = self.ds["T"] + 273.15
self.ds["T"].attrs["units"] = "K"
W = getvar(wrfin, "wa", units="m s-1", timeidx=ALL_TIMES)
shp = W.values.shape
W = W.values.max(axis=1)
W = np.transpose(np.tile(W, (shp[1], 1, 1, 1)), [1, 0, 2, 3])
where_conv = np.where(W > w_thresh, 1, 0)
self.ds["conv_frac"] = xr.DataArray(
where_conv,
dims=('Time', 'bottom_top', 'north_south', 'east_west'))
self.ds["strat_frac"] = xr.DataArray(
1 - where_conv,
dims=('Time', 'bottom_top', 'north_south', 'east_west'))
self.ds["qclc"] = ds["QCLOUD"] * where_conv
self.ds["qcic"] = ds["QICE"] * where_conv
self.ds["qplc"] = ds["QRAIN"] * where_conv
self.ds["qpic"] = ds["QSNOW"] * where_conv
self.ds["qcls"] = ds["QCLOUD"] * (1 - where_conv)
self.ds["qcis"] = ds["QICE"] * (1 - where_conv)
self.ds["qpls"] = ds["QRAIN"] * (1 - where_conv)
self.ds["qpis"] = ds["QSNOW"] * (1 - where_conv)
self.ds["QNCLOUD"] = ds["QNCLOUD"]
self.ds["QNRAIN"] = ds["QNRAIN"]
self.ds["QNSNOW"] = ds["QNSNOW"]
self.ds["QNICE"] = ds["QNICE"]
self.ds["QVAPOR"] = ds["QVAPOR"]
self.time_dim = "Time"
self.height_dim = "bottom_top"
self.model_name = "WRF"
self.lat_name = "XLAT"
self.lon_name = "XLONG"
wrfin.close()
for keys in self.ds.keys():
try:
self.ds[keys] = self.ds[keys].drop("XTIME")
except KeyError:
continue
self.ds = xr.Dataset(self.ds)
# crop specific model output time range (if requested)
if time_range is not None:
super()._crop_time_range(time_range)
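# Example (hypothetical usage; path and threshold are placeholders):
#   model = WRF("wrfout_d01_2016-08-01_00:00:00", w_thresh=1.0)
#   # columns whose peak updraft exceeds w_thresh (m/s) are flagged 100% convective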
class DHARMA(Model):
def __init__(self, file_path, time_range=None):
"""
This loads a DHARMA simulation with all of the necessary parameters
for EMC^2 to run.
Parameters
----------
file_path: str
            Path to a DHARMA simulation.
time_range: tuple or None
Start and end time to include. If this is None, the entire
simulation will be included.
"""
super().__init__()
self.Rho_hyd = {'cl': 1000. * ureg.kg / (ureg.m**3), 'ci': 500. * ureg.kg / (ureg.m**3),
'pl': 1000. * ureg.kg / (ureg.m**3), 'pi': 100. * ureg.kg / (ureg.m**3)}
self.fluffy = {'ci': 0.5 * ureg.dimensionless, 'pi': 0.5 * ureg.dimensionless}
self.lidar_ratio = {'cl': 18. * ureg.dimensionless,
'ci': 24. * ureg.dimensionless,
'pl': 5.5 * ureg.dimensionless,
'pi': 24.0 * ureg.dimensionless}
self.LDR_per_hyd = {'cl': 0.03 * 1 / (ureg.kg / (ureg.m**3)),
'ci': 0.35 * 1 / (ureg.kg / (ureg.m**3)),
'pl': 0.1 * 1 / (ureg.kg / (ureg.m**3)),
'pi': 0.40 * 1 / (ureg.kg / (ureg.m**3))}
self.vel_param_a = {'cl': 3e-7, 'ci': 700., 'pl': 841.997, 'pi': 11.72}
self.vel_param_b = {'cl': 2. * ureg.dimensionless,
'ci': 1. * ureg.dimensionless,
'pl': 0.8 * ureg.dimensionless,
'pi': 0.41 * ureg.dimensionless}
super()._add_vel_units()
self.q_field = "q"
self.N_field = {'cl': 'ncl', 'ci': 'nci', 'pl': 'npl', 'pi': 'npi'}
self.p_field = "p"
self.z_field = "z"
self.T_field = "t"
self.height_dim = "hgt"
self.time_dim = "dom_col"
self.conv_frac_names = {'cl': 'conv_dat', 'ci': 'conv_dat',
'pl': 'conv_dat', 'pi': 'conv_dat'}
self.strat_frac_names = {'cl': 'strat_cl_frac', 'ci': 'strat_ci_frac',
'pl': 'strat_pl_frac', 'pi': 'strat_pi_frac'}
self.conv_frac_names_for_rad = {'cl': 'conv_dat', 'ci': 'conv_dat',
'pl': 'conv_dat', 'pi': 'conv_dat'}
self.strat_frac_names_for_rad = {'cl': 'strat_cl_frac', 'ci': 'strat_ci_frac',
'pl': 'strat_pl_frac', 'pi': 'strat_pi_frac'}
self.conv_re_fields = {'cl': 'strat_cl_frac', 'ci': 'strat_ci_frac',
'pi': 'strat_pi_frac', 'pl': 'strat_pl_frac'}
self.strat_re_fields = {'cl': 'strat_cl_frac', 'ci': 'strat_ci_frac',
'pi': 'strat_pi_frac', 'pl': 'strat_pl_frac'}
self.q_names_convective = {'cl': 'conv_dat', 'ci': 'conv_dat', 'pl': 'conv_dat', 'pi': 'conv_dat'}
self.q_names_stratiform = {'cl': 'qcl', 'ci': 'qci', 'pl': 'qpl', 'pi': 'qpi'}
self.ds = read_netcdf(file_path)
for variable in self.ds.variables.keys():
my_attrs = self.ds[variable].attrs
self.ds[variable] = self.ds[variable].astype('float64')
self.ds[variable].attrs = my_attrs
# Check to make sure we are loading a single column
if 'lat' in [x for x in self.ds.dims.keys()]:
if self.ds.dims['lat'] != 1 or self.ds.dims['lon'] != 1:
self.ds.close()
raise RuntimeError("%s is not an SCM run. EMC^2 will only work with SCM runs." % file_path)
# No need for lat and lon dimensions
self.ds = self.ds.squeeze(dim=('lat', 'lon'))
# crop specific model output time range (if requested)
if time_range is not None:
super()._crop_time_range(time_range)
self.model_name = "DHARMA"
class TestModel(Model):
"""
This is a test Model structure used only for unit testing. It is not recommended for end users.
"""
def __init__(self):
q = np.linspace(0, 1, 1000) * ureg.gram / ureg.kilogram
N = 100 * np.ones_like(q) / (ureg.centimeter ** 3)
heights = np.linspace(0, 11000., 1000) * ureg.meter
temp = 15.04 * ureg.kelvin - quantity(0.00649, 'kelvin/meter') * heights + 273.15 * ureg.kelvin
temp_c = temp.to('degC')
p = 1012.9 * ureg.hPa * (temp / (288.08 * ureg.kelvin)) ** 5.256
es = 0.6112 * ureg.hPa * np.exp(17.67 * temp_c.magnitude / (temp_c.magnitude + 243.5))
qv = 0.622 * es * 1e3 / (p * 1e2 - es * 1e3)
times = xr.DataArray(np.array([0]), dims=('time'))
times.attrs["units"] = "seconds"
heights = xr.DataArray(heights.magnitude[np.newaxis, :], dims=('time', 'height'))
heights.attrs['units'] = "meter"
heights.attrs["long_name"] = "Height above MSL"
p_units = p.units
p = xr.DataArray(p.magnitude[np.newaxis, :], dims=('time', 'height'))
p.attrs["long_name"] = "Air pressure"
p.attrs["units"] = '%s' % p_units
qv_units = qv.units
qv = xr.DataArray(qv.magnitude[np.newaxis, :], dims=('time', 'height'))
qv.attrs["long_name"] = "Water vapor mixing ratio"
qv.attrs["units"] = '%s' % qv_units
t_units = temp_c.units
temp = xr.DataArray(temp_c.magnitude[np.newaxis, :], dims=('time', 'height'))
temp.attrs["long_name"] = "Air temperature"
temp.attrs["units"] = '%s' % t_units
        q_units = q.units
        q = xr.DataArray(q.magnitude[np.newaxis, :], dims=('time', 'height'))
        q.attrs["long_name"] = "Liquid cloud water mixing ratio"
        q.attrs["units"] = '%s' % q_units
        N_units = N.units
        N = xr.DataArray(N.magnitude[np.newaxis, :], dims=('time', 'height'))
        N.attrs["long_name"] = "Cloud particle number concentration"
        N.attrs["units"] = '%s' % N_units
my_ds = xr.Dataset({'p_3d': p, 'q': qv, 't': temp, 'z': heights,
'qcl': q, 'ncl': N, 'qpl': q, 'qci': q, 'qpi': q,
'time': times})
super().__init__()
self.Rho_hyd = {'cl': 1000. * ureg.kg / (ureg.m ** 3), 'ci': 500. * ureg.kg / (ureg.m ** 3),
'pl': 1000. * ureg.kg / (ureg.m ** 3), 'pi': 250. * ureg.kg / (ureg.m ** 3)}
self.fluffy = {'ci': 0.5 * ureg.dimensionless, 'pi': 0.5 * ureg.dimensionless}
self.lidar_ratio = {'cl': 18. * ureg.dimensionless,
'ci': 24. * ureg.dimensionless,
'pl': 5.5 * ureg.dimensionless,
'pi': 24.0 * ureg.dimensionless}
self.LDR_per_hyd = {'cl': 0.03 * 1 / (ureg.kg / (ureg.m ** 3)),
'ci': 0.35 * 1 / (ureg.kg / (ureg.m ** 3)),
'pl': 0.1 * 1 / (ureg.kg / (ureg.m ** 3)),
'pi': 0.40 * 1 / (ureg.kg / (ureg.m ** 3))}
self.vel_param_a = {'cl': 3e-7, 'ci': 700., 'pl': 841.997, 'pi': 11.72}
self.vel_param_b = {'cl': 2. * ureg.dimensionless,
'ci': 1. * ureg.dimensionless,
'pl': 0.8 * ureg.dimensionless,
'pi': 0.41 * ureg.dimensionless}
super()._add_vel_units()
self.q_names_convective = {'cl': 'qcl', 'ci': 'qci', 'pl': 'qpl', 'pi': 'qpi'}
self.q_names_stratiform = {'cl': 'qcl', 'ci': 'qci', 'pl': 'qpl', 'pi': 'qpi'}
self.q_field = "q"
self.N_field = {'cl': 'ncl', 'ci': 'nci', 'pl': 'npl', 'pi': 'npi'}
self.p_field = "p_3d"
self.z_field = "z"
self.T_field = "t"
self.conv_frac_names = {'cl': 'cldmccl', 'ci': 'cldmcci', 'pl': 'cldmcpl', 'pi': 'cldmcpi'}
self.strat_frac_names = {'cl': 'cldsscl', 'ci': 'cldssci', 'pl': 'cldsspl', 'pi': 'cldsspi'}
self.conv_frac_names_for_rad = {'cl': 'cldmccl', 'ci': 'cldmcci', 'pl': 'cldmcpl', 'pi': 'cldmcpi'}
self.strat_frac_names_for_rad = {'cl': 'cldsscl', 'ci': 'cldssci', 'pl': 'cldsspl', 'pi': 'cldsspi'}
self.ds = my_ds
self.height_dim = "height"
self.time_dim = "time"
class TestConvection(Model):
"""
This is a test Model structure used only for unit testing.
This model has a 100% convective column from 1 km to 11 km.
It is not recommended for end users.
"""
def __init__(self):
q = np.linspace(0, 1, 1000) * ureg.gram / ureg.kilogram
N = 100 * np.ones_like(q) * (ureg.centimeter ** -3)
Npl = 0.001 * np.ones_like(1) * (ureg.centimeter ** -3)
heights = np.linspace(0, 11000., 1000) * ureg.meter
temp = 15.04 * ureg.kelvin - 0.00649 * (ureg.kelvin / ureg.meter) * heights + 273.15 * ureg.kelvin
temp_c = temp.to('degC')
p = 1012.9 * ureg.hPa * (temp / (288.08 * ureg.kelvin)) ** 5.256
re_cl = 10 * np.ones_like(q) * ureg.micrometer
re_pl = 100 * np.ones_like(q) * ureg.micrometer
es = 0.6112 * ureg.hPa * np.exp(17.67 * temp_c.magnitude / (temp_c.magnitude + 243.5))
qv = 0.622 * es * 1e3 / (p * 1e2 - es * 1e3) * q.units
convective_liquid = np.logical_and(heights > 1000. * ureg.meter,
temp >= 273.15 * ureg.kelvin)
convective_ice = np.logical_and(heights > 1000. * ureg.meter,
temp < 273.15 * ureg.kelvin)
Nci = np.where(convective_ice, Npl.magnitude, 0)
Npi = np.where(convective_ice, Npl.magnitude, 0)
        Npl = np.where(convective_liquid, Npl.magnitude, 0)
# Output GT heatmap as an auxiliary input
# Single object only
# Additionally output the previous image crop
# at the same location (but centered on the (current?) object)
import torch
import torchvision
from .abstract_datasets import DetectionDataset
import cv2
import os
import numpy as np
import json
import math
class Surgical_Hands_v2(DetectionDataset):
"""
Data annotated from publicly available surgical hand videos
x training samples
x testing samples
"""
def __init__(self, *args, **kwargs):
super(Surgical_Hands_v2, self).__init__(*args, **kwargs)
self.load_type = kwargs['load_type']
self.json_path = kwargs['json_path']
# Maximum number of annotated object present in a single frame in entire dataset
# Dictates the return size of annotations in __getitem__
self.max_objects = 1
self.sigma = kwargs['gaussian_sigma']
self.heatmap_size = kwargs['heatmap_size']
self.image_height = self.final_shape[0]
self.image_width = self.final_shape[1]
self.stride = (self.image_width / self.heatmap_size[0],
self.image_height / self.heatmap_size[1]) # effective stride of the entire network
self.num_keypoints = 21 # 21 annotated hand keypoints
self.sc = kwargs['sc']
self.mask_occ = False # Treat occluded keypoints as un-annotated, if False treat them as GT labels
self.joint_names = ['wrist', 'thumb_k', 'thumb_b', 'thumb_m', 'thumb_t', \
'index_k', 'index_b', 'index_m', 'index_t', \
'middle_k', 'middle_b', 'middle_m', 'middle_t', \
'ring_k', 'ring_b', 'ring_m', 'ring_t', \
'pinky_k', 'pinky_b', 'pinky_m', 'pinky_t']
self.neighbor_link = [[0, 1], [1, 2], [2, 3], [3, 4],
[0, 5], [5, 6], [6, 7], [7, 8],
[0, 9], [9, 10], [10, 11], [11, 12],
[0, 13], [13, 14], [14, 15], [15, 16],
[0, 17], [17, 18], [18, 19], [19, 20]]
# Colors RGB
self.colors = [[187, 38, 26], [187, 38, 26], [187, 38, 26], [187, 38, 26],
[172, 201, 63], [172, 201, 63], [172, 201, 63], [172, 201, 63],
[92, 200, 97], [92, 200, 97], [92, 200, 97], [92, 200, 97],
[28, 84, 197], [28, 84, 197], [28, 84, 197], [28, 84, 197],
[149, 40, 197], [149, 40, 197], [149, 40, 197], [149, 40, 197]]
self.categories = {'supercategory': 'hand',
'id': 2,
'name': 'hand', # maybe distinguish between left/right hand?
'keypoints': self.joint_names,
'skeleton': torch.Tensor(self.neighbor_link)}
self.viz = kwargs['viz']
if self.load_type == 'train':
self.transforms = kwargs['model_obj'].train_transforms
else:
self.transforms = kwargs['model_obj'].test_transforms
# Track statistics of hand positions through dataset
avg_hand_pts = np.zeros((self.num_keypoints, 2))
num_hand_pts = np.zeros((self.num_keypoints, 1))
print('{} samples in {}'.format(len(self.samples), self.load_type))
self.new_samples = []
self.img_id_to_kpts = {} # Mapping between images and keypoints within them
self.t1_to_t0 = {} # Point to the previous image. First image points to itself
        min_temporal_dist = kwargs.get('min_temporal_dist', 4)
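        # Example (hedged): with min_temporal_dist=4, a labeled frame 10 links to the
        # most recent labeled frame whose ID differs by at least 4 (e.g., frame 6);
        # the first labeled frame of each video links to None.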
vid_id_to_frames = {} # all the labeled frames in each vid_id
vid_id_to_path = {}
prev_vid_id = None
prev_frame_id = None
for idx, item in enumerate(self.samples):
width, height = item['frame_size']
vid_id = item['frames'][0]['vid_id']
labeled_frames = vid_id_to_frames.get(vid_id, [])
lbl_frame_paths = vid_id_to_path.get(vid_id, [])
for frm in item['frames']:
bbox_data = []
if not frm['is_labeled']:
continue
frame_id = int(frm['frame_id'])
frame_pth = frm['img_path']
labeled_frames.append(frame_id)
lbl_frame_paths.append(frame_pth)
if frame_id not in self.img_id_to_kpts:
self.img_id_to_kpts[frame_id] = {}
for obj in frm['objs']:
kpts = np.array(obj['hand_pts']).reshape(self.num_keypoints, 3)
# kpts - (x,y,visibility)
# visibility: 0 - unannotated, 1 - occluded, 2 - visible
if prev_vid_id != vid_id:
# self.t1_to_t0[frame_id] = {'frame_path':frame_pth, 'frame_id':frame_id} #first frame points to itself
self.t1_to_t0[frame_id] = None # first frame points to None
elif frame_id != prev_frame_id:
d = abs(frame_id - np.array(labeled_frames))
                        valid = np.array(labeled_frames)[d >= min_temporal_dist]  # frames at least min_temporal_dist away
# selected_frame_id = max(valid, default=frame_id) #point to the closest valid frame, defaults to itself
selected_frame_id = max(valid, default=None)
try:
idx = labeled_frames.index(selected_frame_id)
selected_frame_path = lbl_frame_paths[idx]
                        except ValueError:  # selected_frame_id is None, i.e. must be at least min_temporal_dist frames away
selected_frame_path = None
self.t1_to_t0[frame_id] = {'frame_path': selected_frame_path, \
'frame_id': selected_frame_id} # point to the closest valid frame, defaults to None
#########Generate keypoints for aux input
trackid = obj['trackid']
unann = obj['occ']
obj_bbox = obj['bbox'] # [xmin, ymin, xmax, ymax]
hand_pts = obj['hand_pts'] # 21 points (x,y,visibility)
if not obj_bbox:
obj_bbox = np.zeros(4)
xmin, ymin, xmax, ymax = obj_bbox
# expand area around bbox
sc = self.sc
w = xmax - xmin
h = ymax - ymin
cx = xmin + w / 2
cy = ymin + h / 2
w *= sc
h *= sc
xmin = int(cx - (w / 2))
ymin = int(cy - (h / 2))
xmax = int(cx + (w / 2))
ymax = int(cy + (h / 2))
# Pad images so hand is still in center of crop
pl = pt = pr = pb = 0
if xmin < 0:
pl = abs(xmin)
if ymin < 0:
pt = abs(ymin)
if xmax > (width + pl):
pr = abs(width - xmax)
if ymax > (height + pt):
pb = abs(height - ymax)
hand_crop = [xmin + pl, ymin + pt, xmax, ymax]
# incase annotations include invalid coords
hand_pts = np.array(hand_pts).reshape((self.num_keypoints, 3))
visibility = hand_pts[:, -1]
if self.mask_occ:
for i, v in enumerate(visibility):
if v == 1:
unann[i] = True
hand_pts += np.array([[pl, pt, 0]]) # Adjust keypoints by padding
# Crop hand and resize, perform same transforms to ground truth keypoints
mask = [True if (1 - o) else False for o in unann] # need a mask because invalid keypoints messes up the preprocessing
self.img_id_to_kpts[frame_id][trackid] = {'hand_pts': hand_pts, 'bbox': obj['bbox'], 'center': [cx, cy], \
'mask': mask, 'crop': hand_crop, 'padding': [pl, pt, pr, pb]}
# Track keypoint statistics, based on expected padding and crop
vis = (visibility[:, None] > 0)
avg_hand_pts += (hand_pts[:, :2] / np.array([[w, h]]) * vis)
num_hand_pts += vis
######################
prev_vid_id = vid_id
prev_frame_id = frame_id
if np.any(np.array(obj['bbox']) < 0):
# A keypoint is occluded if either the x or y coordinate is less than 0
occ_x = kpts[:, 0] < 0
occ_y = kpts[:, 1] < 0
occ_c = (kpts[:, 2] == 0)
occ = np.logical_or(occ_x, np.logical_or(occ_y, occ_c))
obj['occ'] = occ
elif np.any(kpts[:, 0] > width):
# A keypoint is occluded if either the x coordinate is greater than image width
occ_x = kpts[:, 0] > width
occ_c = (kpts[:, 2] == 0)
occ = np.logical_or(occ_x, occ_c)
obj['occ'] = occ
elif np.any(kpts[:, 1] > height):
# A keypoint is occluded if either the y coordinate is greater than image height
occ_y = kpts[:, 1] > height
occ_c = (kpts[:, 2] == 0)
occ = np.logical_or(occ_y, occ_c)
obj['occ'] = occ
                    # Don't keep samples with fewer than 3 visible keypoints
if sum(obj['occ']) >= (self.num_keypoints - 1) - 1:
continue
bbox_data.append(obj['bbox'])
new_item = {}
new_item['frames'] = [{'objs': [obj], 'img_path': frm['img_path'], \
'vid_id': vid_id, 'frame_id': frame_id, 'is_labeled': frm['is_labeled']}]
new_item['base_path'] = item['base_path']
new_item['frame_size'] = item['frame_size']
self.new_samples.append(new_item)
'''
import matplotlib.pyplot as plt
import matplotlib.patches as patches
fig = plt.figure()
ax = fig.add_subplot(111)
base_path = item['base_path']
frame_path = frm['img_path']
vis = (cv2.imread(frame_path)[...,::-1])
plt.imshow(vis)
for bbox in bbox_data:
#tight bbox
xmin, ymin, xmax, ymax = bbox
rect = patches.Rectangle((xmin,ymin), xmax-xmin, ymax-ymin, linewidth=2, edgecolor='g', facecolor='none')
ax.add_patch(rect)
plt.show()
'''
vid_id_to_frames[vid_id] = labeled_frames
vid_id_to_path[vid_id] = lbl_frame_paths
self.samples = self.new_samples
del self.new_samples
print('{} filtered samples in {}'.format(len(self.samples), self.load_type))
'''
#Calculate displacement of hands between min_temporal_dist frames
obj_dists = []
for t1 in self.t1_to_t0.keys():
t0 = self.t1_to_t0[t1]
if t0 is None or t0['frame_id'] is None:
continue
objs_t1 = self.img_id_to_kpts[t1]
objs_t0 = self.img_id_to_kpts[t0['frame_id']]
for tid, obj_t1 in objs_t1.items():
obj_t0 = objs_t0.get(tid, None)
if obj_t0 is None:
continue
kpt_t1_mean = np.mean(obj_t1['hand_pts'][obj_t1['mask']], axis=0)[:2]
kpt_t0_mean = np.mean(obj_t0['hand_pts'][obj_t0['mask']], axis=0)[:2]
dist = np.linalg.norm(kpt_t0_mean - kpt_t1_mean) #Euclidean distance between both centers
if math.isnan(dist):
continue
obj_dists.append(dist)
print('Mean dist: {}'.format(np.mean(obj_dists)))
print('Median dist: {}'.format(np.median(obj_dists)))
print('Max dist: {}'.format(np.max(obj_dists)))
import pdb; pdb.set_trace()
import matplotlib.pyplot as plt
plt.hist(obj_dists, bins=30)
plt.show()
'''
# Adapted from: https://github.com/microsoft/human-pose-estimation.pytorch
def generate_target(self, joints):
"""
:param joints: [num_joints, 3]
:param joints_vis: [num_joints, 3]
:return: target, target_weight(1: visible, 0: invisible)
"""
target_weight = np.ones((self.num_keypoints, 1), dtype=np.float32)
target_weight[:, 0] = joints[:, -1]
target = np.zeros((self.num_keypoints,
self.heatmap_size[1],
self.heatmap_size[0]),
dtype=np.float32)
tmp_size = self.sigma * 3
for joint_id in range(self.num_keypoints):
mu_x = int(joints[joint_id][0] / self.stride[0] + 0.5)
mu_y = int(joints[joint_id][1] / self.stride[1] + 0.5)
# Check that any part of the gaussian is in-bounds
ul = [int(mu_x - tmp_size), int(mu_y - tmp_size)]
br = [int(mu_x + tmp_size + 1), int(mu_y + tmp_size + 1)]
if ul[0] >= self.heatmap_size[0] or ul[1] >= self.heatmap_size[1] \
or br[0] < 0 or br[1] < 0:
# If not, just return the image as is
target_weight[joint_id] = 0
continue
            # Generate gaussian
size = 2 * tmp_size + 1
x = np.arange(0, size, 1, np.float32)
y = x[:, np.newaxis]
x0 = y0 = size // 2
# The gaussian is not normalized, we want the center value to equal 1
g = np.exp(- ((x - x0) ** 2 + (y - y0) ** 2) / (2 * self.sigma ** 2))
# Usable gaussian range
g_x = max(0, -ul[0]), min(br[0], self.heatmap_size[0]) - ul[0]
g_y = max(0, -ul[1]), min(br[1], self.heatmap_size[1]) - ul[1]
# Image range
img_x = max(0, ul[0]), min(br[0], self.heatmap_size[0])
img_y = max(0, ul[1]), min(br[1], self.heatmap_size[1])
v = target_weight[joint_id]
if v > 0.5:
target[joint_id][img_y[0]:img_y[1], img_x[0]:img_x[1]] = \
g[g_y[0]:g_y[1], g_x[0]:g_x[1]]
return target, target_weight
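    # Example (hedged sketch): with stride (4, 4) and sigma=2, a joint annotated at
    # pixel (100, 60) maps to heatmap cell (25, 15) and receives an unnormalized
    # gaussian (peak value 1) of half-width 3*sigma cells; joints whose gaussian
    # lies entirely off the heatmap get target_weight 0.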
def __getitem__(self, idx):
vid_info = self.samples[idx]
base_path = vid_info['base_path']
vid_size = vid_info['frame_size']
input_data = []
bbox_data = np.zeros((self.clip_length, 4)) - 1
hand_crops = np.zeros((self.clip_length, 4)) - 1
hand_pts_coords = np.zeros((self.clip_length, self.num_keypoints, 3)) - 1
org_hand_pts = np.zeros((self.clip_length, self.num_keypoints, 2)) - 1
obj_ids = np.zeros(self.clip_length, dtype=np.int64) - 1
labels = np.zeros(self.clip_length) - 1
unannotated = np.zeros((self.clip_length, 21), dtype=np.int32) - 1 # 21 keypoints
padding = np.zeros((self.clip_length, 4), dtype=np.int32) # pl, pt, pr, pb
target = np.zeros((self.clip_length, self.num_keypoints, self.heatmap_size[1], self.heatmap_size[0]), dtype=np.float32) - 1
target_weight = np.zeros((self.clip_length, self.num_keypoints, 1), dtype=np.float32) - 1
frame_ids = np.zeros(self.clip_length, dtype=np.int64)
frame_paths = []
for frame_ind in range(len(vid_info['frames'])):
frame = vid_info['frames'][frame_ind]
width, height = vid_info['frame_size']
frame_path = frame['img_path']
frame_id = frame['frame_id']
# Extract bbox and label data from video info
frame_paths.append(frame_path)
frame_ids[frame_ind] = frame_id
# Load frame, convert to RGB from BGR and normalize from 0 to 1
input_data = cv2.imread(frame_path)[..., ::-1]
for obj in frame['objs']:
trackid = obj['trackid'] # Let's ignore trackid for now, only one annotation per image
obj_id = obj['id']
label = 0 if obj['c'] == 'left' else 1 # 0: left hand, 1: right hand
unann = obj['occ']
obj_bbox = obj['bbox'] # [xmin, ymin, xmax, ymax]
hand_pts = obj['hand_pts'] # 21 points (x,y,visibility)
xmin, ymin, xmax, ymax = obj_bbox
# ensure bounding box encompasses all keypoints - error occurs otherwise
hand_pts = np.array(hand_pts).reshape((self.num_keypoints, 3))
_mask = hand_pts[:, -1] > 0
xpt_max, ypt_max, _ = np.max(hand_pts[_mask], axis=0)
xpt_min, ypt_min, _ = np.min(hand_pts[_mask], axis=0)
xtl_adjust = np.clip(xmin - xpt_min, a_min=0, a_max=None)
ytl_adjust = np.clip(ymin - ypt_min, a_min=0, a_max=None)
xbr_adjust = np.clip(xpt_max - xmax, a_min=0, a_max=None)
ybr_adjust = np.clip(ypt_max - ymax, a_min=0, a_max=None)
xmin -= xtl_adjust
ymin -= ytl_adjust
xmax += xbr_adjust
ymax += ybr_adjust
# expand area around bbox
sc = self.sc
w = xmax - xmin
h = ymax - ymin
cx = xmin + w / 2
cy = ymin + h / 2
w *= sc
h *= sc
xmin = int(cx - (w / 2))
ymin = int(cy - (h / 2))
xmax = int(cx + (w / 2))
ymax = int(cy + (h / 2))
# Pad images so hand is still in center of crop
pl = pt = pr = pb = 0
if xmin < 0:
pl = abs(xmin)
if ymin < 0:
pt = abs(ymin)
if xmax > (width + pl):
pr = abs(width - xmax)
if ymax > (height + pt):
pb = abs(height - ymax)
hand_crop = [xmin + pl, ymin + pt, xmax, ymax]
# incase annotations include invalid coords
vis = hand_pts[:, -1]
if self.mask_occ:
for i, v in enumerate(vis):
if v == 1:
unann[i] = True
org_hand_pts[frame_ind] = hand_pts[:, :2]
hand_pts += np.array([[pl, pt, 0]]) # Adjust keypoints by padding
# hand_pts[:,0] = np.clip(hand_pts[:,0], 0, width)
# hand_pts[:,1] = np.clip(hand_pts[:,1], 0, height)
hand_pts[:, 2] = np.clip(hand_pts[:, 2], 0, 1)
# Let's make the obj_id numeric only
obj_id = int(''.join((obj_id.split('_')[-4:])))
bbox_data[frame_ind] = obj_bbox
obj_ids[frame_ind] = obj_id
labels[frame_ind] = label
hand_pts_coords[frame_ind] = hand_pts
hand_crops[frame_ind] = hand_crop
unannotated[frame_ind] = unann
padding[frame_ind] = [pl, pt, pr, pb]
# Crop hand and resize, perform same transforms to ground truth keypoints
mask = [True if (1 - o) else False for o in unann] # need a mask because invalid keypoints messes up the preprocessing
                vid_data, temp, out_params = self.transforms(
                    cv2.copyMakeBorder(input_data, pt, pb, pl, pr,
                                       cv2.BORDER_CONSTANT, value=0)[None],
                    {'bbox_data': hand_pts_coords[None, :, mask, :2],
                     'hand_crop': hand_crop, 'label': labels})
flipped = out_params['flip']
angle = out_params.get('out_rot', None)
hand_pts_coords[None, :, mask, :2] = temp
obj_trgt, obj_trgt_wght = self.generate_target(hand_pts_coords[0])
target[frame_ind] = obj_trgt
target_weight[frame_ind] = obj_trgt_wght
aux_input = np.zeros((self.num_keypoints, self.heatmap_size[1], self.heatmap_size[0]), dtype=np.float32)
aux_data = np.zeros((self.clip_length, self.final_shape[0], self.final_shape[1], 3), dtype=np.float32)
        aux_pts_coords = np.zeros((self.clip_length, self.num_keypoints, 3))
"""
Copyright (c) 2014 NavPy Developers. All rights reserved.
Use of this source code is governed by a BSD-style license that can be found in
LICENSE.txt
"""
import numpy as np
from . import wgs84
from ..utils import input_check_Nx3 as _input_check_Nx3
from ..utils import input_check_Nx3x3 as _input_check_Nx3x3
from ..utils import input_check_Nx1 as _input_check_Nx1
def angle2dcm(rotAngle1, rotAngle2, rotAngle3, input_unit='rad',
rotation_sequence='ZYX', output_type='ndarray'):
"""
This function converts Euler Angle into Direction Cosine Matrix (DCM).
    The DCM is described by three successive rotations rotAngle1, rotAngle2, and
rotAngle3 about the axes described by the rotation_sequence.
The default rotation_sequence='ZYX' is the aerospace sequence and rotAngle1
is the yaw angle, rotAngle2 is the pitch angle, and rotAngle3 is the roll
angle. In this case DCM transforms a vector from the locally level
coordinate frame (i.e. the NED frame) to the body frame.
This function can batch process a series of rotations (e.g., time series
of Euler angles).
Parameters
----------
rotAngle1, rotAngle2, rotAngle3 : angles {(N,), (N,1), or (1,N)}
They are a sequence of angles about successive axes described by
rotation_sequence.
input_unit : {'rad', 'deg'}, optional
Rotation angles. Default is 'rad'.
rotation_sequence : {'ZYX'}, optional
Rotation sequences. Default is 'ZYX'.
output_type : {'ndarray','matrix'}, optional
Output type. Default is 'ndarray'.
Returns
--------
C : {3x3} Direction Cosine Matrix
Notes
-----
Programmer: <NAME>
Created: May 03, 2011
Last Modified: January 12, 2016
"""
rotAngle1, N1 = _input_check_Nx1(rotAngle1)
rotAngle2, N2 = _input_check_Nx1(rotAngle2)
rotAngle3, N3 = _input_check_Nx1(rotAngle3)
if(N1 != N2 or N1 != N3):
raise ValueError('Inputs are not of same dimensions')
if(N1 > 1 and output_type != 'ndarray'):
raise ValueError('Matrix output requires scalar inputs')
R3 = np.zeros((N1, 3, 3))
R2 = np.zeros((N1, 3, 3))
R1 = np.zeros((N1, 3, 3))
if(input_unit == 'deg'):
rotAngle1 = np.deg2rad(rotAngle1)
rotAngle2 = np.deg2rad(rotAngle2)
rotAngle3 = np.deg2rad(rotAngle3)
R3[:, 2, 2] = 1.0
R3[:, 0, 0] = np.cos(rotAngle1)
R3[:, 0, 1] = np.sin(rotAngle1)
R3[:, 1, 0] = -np.sin(rotAngle1)
R3[:, 1, 1] = np.cos(rotAngle1)
R2[:, 1, 1] = 1.0
R2[:, 0, 0] = np.cos(rotAngle2)
R2[:, 0, 2] = -np.sin(rotAngle2)
R2[:, 2, 0] = np.sin(rotAngle2)
R2[:, 2, 2] = np.cos(rotAngle2)
R1[:, 0, 0] = 1.0
R1[:, 1, 1] = np.cos(rotAngle3)
R1[:, 1, 2] = np.sin(rotAngle3)
R1[:, 2, 1] = -np.sin(rotAngle3)
R1[:, 2, 2] = np.cos(rotAngle3)
if rotation_sequence == 'ZYX':
try:
# Equivalent to C = R1.dot(R2.dot(R3)) for each of N inputs but
# implemented efficiently in C extension
C = np.einsum('nij, njk, nkm -> nim', R1, R2, R3)
except AttributeError:
# Older NumPy without einsum
C = np.zeros((N1, 3, 3))
            for i, (r1, r2, r3) in enumerate(zip(R1, R2, R3)):
                C[i] = r1.dot(r2.dot(r3))
else:
raise NotImplementedError('Rotation sequences other than ZYX are not currently implemented')
if(N1 == 1):
C = C[0]
if(output_type == 'matrix'):
C = np.matrix(C)
return C
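# Example (hedged sketch, aerospace ZYX sequence):
#   C = angle2dcm(10., 20., 30., input_unit='deg')
#   # C rotates NED-frame vectors into the body frame for yaw=10, pitch=20, roll=30 deg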
def dcm2angle(C, output_unit='rad', rotation_sequence='ZYX'):
"""
This function converts a Direction Cosine Matrix (DCM) into the three
rotation angles.
    The DCM is described by three successive rotations rotAngle1, rotAngle2, and
rotAngle3 about the axes described by the rotation_sequence.
The default rotation_sequence='ZYX' is the aerospace sequence and rotAngle1
is the yaw angle, rotAngle2 is the pitch angle, and rotAngle3 is the roll
angle. In this case DCM transforms a vector from the locally level
coordinate frame (i.e. the NED frame) to the body frame.
This function can batch process a series of rotations (e.g., time series
of direction cosine matrices).
Parameters
----------
C : {(3,3), (N,3,3), or (3,3,N)}
direction consine matrix that rotates the vector from the first frame
to the second frame according to the specified rotation_sequence.
output_unit : {'rad', 'deg'}, optional
Rotation angles. Default is 'rad'.
rotation_sequence : {'ZYX'}, optional
Rotation sequences. Default is 'ZYX'.
Returns
-------
rotAngle1, rotAngle2, rotAngle3 : angles
They are a sequence of angles about successive axes described by
rotation_sequence.
Notes
-----
The returned rotAngle1 and 3 will be between +/- 180 deg (+/- pi rad).
In contrast, rotAngle2 will be in the interval +/- 90 deg (+/- pi/2 rad).
In the 'ZYX' or '321' aerospace sequence, that means the pitch angle
returned will always be inside the closed interval +/- 90 deg (+/- pi/2 rad).
Applications where pitch angles near or larger than 90 degrees in magnitude
are expected should used alternate attitude parameterizations like
quaternions.
"""
C, N = _input_check_Nx3x3(C)
if(rotation_sequence == 'ZYX'):
rotAngle1 = np.arctan2(C[..., 0, 1], C[..., 0, 0]) # Yaw
rotAngle2 = -np.arcsin(C[..., 0, 2]) # Pitch
rotAngle3 = np.arctan2(C[..., 1, 2], C[..., 2, 2]) # Roll
else:
raise NotImplementedError('Rotation sequences other than ZYX are not currently implemented')
if(output_unit == 'deg'):
rotAngle1 = np.rad2deg(rotAngle1)
rotAngle2 = np.rad2deg(rotAngle2)
rotAngle3 = np.rad2deg(rotAngle3)
return rotAngle1, rotAngle2, rotAngle3
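# Example round trip (hedged sketch):
#   yaw, pitch, roll = dcm2angle(angle2dcm(0.1, 0.2, 0.3))
#   # recovers (0.1, 0.2, 0.3) rad up to floating-point error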
def omega2rates(pitch, roll, input_unit='rad',
euler_angles_order='roll_pitch_yaw', output_type='ndarray'):
"""
This function is used to create the transformation matrix to go from:
[p, q, r] --> [roll_rate, pitch_rate, yaw_rate]
where pqr are xyz body rotation-rate measurements expressed in body frame.
Yaw, pitch, and roll are the Euler angles. We assume the Euler angles are
3-2-1 (i.e Yaw -> Pitch -> Roll) transformations that go from navigation-
frame to body-frame.
Parameters
----------
pitch : pitch angle, units of input_unit.
roll : roll angle , units of input_unit.
input_unit : units for input angles {'rad', 'deg'}, optional
euler_angles_order : {'roll_pitch_yaw', 'yaw_pitch_roll'}, optional
Assumed order of Euler Angles attitude state vector (see ``Notes``).
output_type : {'ndarray' or 'matrix'}, optional
Numpy array (default) or matrix
Returns
-------
R : transformation matrix, from xyz body-rate to Euler angle-rates
numpy 'output_type' 3x3 (Note: default return variable is an ARRAY,
not a matrix)
Notes
-----
Since the returned transformation matrix is used to transform one vector
to another, the assumed attitude variables order matters.
The ``euler_angles_order`` parameter can be used to specify the assumed
order.
The difference is demonstrated by example:
By default euler_angles_order='roll_pitch_yaw'
R = omega2rates(pitch, roll)
[ roll_rate] [omega_x]
[pitch_rate] = dot(R,[omega_y])
[ yaw_rate] [omega_z]
Now assume our attitude state is [yaw, pitch, roll].T
R = omega2rates(pitch, roll, euler_angles_order='yaw_pitch_roll')
[ yaw_rate] [omega_x]
[pitch_rate] = dot(R,[omega_y])
[ roll_rate] [omega_z]
References
----------
[1] Equation 2.74, Aided Navigation: GPS with High Rate Sensors,
<NAME> 2008
[2] omega2rates.m function at:
http://www.gnssapplications.org/downloads/chapter7/Chapter7_GNSS_INS_Functions.tar.gz
"""
# Apply necessary unit transformations.
if input_unit == 'rad':
pitch_rad, roll_rad = pitch, roll
elif input_unit == 'deg':
pitch_rad, roll_rad = np.radians([pitch, roll])
# Build transformation matrix.
s_r, c_r = np.sin( roll_rad), np.cos( roll_rad)
s_p, c_p = np.sin(pitch_rad), np.cos(pitch_rad)
# Check for singularities (i.e. pitch near 90 degrees)
    singular_tol = 1e-2  # flags anything between [90 +/- .5 deg]
if abs(c_p) < singular_tol:
print('WARNING (omega2rates): Operating near pitch = 90 deg singularity. NaN returned. ')
return np.nan
if euler_angles_order == 'roll_pitch_yaw':
R = np.array(
[[ 1, s_r*s_p/c_p, c_r*s_p/c_p],
[ 0, c_r , -s_r ],
[ 0, s_r/c_p , c_r/c_p ]], dtype=float)
elif euler_angles_order == 'yaw_pitch_roll':
R = np.array(
[[ 0, s_r/c_p , c_r/c_p ],
[ 0, c_r , -s_r ],
[ 1, s_r*s_p/c_p, c_r*s_p/c_p]], dtype=float)
if output_type == 'ndarray':
pass
elif output_type=='matrix':
R = np.matrix(R)
else:
print("WARNING (omega2rates): Unrecognized 'output_type' requested.")
print("NaN is returned.")
return np.nan
return R
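# Example (hedged sketch; p, q, r are placeholder body-frame rates in rad/s):
#   R = omega2rates(pitch, roll)
#   roll_rate, pitch_rate, yaw_rate = R.dot(np.array([p, q, r]))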
def angle2quat(rotAngle1,rotAngle2,rotAngle3,
input_unit='rad',rotation_sequence='ZYX'):
"""
Convert a sequence of rotation angles to an equivalent unit quaternion
This function can take inputs in either degree or radians, and can also
batch process a series of rotations (e.g., time series of Euler angles).
By default this function assumes aerospace rotation sequence but can be
changed using the ``rotation_sequence`` keyword argument.
Parameters
----------
rotAngle1, rotAngle2, rotAngle3 : {(N,), (N,1), or (1,N)}
They are a sequence of angles about successive axes described by rotation_sequence.
input_unit : {'rad', 'deg'}, optional
Rotation angles. Default is 'rad'.
rotation_sequence : {'ZYX'}, optional
Rotation sequences. Default is 'ZYX'.
Returns
-------
    q0 : {(N,)} array like scalar component of the quaternion
qvec : {(N,3)} array like vector component of the quaternion
Notes
-----
Convert rotation angles to unit quaternion that transforms a vector in F1 to
F2 according to
:math:`v_q^{F2} = q^{-1} \otimes v_q^{F1} \otimes q`
    where :math:`\otimes` indicates the quaternion multiplication and :math:`v_q^F`
    is a pure quaternion representation of the vector :math:`v_q^F`. The scalar
    component of :math:`v_q^F` is zero.
For aerospace sequence ('ZYX'): rotAngle1 = psi, rotAngle2 = the,
and rotAngle3 = phi
Examples
--------
>>> import numpy as np
>>> from navpy import angle2quat
>>> psi = 0
>>> theta = np.pi/4.0
>>> phi = np.pi/3.0
>>> q0, qvec = angle2quat(psi,theta,phi)
>>> q0
0.80010314519126557
>>> qvec
array([ 0.46193977, 0.33141357, -0.19134172])
>>> psi = [10, 20, 30]
>>> theta = [30, 40, 50]
>>> phi = [0, 5, 10]
>>> q0, qvec = angle2quat(psi,theta,phi,input_unit = 'deg')
>>> q0
array([ 0.96225019, 0.92712639, 0.88162808])
>>> qvec
array([[-0.02255757, 0.25783416, 0.08418598],
[-0.01896854, 0.34362114, 0.14832854],
[-0.03266701, 0.4271086 , 0.19809857]])
"""
# INPUT CHECK
rotAngle1,N1 = _input_check_Nx1(rotAngle1)
rotAngle2,N2 = _input_check_Nx1(rotAngle2)
rotAngle3,N3 = _input_check_Nx1(rotAngle3)
if( (N1!=N2) | (N1!=N3) | (N2!=N3) ):
raise ValueError('Inputs are not of same dimensions')
q0 = np.zeros(N1)
qvec = np.zeros((N1,3))
if(input_unit=='deg'):
rotAngle1 = np.deg2rad(rotAngle1)
rotAngle2 = np.deg2rad(rotAngle2)
rotAngle3 = np.deg2rad(rotAngle3)
rotAngle1 /= 2.0
rotAngle2 /= 2.0
rotAngle3 /= 2.0
    if(rotation_sequence=='ZYX'):
        # Standard ZYX half-angle Euler-to-quaternion formulas; they reproduce the
        # values in the docstring examples above.
        q0[:] = np.cos(rotAngle1)*np.cos(rotAngle2)*np.cos(rotAngle3) + \
            np.sin(rotAngle1)*np.sin(rotAngle2)*np.sin(rotAngle3)
        qvec[:, 0] = np.cos(rotAngle1)*np.cos(rotAngle2)*np.sin(rotAngle3) - \
            np.sin(rotAngle1)*np.sin(rotAngle2)*np.cos(rotAngle3)
        qvec[:, 1] = np.cos(rotAngle1)*np.sin(rotAngle2)*np.cos(rotAngle3) + \
            np.sin(rotAngle1)*np.cos(rotAngle2)*np.sin(rotAngle3)
        qvec[:, 2] = np.sin(rotAngle1)*np.cos(rotAngle2)*np.cos(rotAngle3) - \
            np.cos(rotAngle1)*np.sin(rotAngle2)*np.sin(rotAngle3)
    else:
        raise NotImplementedError('Rotation sequences other than ZYX are not currently implemented')
    if(N1 == 1):
        q0 = q0[0]
        qvec = qvec.reshape(3)
    return q0, qvec
import sys
from typing import Any
import numpy as np
class Index:
def __index__(self) -> int:
return 0
class SubClass(np.ndarray):
pass
def func(i: int, j: int, **kwargs: Any) -> SubClass:
return B
i8 = np.int64(1)
A = np.array([1])
B = A.view(SubClass).copy()
B_stack = np.array([[1], [1]]).view(SubClass)
C = [1]
if sys.version_info >= (3, 8):
np.ndarray(Index())
np.ndarray([Index()])
np.array(1, dtype=float)
np.array(1, copy=False)
np.array(1, order='F')
np.array(1, order=None)
np.array(1, subok=True)
np.array(1, ndmin=3)
np.array(1, str, copy=True, order='C', subok=False, ndmin=2)
np.asarray(A)
np.asarray(B)
np.asarray(C)
np.asanyarray(A)
np.asanyarray(B)
np.asanyarray(B, dtype=int)
np.asanyarray(C)
np.ascontiguousarray(A)
np.ascontiguousarray(B)
np.ascontiguousarray(C)
np.asfortranarray(A)
np.asfortranarray(B)
np.asfortranarray(C)
np.require(A)
np.require(B)
np.require(B, dtype=int)
np.require(B, requirements=None)
np.require(B, requirements="E")
np.require(B, requirements=["ENSUREARRAY"])
np.require(B, requirements={"F", "E"})
np.require(B, requirements=["C", "OWNDATA"])
np.require(B, requirements="W")
np.require(B, requirements="A")
np.require(C)
np.linspace(0, 2)
np.linspace(0.5, [0, 1, 2])
np.linspace([0, 1, 2], 3)
np.linspace(0j, 2)
np.linspace(0, 2, num=10)
np.linspace(0, 2, endpoint=True)
np.linspace(0, 2, retstep=True)
np.linspace(0j, 2j, retstep=True)
np.linspace(0, 2, dtype=bool)
np.linspace([0, 1], [2, 3], axis=Index())
np.logspace(0, 2, base=2)
np.logspace(0, 2, base=[1j, 2j], num=2)
np.geomspace(1, 2)
np.zeros_like(A)
np.zeros_like(C)
np.zeros_like(B)
np.zeros_like(B, dtype=np.int64)
np.ones_like(A)
np.ones_like(C)
np.ones_like(B)
np.ones_like(B, dtype=np.int64)
np.empty_like(A)
np.empty_like(C)
np.empty_like(B)
np.empty_like(B, dtype=np.int64)
np.full_like(A, i8)
np.full_like(C, i8)
np.full_like(B, i8)
np.full_like(B, i8, dtype=np.int64)
np.ones(1)
np.ones([1, 1, 1])
np.full(1, i8)
np.full([1, 1, 1], i8)
np.indices([1, 2, 3])
np.indices([1, 2, 3], sparse=True)
np.fromfunction(func, (3, 5))
np.identity(10)
np.atleast_1d(C)
np.atleast_1d(A)
np.atleast_1d(C, C)
np.atleast_1d(C, A)
np.atleast_1d(A, A)
np.atleast_2d(C)
np.atleast_3d(C)
np.vstack([C, C])
np.vstack([C, A])
np.vstack([A, A])
np.hstack([C, C])
np.stack([C, C])
np.stack([C, C], axis=0)
import os
import keras
import keras.backend as backend
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from keras.callbacks import CSVLogger, History
from keras.layers import Input, Dense, Dropout, BatchNormalization
from keras.models import Model
from keras.wrappers.scikit_learn import KerasClassifier
from sklearn import svm
from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier
from sklearn.metrics import accuracy_score
from sklearn.metrics import roc_curve, auc
from sklearn.model_selection import KFold, StratifiedKFold
from sklearn.model_selection import train_test_split
from sklearn.multiclass import OneVsRestClassifier
from sklearn.preprocessing import normalize, LabelEncoder, label_binarize
"""
Created by <NAME> on 8/1/18.
Email : <EMAIL> or <EMAIL>
Website: http://ce.sharif.edu/~naghipourfar
Github: https://github.com/naghipourfar
Skype: mn7697np
"""
n_epochs = 300
batch_size = 32
def create_regressor(n_features, layers, n_outputs, optimizer=None):
input_layer = Input(shape=(n_features,))
dense = Dense(layers[0], activation='relu', name="dense_0")(input_layer)
dense = BatchNormalization()(dense)
dense = Dropout(0.5)(dense)
for i, layer in enumerate(layers[1:]):
dense = Dense(layer, activation='relu', name="dense_{0}".format(i + 1))(dense)
dense = BatchNormalization()(dense)
dense = Dropout(0.5)(dense)
dense = Dense(n_outputs, activation='sigmoid', name="output")(dense)
model = Model(inputs=input_layer, outputs=dense)
if optimizer is None:
optimizer = keras.optimizers.SGD(lr=0.01, momentum=0.9, decay=1e-6, nesterov=True)
model.compile(optimizer=optimizer, loss=["mse"], metrics=["mae"])
return model
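# Example (hypothetical usage; feature count and layer sizes are placeholders):
#   reg = create_regressor(n_features=5000, layers=[1024, 256, 64, 4], n_outputs=1)
#   reg.fit(x_train, y_train, batch_size=batch_size, epochs=n_epochs)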
def random_classifier(drug_name=None, prediction_class=None):
accuracies = {}
data_directory = '../Data/CCLE/Classification/FS/'
if drug_name:
compounds = [drug_name + ".csv"]
else:
compounds = os.listdir(data_directory)
print("All Compounds:")
print(compounds)
for compound in compounds:
if compound.endswith(".csv") and not (
compound.__contains__("PLX4720") or compound.__contains__("Panobinostat")):
name = compound.split(".")[0]
print("*" * 50)
print(compound)
print("Loading Data...")
x_data, y_data = load_data(data_path=data_directory + compound, feature_selection=True)
print("Data has been Loaded!")
x_data = normalize_data(x_data)
print("Data has been normalized!")
n_samples = x_data.shape[0]
if prediction_class is None:
                y_pred = np.random.randint(low=0, high=2, size=(n_samples, 1))  # randint's high is exclusive; replaces deprecated random_integers
else:
if prediction_class == 1:
y_pred = np.ones(shape=[n_samples, 1])
else:
y_pred = np.zeros(shape=[n_samples, 1])
accuracies[name] = accuracy_score(y_data, y_pred)
print("%s's Accuracy\t:\t%.4f%%" % (compound.split(".")[0], 100 * accuracy_score(y_data, y_pred)))
log_path = "../Results/Classification/ML/"
log_name = "Random" + "-" + str(prediction_class) + ".csv" if prediction_class is not None else "Random.csv"
accuracies = pd.DataFrame(accuracies, index=[0])
accuracies.to_csv(log_path + log_name)
def create_SAE(n_features=50000, n_code=12):
input_layer = Input(shape=(n_features,))
dense = Dense(2048, activation='relu', name="dense_0")(input_layer)
dense = BatchNormalization()(dense)
dense = Dropout(0.2)(dense)
dense = Dense(1024, activation='relu', name="dense_1")(dense)
dense = BatchNormalization()(dense)
dense = Dropout(0.5)(dense)
dense = Dense(256, activation='relu', name="dense_2")(dense)
dense = BatchNormalization()(dense)
dense = Dropout(0.5)(dense)
dense = Dense(64, activation='relu', name="dense_3")(dense)
dense = BatchNormalization()(dense)
dense = Dropout(0.5)(dense)
encoded = Dense(n_code, activation='relu', name="encoded")(dense)
dense = Dense(512, activation="relu", name="dense_4")(encoded)
dense = BatchNormalization()(dense)
dense = Dropout(0.5)(dense)
decoded = Dense(n_features, activation='sigmoid', name="decoded")(dense)
cl_output = Dense(2, activation="softmax", name="classifier")(encoded)
model = Model(inputs=input_layer, outputs=[decoded, cl_output])
model.summary()
lambda_value = 9.5581e-3
def contractive_loss(y_pred, y_true):
mse = backend.mean(backend.square(y_true - y_pred), axis=1)
        w = backend.variable(value=model.get_layer('encoded').get_weights()[0])  # N x N_hidden
        w = backend.transpose(w)  # N_hidden x N
        h = model.get_layer('encoded').output
        dh = h * (1 - h)  # N_batch x N_hidden
        # N_batch x N_hidden * N_hidden x 1 = N_batch x 1
        contractive = lambda_value * backend.sum(dh ** 2 * backend.sum(w ** 2, axis=1), axis=1)
return mse + contractive
reconstructor_loss = contractive_loss
classifier_loss = "categorical_crossentropy"
optimizer = keras.optimizers.Nadam(lr=0.005, beta_1=0.95)
model.compile(optimizer=optimizer, loss=[reconstructor_loss, classifier_loss],
loss_weights=[0.005, 0.005],
metrics={"decoded": ["mae", "mse", "mape"], "classifier": "acc"})
return model
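# Example (hypothetical usage): the SAE has two outputs, so fit() takes the input
# reconstruction target and the one-hot class labels together:
#   sae = create_SAE(n_features=x_train.shape[1], n_code=12)
#   sae.fit(x_train, [x_train, y_train], epochs=n_epochs, batch_size=batch_size)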
def create_classifier(n_features=51, layers=None, n_outputs=1):
if layers is None:
layers = [1024, 256, 64, 16, 4]
input_layer = Input(shape=(n_features,))
dense = Dense(layers[0], activation='relu', name="dense_0")(input_layer)
dense = BatchNormalization()(dense)
dense = Dropout(0.5)(dense)
for i, layer in enumerate(layers[1:]):
dense = Dense(layer, activation='relu', name="dense_{0}".format(i + 1))(dense)
dense = BatchNormalization()(dense)
dense = Dropout(0.5)(dense)
    optimizer = keras.optimizers.Adamax()
if n_outputs > 1:
dense = Dense(n_outputs, activation='softmax', name="output")(dense)
loss = keras.losses.categorical_crossentropy
else:
dense = Dense(n_outputs, activation='sigmoid', name="output")(dense)
loss = keras.losses.binary_crossentropy
model = Model(inputs=input_layer, outputs=dense)
model.compile(optimizer=optimizer, loss=loss, metrics=["accuracy"])
return model
def load_data(data_path="../Data/CCLE/drug_response.csv", feature_selection=False):
if data_path.__contains__("/FS/"):
data = pd.read_csv(data_path)
else:
data = pd.read_csv(data_path, index_col="Cell Line")
if data_path.__contains__("Regression"):
y_data = data['IC50 (uM)']
x_data = data.drop(['IC50 (uM)'], axis=1)
else:
y_data = data['class']
x_data = data.drop(['class'], axis=1)
label_encoder = LabelEncoder()
y_data = label_encoder.fit_transform(y_data)
y_data = np.reshape(y_data, (-1, 1))
y_data = keras.utils.to_categorical(y_data, 2)
if feature_selection and not data_path.__contains__("/FS/"):
feature_names = list(pd.read_csv("../Data/BestFeatures.csv", header=None).loc[0, :])
x_data = data[feature_names]
return np.array(x_data), np.array(y_data)
def produce_classification_data(compounds):
for compound in compounds:
name = compound.split(".")[0]
print(compound, end="\t")
data = pd.read_csv("../Data/CCLE/Regression/" + name + "_preprocessed.csv")
data['class'] = np.nan
        data.loc[data['IC50 (uM)'] >= 8, 'class'] = 1  # resistant
        data.loc[data['IC50 (uM)'] < 8, 'class'] = 0  # sensitive
data.dropna(how='any', axis=0, inplace=True)
data.drop(["IC50 (uM)"], axis=1, inplace=True)
data.to_csv("../Data/CCLE/Classification/" + name + ".csv", index_label="Cell Line")
print("Finished!")
def normalize_data(x_data, y_data=None):
x_data = pd.DataFrame(normalize(np.array(x_data), axis=0, norm='max')).values
if y_data is not None:
y_data = pd.DataFrame(np.reshape(np.array(y_data), (-1, 1)))
y_data = pd.DataFrame(normalize(np.array(y_data), axis=0, norm='max'))
return np.array(x_data), np.array(y_data)
return np.array(x_data)
def regressor(drug_name=None):
data_directory = '../Data/CCLE/Regression/'
if drug_name:
compounds = [drug_name + ".csv"]
else:
compounds = os.listdir(data_directory)
print("All Compounds:")
print(compounds)
for compound in compounds:
if compound.endswith("_preprocessed.csv"):
print("*" * 50)
print(compound)
print("Loading Data...")
x_data, y_data = load_data(data_path=data_directory + compound, feature_selection=True)
print("Data has been Loaded!")
x_data, y_data = normalize_data(x_data, y_data)
print("Data has been normalized!")
x_train, x_test, y_train, y_test = train_test_split(x_data, y_data, test_size=0.15, shuffle=True)
print("x_train shape\t:\t" + str(x_train.shape))
print("y_train shape\t:\t" + str(y_train.shape))
print("x_test shape\t:\t" + str(x_test.shape))
print("y_test shape\t:\t" + str(y_test.shape))
# for optimizer in optimizers:
model = create_regressor(x_train.shape[1], [1024, 256, 64, 4], 1, None)
logger_path = '../Results/Regression/' + compound.split(".")[0] + ".log"
csv_logger = CSVLogger(logger_path)
model.summary()
model.fit(x=x_train,
y=y_train,
batch_size=batch_size,
epochs=n_epochs,
validation_data=(x_test, y_test),
verbose=2,
shuffle=True,
callbacks=[csv_logger])
result = pd.read_csv(logger_path, delimiter=',')
plt.figure(figsize=(15, 10))
plt.plot(result['epoch'], result["loss"], label="Training Loss")
plt.plot(result['epoch'], result["val_loss"], label="Validation Loss")
plt.xlabel("Epochs")
plt.ylabel("MSE Loss")
plt.xticks([i for i in range(0, n_epochs + 5, 5)])
plt.yticks(np.arange(0.25, -0.05, -0.05).tolist())
plt.title(compound.split(".")[0])
plt.grid()
plt.savefig("../Results/Regression/images/%s.png" % compound.split(".")[0])
plt.close("all")
model.save("../Results/Regression/%s.h5" % compound.split(".")[0])
def regressor_with_different_optimizers():
data_path = "../Data/CCLE/Regression/ZD-6474_preprocessed.csv"
optimizers = [
keras.optimizers.SGD(lr=0.1, momentum=0.9, decay=1e-6, nesterov=True),
keras.optimizers.SGD(lr=0.01, momentum=0.9, decay=1e-6, nesterov=True),
keras.optimizers.SGD(lr=0.001, momentum=0.9, decay=1e-6, nesterov=True),
keras.optimizers.Adagrad(lr=0.01, decay=1e-6),
keras.optimizers.Adadelta(lr=1.0, rho=0.95, decay=1e-6),
keras.optimizers.Adam(lr=0.001, beta_1=0.9, beta_2=0.99, decay=1e-6),
keras.optimizers.Nadam(lr=0.001, beta_1=0.9, beta_2=0.999)
]
print("Loading Data...")
x_data, y_data = load_data(data_path, feature_selection=True)
print("Data has been Loaded.")
print("Normalizing Data...")
x_data, y_data = normalize_data(x_data, y_data)
print("Data has been normalized.")
x_train, x_test, y_train, y_test = train_test_split(x_data, y_data, test_size=0.3, shuffle=True)
print("x_train shape\t:\t" + str(x_train.shape))
print("y_train shape\t:\t" + str(y_train.shape))
print("x_test shape\t:\t" + str(x_test.shape))
print("y_test shape\t:\t" + str(y_test.shape))
n_features = x_train.shape[1]
layers = [1024, 256, 64, 8]
n_outputs = 1
for idx, optimizer in enumerate(optimizers):
model = create_regressor(n_features, layers, n_outputs, optimizer)
logger_path = "../Results/Optimizers/"
optimizer_name = str(optimizer.__class__).split(".")[-1].split("\'")[0] + "_"
optimizer_name += '_'.join(
["%s_%.4f" % (key, value) for (key, value) in optimizer.get_config().items()])
optimizer_name += '.log'
csv_logger = CSVLogger(logger_path + optimizer_name)
model.summary()
model.fit(x=x_train,
y=y_train,
batch_size=batch_size,
epochs=n_epochs,
validation_data=(x_test, y_test),
verbose=2,
shuffle=True,
callbacks=[csv_logger])
def regressor_with_k_best_features(k=50):
data_directory = '../Data/CCLE/'
compounds = os.listdir(data_directory)
feature_names = list(pd.read_csv("../Data/BestFeatures.csv", header=None).loc[0, :])
for compound in compounds:
print("Loading Data...")
x_data, y_data = load_data(data_path=data_directory + compound)
print("Data has been Loaded!")
x_data = x_data[feature_names]
x_data, y_data = normalize_data(x_data, y_data)
print("Data has been normalized!")
x_train, x_test, y_train, y_test = train_test_split(x_data, y_data, test_size=0.3, shuffle=True)
print("x_train shape\t:\t" + str(x_train.shape))
print("y_train shape\t:\t" + str(y_train.shape))
print("x_test shape\t:\t" + str(x_test.shape))
print("y_test shape\t:\t" + str(y_test.shape))
for k in [50, 40, 30, 20, 10, 5, 4, 3, 2, 1]:
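# NOTE: x_train is not re-subset to the top-k features inside this loop, so
# every iteration currently trains on the same feature matrix; k only changes
# the output directory and log names.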
model = create_regressor(x_train.shape[1], [32, 16, 4], 1)
dir_name = "../Results/Drugs/%s/%dFeaturesSelection" % (compound.split(".")[0], k)
os.makedirs(dir_name, exist_ok=True)
csv_logger = CSVLogger(dir_name + '/best_%s_%d.log' % (compound.split(".")[0], k))
model.fit(x=x_train,
y=y_train,
batch_size=batch_size,
epochs=n_epochs,
validation_data=(x_test, y_test),
verbose=2,
shuffle=True,
callbacks=[csv_logger])
import csv
with open("../Results/Drugs/%s/%s.csv" % (compound.split(".")[0], compound.split(".")[0]), 'a') as file:
writer = csv.writer(file)
# x_test / y_test are already NumPy arrays here (as_matrix() would fail);
# evaluate() may return a scalar or a list depending on the compiled metrics.
loss = list(np.atleast_1d(model.evaluate(x_test, y_test, verbose=0)))
loss.insert(0, k)
writer.writerow(loss)
df = pd.read_csv("../Results/Drugs/%s/%s.csv" % (compound.split(".")[0], compound.split(".")[0]), header=None)
plt.figure()
plt.plot(df[0], df[1], "-o")
plt.xlabel("# of Features")
plt.ylabel("Mean Absolute Error")
plt.title(compound.split(".")[0])
plt.savefig("../Results/Drugs/%s/%s.png" % (compound.split(".")[0], compound.split(".")[0]))
def classifier(drug_name=None):
data_directory = '../Data/CCLE/Classification/FS/'
if drug_name:
compounds = [drug_name + ".csv"]
else:
compounds = os.listdir(data_directory)
print("All Compounds:")
print(compounds)
for compound in compounds:
if compound.endswith(".csv"):
print("*" * 50)
print(compound)
print("Loading Data...")
x_data, y_data = load_data(data_path=data_directory + compound, feature_selection=True)
print("Data has been Loaded!")
x_data = normalize_data(x_data)
print("Data has been normalized!")
# x_train, x_test, y_train, y_test = train_test_split(x_data, y_data, test_size=0.05, shuffle=True)
# print("x_train shape\t:\t" + str(x_train.shape))
# print("y_train shape\t:\t" + str(y_train.shape))
# print("x_test shape\t:\t" + str(x_test.shape))
# print("y_test shape\t:\t" + str(y_test.shape))
logger_path = "../Results/Classification/CV/"
# plt.figure(figsize=(15, 10))
# plt.title(compound.split(".")[0])
model = None
for k in range(10, 15, 5):
model = KerasClassifier(build_fn=create_classifier,
epochs=500,
batch_size=64,
verbose=2,
)
# y_data = encode_labels(y_data, 2)
x_train, x_test, y_train, y_test = train_test_split(x_data, y_data, test_size=0.25)
model.fit(x_train, y_train, validation_data=(x_test, y_test))
print(x_test.shape)
print(y_test.shape)
y_pred = model.predict(x_test)
y_pred = np.reshape(y_pred, (-1, 1))
#!/usr/bin/env python
# Copyright 2014-2018 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Authors: <NAME>
# <NAME> <<EMAIL>>
#
#import numpy as np
from pyscf import lib
from pyscf.pbc.lib import kpts_helper
import numpy
#einsum = np.einsum
einsum = lib.einsum
#################################################
# FOLLOWING: #
# <NAME> and <NAME>, #
# J. Chem. Phys. 103, 3561 (1995) Table III #
#################################################
### Section (a)
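# make_tau builds the tau intermediate of Stanton & Gauss Table III. When
# t1p == t1 it reduces to tau(ij,ab) = t2(ij,ab) + fac*[t1(i,a)t1(j,b) - t1(i,b)t1(j,a)],
# so fac=1.0 gives tau and fac=0.5 gives tau-tilde. The k-point equality checks
# below keep only the t1*t1 products that conserve crystal momentum.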
def make_tau(cc, t2, t1, t1p, kconserv, fac=1., out=None):
nkpts, nocc, nvir = t1.shape
tau1 = numpy.ndarray(t2.shape, dtype=t2.dtype, buffer=out)
tau1[:] = t2
for ki in range(nkpts):
for ka in range(nkpts):
for kj in range(nkpts):
kb = kconserv[ki,ka,kj]
tmp = numpy.zeros((nocc,nocc,nvir,nvir),dtype=t2.dtype)
if ki == ka and kj == kb:
tmp += einsum('ia,jb->ijab',t1[ki],t1p[kj])
if ki == kb and kj == ka:
tmp -= einsum('ib,ja->ijab',t1[ki],t1p[kj])
if kj == ka and ki == kb:
tmp -= einsum('ja,ib->ijab',t1[kj],t1p[ki])
if kj == kb and ki == ka:
tmp += einsum('jb,ia->ijab',t1[kj],t1p[ki])
tau1[ki,kj,ka] += fac*0.5*tmp
return tau1
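# Throughout this file, kconserv[k1, k2, k3] returns the k-point k4 satisfying
# k1 - k2 + k3 - k4 = G (a reciprocal lattice vector), i.e. it enforces crystal
# momentum conservation; e.g. kb = kconserv[ki, ka, kj] pairs t2[ki, kj, ka]
# with its only momentum-allowed fourth index kb.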
def cc_Fvv(cc,t1,t2,eris,kconserv):
nkpts, nocc, nvir = t1.shape
fov = eris.fock[:,:nocc,nocc:].copy()
fvv = eris.fock[:,nocc:,nocc:].copy()
# <o(k1)v(k2)||v(k3)v(k4)> = <v(k2)o(k1)||v(k4)v(k3)> = -<v(k2)o(k1)||v(k3)v(k4)>
eris_vovv = -eris.ovvv.transpose(1,0,2,4,3,5,6)
tau_tilde = make_tau(cc,t2,t1,t1,kconserv,fac=0.5)
Fae = numpy.zeros(fvv.shape, t1.dtype)
#kconserv = kpts_helper.get_kconserv(cc._scf.cell, cc.kpts)
for ka in range(nkpts):
Fae[ka] += fvv[ka]
Fae[ka] += -0.5*einsum('me,ma->ae',fov[ka],t1[ka])
for km in range(nkpts):
Fae[ka] += einsum('mf,amef->ae',t1[km],eris_vovv[ka,km,ka])
for kn in range(nkpts):
#kb = kconserv[km,ka,kn]
Fae[ka] += -0.5*einsum('mnaf,mnef->ae',tau_tilde[km,kn,ka],
eris.oovv[km,kn,ka])
return Fae
def cc_Foo(cc,t1,t2,eris,kconserv):
nkpts, nocc, nvir = t1.shape
fov = eris.fock[:,:nocc,nocc:].copy()
foo = eris.fock[:,:nocc,:nocc].copy()
tau_tilde = make_tau(cc,t2,t1,t1,kconserv,fac=0.5)
Fmi = numpy.zeros(foo.shape, t1.dtype)
for km in range(nkpts):
Fmi[km] += foo[km]
Fmi[km] += 0.5*einsum('me,ie->mi',fov[km],t1[km])
for kn in range(nkpts):
Fmi[km] += einsum('ne,mnie->mi',t1[kn],eris.ooov[km,kn,km])
for ke in range(nkpts):
Fmi[km] += 0.5*einsum('inef,mnef->mi',tau_tilde[km,kn,ke],
eris.oovv[km,kn,ke])
return Fmi
def cc_Fov(cc,t1,t2,eris,kconserv):
nkpts, nocc, nvir = t1.shape
fov = eris.fock[:,:nocc,nocc:].copy()
Fme = numpy.zeros(fov.shape, t1.dtype)
for km in range(nkpts):
Fme[km] += fov[km]
for kf in range(nkpts):
kn = kf
Fme[km] -= einsum('nf,mnfe->me',t1[kf],eris.oovv[km,kn,kf])
return Fme
def cc_Woooo(cc,t1,t2,eris,kconserv):
nkpts, nocc, nvir = t1.shape
tau = make_tau(cc,t2,t1,t1,kconserv)
Wmnij = eris.oooo.copy()
for km in range(nkpts):
for kn in range(nkpts):
# Swapping i and j alone is not enough: their k-points must be swapped too,
# so that P(ij) permutes both the orbital and the k-point labels.
# t1[ k_j, j, e ] * v[ k_m, k_n, k_i, m, n, i, e ] -> tmp[ k_i, k_j, m, n, i, j ]
# Here, x = k_j and y = k_i
tmp = einsum('xje,ymnie->yxmnij',t1,eris.ooov[km,kn])
tmp = tmp - tmp.transpose(1,0,2,3,5,4)
ki = numpy.arange(nkpts)
kj = kconserv[km,ki,kn]
kij = (ki,kj)
Wmnij[km,kn,:] += 0.25*einsum('yxijef,xmnef->ymnij',tau[kij],eris.oovv[km,kn])
for ki in range(nkpts):
kj = kconserv[km,ki,kn]
Wmnij[km,kn,ki] += tmp[ki,kj]
return Wmnij
def cc_Wvvvv(cc,t1,t2,eris,kconserv):
nkpts, nocc, nvir = t1.shape
eris_vovv = - eris.ovvv.transpose(1,0,2,4,3,5,6)
tau = make_tau(cc,t2,t1,t1,kconserv)
Wabef = eris.vvvv.copy()
for ka in range(nkpts):
for kb in range(nkpts):
km = numpy.arange(nkpts).tolist()
kn = kconserv[ka,km,kb].tolist()
kmn = tuple([km,kn])
Wabef[ka,kb] += 0.25*einsum('xmnab,xymnef->yabef',tau.transpose(2,0,1,3,4,5,6)[ka][kmn],eris.oovv[kmn])
for ke in range(nkpts):
km = kb
tmp = einsum('mb,amef->abef',t1[kb],eris_vovv[ka,km,ke])
km = ka
tmp -= einsum('ma,bmef->abef',t1[ka],eris_vovv[kb,km,ke])
Wabef[ka,kb,ke] += -tmp
# km + kn - ka = kb
# => kn = ka - km + kb
return Wabef
def cc_Wovvo(cc,t1,t2,eris,kconserv):
nkpts, nocc, nvir = t1.shape
eris_ovvo = numpy.zeros(shape=(nkpts,nkpts,nkpts,nocc,nvir,nvir,nocc),dtype=t2.dtype)
eris_oovo = numpy.zeros(shape=(nkpts,nkpts,nkpts,nocc,nocc,nvir,nocc),dtype=t2.dtype)
for km in range(nkpts):
for kb in range(nkpts):
for ke in range(nkpts):
kj = kconserv[km,ke,kb]
# <mb||je> -> -<mb||ej>
eris_ovvo[km,kb,ke] = -eris.ovov[km,kb,kj].transpose(0,1,3,2)
# <mn||je> -> -<mn||ej>
# let kb = kn as a dummy variable
eris_oovo[km,kb,ke] = -eris.ooov[km,kb,kj].transpose(0,1,3,2)
Wmbej = eris_ovvo.copy()
for km in range(nkpts):
for kb in range(nkpts):
for ke in range(nkpts):
kj = kconserv[km,ke,kb]
Wmbej[km,kb,ke] += einsum('jf,mbef->mbej',t1[kj,:,:],eris.ovvv[km,kb,ke])
Wmbej[km,kb,ke] += -einsum('nb,mnej->mbej',t1[kb,:,:],eris_oovo[km,kb,ke])
for kn in range(nkpts):
kf = kconserv[km,ke,kn]
Wmbej[km,kb,ke] += -0.5*einsum('jnfb,mnef->mbej',t2[kj,kn,kf],
eris.oovv[km,kn,ke])
if kn == kb and kf == kj:
Wmbej[km,kb,ke] += -einsum('jf,nb,mnef->mbej',t1[kj],t1[kn],
eris.oovv[km,kn,ke])
return Wmbej
def cc_Wovvo_jk(cc, t1, t2, eris, kconserv):
nkpts, nocc, nvir = t1.shape
# Unlike cc_Wovvo above, this variant reads eris.ovvo / eris.oovo directly,
# so no transposed copies need to be built here.
Wmbej = eris.ovvo.copy()
for km in range(nkpts):
for kb in range(nkpts):
for ke in range(nkpts):
kj = kconserv[km,ke,kb]
Wmbej[km,kb,ke] += einsum('jf,mbef->mbej',t1[kj,:,:],eris.ovvv[km,kb,ke])
Wmbej[km,kb,ke] += -einsum('nb,mnej->mbej',t1[kb,:,:],eris.oovo[km,kb,ke])
for kn in range(nkpts):
kf = kconserv[km,ke,kn]
Wmbej[km,kb,ke] += -0.5*einsum('jnfb,mnef->mbej',t2[kj,kn,kf],
eris.oovv[km,kn,ke])
if kn == kb and kf == kj:
Wmbej[km,kb,ke] += -einsum('jf,nb,mnef->mbej',t1[kj],t1[kn],
eris.oovv[km,kn,ke])
return Wmbej
### Section (b)
def Fvv(cc,t1,t2,eris,kconserv):
nkpts, nocc, nvir = t1.shape
ccFov = cc_Fov(cc,t1,t2,eris,kconserv)
Fae = cc_Fvv(cc,t1,t2,eris,kconserv)
for km in range(nkpts):
Fae[km] -= 0.5*einsum('ma,me->ae', t1[km], ccFov[km])
return Fae
def Foo(cc,t1,t2,eris,kconserv):
nkpts, nocc, nvir = t1.shape
ccFov = cc_Fov(cc,t1,t2,eris,kconserv)
Fmi = cc_Foo(cc,t1,t2,eris,kconserv)
for km in range(nkpts):
Fmi[km] += 0.5*einsum('ie,me->mi',t1[km],ccFov[km])
return Fmi
def Fov(cc,t1,t2,eris,kconserv):
nkpts, nocc, nvir = t1.shape
Fme = cc_Fov(cc,t1,t2,eris,kconserv)
return Fme
def Woooo(cc,t1,t2,eris,kconserv):
nkpts, nocc, nvir = t1.shape
tau = make_tau(cc,t2,t1,t1,kconserv)
Wmnij = cc_Woooo(cc,t1,t2,eris,kconserv)
for km in range(nkpts):
for kn in range(nkpts):
for ki in range(nkpts):
kj = kconserv[km ,ki, kn]
Wmnij[km, kn, ki] += 0.25*einsum('xijef,xmnef->mnij',tau[ki, kj, :],
eris.oovv[km, kn, :])
return Wmnij
def Wvvvv(cc,t1,t2,eris,kconserv):
nkpts, nocc, nvir = t1.shape
tau = make_tau(cc,t2,t1,t1,kconserv)
Wabef = cc_Wvvvv(cc,t1,t2,eris,kconserv)
for ka, kb, ke in kpts_helper.loop_kkk(nkpts):
kf = kconserv[ka, ke, kb]
for km in range(nkpts):
kn = kconserv[ka, km, kb]
Wabef[ka, kb, ke] += 0.25*einsum('mnab,mnef->abef',tau[km, kn, ka],
eris.oovv[km, kn, ke])
return Wabef
def Wovvo(cc,t1,t2,eris,kconserv):
nkpts, nocc, nvir = t1.shape
Wmbej = cc_Wovvo(cc,t1,t2,eris,kconserv)
for km, kb, ke in kpts_helper.loop_kkk(nkpts):
kj = kconserv[km, ke, kb]
for kn in range(nkpts):
kf = kconserv[km, ke, kn]
Wmbej[km, kb, ke] -= 0.5*einsum('jnfb,mnef->mbej',t2[kj, kn, kf],
eris.oovv[km, kn, ke])
return Wmbej
# Indices in the following can be safely permuted.
def Wooov(cc,t1,t2,eris,kconserv):
nkpts, nocc, nvir = t1.shape
Wmnie = eris.ooov.copy()
for km, kn, ki in kpts_helper.loop_kkk(nkpts):
kf = ki
Wmnie[km, kn, ki] += einsum('if,mnfe->mnie',t1[ki], eris.oovv[km, kn, kf])
return Wmnie
def Wvovv(cc,t1,t2,eris,kconserv):
nkpts, nocc, nvir = t1.shape
Wamef = numpy.empty((nkpts, nkpts, nkpts, nvir, nocc, nvir, nvir), dtype=eris.ovvv.dtype)
for ka, km, ke in kpts_helper.loop_kkk(nkpts):
kn = ka
Wamef[ka, km, ke] = -eris.ovvv[km, ka, ke].transpose(1, 0, 2, 3)
Wamef[ka, km, ke] -= einsum('na,nmef->amef',t1[kn],eris.oovv[kn, km, ke])
return Wamef
def Wovoo(cc,t1,t2,eris,kconserv):
nkpts, nocc, nvir = t1.shape
Wmbij = eris.ovoo.copy()
for km, kb, ki in kpts_helper.loop_kkk(nkpts):
kj = kconserv[km, ki, kb]
for kn in range(nkpts):
Wmbij[km, kb, ki] += einsum('mnie,jnbe->mbij', eris.ooov[km, kn, ki], t2[kj, kn, kb])
Wmbij[km, kb, ki] += einsum('ie,mbej->mbij', t1[ki], -eris.ovov[km, kb, kj].transpose(0, 1, 3, 2))
for kf in range(nkpts):
kn = kconserv[kb, kj, kf]
Wmbij[km, kb, ki] -= einsum('ie,njbf,mnef->mbij', t1[ki], t2[kn, kj, kb], eris.oovv[km, kn, ki])
# P(ij)
for kn in range(nkpts):
Wmbij[km, kb, ki] -= einsum('mnje,inbe->mbij', eris.ooov[km, kn, kj], t2[ki, kn, kb])
Wmbij[km, kb, ki] -= einsum('je,mbei->mbij', t1[kj], -eris.ovov[km, kb, ki].transpose(0, 1, 3, 2))
for kf in range(nkpts):
kn = kconserv[kb, ki, kf]
Wmbij[km, kb, ki] += einsum('je,nibf,mnef->mbij', t1[kj], t2[kn, ki, kb], eris.oovv[km, kn, kj])
FFov = Fov(cc,t1,t2,eris,kconserv)
WWoooo = Woooo(cc,t1,t2,eris,kconserv)
tau = make_tau(cc,t2,t1,t1,kconserv)
for km, kb, ki in kpts_helper.loop_kkk(nkpts):
kj = kconserv[km, ki, kb]
Wmbij[km, kb, ki] -= einsum('me,ijbe->mbij', FFov[km], t2[ki, kj, kb])
Wmbij[km, kb, ki] -= einsum('nb,mnij->mbij', t1[kb], WWoooo[km, kb, ki])
for km, kb, ki in kpts_helper.loop_kkk(nkpts):
kj = kconserv[km, ki, kb]
Wmbij[km, kb, ki] += 0.5 * einsum('xmbef,xijef->mbij', eris.ovvv[km, kb, :], tau[ki, kj, :])
return Wmbij
def Wvvvo(cc,t1,t2,eris,kconserv,WWvvvv=None):
nkpts, nocc, nvir = t1.shape
FFov = Fov(cc,t1,t2,eris,kconserv)
if WWvvvv is None:
WWvvvv = Wvvvv(cc,t1,t2,eris,kconserv)
eris_ovvo = numpy.zeros(shape=(nkpts,nkpts,nkpts,nocc,nvir,nvir,nocc),dtype=t2.dtype)
for km in range(nkpts):
for kb in range(nkpts):
for ke in range(nkpts):
kj = kconserv[km,ke,kb]
eris_ovvo[km,kb,ke] = -eris.ovov[km,kb,kj].transpose(0,1,3,2)
tmp1 = numpy.zeros((nkpts, nkpts, nkpts, nvir, nvir, nvir, nocc), dtype=t2.dtype)
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `tf.data.Dataset.map()`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import functools
import threading
import time
import warnings
from absl.testing import parameterized
import numpy as np
from tensorflow.core.framework import attr_value_pb2
from tensorflow.core.protobuf import config_pb2
from tensorflow.python import pywrap_sanitizers
from tensorflow.python.data.kernel_tests import checkpoint_test_base
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.data.ops import options as options_lib
from tensorflow.python.eager import context
from tensorflow.python.framework import combinations
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import function
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import lookup_ops
from tensorflow.python.ops import map_fn
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import script_ops
from tensorflow.python.ops import sparse_ops
from tensorflow.python.ops import string_ops
from tensorflow.python.ops import tensor_array_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.ops.ragged import ragged_concat_ops
from tensorflow.python.ops.ragged import ragged_factory_ops
from tensorflow.python.ops.ragged import ragged_tensor
from tensorflow.python.platform import test
from tensorflow.python.training import checkpoint_management
from tensorflow.python.training.tracking import util as trackable_utils
try:
import attr # pylint:disable=g-import-not-at-top
except ImportError:
attr = None
def _test_combinations_with_mode_v1(mode):
def new_map_fn(dataset, *args, **kwargs):
return dataset.map(*args, **kwargs)
def legacy_map_fn(dataset, *args, **kwargs):
return dataset.map_with_legacy_function(*args, **kwargs)
new_map_combinations = combinations.combine(
tf_api_version=1,
mode=mode,
apply_map=combinations.NamedObject("map_fn", new_map_fn))
legacy_map_combinations = combinations.combine(
tf_api_version=1,
mode=mode,
apply_map=combinations.NamedObject("legacy_map_fn", legacy_map_fn))
return new_map_combinations + legacy_map_combinations
def _test_combinations_with_mode_v2(mode):
def new_map_fn(dataset, *args, **kwargs):
return dataset.map(*args, **kwargs)
return combinations.combine(
tf_api_version=2,
mode=mode,
apply_map=combinations.NamedObject("map_fn", new_map_fn))
def _test_combinations_with_mode(mode):
return _test_combinations_with_mode_v1(
mode) + _test_combinations_with_mode_v2(mode)
def _test_combinations():
return _test_combinations_with_mode("eager") + _test_combinations_with_mode(
"graph")
def _short_circuit_test_cases():
cases = [
("Identity", None, lambda x: x),
("Replicate", None, lambda x: (x, x)),
("Swap", (None, None), lambda x, y: (y, x)),
("Project", (None, None), lambda x, y: x)
]
def reduce_fn(x, y):
name, structure, fn = y
return x + combinations.combine(
structure=structure, fn=combinations.NamedObject(name, fn))
return functools.reduce(reduce_fn, cases, [])
class Foo(object):
"""Dummy class used for invalid return value tests."""
def __init__(self):
pass
class MapTest(test_base.DatasetTestBase, parameterized.TestCase):
def _map_dataset_factory(self, components, apply_map, count):
def _map_fn(x, y, z):
return math_ops.square(x), math_ops.square(y), math_ops.square(z)
dataset = dataset_ops.Dataset.from_tensor_slices(components)
dataset = apply_map(dataset, _map_fn).repeat(count)
self.assertEqual(
[c.shape[1:] for c in components],
[shape for shape in dataset_ops.get_legacy_output_shapes(dataset)])
return dataset
@combinations.generate(_test_combinations())
def testMapDataset(self, apply_map):
"""Test an dataset that maps a TF function across its input elements."""
# The pipeline is TensorSliceDataset -> MapDataset(square_3) ->
# RepeatDataset(count).
components = (np.arange(7),
np.array([[1, 2, 3]]) * np.arange(7)[:, np.newaxis],
np.array(37.0) * np.arange(7))
# Test single-threaded access to the iterator.
get_next = self.getNext(
self._map_dataset_factory(components, apply_map, count=14))
for _ in range(14):
for i in range(7):
result = self.evaluate(get_next())
for component, result_component in zip(components, result):
self.assertAllEqual(component[i]**2, result_component)
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
# TODO(b/117581999): add eager coverage
@combinations.generate(_test_combinations_with_mode("graph"))
def testMapDatasetMultiThreaded(self, apply_map):
# Test multi-threaded access to the same iterator.
components = (np.arange(7),
              np.array([[1, 2, 3]]) * np.arange(7)[:, np.newaxis],
              np.array(37.0) * np.arange(7))
import json
import os
import numpy as np
import time
from copy import deepcopy
from .track import Track
from .geom import Circle
from .geom import Line
from .geom import check_for_intersection_lineseg_lineseg
from .geom import calc_angle_between_unit_vectors
from .lidar import Lidar
class Vehicle(object):
def __init__(self, id: int, track: Track, aLidarFOVFront: float, aLidarFOVL: float, aLidarFOVR: float, task_rate=0.01, auto_reset=True):
"""
Initialise the vehicle object
"""
# save the arguments
self.id = id
self.track = track
self.aLidarFOVFront = aLidarFOVFront * np.pi / 180
self.aLidarFOVL = aLidarFOVL * np.pi / 180
self.aLidarFOVR = aLidarFOVR * np.pi / 180
self.bAutoReset = auto_reset
self.tTask = task_rate
# Get the module path
self.module_path = os.path.dirname(os.path.abspath(__file__))
# load the config file
with open(self.module_path + '/../setup/vehicle_config.json', 'r') as f:
self.config = json.load(f)
# create the car corner point offsets
self.carFLOffset = np.array([self.config['xVehicleLength'] * self.config['rCOGLongR'], -0.5 * self.config['xVehicleWidth']])
self.carFROffset = np.array([self.config['xVehicleLength'] * self.config['rCOGLongR'], 0.5 * self.config['xVehicleWidth']])
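# (Assumed semantics) the corner offsets are expressed in the vehicle body
# frame relative to the COG: +x forward, +y to the right, with rCOGLongR the
# fraction of the vehicle length ahead of the COG.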
import tensorflow as tf
import numpy as np
import time
import scipy.sparse as sp
from sklearn.metrics import roc_auc_score, average_precision_score, roc_curve, precision_recall_curve, auc
from preprocessing import construct_feed_dict
from outputs import viz_train_val_data, viz_roc_pr_curve, max_gmean_thresh
def train_test_model(adj_norm, adj_label, features, adj_orig, FLAGS, crossval_edges, placeholders, opt, model, model_str, model_timestamp, adj, test_edges, test_edges_false):
acc_cv, ap_cv, roc_cv, f_cv, acc_init_cv, ap_init_cv, roc_init_cv, f_init_cv = ([] for i in range(8))
feed_dict = None
adj_pred = None
iterations = 1 + FLAGS.crossvalidation * (len(adj) - 1)
for cv_set in range(iterations):
print("\nCV run " + str(cv_set+1) + " of " + str(iterations) + "...")
# Construct feed dictionary
feed_dict = construct_feed_dict(adj_norm[cv_set], adj_label[cv_set], features, placeholders)
feed_dict.update({placeholders['dropout']: FLAGS.dropout})
# Train model
acc_last, ap_last, roc_last, f_last, acc_init, ap_init, roc_init, f_init, opt_thresh, adj_pred = train_model(adj_orig, FLAGS, [x[cv_set] for x in crossval_edges],
placeholders, opt, model, feed_dict, model_str, model_timestamp)
for x,l in zip([acc_last, ap_last, roc_last, f_last, acc_init, ap_init, roc_init, f_init], [acc_cv, ap_cv, roc_cv, f_cv, acc_init_cv, ap_init_cv, roc_init_cv, f_init_cv]):
l.append(x)
if FLAGS.crossvalidation:
cv_str = str(iterations) + " fold CV "
else:
cv_str = "Validation "
print("\n" + cv_str + "ROC AUC score: " + str(np.round(np.mean(roc_cv), 2)) + " with SD: " + str(np.round(np.std(roc_cv),2)))
print(cv_str + "Average Precision: " + str(np.round(np.mean(ap_cv), 2)) + " with SD: " + str(np.round(np.std(ap_cv),2)))
print(cv_str + "Accuracy: " + str(np.round(np.mean(acc_cv), 2)) + " with SD: " + str(np.round(np.std(acc_cv),2)))
print(cv_str + "F1: " + str(np.round(np.mean(f_cv), 2)) + " with SD: " + str(np.round(np.std(f_cv),2)))
#print('\nTest ROC score: ' + str(np.round(test_roc,2)))
#print('Test AP score: ' + str(np.round(test_ap,2)))
#print('Test accuracy (threshold= ' + str(opt_thresh) + "): " + str(np.round(test_acc,2)))
#print('\nRandom Control ROC score: ' + str(np.round(random_roc,2)))
#print('Random Control AP score: ' + str(np.round(random_ap,2)))
#print('Random Control accuracy: ' + str(np.round(random_acc,2)))
#print('\nAverage Init ROC score: ' + str(np.round(np.mean(roc_init_cv),2)))
#print('Average Init AP score: ' + str(np.round(np.mean(ap_init_cv),2)))
#print('Average Init accuracy: ' + str(np.round(np.mean(acc_init_cv),2)) + '\n')
return np.mean(acc_cv), np.mean(ap_cv), np.mean(roc_cv), np.mean(f_cv), adj_pred
def train_model(adj_orig, FLAGS, edges, placeholders, opt, model, feed_dict, model_str, model_timestamp):
# Initialize session
sess = tf.compat.v1.Session()
sess.run(tf.compat.v1.global_variables_initializer())
loss_train, kl_train, acc_train, ap_train, roc_train, f_train, loss_val, acc_val, ap_val, roc_val, f_val = ([] for i in range(11))
hist_scores = [loss_train, kl_train, acc_train, ap_train, roc_train, f_train, loss_val, acc_val, ap_val, roc_val, f_val]
#initial metrics
train_edges, train_edges_false, val_edges, val_edges_false = edges
adj_pred = predict_adj(feed_dict, sess, model, model_timestamp, placeholders)
_, _, train_loss, train_acc, train_ap, train_roc, _, train_f, opt_thresh = get_scores(adj_pred, adj_orig, train_edges, train_edges_false, model_timestamp)
_, _, val_loss, val_acc, val_ap, val_roc, _, val_f, _ = get_scores(adj_pred, adj_orig, val_edges, val_edges_false, model_timestamp, thresh=opt_thresh)
train_kl = None
scores = [train_loss, train_kl, train_acc, train_ap, train_roc, train_f, val_loss, val_acc, val_ap, val_roc, val_f]
for x, l in zip(scores, hist_scores):
l.append(x)
for epoch in range(FLAGS.epochs):
t = time.time()
# Run single weight update
if model_str == 'gcn_vae':
outs = sess.run([opt.opt_op, opt.cost, opt.accuracy, opt.kl], feed_dict=feed_dict)
else:
outs = sess.run([opt.opt_op, opt.cost, opt.accuracy], feed_dict=feed_dict)
# Compute metrics
adj_pred = predict_adj(feed_dict, sess, model, model_timestamp, placeholders)
ctrl_cost = outs[1]
ctrl_accuracy = outs[2]
if model_str == 'gcn_vae':
train_kl = outs[3]
else:
train_kl = 0
_, total_train_acc, train_loss, train_acc, train_ap, train_roc, train_rp, train_f, opt_thresh = get_scores(adj_pred, adj_orig, train_edges, train_edges_false, model_timestamp)
_, _, val_loss, val_acc, val_ap, val_roc, val_rp, val_f, _ = get_scores(adj_pred, adj_orig, val_edges, val_edges_false, model_timestamp, thresh=opt_thresh)
scores = [train_loss, train_kl, train_acc, train_ap, train_roc, train_f, val_loss, val_acc, val_ap, val_roc, val_f]
for x, l in zip(scores, hist_scores):
l.append(x)
print("Epoch:", '%04d' % (epoch + 1),
#"time=", "{:.5f}".format(time.time() - t),
"train_loss=", "{:.5f}".format(train_loss),
#"train_loss_control=", "{:.5f}".format(ctrl_cost),
#"recon loss=", "{:.5f}".format(train_loss-train_kl), "kl_loss=", "{:.5f}".format(train_kl),
"val_loss=", "{:.5f}".format(val_loss),
#"train_acc_control=", "{:.5f}".format(ctrl_accuracy),
#"total_train_acc=", "{:.5f}".format(total_train_acc),
"train_acc=", "{:.5f}".format(train_acc),
#"train_ap=", "{:.5f}".format(train_ap), "train_roc=", "{:.5f}".format(train_roc),
"val_acc=", "{:.5f}".format(val_acc),
"val_ap=", "{:.5f}".format(val_ap),
"val_rp=", "{:.5f}".format(val_rp),
"val_roc=", "{:.5f}".format(val_roc),
"val_f=", "{:.5f}".format(val_f))
if epoch > FLAGS.early_stopping and loss_val[-1] > np.mean(loss_val[-(FLAGS.early_stopping+1):-1]):
print("\nEarly stopping...")
break
# Plot training & validation metrics
viz_train_val_data(hist_scores, model_str, model_timestamp)
#Get final predicted adj matrix
adj_pred = sigmoid(predict_adj(feed_dict, sess, model, model_timestamp, placeholders))
#Resulting ROC curve
#viz_roc = True
#get_scores(adj_pred, adj_orig, test_edges, test_edges_false, model_timestamp, viz_roc=viz_roc, thresh=opt_thresh)
#best_epoch = roc_val.index(max(roc_val))
#print("Best Epoch (ROC): " + str(best_epoch))
##added to save val edges and val edges false to get scores of the correct val edges
print("Optimization Finished!")
sess.close()
return acc_val[-1], ap_val[-1], roc_val[-1], f_val[-1], acc_val[0], ap_val[0], roc_val[0], f_val[0], opt_thresh, adj_pred
def predict_adj(feed_dict, sess, model, model_timestamp, placeholders, emb=None):
if emb is None:
feed_dict.update({placeholders['dropout']: 0})
emb = sess.run(model.z_mean, feed_dict=feed_dict)
adj_rec = np.dot(emb, emb.T)
return adj_rec
def get_scores(adj_rec, adj_orig, edges_pos, edges_neg, model_timestamp, viz_roc=False, random=False, thresh=None):
if random:
adj_rec = random_adj(adj_rec, (adj_orig.sum()-adj_rec.sum())/2, mode="random_normal")
preds = []
pos = []
for e in edges_pos:
preds.append(sigmoid(adj_rec[e[0], e[1]]))
pos.append(adj_orig[e[0], e[1]])
preds_neg = []
neg = []
for e in edges_neg:
preds_neg.append(sigmoid(adj_rec[e[0], e[1]]))
neg.append(adj_orig[e[0], e[1]])
preds_all = np.hstack([preds, preds_neg])
labels_all = np.hstack([np.ones(len(preds)), np.zeros(len(preds_neg))])
roc_score = roc_auc_score(labels_all, preds_all)
ap_score = average_precision_score(labels_all, preds_all)
precision, recall, _ = precision_recall_curve(labels_all, preds_all)
rp_auc = auc(recall, precision)
# Note: this is the harmonic mean of the *threshold-averaged* precision and
# recall along the PR curve, not the F1 score at a single operating point.
f_score = 2 * (np.mean(precision) * np.mean(recall)) / (np.mean(precision) + np.mean(recall))
if viz_roc:
viz_roc_pr_curve(preds_all, labels_all, model_timestamp)
if thresh is None:
fpr, tpr, thresholds = roc_curve(labels_all, preds_all)
thresh, _, _, _ = max_gmean_thresh(fpr, tpr, thresholds)
#Total accuracy and loss
adj_curr = adj_from_edges(edges_pos, adj_orig.shape)
adj_curr = adj_curr.reshape(1, -1)
adj_rec = adj_rec.reshape(1, -1)
pos_weight = float(adj_orig.shape[0] * adj_orig.shape[0] - (edges_pos.shape[0] * 2)) / (edges_pos.shape[0] * 2)
norm = adj_orig.shape[0] * adj_orig.shape[0] / float((adj_orig.shape[0] * adj_orig.shape[0] - (edges_pos.shape[0] * 2)) * 2)
cost_total = norm * np.mean(weighted_cross_entropy_with_logits(adj_curr, adj_rec, pos_weight))
correct_prediction = (sigmoid(adj_rec) > thresh) == adj_curr
accuracy_total = np.mean(correct_prediction)
#Subset accuracy and loss
test_mask = adj_from_edges(np.vstack([edges_pos, edges_neg]), adj_orig.shape, diag=0)
test_mask = test_mask.reshape(1, -1)
accuracy = np.mean(correct_prediction[test_mask==1])
cost = np.mean(weighted_cross_entropy_with_logits(adj_curr[test_mask==1], adj_rec[test_mask==1], 1))
return cost_total, accuracy_total, cost, accuracy, ap_score, roc_score, rp_auc, f_score, thresh
def weighted_cross_entropy_with_logits(label, pred, pos_weight):
return ((1 - label) * pred + (1 + (pos_weight - 1) * label) * (np.log(1 + np.exp(-abs(pred))) + np.maximum(-pred, 0)))
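# This is a NumPy port of TensorFlow's numerically stable formulation:
# (1 - z)*x + (1 + (w - 1)*z) * (log(1 + exp(-|x|)) + max(-x, 0)).
# Sanity check with w = 1 (illustrative values, not from the original code):
# weighted_cross_entropy_with_logits(np.array([1., 0.]), np.array([2., 2.]), 1.0)
# == np.log(1 + np.exp(-2.)) + np.array([0., 2.])   # elementwise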
def sigmoid(x):
return 1 / (1 + np.exp(-x))
def adj_from_edges(edges, shape, diag=1):
data = np.ones(edges.shape[0])
"""Transformers for numerical data."""
import copy
import sys
import warnings
import numpy as np
import pandas as pd
import scipy
from sklearn.mixture import BayesianGaussianMixture
from rdt.transformers.base import BaseTransformer
from rdt.transformers.null import NullTransformer
EPSILON = np.finfo(np.float32).eps
MAX_DECIMALS = sys.float_info.dig - 1
class FloatFormatter(BaseTransformer):
"""Transformer for numerical data.
This transformer replaces integer values with their float equivalent.
Non null float values are not modified.
Null values are replaced using a ``NullTransformer``.
Args:
missing_value_replacement (object or None):
Indicate what to do with the null values. If an integer or float is given,
replace them with the given value. If the strings ``'mean'`` or ``'mode'`` are
given, replace them with the corresponding aggregation. If ``None`` is given,
do not replace them. Defaults to ``None``.
model_missing_values (bool):
Whether to create a new column to indicate which values were null or not. The column
will be created only if there are null values. If ``True``, create the new column if
there are null values. If ``False``, do not create the new column even if there
are null values. Defaults to ``False``.
learn_rounding_scheme (bool):
Whether or not to learn what place to round to based on the data seen during ``fit``.
If ``True``, the data returned by ``reverse_transform`` will be rounded to that place.
Defaults to ``False``.
enforce_min_max_values (bool):
Whether or not to clip the data returned by ``reverse_transform`` to the min and
max values seen during ``fit``. Defaults to ``False``.
"""
INPUT_SDTYPE = 'numerical'
DETERMINISTIC_TRANSFORM = True
DETERMINISTIC_REVERSE = True
COMPOSITION_IS_IDENTITY = True
null_transformer = None
missing_value_replacement = None
_dtype = None
_rounding_digits = None
_min_value = None
_max_value = None
def __init__(self, missing_value_replacement=None, model_missing_values=False,
learn_rounding_scheme=False, enforce_min_max_values=False):
self.missing_value_replacement = missing_value_replacement
self.model_missing_values = model_missing_values
self.learn_rounding_scheme = learn_rounding_scheme
self.enforce_min_max_values = enforce_min_max_values
def get_output_sdtypes(self):
"""Return the output sdtypes supported by the transformer.
Returns:
dict:
Mapping from the transformed column names to supported sdtypes.
"""
output_sdtypes = {
'value': 'float',
}
if self.null_transformer and self.null_transformer.models_missing_values():
output_sdtypes['is_null'] = 'float'
return self._add_prefix(output_sdtypes)
def is_composition_identity(self):
"""Return whether composition of transform and reverse transform produces the input data.
Returns:
bool:
Whether or not transforming and then reverse transforming returns the input data.
"""
if self.null_transformer and not self.null_transformer.models_missing_values():
return False
return self.COMPOSITION_IS_IDENTITY
@staticmethod
def _learn_rounding_digits(data):
# check if data has any decimals
data = np.array(data)
roundable_data = data[~(np.isinf(data) | pd.isna(data))]
if ((roundable_data % 1) != 0).any():
if not (roundable_data == roundable_data.round(MAX_DECIMALS)).all():
return None
for decimal in range(MAX_DECIMALS + 1):
if (roundable_data == roundable_data.round(decimal)).all():
return decimal
elif len(roundable_data) > 0:
maximum = max(abs(roundable_data))
start = int(np.log10(maximum)) if maximum != 0 else 0
for decimal in range(-start, 1):
if (roundable_data == roundable_data.round(decimal)).all():
return decimal
return None
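# Illustrative behaviour of the scheme above:
#   _learn_rounding_digits([1.12, 2.34])  -> 2  (round to 2 decimal places)
#   _learn_rounding_digits([10.0, 350.0]) -> -1 (round to the tens place)
#   _learn_rounding_digits([0.1234567890123456]) typically -> None (more
#   decimals than MAX_DECIMALS, so no reliable rounding place is learned)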
def _fit(self, data):
"""Fit the transformer to the data.
Args:
data (pandas.Series):
Data to fit.
"""
self._dtype = data.dtype
if self.enforce_min_max_values:
self._min_value = data.min()
self._max_value = data.max()
if self.learn_rounding_scheme:
self._rounding_digits = self._learn_rounding_digits(data)
self.null_transformer = NullTransformer(
self.missing_value_replacement,
self.model_missing_values
)
self.null_transformer.fit(data)
def _transform(self, data):
"""Transform numerical data.
Integer values are replaced by their float equivalent. Non null float values
are left unmodified.
Args:
data (pandas.Series):
Data to transform.
Returns:
numpy.ndarray
"""
return self.null_transformer.transform(data)
def _reverse_transform(self, data):
"""Convert data back into the original format.
Args:
data (pd.Series or numpy.ndarray):
Data to transform.
Returns:
numpy.ndarray
"""
if not isinstance(data, np.ndarray):
data = data.to_numpy()
if self.missing_value_replacement is not None:
data = self.null_transformer.reverse_transform(data)
if self.enforce_min_max_values:
data = data.clip(self._min_value, self._max_value)
is_integer = np.dtype(self._dtype).kind == 'i'
if self.learn_rounding_scheme or is_integer:
data = data.round(self._rounding_digits or 0)
if pd.isna(data).any() and is_integer:
return data
return data.astype(self._dtype)
class GaussianNormalizer(FloatFormatter):
r"""Transformer for numerical data based on copulas transformation.
Transformation consists on bringing the input data to a standard normal space
by using a combination of *cdf* and *inverse cdf* transformations:
Given a variable :math:`x`:
- Find the best possible marginal or use user specified one, :math:`P(x)`.
- do :math:`u = \phi (x)` where :math:`\phi` is cumulative density function,
given :math:`P(x)`.
- do :math:`z = \phi_{N(0,1)}^{-1}(u)`, where :math:`\phi_{N(0,1)}^{-1}` is
the *inverse cdf* of a *standard normal* distribution.
The reverse transform will do the inverse of the steps above and go from :math:`z`
to :math:`u` and then to :math:`x`.
Args:
model_missing_values (bool):
Whether to create a new column to indicate which values were null or not. The column
will be created only if there are null values. If ``True``, create the new column if
there are null values. If ``False``, do not create the new column even if there
are null values. Defaults to ``False``.
learn_rounding_scheme (bool):
Whether or not to learn what place to round to based on the data seen during ``fit``.
If ``True``, the data returned by ``reverse_transform`` will be rounded to that place.
Defaults to ``False``.
enforce_min_max_values (bool):
Whether or not to clip the data returned by ``reverse_transform`` to the min and
max values seen during ``fit``. Defaults to ``False``.
distribution (copulas.univariate.Univariate or str):
Copulas univariate distribution to use. Defaults to ``truncated_gaussian``.
Options include:
* ``gaussian``: Use a Gaussian distribution.
* ``gamma``: Use a Gamma distribution.
* ``beta``: Use a Beta distribution.
* ``student_t``: Use a Student T distribution.
* ``gaussian_kde``: Use a GaussianKDE distribution. This model is non-parametric,
so using this will make ``get_parameters`` unusable.
* ``truncated_gaussian``: Use a Truncated Gaussian distribution.
"""
_univariate = None
COMPOSITION_IS_IDENTITY = False
def __init__(self, model_missing_values=False, learn_rounding_scheme=False,
enforce_min_max_values=False, distribution='truncated_gaussian'):
super().__init__(
missing_value_replacement='mean',
model_missing_values=model_missing_values,
learn_rounding_scheme=learn_rounding_scheme,
enforce_min_max_values=enforce_min_max_values
)
self.distribution = distribution # Distribution initialized by the user
self._distributions = self._get_distributions()
if isinstance(distribution, str):
distribution = self._distributions[distribution]
self._distribution = distribution
@staticmethod
def _get_distributions():
try:
from copulas import univariate # pylint: disable=import-outside-toplevel
except ImportError as error:
error.msg += (
'\n\nIt seems like `copulas` is not installed.\n'
'Please install it using:\n\n pip install rdt[copulas]'
)
raise
return {
'gaussian': univariate.GaussianUnivariate,
'gamma': univariate.GammaUnivariate,
'beta': univariate.BetaUnivariate,
'student_t': univariate.StudentTUnivariate,
'gaussian_kde': univariate.GaussianKDE,
'truncated_gaussian': univariate.TruncatedGaussian,
}
def _get_univariate(self):
distribution = self._distribution
if any(isinstance(distribution, dist) for dist in self._distributions.values()):
return copy.deepcopy(distribution)
if isinstance(distribution, tuple):
return distribution[0](**distribution[1])
if isinstance(distribution, type) and distribution in self._distributions.values():
return distribution()
raise TypeError(f'Invalid distribution: {distribution}')
def _fit(self, data):
"""Fit the transformer to the data.
Args:
data (pandas.Series):
Data to fit to.
"""
self._univariate = self._get_univariate()
super()._fit(data)
data = super()._transform(data)
if data.ndim > 1:
data = data[:, 0]
with warnings.catch_warnings():
warnings.simplefilter('ignore')
self._univariate.fit(data)
def _copula_transform(self, data):
cdf = self._univariate.cdf(data)
return scipy.stats.norm.ppf(cdf.clip(0 + EPSILON, 1 - EPSILON))
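# i.e. z = Phi^{-1}(F(x)): push x through its fitted marginal CDF to get a
# value in (0, 1), then through the standard-normal inverse CDF; the clipping
# keeps ppf away from +/- infinity at the boundaries.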
def _transform(self, data):
"""Transform numerical data.
Args:
data (pandas.Series):
Data to transform.
Returns:
numpy.ndarray
"""
transformed = super()._transform(data)
if transformed.ndim > 1:
transformed[:, 0] = self._copula_transform(transformed[:, 0])
else:
transformed = self._copula_transform(transformed)
return transformed
def _reverse_transform(self, data):
"""Convert data back into the original format.
Args:
data (pd.Series or numpy.ndarray):
Data to transform.
Returns:
pandas.Series
"""
if not isinstance(data, np.ndarray):
data = data.to_numpy()
if data.ndim > 1:
data[:, 0] = self._univariate.ppf(scipy.stats.norm.cdf(data[:, 0]))
else:
data = self._univariate.ppf(scipy.stats.norm.cdf(data))
return super()._reverse_transform(data)
class ClusterBasedNormalizer(FloatFormatter):
"""Transformer for numerical data using a Bayesian Gaussian Mixture Model.
This transformation takes a numerical value and transforms it using a Bayesian GMM
model. It generates two outputs, a discrete value which indicates the selected
'component' of the GMM and a continuous value which represents the normalized value
based on the mean and std of the selected component.
Args:
model_missing_values (bool):
Whether to create a new column to indicate which values were null or not. The column
will be created only if there are null values. If ``True``, create the new column if
there are null values. If ``False``, do not create the new column even if there
are null values. Defaults to ``False``.
learn_rounding_scheme (bool):
Whether or not to learn what place to round to based on the data seen during ``fit``.
If ``True``, the data returned by ``reverse_transform`` will be rounded to that place.
Defaults to ``False``.
enforce_min_max_values (bool):
Whether or not to clip the data returned by ``reverse_transform`` to the min and
max values seen during ``fit``. Defaults to ``False``.
max_clusters (int):
The maximum number of mixture components. Depending on the data, the model may select
fewer components (based on the ``weight_threshold``).
Defaults to 10.
weight_threshold (int, float):
The minimum value a component weight can take to be considered a valid component.
``weights_`` under this value will be ignored.
Defaults to 0.005.
Attributes:
_bgm_transformer:
An instance of sklearn`s ``BayesianGaussianMixture`` class.
valid_component_indicator:
An array indicating the valid components. If the weight of a component is greater
than the ``weight_threshold``, it's indicated with True, otherwise it's set to False.
"""
STD_MULTIPLIER = 4
DETERMINISTIC_TRANSFORM = False
DETERMINISTIC_REVERSE = True
COMPOSITION_IS_IDENTITY = False
_bgm_transformer = None
valid_component_indicator = None
def __init__(self, model_missing_values=False, learn_rounding_scheme=False,
enforce_min_max_values=False, max_clusters=10, weight_threshold=0.005):
super().__init__(
missing_value_replacement='mean',
model_missing_values=model_missing_values,
learn_rounding_scheme=learn_rounding_scheme,
enforce_min_max_values=enforce_min_max_values
)
self.max_clusters = max_clusters
self.weight_threshold = weight_threshold
def get_output_sdtypes(self):
"""Return the output sdtypes supported by the transformer.
Returns:
dict:
Mapping from the transformed column names to supported sdtypes.
"""
output_sdtypes = {
'normalized': 'float',
'component': 'categorical'
}
if self.null_transformer and self.null_transformer.models_missing_values():
output_sdtypes['is_null'] = 'float'
return self._add_prefix(output_sdtypes)
def _fit(self, data):
"""Fit the transformer to the data.
Args:
data (pandas.Series):
Data to fit to.
"""
self._bgm_transformer = BayesianGaussianMixture(
n_components=self.max_clusters,
weight_concentration_prior_type='dirichlet_process',
weight_concentration_prior=0.001,
n_init=1
)
super()._fit(data)
data = super()._transform(data)
if data.ndim > 1:
data = data[:, 0]
with warnings.catch_warnings():
warnings.simplefilter('ignore')
self._bgm_transformer.fit(data.reshape(-1, 1))
self.valid_component_indicator = self._bgm_transformer.weights_ > self.weight_threshold
def _transform(self, data):
"""Transform the numerical data.
Args:
data (pandas.Series):
Data to transform.
Returns:
numpy.ndarray.
"""
data = super()._transform(data)
if data.ndim > 1:
data, model_missing_values = data[:, 0], data[:, 1]
data = data.reshape((len(data), 1))
means = self._bgm_transformer.means_.reshape((1, self.max_clusters))
stds = np.sqrt(self._bgm_transformer.covariances_).reshape((1, self.max_clusters))
normalized_values = (data - means) / (self.STD_MULTIPLIER * stds)
normalized_values = normalized_values[:, self.valid_component_indicator]
component_probs = self._bgm_transformer.predict_proba(data)
component_probs = component_probs[:, self.valid_component_indicator]
selected_component = np.zeros(len(data), dtype='int')
for i in range(len(data)):
component_prob_t = component_probs[i] + 1e-6
component_prob_t = component_prob_t / component_prob_t.sum()
selected_component[i] = np.random.choice(
np.arange(self.valid_component_indicator.sum()),
p=component_prob_t
)
aranged = np.arange(len(data))
normalized = normalized_values[aranged, selected_component].reshape([-1, 1])
normalized = np.clip(normalized, -.99, .99)
normalized = normalized[:, 0]
rows = [normalized, selected_component]
if self.null_transformer and self.null_transformer.models_missing_values():
rows.append(model_missing_values)
return np.stack(rows, axis=1) # noqa: PD013
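# The transformed output therefore carries two values per row:
#   column 0: the value normalized within its sampled Gaussian component,
#             (x - mu_k) / (STD_MULTIPLIER * sigma_k), clipped to (-0.99, 0.99);
#   column 1: the sampled component index k (the "mode indicator"), drawn from
#             the per-row posterior responsibilities rather than the argmax.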
def _reverse_transform_helper(self, data):
normalized = np.clip(data[:, 0], -1, 1)
# -*- coding: utf-8 -*-
"""
Created on Tue Feb 5 23:56:16 2019
@author: kirichoi
"""
import os, sys
import tellurium as te
import roadrunner
import numpy as np
import antimony
import scipy.optimize
import networkGenerator as ng
import time
import copy
def f1(k_list, *args):
global counts
global countf
args[0].reset()
args[0].setValues(args[0].getGlobalParameterIds(), k_list)
try:
args[0].steadyStateApproximate()
objCCC = args[0].getScaledConcentrationControlCoefficientMatrix()
objCCC[np.abs(objCCC) < 1e-12] = 0 # Set small values to zero
if np.isnan(objCCC).any():
dist_obj = 10000
else:
if args[3]:
objFlux = args[0].getReactionRates()
objFlux[np.abs(objFlux) < 1e-12] = 0 # Set small values to zero
# objFCC = args[0].getScaledFluxControlCoefficientMatrix()
# objFCC[np.abs(objFCC) < 1e-12] = 0 # Set small values to zero
objCCC_row = objCCC.rownames
objCCC_col = objCCC.colnames
objCCC = objCCC[np.argsort(objCCC_row)]
objCCC = objCCC[:,np.argsort(objCCC_col)]
if args[3]:
objFlux = objFlux[np.argsort(objCCC_col)]
# Count sign mismatches (np.not_equal), matching the objective used in
# initialize(); more sign mismatches -> larger penalty factor.
dist_obj = (((np.linalg.norm(args[1] - objCCC)) + (np.linalg.norm(args[2] - objFlux))) *
            ((1 + np.sum(np.not_equal(np.sign(np.array(args[1])), np.sign(np.array(objCCC))))) +
             (1 + np.sum(np.not_equal(np.sign(np.array(args[2])), np.sign(np.array(objFlux)))))))
else:
dist_obj = ((np.linalg.norm(args[1] - objCCC))*(1 +
np.sum(np.not_equal(np.sign(np.array(args[1])),
np.sign(np.array(objCCC))))))
except:
countf += 1
dist_obj = 10000
counts += 1
return dist_obj
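# f1 above scores a candidate parameter set as the Frobenius distance of the
# scaled concentration control coefficients (plus the flux distance when
# enabled), scaled by one plus the number of sign mismatches, so getting the
# signs of the control coefficients right is rewarded even when magnitudes
# are off.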
def callbackF(X, convergence=0.):
global counts
global countf
print(str(counts) + ", " + str(countf))
return False
def initialize(Parameters):
global countf
global counts
numBadModels = 0
numGoodModels = 0
numIter = 0
ens_dist = np.empty(Parameters.ens_size)
ens_model = np.empty(Parameters.ens_size, dtype='object')
ens_rl = np.empty(Parameters.ens_size, dtype='object')
rl_track = []
rl_track.append(Parameters.knownReactionList)
# Initial Random generation
while (numGoodModels < Parameters.ens_size):
# Ensure no redundant model
rl = ng.generateReactionList(Parameters)
st = ng.getFullStoichiometryMatrix(rl, Parameters.ns).tolist()
stt = ng.removeBoundaryNodes(np.array(st))
while rl in rl_track:
rl = ng.generateReactionList(Parameters)
st = ng.getFullStoichiometryMatrix(rl, Parameters.ns).tolist()
stt = ng.removeBoundaryNodes(np.array(st))
antStr = ng.generateAntimony(Parameters.realFloatingIds, Parameters.realBoundaryIds, stt[1],
stt[2], rl, boundary_init=Parameters.realBoundaryVal)
try:
r = te.loada(antStr)
counts = 0
countf = 0
r.steadyStateApproximate()
p_bound = ng.generateParameterBoundary(r.getGlobalParameterIds())
res = scipy.optimize.differential_evolution(f1,
args=(r, Parameters.realConcCC, Parameters.realFlux, Parameters.FLUX),
bounds=p_bound,
maxiter=Parameters.optiMaxIter,
tol=Parameters.optiTol,
polish=Parameters.optiPolish,
seed=Parameters.r_seed)
if not res.success:
numBadModels += 1
else:
# TODO: Might be able to cut the bottom part by simply using
# the obj func value from optimizer
r = te.loada(antStr)
r.setValues(r.getGlobalParameterIds(), res.x)
r.steadyStateApproximate()
SS_i = r.getFloatingSpeciesConcentrations()
r.steadyStateApproximate()
if np.any(SS_i < 1e-5) or np.any(SS_i > 1e5):
numBadModels += 1
else:
concCC_i = r.getScaledConcentrationControlCoefficientMatrix()
if Parameters.FLUX:
flux_i = r.getReactionRates()
if np.isnan(concCC_i).any():
numBadModels += 1
else:
concCC_i[np.abs(concCC_i) < 1e-12] = 0 # Set small values to zero
if Parameters.FLUX:
flux_i[np.abs(flux_i) < 1e-12] = 0 # Set small values to zero
concCC_i_row = concCC_i.rownames
concCC_i_col = concCC_i.colnames
concCC_i = concCC_i[np.argsort(concCC_i_row)]
concCC_i = concCC_i[:,np.argsort(concCC_i_col)]
if Parameters.FLUX:
flux_i = flux_i[np.argsort(concCC_i_col)]
dist_i = (((np.linalg.norm(Parameters.realConcCC - concCC_i)) +
(np.linalg.norm(Parameters.realFlux - flux_i))) *
((1 + np.sum(np.not_equal(np.sign(np.array(Parameters.realConcCC)),
np.sign(np.array(concCC_i))))) +
(1 + np.sum(np.not_equal(np.sign(np.array(Parameters.realFlux)),
np.sign(np.array(flux_i)))))))
else:
dist_i = ((np.linalg.norm(Parameters.realConcCC - concCC_i))*(1 +
np.sum(np.not_equal(np.sign(np.array(Parameters.realConcCC)),
np.sign(np.array(concCC_i))))))
ens_dist[numGoodModels] = dist_i
r.reset()
ens_model[numGoodModels] = r.getAntimony(current=True)
ens_rl[numGoodModels] = rl
rl_track.append(rl)
numGoodModels = numGoodModels + 1
except:
numBadModels = numBadModels + 1
antimony.clearPreviousLoads()
numIter = numIter + 1
if numIter % 1000 == 0:
    print("Number of iterations = " + str(numIter))
if numIter % 10000 == 0:
    print("Number of good models = " + str(numGoodModels))
print("In generation: 1")
print("Number of total iterations = " + str(numIter))
print("Number of bad models = " + str(numBadModels))
return ens_dist, ens_model, ens_rl, rl_track
def mutate_and_evaluate(Parameters, listantStr, listdist, listrl, rl_track):
global countf
global counts
eval_dist = np.empty(Parameters.mut_size)
eval_model = np.empty(Parameters.mut_size, dtype='object')
eval_rl = np.empty(Parameters.mut_size, dtype='object')
for m in Parameters.mut_range:
o = 0
rl = ng.generateMutation(Parameters, listrl[m], listantStr[m])
st = ng.getFullStoichiometryMatrix(rl, Parameters.ns).tolist()
stt = ng.removeBoundaryNodes(np.array(st))
import numpy as np
from pandas import DataFrame
import os, os.path
import csv
from itertools import groupby
from time import time
import sys
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
import tensorflow as tf
v0 = tf.__version__.split('.')[0]  # major version as a string, robust to multi-digit versions
if v0 == '2':
# For tensorflow 2, keras is included in tf
import tensorflow.keras.backend as K
from tensorflow.keras import optimizers
from tensorflow.keras.callbacks import TensorBoard, EarlyStopping, ModelCheckpoint, ReduceLROnPlateau
from tensorflow.keras.layers import LSTM, Dense, TimeDistributed, Bidirectional, Input, Dense, Conv1D, Dropout, GlobalAveragePooling1D, multiply
from tensorflow.python.keras.layers.core import *
from tensorflow.keras.models import *
from tensorflow.keras.utils import to_categorical, plot_model
from tensorflow.keras.preprocessing.image import load_img, img_to_array
from tensorflow.keras.applications.resnet50 import preprocess_input as preprocess_input_ResNet50
from tensorflow.keras.applications.vgg16 import preprocess_input as preprocess_input_VGG16
from tensorflow.keras.applications.mobilenet import preprocess_input as preprocess_input_MobileNet
elif v0 == '1':
#For tensorflow 1.2.0
import keras.backend as K
from keras import optimizers
from keras.callbacks import TensorBoard, EarlyStopping, ModelCheckpoint, ReduceLROnPlateau
from keras.layers import LSTM, Dense, TimeDistributed, Bidirectional, Input, Dense, Conv1D, Dropout, GlobalAveragePooling1D, merge
from keras.layers.core import *
from keras.models import *
from keras.utils import to_categorical, plot_model
from keras.preprocessing.image import load_img, img_to_array
from keras.applications.resnet50 import preprocess_input as preprocess_input_ResNet50
from keras.applications.vgg16 import preprocess_input as preprocess_input_VGG16
from keras.applications.mobilenet import preprocess_input as preprocess_input_MobileNet
else:
sys.exit('Tensorflow version should be 1.X or 2.X')
def generator(features,
features_type,
annot,
batch_size,
seq_length,
output_form,
output_class_weights,
img_width,
img_height,
cnnType):
"""
Generator function for batch training models
features: [preprocessed features (numpy array (1, time_steps, nb_features)), images_path (list of strings)]
"""
if features_type == 'frames':
total_length_round = (len(features[1])//seq_length)*seq_length
elif features_type == 'features' or features_type == 'both':
total_length_round = (features[0].shape[1]//seq_length)*seq_length
feature_number = features[0].shape[2]
else:
sys.exit('Wrong features type')
batch_size_time = np.min([batch_size*seq_length, total_length_round])
if features_type == 'frames' or features_type == 'both':
batch_frames = np.zeros((1, batch_size_time, img_width, img_height, 3))
if features_type == 'features' or features_type == 'both':
batch_features = np.zeros((1, batch_size_time, feature_number))
if output_class_weights != []:
if output_form == 'mixed':
annot_labels_weight = []#np.ones((1, annot[0].shape[1]))
batch_labels_weight = []#np.zeros((1, batch_size_time))
labels_number = len(annot)
for i_label_cat in range(labels_number):
annot_labels_weight_tmp = np.zeros((1, annot[i_label_cat].shape[1]))
nClasses = annot[i_label_cat].shape[2]
for iClass in range(nClasses):
annot_labels_weight_tmp[0, np.argmax(annot[i_label_cat][0,:,:],axis=1)==iClass] = output_class_weights[i_label_cat][iClass]
annot_labels_weight.append(annot_labels_weight_tmp)# = annot_labels_weight*annot_labels_weight_tmp
batch_labels_weight.append(np.zeros((1, batch_size_time)))
elif output_form == 'sign_types':
nClasses = annot.shape[2]
annot_labels_weight=np.zeros((1, annot.shape[1]))
batch_labels_weight = np.zeros((1, batch_size_time))
for iClass in range(nClasses):
annot_labels_weight[0, np.argmax(annot[0,:,:],axis=1)==iClass] = output_class_weights[0][iClass]
if output_form == 'mixed':
batch_labels = []
labels_number = len(annot)
labels_shape = []
for i_label_cat in range(labels_number):
labels_shape.append(annot[i_label_cat].shape[2])
batch_labels.append(np.zeros((1, batch_size_time, labels_shape[i_label_cat])))
elif output_form == 'sign_types':
labels_shape = annot.shape[2]
batch_labels = np.zeros((1, batch_size_time, labels_shape))
else:
sys.exit('Wrong annotation format')
while True:
# Random start
random_ini = np.random.randint(0, total_length_round)
end = random_ini + batch_size_time
end_modulo = np.mod(end, total_length_round)
# Fill in batch features
if features_type == 'features' or features_type == 'both':
batch_features = batch_features.reshape(1, batch_size_time, feature_number)
if end <= total_length_round:
batch_features = np.copy(features[0][0, random_ini:end, :])
else:
batch_features[0, :(total_length_round - random_ini), :] = np.copy(features[0][0, random_ini:total_length_round, :])
batch_features[0, (total_length_round - random_ini):, :] = np.copy(features[0][0, 0:end_modulo, :])
batch_features = batch_features.reshape(-1, seq_length, feature_number)
if features_type == 'frames' or features_type == 'both':
    # Pick the frame preprocessing function once instead of per frame.
    if cnnType == 'resnet':
        preprocess_input = preprocess_input_ResNet50
    elif cnnType == 'vgg':
        preprocess_input = preprocess_input_VGG16
    elif cnnType == 'mobilenet':
        preprocess_input = preprocess_input_MobileNet
    else:
        sys.exit('Invalid CNN network model')
    batch_frames = batch_frames.reshape(1, batch_size_time, img_width, img_height, 3)
    # Load, resize and preprocess each frame, wrapping around the end of the
    # sequence exactly like the random_ini/end/end_modulo logic above.
    for iBatch, iFrame in enumerate(np.arange(random_ini, end) % total_length_round):
        batch_frames[0, iBatch, :, :, :] = preprocess_input(
            img_to_array(load_img(features[1][iFrame], target_size=(img_width, img_height))))
    batch_frames = batch_frames.reshape(-1, seq_length, img_width, img_height, 3)
# Fill in batch weights
if output_class_weights != []:
if output_form == 'mixed':
for i_label_cat in range(labels_number):
batch_labels_weight[i_label_cat] = batch_labels_weight[i_label_cat].reshape(1, batch_size_time)
if end <= total_length_round:
batch_labels_weight[i_label_cat] = np.copy(annot_labels_weight[i_label_cat][0, random_ini:end])
else:
batch_labels_weight[i_label_cat][0, :(total_length_round - random_ini)] = np.copy(annot_labels_weight[i_label_cat][0, random_ini:total_length_round])
batch_labels_weight[i_label_cat][0, (total_length_round - random_ini):] = np.copy(annot_labels_weight[i_label_cat][0, 0:end_modulo])
batch_labels_weight[i_label_cat] = batch_labels_weight[i_label_cat].reshape(-1, seq_length)
elif output_form == 'sign_types':
batch_labels_weight = batch_labels_weight.reshape(1, batch_size_time)
if end <= total_length_round:
batch_labels_weight = np.copy(annot_labels_weight[0, random_ini:end])
else:
batch_labels_weight[0, :(total_length_round - random_ini)] = np.copy(annot_labels_weight[0, random_ini:total_length_round])
batch_labels_weight[0, (total_length_round - random_ini):] = np.copy(annot_labels_weight[0, 0:end_modulo])
batch_labels_weight = batch_labels_weight.reshape(-1, seq_length)
# Fill in batch annotations
if output_form == 'mixed':
for i_label_cat in range(labels_number):
batch_labels[i_label_cat] = batch_labels[i_label_cat].reshape(1, batch_size_time, labels_shape[i_label_cat])
if end <= total_length_round:
batch_labels[i_label_cat] = np.copy(annot[i_label_cat][0, random_ini:end, :])
else:
batch_labels[i_label_cat][0, :(total_length_round - random_ini), :] = np.copy(annot[i_label_cat][0, random_ini:total_length_round, :])
batch_labels[i_label_cat][0, (total_length_round - random_ini):, :] = np.copy(annot[i_label_cat][0, 0:end_modulo, :])
batch_labels[i_label_cat] = batch_labels[i_label_cat].reshape(-1, seq_length, labels_shape[i_label_cat])
elif output_form == 'sign_types':
batch_labels = batch_labels.reshape(1, batch_size_time, labels_shape)
if end <= total_length_round:
batch_labels = np.copy(annot[0, random_ini:end, :])
else:
batch_labels[0, :(total_length_round - random_ini), :] = np.copy(annot[0, random_ini:total_length_round, :])
batch_labels[0, (total_length_round - random_ini):, :] = np.copy(annot[0, 0:end_modulo, :])
batch_labels = batch_labels.reshape(-1, seq_length, labels_shape)
if output_class_weights != []:
if features_type == 'features':
yield batch_features, batch_labels, batch_labels_weight
elif features_type == 'frames':
yield batch_frames, batch_labels, batch_labels_weight
elif features_type == 'both':
yield [batch_features, batch_frames], batch_labels, batch_labels_weight
else:
if features_type == 'features':
yield batch_features, batch_labels
elif features_type == 'frames':
yield batch_frames, batch_labels
elif features_type == 'both':
yield [batch_features, batch_frames], batch_labels
def train_model(model,
features_train,
annot_train,
features_valid,
annot_valid,
batch_size,
epochs,
seq_length,
features_type='features',
output_class_weights=[],
earlyStopping=False,
save='no',
saveMonitor='val_loss',
saveMonitorMode='min',
saveBestName='',
reduceLrOnPlateau=False,
reduceLrMonitor='val_loss',
reduceLrMonitorMode='min',
reduceLrPatience=7,
reduceLrFactor=0.8,
img_width=224,
img_height=224,
cnnType='resnet'):
"""
Trains a keras model.
Inputs:
model: keras model
features_train: [numpy array of features [1, time_steps_train, features], list of images (if CNN is used)]
annot_train: either list of annotation arrays (output_form: 'mixed')
or one binary array (output_form: 'sign_types')
features_valid: [numpy array of features [1, time_steps_valid, features], list of images (if CNN is used)]
annot_valid: either list of annotation arrays (output_form: 'mixed')
or one binary array (output_form: 'sign_types')
batch_size
output_class_weights: list of vector of weights for each class of each output
save: for saving the models ('no' or 'best' or 'all')
Outputs:
?
"""
if type(annot_train) == list:
output_form = 'mixed'
elif type(annot_train) == np.ndarray:
output_form = 'sign_types'
else:
sys.exit('Wrong annotation format')
if output_form == 'mixed':
annot_categories_number = len(annot_train)
if features_type == 'frames':
time_steps_train = len(features_train[1])
time_steps_valid = len(features_valid[1])
elif features_type == 'features' or features_type == 'both':
time_steps_train = features_train[0].shape[1]
time_steps_valid = features_valid[0].shape[1]
else:
sys.exit('Wrong features type')
total_length_train_round = (time_steps_train // seq_length) * seq_length
    batch_size_time = np.min([batch_size * seq_length, total_length_train_round])
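    # Illustrative note (an addition; names are assumptions): with the
    # wrap-around sampling used by the generator above, one epoch is commonly
    # defined so that the training sequence is covered once, e.g.
    #   steps_per_epoch = total_length_train_round // batch_size_time
    # before handing the generator to model.fit(...).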
# -*- coding: utf-8 -*-
"""
Level diagram calculations for atoms dressed by rydberg levels.
The dressing is achieved by an AC electromagnetic field (laser).
Most of the code here is from the module calculations_atom_pairstate.py.
This one adds the AC field and the ground state to the Hamiltonian and diagonalizes it.
Example:
Calculation of the eigenstates when the laser light is near resonant with the transition
:math:`|~5~P_{3/2}~m_j=1/2\\rangle` -> `|60~S_{1/2}~m_j=1/2\\rangle` state. Colour
highlights the mixture of state :math:`|~5~P_{3/2}~m_j=1/2\\rangle`:
import arc as ARC
n0=5;l0=1;j0=1.5;mj0=0.5; #Ground State
nr=60;lr=0;jr=0.5;mjr=0.5; #Target rydberg State
theta=0; #Polar Angle [0-pi]
phi=0; #Azimuthal Angle [0-2pi]
dn = 3; #Range of n to consider (n0-dn:n0+dn)
dl = 3; #Range of l values
deltaMax = 20e9 #Max pair-state energy difference [Hz]
calc = ARC.DressedPairStateInteractions(ARC.Rubidium(), n0,l0,j0,nr,lr,jr, mj0,mjr,interactionsUpTo = 2, Omega0 = 8e-3,Delta0 = 30e-3)
    #Omega0 is the Rabi frequency of the AC field and Delta0 is the detuning of the AC field from the transition.
rvdw = calc.getLeRoyRadius()
print("LeRoy radius = %.1f mum" % rvdw)
#R array (um)
r=np.linspace(1.5,10,1000)
#Generate pair-state interaction Hamiltonian
calc.defineBasis(theta,phi, dn,dl, deltaMax,progressOutput=True)
#Diagonalise
nEig=1 #Number of eigenstates to extract (we just want the ground state here)
calc.diagonalise(r,nEig,progressOutput=True,sortEigenvectors = True)
#Save data
calc.exportData('60S_dressed_pair_calculation', exportFormat='csv')
#Plot
calc.plotLevelDiagram(hlim = [0.95,1])
calc.ax.set_xlim(1.0,10.0)
calc.ax.set_ylim(-5,3)
calc.showPlot()
"""
from __future__ import division, print_function, absolute_import
from .wigner import Wigner6j, Wigner3j, CG, WignerDmatrix
from .alkali_atom_functions import _EFieldCoupling, _atomLightAtomCoupling
from scipy.constants import physical_constants, pi, epsilon_0, hbar
import gzip
import sys
import datetime
import matplotlib
from matplotlib.colors import LinearSegmentedColormap
from .calculations_atom_single import StarkMap
from .alkali_atom_functions import *
from .divalent_atom_functions import DivalentAtom
from scipy.special import factorial
from numpy import floor  # 'from scipy import floor' no longer works in modern SciPy
# scipy.special.specfun.fcoef is a private API; the fcoef helper used below is
# defined locally in DressedPairStateInteractions.__init__
from scipy.sparse.linalg import eigsh
from scipy.sparse import csr_matrix, hstack, vstack
from numpy.lib.polynomial import real
from numpy.ma import conjugate
from scipy.optimize import curve_fit
from scipy.constants import e as C_e
from scipy.constants import h as C_h
from scipy.constants import c as C_c
from scipy.constants import k as C_k
import re
import numpy as np
from math import exp, log, sqrt
import matplotlib.pyplot as plt
import matplotlib as mpl
mpl.rcParams['xtick.minor.visible'] = True
mpl.rcParams['ytick.minor.visible'] = True
mpl.rcParams['xtick.major.size'] = 8
mpl.rcParams['ytick.major.size'] = 8
mpl.rcParams['xtick.minor.size'] = 4
mpl.rcParams['ytick.minor.size'] = 4
mpl.rcParams['xtick.direction'] = 'in'
mpl.rcParams['ytick.direction'] = 'in'
mpl.rcParams['xtick.top'] = True
mpl.rcParams['ytick.right'] = True
mpl.rcParams['font.family'] = 'serif'
# for matrices
if sys.version_info > (2,):
xrange = range
DPATH = os.path.join(os.path.expanduser('~'), '.arc-data')
#
class DressedPairStateInteractions:
"""
Calculates level diagram (spaghetti) for levels of atoms dressed by rydberg state.
Initializes Rydberg level spaghetti calculation for the given atom
species (or for two atoms of different species) in the vicinity
    of the given pair state to which a laser light is coupled. For details of the calculation see
    Ref. [1]_.
Args:
atom (:obj:`AlkaliAtom` or :obj:`DivalentAtom`): = {
:obj:`arc.alkali_atom_data.Lithium6`,
:obj:`arc.alkali_atom_data.Lithium7`,
:obj:`arc.alkali_atom_data.Sodium`,
:obj:`arc.alkali_atom_data.Potassium39`,
:obj:`arc.alkali_atom_data.Potassium40`,
:obj:`arc.alkali_atom_data.Potassium41`,
:obj:`arc.alkali_atom_data.Rubidium85`,
:obj:`arc.alkali_atom_data.Rubidium87`,
:obj:`arc.alkali_atom_data.Caesium`,
:obj:`arc.divalent_atom_data.Strontium88`,
:obj:`arc.divalent_atom_data.Calcium40`
:obj:`arc.divalent_atom_data.Ytterbium174` }
Select the alkali metal for energy level
diagram calculation
n (int): principal quantum number for the ground state
l (int): orbital angular momentum for the ground state
j (float): total angular momentum for the ground state
nn (int): principal quantum number for the rydberg state
ll (int): orbital angular momentum for the rydberg state
jj (float): total angular momentum for the rydberg state
m1 (float): projection of the total angular momentum on z-axis
for the ground state
m2 (float): projection of the total angular momentum on z-axis
for the rydberg state
interactionsUpTo (int): Optional. If set to 1, includes only
dipole-dipole interactions. If set to 2 includes interactions
up to quadrupole-quadrupole. Default value is 1.
s (float): optional, spin state of the first atom. Default value
of 0.5 is correct for :obj:`AlkaliAtom` but for
:obj:`DivalentAtom` it has to be explicitly set to 0 or 1 for
singlet and triplet states respectively.
**If `s2` is not specified, it is assumed that the second
atom is in the same spin state.**
        s2 (float): optional, spin state of the second atom. If not
            specified (left to default value None) it will assume the
            spin state of the first atom.
        atom2 (:obj:`AlkaliAtom` or :obj:`DivalentAtom`): optional,
            specifies atomic species for the second atom, enabling
            calculation of **inter-species pair-state interactions**.
            If not specified (left to default value None) it will assume
            the same atomic species as the first atom.
References:
.. [1] Jorge et al.
Examples:
        **Advanced interfacing of pair-state interactions calculations
(PairStateInteractions class).** This
is an advanced example intended for building up extensions to the
existing code. If you want to directly access the pair-state
interaction matrix, constructed by :obj:`defineBasis`,
you can assemble it easily from diagonal part
(stored in :obj:`matDiagonal` ) and off-diagonal matrices whose
spatial dependence is :math:`R^{-3},R^{-4},R^{-5}` stored in that
order in :obj:`matR`. Basis states are stored in :obj:`basisStates`
array.
>>> from arc import *
>>> calc = PairStateInteractions(Rubidium(), 60,0,0.5, \
60,0,0.5, 0.5,0.5,interactionsUpTo = 1)
>>> # theta=0, phi = 0, range of pqn, range of l, deltaE = 25e9
>>> calc.defineBasis(0 ,0 , 5, 5, 25e9, progressOutput=True)
>>> # now calc stores interaction matrix and relevant basis
>>> # we can access this directly and generate interaction matrix
>>> # at distance rval :
>>> rval = 4 # in mum
>>> matrix = calc.matDiagonal
>>> rX = (rval*1.e-6)**3
>>> for matRX in self.matR:
>>> matrix = matrix + matRX/rX
>>> rX *= (rval*1.e-6)
>>> # matrix variable now holds full interaction matrix for
>>> # interacting atoms at distance rval calculated in
>>> # pair-state basis states can be accessed as
>>> basisStates = calc.basisStates
"""
dataFolder = DPATH
# =============================== Methods ===============================
def __init__(self, atom, n, l, j, nn, ll, jj, m1, m2,
interactionsUpTo=1,
s=0.5,
s2=None, atom2=None, Omega0 = 0, Delta0 = 0):
# alkali atom type, principal quantum number, orbital angular momentum,
# total angular momentum projections of the angular momentum on z axis
self.atom1 = atom #: atom type
if atom2 is None:
self.atom2 = atom
else:
self.atom2 = atom2
self.n = n # : pair-state definition: principal quantum number of the ground state
self.l = l # : pair-state definition: orbital angular momentum of the ground state
self.j = j # : pair-state definition: total angular momentum of the ground state
self.nn = nn # : pair-state definition: principal quantum number of rydberg state
self.ll = ll # : pair-state definition: orbital angular momentum of rydberg state
        self.jj = jj  # : pair-state definition: total angular momentum of the rydberg state
self.m1 = m1 # : pair-state definition: projection of the total ang. momentum for the ground state
self.m2 = m2 # : pair-state definition: projection of the total angular momentum for the rydberg state
self.interactionsUpTo = interactionsUpTo
""""
Specifies up to which approximation we include in pair-state interactions.
By default value is 1, corresponding to pair-state interactions up to
dipole-dipole coupling. Value of 2 is also supported, corresponding
to pair-state interactions up to quadrupole-quadrupole coupling.
"""
self.Omega0 = Omega0 #Rabi frequency of the dressing with the near resonant transition (nn, ll, jj, m2).
        self.Delta0 = Delta0  # Detuning from the near resonant transition (nn, ll, jj, m2)
if (issubclass(type(atom),DivalentAtom) and not (s == 0 or s == 1)):
raise ValueError("total angular spin s has to be defined explicitly "
"for calculations, and value has to be 0 or 1 "
"for singlet and tripplet states respectively.")
self.s1 = s #: total spin angular momentum, optional (default 0.5)
if s2 is None:
self.s2 = s
else:
self.s2 = s2
# check that values of spin states are valid for entered atomic species
if issubclass(type(self.atom1), DivalentAtom):
if (abs(self.s1) > 0.1 and abs(self.s1 - 1) > 0.1):
raise ValueError("atom1 is DivalentAtom and its spin has to be "
"s=0 or s=1 (for singlet and triplet states "
"respectively)")
elif (abs(self.s1 - 0.5) > 0.1):
raise ValueError("atom1 is AlkaliAtom and its spin has to be "
"s=0.5")
if issubclass(type(self.atom2), DivalentAtom):
if (abs(self.s2) > 0.1 and abs(self.s2 - 1) > 0.1):
raise ValueError("atom2 is DivalentAtom and its spin has to be "
"s=0 or s=1 (for singlet and triplet states "
"respectively)")
elif (abs(self.s2 - 0.5) > 0.1):
# we have divalent atom
raise ValueError("atom2 is AlkaliAtom and its spin has to be "
"s=0.5")
if (abs((self.s1-self.m1) % 1) > 0.1):
raise ValueError("atom1 with spin s = %.1d cannot have m1 = %.1d"
% (self.s1, self.m1))
if (abs((self.s2-self.m2) % 1) > 0.1):
raise ValueError("atom2 with spin s = %.1d cannot have m2 = %.1d"
% (self.s2, self.m2))
# ====================== J basis (not resolving mj) ===================
self.coupling = []
"""
List of matrices defineing coupling strengths between the states in
J basis (not resolving :math:`m_j` ). Basis is given by
:obj:`channel`. Used as intermediary for full interaction matrix
calculation by :obj:`defineBasis`.
"""
self.channel = []
"""
states relevant for calculation, defined in J basis (not resolving
:math:`m_j`. Used as intermediary for full interaction matrix
calculation by :obj:`defineBasis`.
"""
# ======================= Full basis (resolving mj) ===================
self.basisStates = []
"""
List of pair-states for calculation. In the form
[[n1,l1,j1,mj1,n2,l2,j2,mj2], ...].
Each state is an array [n1,l1,j1,mj1,n2,l2,j2,mj2] corresponding to
:math:`|n_1,l_1,j_1,m_{j1},n_2,l_2,j_2,m_{j2}\\rangle` state.
Calculated by :obj:`defineBasis`.
"""
self.matrixElement = []
"""
`matrixElement[i]` gives index of state in :obj:`channel` basis
(that doesn't resolve :obj:`m_j` states), for the given index `i`
of the state in :obj:`basisStates` ( :math:`m_j` resolving) basis.
"""
# variuos parts of interaction matrix in pair-state basis
self.matDiagonal = []
"""
Part of interaction matrix in pair-state basis that doesn't depend
on inter-atomic distance. E.g. diagonal elements of the interaction
matrix, that describe energies of the pair states in unperturbed
basis, will be stored here. Basis states are stored in
:obj:`basisStates`. Calculated by :obj:`defineBasis`.
"""
self.matR = []
"""
Stores interaction matrices in pair-state basis
that scale as :math:`1/R^3`, :math:`1/R^4` and :math:`1/R^5`
with distance in :obj:`matR[0]`, :obj:`matR[1]` and :obj:`matR[2]`
respectively. These matrices correspond to dipole-dipole
( :math:`C_3`), dipole-quadrupole ( :math:`C_4`) and
quadrupole-quadrupole ( :math:`C_5`) interactions
coefficients. Basis states are stored in :obj:`basisStates`.
Calculated by :obj:`defineBasis`.
"""
self.originalPairStateIndex = 0
"""
index of the original n,l,j,m1,nn,ll,jj,m2 pair-state in the
:obj:`basisStates` basis.
"""
self.matE = []
self.matB_1 = []
self.matB_2 = []
# ===================== Eigen states and plotting =====================
# finding perturbed energy levels
self.r = [] # detuning scale
self.y = [] # energy levels
self.highlight = []
# pointers towards figure
self.fig = 0
self.ax = 0
# for normalization of the maximum coupling later
self.maxCoupling = 0.
# n,l,j,mj, drive polarization q
self.drivingFromState = [0, 0, 0, 0, 0]
# sam = saved angular matrix metadata
self.angularMatrixFile = "angularMatrix.npy"
self.angularMatrixFile_meta = "angularMatrix_meta.npy"
#self.sam = []
self.savedAngularMatrix_matrix = []
# intialize precalculated values for factorial term
# in __getAngularMatrix_M
def fcoef(l1, l2, m):
return factorial(l1 + l2) / (factorial(l1 + m)
* factorial(l1 - m)
* factorial(l2 + m)
* factorial(l2 - m))**0.5
x = self.interactionsUpTo
self.fcp = np.zeros((x + 1, x + 1, 2 * x + 1))
for c1 in range(1, x + 1):
for c2 in range(1, x + 1):
for p in range(-min(c1, c2), min(c1, c2) + 1):
self.fcp[c1, c2, p + x] = fcoef(c1, c2, p)
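        # Example of the layout (added comment): for interactionsUpTo = 2 this
        # precomputes the factorial coefficients fcoef(c1, c2, p) used in the
        # polarisation sum of __getAngularMatrix_M, indexed as
        #   self.fcp[c1, c2, p + interactionsUpTo]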
self.conn = False
self.c = False
def __getAngularMatrix_M(self, l, j, ll, jj, l1, j1, l2, j2):
# did we already calculated this matrix?
self.c.execute('''SELECT ind FROM pair_angularMatrix WHERE
l1 = ? AND j1_x2 = ? AND
l2 = ? AND j2_x2 = ? AND
l3 = ? AND j3_x2 = ? AND
l4 = ? AND j4_x2 = ?
''', (l, j * 2, ll, jj * 2, l1, j1 * 2, l2, j2 * 2))
index = self.c.fetchone()
if (index):
return self.savedAngularMatrix_matrix[index[0]]
# determine coupling
dl = abs(l - l1)
dj = abs(j - j1)
c1 = 0
if dl == 1 and (dj < 1.1):
c1 = 1 # dipole coupling
elif (dl == 0 or dl == 2 or dl == 1):
c1 = 2 # quadrupole coupling
        else:
            raise ValueError("error in __getAngularMatrix_M")
dl = abs(ll - l2)
dj = abs(jj - j2)
c2 = 0
if dl == 1 and (dj < 1.1):
c2 = 1 # dipole coupling
elif (dl == 0 or dl == 2 or dl == 1):
c2 = 2 # quadrupole coupling
        else:
            raise ValueError("error in __getAngularMatrix_M")
am = np.zeros((int(round((2 * j1 + 1) * (2 * j2 + 1), 0)),
int(round((2 * j + 1) * (2 * jj + 1), 0))),
dtype=np.float64)
if (c1 > self.interactionsUpTo) or (c2 > self.interactionsUpTo):
return am
j1range = np.linspace(-j1, j1, round(2 * j1) + 1)
j2range = np.linspace(-j2, j2, round(2 * j2) + 1)
jrange = np.linspace(-j, j, int(2 * j) + 1)
jjrange = np.linspace(-jj, jj, int(2 * jj) + 1)
for m1 in j1range:
for m2 in j2range:
# we have chosen the first index
index1 = int(round(m1 * (2.0 * j2 + 1.0) + m2
+ (j1 * (2.0 * j2 + 1.0) + j2), 0))
for m in jrange:
for mm in jjrange:
# we have chosen the second index
index2 = int(round(m * (2.0 * jj + 1.0)
+ mm + (j * (2.0 * jj + 1.0) + jj),
0)
)
                        # angular matrix element from Saßmannshausen, Heiner,
                        # Merkt, Frédéric, Deiglmayr, Johannes
# PRA 92: 032505 (2015)
elem = (-1.0)**(j + jj + self.s1 + self.s2 + l1 + l2) * \
CG(l, 0, c1, 0, l1, 0) * CG(ll, 0, c2, 0, l2, 0)
elem = elem * \
sqrt((2.0 * l + 1.0) * (2.0 * ll + 1.0)) * \
sqrt((2.0 * j + 1.0) * (2.0 * jj + 1.0))
elem = elem * \
Wigner6j(l, self.s1, j, j1, c1, l1) * \
Wigner6j(ll, self.s2, jj, j2, c2, l2)
sumPol = 0.0 # sum over polarisations
limit = min(c1, c2)
for p in xrange(-limit, limit + 1):
sumPol = sumPol + \
self.fcp[c1, c2, p + self.interactionsUpTo] * \
CG(j, m, c1, p, j1, m1) *\
CG(jj, mm, c2, -p, j2, m2)
am[index1, index2] = elem * sumPol
index = len(self.savedAngularMatrix_matrix)
self.c.execute(''' INSERT INTO pair_angularMatrix
VALUES (?,?, ?,?, ?,?, ?,?, ?)''',
(l, j * 2, ll, jj * 2, l1, j1 * 2, l2, j2 * 2, index))
self.conn.commit()
self.savedAngularMatrix_matrix.append(am)
self.savedAngularMatrixChanged = True
return am
def __updateAngularMatrixElementsFile(self):
if not (self.savedAngularMatrixChanged):
return
try:
self.c.execute('''SELECT * FROM pair_angularMatrix ''')
data = []
for v in self.c.fetchall():
data.append(v)
data = np.array(data, dtype=np.float32)
data[:, 1] /= 2. # 2 r j1 -> j1
data[:, 3] /= 2. # 2 r j2 -> j2
data[:, 5] /= 2. # 2 r j3 -> j3
data[:, 7] /= 2. # 2 r j4 -> j4
fileHandle = gzip.GzipFile(
os.path.join(self.dataFolder, self.angularMatrixFile_meta),
'wb'
)
np.save(fileHandle, data)
fileHandle.close()
except IOError as e:
print("Error while updating angularMatrix \
data meta (description) File " + self.angularMatrixFile_meta)
try:
fileHandle = gzip.GzipFile(
os.path.join(self.dataFolder, self.angularMatrixFile),
'wb'
)
np.save(fileHandle, self.savedAngularMatrix_matrix)
fileHandle.close()
except IOError as e:
print("Error while updating angularMatrix \
data File " + self.angularMatrixFile)
print(e)
def __loadAngularMatrixElementsFile(self):
try:
fileHandle = gzip.GzipFile(
os.path.join(self.dataFolder, self.angularMatrixFile_meta),
'rb'
)
data = np.load(fileHandle, encoding='latin1', allow_pickle=True)
fileHandle.close()
        except Exception:
print("Note: No saved angular matrix metadata files to be loaded.")
print(sys.exc_info())
return
data[:, 1] *= 2 # j1 -> 2 r j1
data[:, 3] *= 2 # j2 -> 2 r j2
data[:, 5] *= 2 # j3 -> 2 r j3
data[:, 7] *= 2 # j4 -> 2 r j4
        data = np.array(np.rint(data), dtype=int)  # np.int was removed in NumPy 1.24
try:
self.c.executemany('''INSERT INTO pair_angularMatrix
(l1, j1_x2 ,
l2 , j2_x2 ,
l3, j3_x2,
l4 , j4_x2 ,
ind)
VALUES (?,?,?,?,?,?,?,?,?)''', data)
self.conn.commit()
except sqlite3.Error as e:
print("Error while loading precalculated values into the database!")
print(e)
exit()
if len(data) == 0:
print("error")
return
try:
fileHandle = gzip.GzipFile(
os.path.join(self.dataFolder, self.angularMatrixFile),
'rb'
)
self.savedAngularMatrix_matrix = np.load(
fileHandle,
encoding='latin1',
allow_pickle=True).tolist()
fileHandle.close()
        except Exception:
print("Note: No saved angular matrix files to be loaded.")
print(sys.exc_info())
def __isCoupled(self, n, l, j, nn, ll, jj, n1, l1, j1, n2, l2, j2, limit):
if ((abs(self.__getEnergyDefect(n, l, j,
nn, ll, jj,
n1, l1, j1,
n2, l2, j2)
) / C_h < limit)
and not (n == n1 and nn == n2
and l == l1 and ll == l2
and j == j1 and jj == j2)
and not ((abs(l1 - l) != 1
and( (abs(j - 0.5) < 0.1
and abs(j1 - 0.5) < 0.1) # j = 1/2 and j'=1/2 forbidden
or
(abs(j) < 0.1
and abs(j1 - 1) < 0.1) # j = 0 and j'=1 forbidden
or
(abs(j-1) < 0.1
and abs(j1) < 0.1) # j = 1 and j'=0 forbidden
)
)
or (abs(l2 - ll) != 1
and( (abs(jj - 0.5) < 0.1
and abs(j2 - 0.5) < 0.1) # j = 1/2 and j'=1/2 forbidden
or
(abs(jj) < 0.1
and abs(j2 - 1) < 0.1) # j = 0 and j'=1 forbidden
or
(abs(jj-1) < 0.1
and abs(j2) < 0.1) # j = 1 and j'=0 forbidden
)
)
)
            and not(abs(j) < 0.1 and abs(j1) < 0.1)  # j = 0 and j' = 0 forbidden
            and not (abs(jj) < 0.1 and abs(j2) < 0.1)
            and not (abs(l) < 0.1 and abs(l1) < 0.1)  # l = 0 and l' = 0 forbidden
            and not (abs(ll) < 0.1 and abs(l2) < 0.1)
):
# determine coupling
dl = abs(l - l1)
dj = abs(j - j1)
c1 = 0
if dl == 1 and (dj < 1.1):
c1 = 1 # dipole coupling
elif (dl == 0 or dl == 2 or dl == 1)and (dj < 2.1) and \
(2 <= self.interactionsUpTo):
c1 = 2 # quadrupole coupling
else:
return False
dl = abs(ll - l2)
dj = abs(jj - j2)
c2 = 0
if dl == 1 and (dj < 1.1):
c2 = 1 # dipole coupling
elif (dl == 0 or dl == 2 or dl == 1) and (dj < 2.1) and \
(2 <= self.interactionsUpTo):
c2 = 2 # quadrupole coupling
else:
return False
return c1 + c2
else:
return False
def __getEnergyDefect(self,
n, l, j,
nn, ll, jj,
n1, l1, j1,
n2, l2, j2):
"""
Energy defect between |n,l,j>x|nn,ll,jj> state and |n1,l1,j1>x|n1,l1,j1>
state of atom1 and atom2 in respective spins states s1 and s2
Takes spin vales s1 and s2 as the one defined when defining calculation.
Args:
n (int): principal quantum number
l (int): orbital angular momentum
j (float): total angular momentum
nn (int): principal quantum number
ll (int): orbital angular momentum
jj (float): total angular momentum
n1 (int): principal quantum number
l1 (int): orbital angular momentum
j1 (float): total angular momentum
n2 (int): principal quantum number
l2 (int): orbital angular momentum
j2 (float): total angular momentum
Returns:
float: energy defect (SI units: J)
"""
return C_e * (self.atom1.getEnergy(n1, l1, j1, s=self.s1)
+ self.atom2.getEnergy(n2, l2, j2, s=self.s2)
- self.atom1.getEnergy(n, l, j, s=self.s1)
- self.atom2.getEnergy(nn, ll, jj, s=self.s2))
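    # Unit convention used throughout this class (added comment):
    # __getEnergyDefect returns joules; callers convert to frequency units via
    # Planck's constant, e.g.
    #   ed_GHz = self.__getEnergyDefect(n, l, j, nn, ll, jj,
    #                                   n1, l1, j1, n2, l2, j2) / C_h * 1e-9
    # which is exactly the pattern used in __makeRawMatrix2 below.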
def __makeRawMatrix2(self,
nn, ll, jj,
k, lrange, limit, limitBasisToMj,
progressOutput=False, debugOutput=False):
n = nn
l = ll
j = jj
# limit = limit in Hz on energy defect
# k defines range of n' = [n-k, n+k]
dimension = 0
# which states/channels contribute significantly in the second order perturbation?
states = []
# original pairstate index
opi = 0
        # these numbers are conserved if we use only dipole-dipole interactions
Lmod2 = ((l + ll) % 2)
l1start = l - 1
if l == 0:
l1start = 0
l2start = ll - 1
if ll == 0:
l2start = 0
if debugOutput:
print("\n ======= Relevant states =======\n")
for n1 in xrange(max(n - k, 1), n + k + 1):
for n2 in xrange(max(nn - k, 1), nn + k + 1):
l1max = max(l + self.interactionsUpTo, lrange) + 1
l1max = min(l1max, n1 - 1)
for l1 in xrange(l1start, l1max):
l2max = max(ll + self.interactionsUpTo, lrange) + 1
l2max = min(l2max, n2 - 1)
for l2 in xrange(l2start, l2max):
j1 = l1 - self.s1
while j1 < -0.1:
j1 += 2 * self.s1
while j1 <= l1 + self.s1 + 0.1:
j2 = l2 - self.s2
while j2 < -0.1:
j2 += 2 * self.s2
while j2 <= l2 + self.s2 + 0.1:
ed = self.__getEnergyDefect(n, l, j,
nn, ll, jj,
n1, l1, j1,
n2, l2, j2) / C_h
if (abs(ed) < limit
and (not (self.interactionsUpTo == 1)
or (Lmod2 == ((l1 + l2) % 2)))
and ((not limitBasisToMj)
or (j1 + j2 + 0.1
> self.m1 + self.m2))
and (n1 >= self.atom1.groundStateN
or [n1, l1, j1] in self.atom1.extraLevels)
and (n2 >= self.atom2.groundStateN
or [n2, l2, j2] in self.atom2.extraLevels)
):
if debugOutput:
pairState = (
"|"
+ printStateString(n1, l1, j1,
s=self.s1)
+ ","
+ printStateString(n2, l2, j2,
s=self.s2)
+ ">")
print(
pairState
+ ("\t EnergyDefect = %.3f GHz"
% (ed * 1.e-9)
)
)
states.append([n1, l1, j1, n2, l2, j2])
if (n == n1 and nn == n2
and l == l1 and ll == l2
and j == j1 and jj == j2
):
opi = dimension
dimension = dimension + 1
j2 = j2 + 1.0
j1 = j1 + 1.0
if debugOutput:
print("\tMatrix dimension\t=\t", dimension)
m = np.zeros((dimension, dimension), dtype=np.float64)
        # mat_value, mat_row, mat_column for each sparse matrix describing
# dipole-dipole, dipole-quadrupole (and quad-dipole) and quadrupole-quadrupole
couplingMatConstructor = [[[], [], []]
for i in xrange(2 * self.interactionsUpTo - 1)]
# original pair-state (i.e. target pair state) Zeeman Shift
opZeemanShift = (self.atom1.getZeemanEnergyShift(
self.l, self.j, self.m1,
self.Bz,
s=self.s1)
+ self.atom2.getZeemanEnergyShift(
self.ll, self.jj, self.m2,
self.Bz,
s=self.s2)
) / C_h * 1.0e-9 # in GHz
if debugOutput:
print("\n ======= Coupling strengths (radial part only) =======\n")
maxCoupling = "quadrupole-quadrupole"
if (self.interactionsUpTo == 1):
maxCoupling = "dipole-dipole"
if debugOutput:
print("Calculating coupling (up to ",
maxCoupling, ") between the pair states")
for i in xrange(dimension):
ed = self.__getEnergyDefect(
states[opi][0], states[opi][1], states[opi][2],
states[opi][3], states[opi][4], states[opi][5],
states[i][0], states[i][1], states[i][2],
states[i][3], states[i][4], states[i][5]) / C_h * 1.0e-9\
- opZeemanShift
pairState1 = (
"|"
+ printStateString(states[i][0], states[i][1], states[i][2],
s=self.s1)
+ ","
+ printStateString(states[i][3], states[i][4], states[i][5],
s=self.s2)
+ ">"
)
states[i].append(ed) # energy defect of given state
for j in xrange(i + 1, dimension):
coupled = self.__isCoupled(
states[i][0], states[i][1], states[i][2],
states[i][3], states[i][4], states[i][5],
states[j][0], states[j][1], states[j][2],
states[j][3], states[j][4], states[j][5], limit)
if coupled and (abs(states[i][0] - states[j][0]) <= k
and abs(states[i][3] - states[j][3]) <= k):
if debugOutput:
pairState2 = ("|"
+ printStateString(states[j][0],
states[j][1],
states[j][2],
s=self.s1)
+ ","
+ printStateString(states[j][3],
states[j][4],
states[j][5],
s=self.s2)
+ ">")
print(pairState1 + " <---> " + pairState2)
                    couplingStrength = _atomLightAtomCoupling(
                        states[i][0], states[i][1], states[i][2],
                        states[i][3], states[i][4], states[i][5],
                        states[j][0], states[j][1], states[j][2],
                        states[j][3], states[j][4], states[j][5],
                        self.atom1, atom2=self.atom2,
                        s=self.s1, s2=self.s2) / C_h * 1.0e-9
                    couplingMatConstructor[coupled - 2][0].append(
                        couplingStrength)
couplingMatConstructor[coupled - 2][1].append(i)
couplingMatConstructor[coupled - 2][2].append(j)
exponent = coupled + 1
if debugOutput:
print(("\tcoupling (C_%d/R^%d) = %.5f"
% (exponent, exponent,
                                      couplingStrength * (1e6)**(exponent))),
"/R^", exponent, " GHz (mu m)^", exponent, "\n"
)
# coupling = [1,1] dipole-dipole, [2,1] quadrupole dipole, [2,2] quadrupole quadrupole
couplingMatArray = [
csr_matrix(
(couplingMatConstructor[i][0],
(couplingMatConstructor[i][1], couplingMatConstructor[i][2])
),
shape=(dimension, dimension)
)
for i in xrange(len(couplingMatConstructor))
]
return states, couplingMatArray
def __initializeDatabaseForMemoization(self):
# memoization of angular parts
self.conn = sqlite3.connect(os.path.join(self.dataFolder,
"precalculated_pair.db"))
self.c = self.conn.cursor()
# ANGULAR PARTS
self.c.execute('''DROP TABLE IF EXISTS pair_angularMatrix''')
self.c.execute('''SELECT COUNT(*) FROM sqlite_master
WHERE type='table' AND name='pair_angularMatrix';''')
if (self.c.fetchone()[0] == 0):
# create table
try:
self.c.execute('''CREATE TABLE IF NOT EXISTS pair_angularMatrix
(l1 TINYINT UNSIGNED, j1_x2 TINYINT UNSIGNED,
l2 TINYINT UNSIGNED, j2_x2 TINYINT UNSIGNED,
l3 TINYINT UNSIGNED, j3_x2 TINYINT UNSIGNED,
l4 TINYINT UNSIGNED, j4_x2 TINYINT UNSIGNED,
ind INTEGER,
PRIMARY KEY (l1,j1_x2, l2,j2_x2, l3,j3_x2, l4,j4_x2)
) ''')
except sqlite3.Error as e:
print(e)
self.conn.commit()
self.__loadAngularMatrixElementsFile()
self.savedAngularMatrixChanged = False
def __closeDatabaseForMemoization(self):
self.conn.commit()
self.conn.close()
self.conn = False
self.c = False
def getLeRoyRadius(self):
"""
Returns Le Roy radius for initial pair-state.
Le Roy radius [#leroy]_ is defined as
:math:`2(\\langle r_1^2 \\rangle^{1/2} + \\langle r_2^2 \\rangle^{1/2})`,
where :math:`r_1` and :math:`r_2` are electron coordinates for the
first and the second atom in the initial pair-state.
Below this radius, calculations are not valid since electron
wavefunctions start to overlap.
Returns:
float: LeRoy radius measured in :math:`\\mu m`
References:
            .. [#leroy] R. J. Le Roy, Can. J. Phys. **52**, 246 (1974)
http://www.nrcresearchpress.com/doi/abs/10.1139/p74-035
"""
step = 0.001
r1, psi1_r1 = self.atom2.radialWavefunction(
self.ll, 0.5, self.jj,
self.atom2.getEnergy(self.nn, self.ll, self.jj, s=self.s2) / 27.211,
self.atom2.alphaC**(1 / 3.0),
2.0 * self.nn * (self.nn + 15.0), step)
        sqrt_r1_on2 = np.trapz(np.multiply(np.multiply(psi1_r1, psi1_r1),
                                           np.multiply(r1, r1)),
                               x=r1)
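        # Hedged sketch (an assumption, following the standard ARC
        # getLeRoyRadius pattern) of how the method concludes; which state the
        # second expectation value is taken in is not recoverable here:
        #     r2, psi2_r2 = self.atom1.radialWavefunction(
        #         self.l, 0.5, self.j,
        #         self.atom1.getEnergy(self.n, self.l, self.j, s=self.s1) / 27.211,
        #         self.atom1.alphaC**(1 / 3.0),
        #         2.0 * self.n * (self.n + 15.0), step)
        #     sqrt_r2_on2 = np.trapz(np.multiply(np.multiply(psi2_r2, psi2_r2),
        #                                        np.multiply(r2, r2)), x=r2)
        #     return (2. * (sqrt(sqrt_r1_on2) + sqrt(sqrt_r2_on2))
        #             * (physical_constants["Bohr radius"][0] * 1.e6))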
import fast_bss_eval
import numpy as np
import pytest
import torch
def _random_input_pairs(
nbatch,
nchan,
nsamples,
is_fp32=False,
is_torch=False,
rand_perm=True,
):
assert nsamples >= 2 * nchan
# we create a lot of orthogonal signals
signals = np.random.randn(nbatch, 2 * nchan, nsamples)
cov = np.einsum("...cn,...dn->...cd", signals, signals) / nsamples
ev, evec = np.linalg.eigh(cov)
orth_signals = np.einsum(
"...cd,...dn->...cn", evec.swapaxes(-2, -1) / np.sqrt(ev[..., None]), signals
)
cov_test = np.einsum("...cn, ...dn->...cd", orth_signals, orth_signals) / nsamples
assert np.max(np.abs(cov_test - np.eye(2 * nchan))) < 1e-5
ref = orth_signals[..., :nchan, :]
noise = orth_signals[..., nchan:, :]
# mixing coefficients
# make sure the diagonal of alpha is larger than off-diag coeffecients
alpha = np.random.rand(nbatch, nchan, nchan) - 0.5 + 2.0 * np.eye(nchan)
alpha2 = alpha**2
beta = np.random.randn(nbatch, nchan)
beta2 = beta**2
alpha2_diag = np.diagonal(alpha2, axis1=-2, axis2=-1)
alpha2_sum = alpha2.sum(axis=-1)
SDR = 10.0 * np.log10(alpha2_diag / (alpha2_sum - alpha2_diag + beta2))
SIR = 10.0 * np.log10(alpha2_diag / (alpha2_sum - alpha2_diag))
SAR = 10.0 * np.log10(alpha2_sum / beta2)
est = np.einsum("...cd,...dn->...cn", alpha, ref) + beta[..., None] * noise
# add a random permutation
if rand_perm:
perm = np.random.permutation(nchan)
else:
perm = np.arange(nchan)
est = est[..., perm, :]
inv_perm = np.empty(perm.size, dtype=perm.dtype)
    for i in np.arange(perm.size):
        inv_perm[perm[i]] = i
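# Hedged usage sketch (an addition, not part of the original file): the helper
# above is typically consumed by comparing the analytic SDR/SIR/SAR with the
# values returned by fast_bss_eval.bss_eval_sources.
def _sketch_usage():
    ref = np.random.randn(1, 2, 4000)
    est = ref + 0.1 * np.random.randn(1, 2, 4000)
    sdr, sir, sar, perm = fast_bss_eval.bss_eval_sources(ref, est)
    assert sdr.shape == (1, 2)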
##############################################################################
#
# Copyright (c) 2003-2020 by The University of Queensland
# http://www.uq.edu.au
#
# Primary Business: Queensland, Australia
# Licensed under the Apache License, version 2.0
# http://www.apache.org/licenses/LICENSE-2.0
#
# Development until 2012 by Earth Systems Science Computational Center (ESSCC)
# Development 2012-2013 by School of Earth Sciences
# Development from 2014 by Centre for Geoscience Computing (GeoComp)
# Development from 2019 by School of Earth and Environmental Sciences
#
##############################################################################
from __future__ import print_function, division
__copyright__="""Copyright (c) 2003-2020 by The University of Queensland
http://www.uq.edu.au
Primary Business: Queensland, Australia"""
__license__="""Licensed under the Apache License, version 2.0
http://www.apache.org/licenses/LICENSE-2.0"""
__url__="https://launchpad.net/escript-finley"
__all__ = ['SplitRegularization']
import logging
import numpy as np
from .coordinates import makeTransformation
from .costfunctions import CostFunction
import esys.escript as escript
import esys.escript.linearPDEs as linearPDEs
import esys.escript.pdetools as pdetools
class SplitRegularization(CostFunction):
"""
The regularization term for the level set function ``m`` within the cost
function J for an inversion:
    *J(m)=1/2 * sum_k integrate( mu[k] * ( w0[k] * m_k**2 + w1[k,i] * m_{k,i}**2 ) ) + sum_l<k mu_c[l,k] wc[l,k] * | curl(m_k) x curl(m_l) |^2*
where w0[k], w1[k,i] and wc[k,l] are non-negative weighting factors and
mu[k] and mu_c[l,k] are trade-off factors which may be altered
during the inversion. The weighting factors are normalized such that their
integrals over the domain are constant:
*integrate(w0[k] + inner(w1[k,:],1/L[:]**2))=scale[k]* volume(domain)*
*integrate(wc[l,k]*1/L**4)=scale_c[k]* volume(domain) *
"""
def __init__(self, domain, numLevelSets=1,
w0=None, w1=None, wc=None,
location_of_set_m=escript.Data(),
useDiagonalHessianApproximation=False, tol=1e-8,
coordinates=None,
scale=None, scale_c=None):
"""
initialization.
:param domain: domain
:type domain: `Domain`
:param numLevelSets: number of level sets
:type numLevelSets: ``int``
:param w0: weighting factor for the m**2 term. If not set zero is assumed.
:type w0: ``Scalar`` if ``numLevelSets`` == 1 or `Data` object of shape
(``numLevelSets`` ,) if ``numLevelSets`` > 1
:param w1: weighting factor for the grad(m_i) terms. If not set zero is assumed
:type w1: ``Vector`` if ``numLevelSets`` == 1 or `Data` object of shape
(``numLevelSets`` , DIM) if ``numLevelSets`` > 1
:param wc: weighting factor for the cross gradient terms. If not set
zero is assumed. Used for the case if ``numLevelSets`` > 1
only. Only values ``wc[l,k]`` in the lower triangle (l<k)
are used.
:type wc: `Data` object of shape (``numLevelSets`` , ``numLevelSets``)
:param location_of_set_m: marks location of zero values of the level
set function ``m`` by a positive entry.
:type location_of_set_m: ``Scalar`` if ``numLevelSets`` == 1 or `Data`
object of shape (``numLevelSets`` ,) if ``numLevelSets`` > 1
:param useDiagonalHessianApproximation: if True cross gradient terms
between level set components are ignored when calculating
approximations of the inverse of the Hessian Operator.
This can speed-up the calculation of the inverse but may
lead to an increase of the number of iteration steps in the
inversion.
:type useDiagonalHessianApproximation: ``bool``
:param tol: tolerance when solving the PDE for the inverse of the
Hessian Operator
:type tol: positive ``float``
:param coordinates: defines coordinate system to be used
:type coordinates: ReferenceSystem` or `SpatialCoordinateTransformation`
:param scale: weighting factor for level set function variation terms.
If not set one is used.
:type scale: ``Scalar`` if ``numLevelSets`` == 1 or `Data` object of
shape (``numLevelSets`` ,) if ``numLevelSets`` > 1
:param scale_c: scale for the cross gradient terms. If not set
one is assumed. Used for the case if ``numLevelSets`` > 1
only. Only values ``scale_c[l,k]`` in the lower triangle
(l<k) are used.
:type scale_c: `Data` object of shape (``numLevelSets``,``numLevelSets``)
"""
if w0 is None and w1 is None:
raise ValueError("Values for w0 or for w1 must be given.")
if wc is None and numLevelSets>1:
raise ValueError("Values for wc must be given.")
self.__pre_input = None
self.__pre_args = None
self.logger = logging.getLogger('inv.%s'%self.__class__.__name__)
self.__domain=domain
DIM=self.__domain.getDim()
self.__numLevelSets=numLevelSets
self.__trafo=makeTransformation(domain, coordinates)
self.__pde=linearPDEs.LinearPDE(self.__domain, numEquations=self.__numLevelSets, numSolutions=self.__numLevelSets)
self.__pde.getSolverOptions().setTolerance(tol)
self.__pde.setSymmetryOn()
self.__pde.setValue(A=self.__pde.createCoefficient('A'), D=self.__pde.createCoefficient('D'), )
try:
self.__pde.setValue(q=location_of_set_m)
except linearPDEs.IllegalCoefficientValue:
raise ValueError("Unable to set location of fixed level set function.")
# =========== check the shape of the scales: ========================
if scale is None:
if numLevelSets == 1 :
scale = 1.
else:
scale = np.ones((numLevelSets,))
else:
scale=np.asarray(scale)
if numLevelSets == 1:
if scale.shape == ():
if not scale > 0 :
raise ValueError("Value for scale must be positive.")
else:
raise ValueError("Unexpected shape %s for scale."%scale.shape)
else:
            if scale.shape == (numLevelSets,):
if not min(scale) > 0:
raise ValueError("All values for scale must be positive.")
else:
raise ValueError("Unexpected shape %s for scale."%scale.shape)
if scale_c is None or numLevelSets < 2:
scale_c = np.ones((numLevelSets,numLevelSets))
else:
scale_c=np.asarray(scale_c)
if scale_c.shape == (numLevelSets,numLevelSets):
                if not all(scale_c[l, k] > 0. for k in range(1, numLevelSets) for l in range(k)):
                    raise ValueError("All values in the lower triangle of scale_c must be positive.")
else:
raise ValueError("Unexpected shape %s for scale."%scale_c.shape)
# ===== check the shape of the weights: =============================
if w0 is not None:
w0 = escript.interpolate(w0,self.__pde.getFunctionSpaceForCoefficient('D'))
s0=w0.getShape()
if numLevelSets == 1:
if not s0 == () :
raise ValueError("Unexpected shape %s for weight w0."%(s0,))
else:
if not s0 == (numLevelSets,):
raise ValueError("Unexpected shape %s for weight w0."%(s0,))
if not self.__trafo.isCartesian():
w0*=self.__trafo.getVolumeFactor()
if not w1 is None:
w1 = escript.interpolate(w1,self.__pde.getFunctionSpaceForCoefficient('A'))
s1=w1.getShape()
if numLevelSets == 1 :
if not s1 == (DIM,) :
raise ValueError("Unexpected shape %s for weight w1."%(s1,))
else:
if not s1 == (numLevelSets,DIM):
raise ValueError("Unexpected shape %s for weight w1."%(s1,))
if not self.__trafo.isCartesian():
f=self.__trafo.getScalingFactors()**2*self.__trafo.getVolumeFactor()
if numLevelSets == 1:
w1*=f
else:
for i in range(numLevelSets): w1[i,:]*=f
if numLevelSets == 1:
wc=None
else:
wc = escript.interpolate(wc,self.__pde.getFunctionSpaceForCoefficient('A'))
sc=wc.getShape()
if not sc == (numLevelSets, numLevelSets):
raise ValueError("Unexpected shape %s for weight wc."%(sc,))
if not self.__trafo.isCartesian():
raise ValueError("Non-cartesian coordinates for cross-gradient term is not supported yet.")
# ============= now we rescale weights: =============================
L2s=np.asarray(escript.boundingBoxEdgeLengths(domain))**2
L4=1/np.sum(1/L2s)**2
if numLevelSets == 1:
A=0
if w0 is not None:
A = escript.integrate(w0)
if w1 is not None:
                A += escript.integrate(escript.inner(w1, 1/L2s))
if A > 0:
f = scale/A
if w0 is not None:
w0*=f
if w1 is not None:
w1*=f
else:
raise ValueError("Non-positive weighting factor detected.")
else: # numLevelSets > 1
for k in range(numLevelSets):
A=0
if w0 is not None:
A = escript.integrate(w0[k])
if w1 is not None:
                    A += escript.integrate(escript.inner(w1[k,:], 1/L2s))
if A > 0:
f = scale[k]/A
if w0 is not None:
w0[k]*=f
if w1 is not None:
w1[k,:]*=f
else:
raise ValueError("Non-positive weighting factor for level set component %d detected."%k)
# and now the cross-gradient:
if wc is not None:
for l in range(k):
A = escript.integrate(wc[l,k])/L4
if A > 0:
f = scale_c[l,k]/A
wc[l,k]*=f
# else:
# raise ValueError("Non-positive weighting factor for cross-gradient level set components %d and %d detected."%(l,k))
self.__w0=w0
self.__w1=w1
self.__wc=wc
self.__pde_is_set=False
if self.__numLevelSets > 1:
self.__useDiagonalHessianApproximation=useDiagonalHessianApproximation
else:
self.__useDiagonalHessianApproximation=True
self._update_Hessian=True
self.__num_tradeoff_factors=numLevelSets+((numLevelSets-1)*numLevelSets)//2
self.setTradeOffFactors()
self.__vol_d=escript.vol(self.__domain)
def getDomain(self):
"""
returns the domain of the regularization term
:rtype: ``Domain``
"""
return self.__domain
def getCoordinateTransformation(self):
"""
returns the coordinate transformation being used
:rtype: `CoordinateTransformation`
"""
return self.__trafo
def getNumLevelSets(self):
"""
returns the number of level set functions
:rtype: ``int``
"""
return self.__numLevelSets
def getPDE(self):
"""
returns the linear PDE to be solved for the Hessian Operator inverse
:rtype: `linearPDEs.LinearPDE`
"""
return self.__pde
def getDualProduct(self, m, r):
"""
returns the dual product of a gradient represented by X=r[1] and Y=r[0]
with a level set function m:
*Y_i*m_i + X_ij*m_{i,j}*
:type m: `Data`
:type r: `ArithmeticTuple`
:rtype: ``float``
"""
A=0
        if not r[0].isEmpty(): A += escript.integrate(escript.inner(r[0], m))
        if not r[1].isEmpty(): A += escript.integrate(escript.inner(r[1], escript.grad(m)))
return A
def getNumTradeOffFactors(self):
"""
returns the number of trade-off factors being used.
:rtype: ``int``
"""
return self.__num_tradeoff_factors
def setTradeOffFactors(self, mu=None):
"""
sets the trade-off factors for the level-set variation and the
cross-gradient.
:param mu: new values for the trade-off factors where values
mu[:numLevelSets] are the trade-off factors for the
level-set variation and the remaining values for
the cross-gradient part with
mu_c[l,k]=mu[numLevelSets+l+((k-1)*k)/2] (l<k).
If no values for mu are given ones are used.
Values must be positive.
:type mu: ``list`` of ``float`` or ```numpy.array```
"""
numLS=self.getNumLevelSets()
numTF=self.getNumTradeOffFactors()
if mu is None:
mu = np.ones((numTF,))
else:
mu = np.asarray(mu)
if mu.shape == (numTF,):
self.setTradeOffFactorsForVariation(mu[:numLS])
mu_c2=np.zeros((numLS,numLS))
for k in range(numLS):
for l in range(k):
mu_c2[l,k] = mu[numLS+l+((k-1)*k)//2]
self.setTradeOffFactorsForCrossGradient(mu_c2)
elif mu.shape == () and numLS ==1:
self.setTradeOffFactorsForVariation(mu)
else:
raise ValueError("Unexpected shape %s for mu."%(mu.shape,))
def setTradeOffFactorsForVariation(self, mu=None):
"""
sets the trade-off factors for the level-set variation part.
:param mu: new values for the trade-off factors. Values must be positive.
:type mu: ``float``, ``list`` of ``float`` or ```numpy.array```
"""
numLS=self.getNumLevelSets()
if mu is None:
if numLS == 1:
mu = 1.
else:
mu = np.ones((numLS,))
if type(mu) == list:
            # This is a fix for older versions of numpy where passing in a
            # list of ints causes this code to break.
mu=np.asarray([float(i) for i in mu])
else:
mu=np.asarray(mu)
if numLS == 1:
if mu.shape == (1,): mu=mu[0]
if mu.shape == ():
if mu > 0:
self.__mu= mu
self._new_mu=True
else:
raise ValueError("Value for trade-off factor must be positive.")
else:
raise ValueError("Unexpected shape %s for mu."%str(mu.shape))
else:
if mu.shape == (numLS,):
if min(mu) > 0:
self.__mu= mu
self._new_mu=True
else:
raise ValueError("All values for mu must be positive.")
else:
raise ValueError("Unexpected shape %s for trade-off factor."%str(mu.shape))
def setTradeOffFactorsForCrossGradient(self, mu_c=None):
"""
sets the trade-off factors for the cross-gradient terms.
:param mu_c: new values for the trade-off factors for the cross-gradient
terms. Values must be positive. If no value is given ones
are used. Only value mu_c[l,k] for l<k are used.
:type mu_c: ``float``, ``list`` of ``float`` or ``numpy.array``
"""
numLS=self.getNumLevelSets()
if mu_c is None or numLS < 2:
self.__mu_c = np.ones((numLS,numLS))
elif isinstance(mu_c, float) or isinstance(mu_c, int):
            self.__mu_c = np.zeros((numLS, numLS))
            self.__mu_c[:, :] = mu_c  # broadcast a scalar trade-off factor to all pairs
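    # Hedged usage sketch (an addition; assumes a working esys.escript
    # installation and an existing 2D `domain` object):
    #     reg = SplitRegularization(domain, numLevelSets=1,
    #                               w1=[1., 1.], tol=1e-8)
    #     reg.setTradeOffFactorsForVariation(10.)
    #     reg.setTradeOffFactorsForCrossGradient()  # identity for one level set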
from typing import Tuple
import numpy as np
import pandas as pd
from scipy import interpolate
_YOUNG_MODULES = {
'concrete': 2.05e4,
'steel': 2.05e5
}
def get_results(mode, condition, bottom_condition, material, diameter, length, level, force, soil_data, div_num) -> dict:
diameter = float(diameter) # mm
length = float(length) * 1e3 # to mm
level = float(level) * 1e3 # to mm
force = float(force) * 1e3 # to N
div_num = int(div_num)
div_size = length / div_num
x = np.linspace(-level, length - level, div_num + 1)
kh0s = kh_by_soil_data(diameter, x, soil_data)
stiffness = pile_stiffness(diameter, diameter, 0, material)
k0 = top_condition_to_stiffness(mode, condition, bottom_condition, div_num, div_size, stiffness, diameter, kh0s, force)
y, dec = solve_y(mode, bottom_condition, div_num, div_size, stiffness, diameter, kh0s, k0, force)
t = np.gradient(y, div_size) # solve degree
m = -np.gradient(t, div_size) * stiffness # solve moment
q = np.gradient(m, div_size) # solve shear
q[2] = -force
return dict(
x=x / 1e3, # m
dec=dec,
        kh0s=kh0s * 1e6,  # kN/m3 (modulus of subgrade reaction before reduction)
y=y[2:-2], # mm
t=t[2:-2] * 1e3, # x10^3 rad
m=m[2:-2] / 1e6, # kNm
q=q[2:-2] / 1e3 # kN
)
def kh_by_soil_data(diameter: float, x: np.ndarray, soil_data: dict):
depth = np.array(soil_data.get('depth')) * 1e3
alpha = np.array(soil_data.get('alpha'))
beta = np.array(soil_data.get('adopted_reductions'))
E0 = np.array(soil_data.get('E0'))
kh = alpha * beta * E0 * (diameter / 10) ** (-3 / 4)
    fitted = interpolate.interp1d(depth, kh)  # linear interpolation
fitted_kh = fitted(x)
return fitted_kh / 1e6 # N/mm2
def top_condition_to_stiffness(mode, condition, bottom_condition, div_num, div_size, stiffness, diameter, kh0s, force) -> float:
condition = float(condition)
if condition == 1.0:
k0 = np.inf
elif condition == 0.0:
k0 = 10e-15
else:
k0 = half_condition_solver(mode, bottom_condition, condition, div_num, div_size, stiffness, diameter, kh0s, force)
return k0
def half_condition_solver(mode, bottom_condition, condition, div_num, div_size, stiffness, diameter, kh0s, force):
y_at_fix, _ = solve_y(mode, bottom_condition, div_num, div_size, stiffness, diameter, kh0s, np.inf, force)
y_at_pin, _ = solve_y(mode, bottom_condition, div_num, div_size, stiffness, diameter, kh0s, 10e-15, force)
moment_at_fix = -np.gradient(np.gradient(y_at_fix, div_size), div_size)[2] * stiffness
degree_at_pin = np.gradient(y_at_pin, div_size)[2]
condition = float(condition)
half_moment = moment_at_fix * condition
half_degree = degree_at_pin * (1 - condition)
k0 = half_moment / half_degree
err = 1.1e6
while err > 1.0e6:
y, _ = solve_y(mode, bottom_condition, div_num, div_size, stiffness, diameter, kh0s, k0, force)
m = -np.gradient(np.gradient(y, div_size), div_size)[2] * stiffness
err = m - half_moment
k0 = k0 * half_moment / m
return k0
def pile_stiffness(diameter: float, thickness: float, thickness_margin: float, material: str) -> float:
sec1 = np.pi * (diameter - thickness_margin * 2) ** 4 / 64
sec2 = np.pi * (diameter - thickness) ** 4 / 64
return (sec1 - sec2) * _YOUNG_MODULES.get(material)
def solve_y(mode, bottom_condition, div_num, div_size, stiffness, diameter, kh0s, k0, force):
if mode == 'liner':
y, dec = deformation_analysis_by_FDM(bottom_condition, div_num, div_size, stiffness, diameter, kh0s, k0, force)
elif mode == 'non_liner':
y, dec = deformation_analysis_by_non_liner_FDM(bottom_condition, div_num, div_size, stiffness, diameter, kh0s, k0, force)
elif mode == 'non_liner_single':
y, dec = deformation_analysis_by_non_liner_single_FDM(bottom_condition, div_num, div_size, stiffness, diameter, kh0s, k0, force)
else:
y, dec = deformation_analysis_by_FDM(bottom_condition, div_num, div_size, stiffness, diameter, kh0s, k0, force)
return y, dec
def deformation_analysis_by_FDM(bottom_condition, div_num: int, div_size: float, stiffness: float, diameter: float, khs: np.ndarray, k0: float, force: float) -> Tuple[np.ndarray, np.ndarray]:
def _left_matrix(bottom_condition, n, h, ei, b, khs, k0):
left = np.zeros((n + 5, n + 5))
# head row
left[0, 0:5] = [-1, 2, 0, -2, 1]
left[1, 0:5] = [0, ei / k0 - h, -2 * ei / k0, ei / k0 + h, 0]
# general row
c1s = 6 + h ** 4 * khs * b / ei
for i in range(2, n + 3):
left[i, i - 2:i + 3] = [1, -4, c1s[i - 2], -4, 1]
# tail row
if bottom_condition == "free":
left[-1, -5:] = [-1, 2, 0, -2, 1]
left[-2, -5:] = [0, 1, -2, 1, 0]
elif bottom_condition == "pin":
left[-1, -5:] = [1, 0, 1, 0, 1]
left[-2, -5:] = [0, 1, 1, 1, 0]
else:
left[-1, -5:] = [1, 0, 1, 0, 1]
left[-2, -5:] = [0, 1, 1, 1, 0]
return left
def _right_matrix(n, h, ei, force):
right = np.zeros(n + 5)
right[0] = -2 * force * h ** 3 / ei
return right
left = _left_matrix(bottom_condition, div_num, div_size, stiffness, diameter, khs, k0)
right = _right_matrix(div_num, div_size, stiffness, force)
    y = -np.linalg.solve(left, right)  # solve the simultaneous equations
    dec = np.ones_like(y)  # reduction factor (1.0 for the linear calculation)
return y, dec
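# Note on the scheme above (added comment): the interior stencil
# [1, -4, 6 + h**4*kh*b/EI, -4, 1] is the central finite-difference form of
# the beam-on-elastic-foundation equation EI*y'''' + kh*b*y = 0; the two
# extra rows/columns at each end are fictitious nodes that carry the head
# and tip boundary conditions.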
def deformation_analysis_by_non_liner_FDM(bottom_condition, div_num: int, div_size: float, stiffness: float, diameter: float, khs: np.ndarray, k0: float, force: float) -> Tuple[np.ndarray, np.ndarray]:
dec_khs = khs
    y, _ = deformation_analysis_by_FDM(bottom_condition, div_num, div_size, stiffness, diameter, khs, k0, force)  # initial value
err = np.ones_like(khs)
dec = err
    while np.any(err > 0.1):
        # Assumed reduction rule (the original expression is garbled in the
        # source): reduce kh by (|y| / 10 mm)^(-1/2) once the displacement
        # exceeds 10 mm, a common non-linear subgrade-reaction model.
        dec = np.where(np.abs(y[2:-2]) > 10.0,
                       (np.abs(y[2:-2]) / 10.0) ** (-0.5), 1.0)
        y_new, _ = deformation_analysis_by_FDM(bottom_condition, div_num,
                                               div_size, stiffness, diameter,
                                               khs * dec, k0, force)
        err = np.abs(y_new - y)[2:-2]
        y = y_new
    return y, dec
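# Hedged usage sketch (illustrative values only, not from the original
# script; soil_data keys follow kh_by_soil_data above):
#     soil_data = dict(depth=[0, 5, 10, 20], alpha=[60, 60, 80, 80],
#                      adopted_reductions=[1.0, 1.0, 1.0, 1.0],
#                      E0=[2800, 2800, 7000, 7000])
#     res = get_results('liner', 1.0, 'free', 'concrete', diameter=1000,
#                       length=20, level=0, force=500,
#                       soil_data=soil_data, div_num=100)
#     print(res['y'][0])  # pile-head displacement in mm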
## Created 11 Nov 2019
# Estimating the location in the Mediterranean where floating plastic sinks in 4 months in 2007
import numpy as np
import matplotlib.pyplot as plt
import pickle
import scipy.linalg
import pandas as pd
import netCDF4 as nc4
import xarray as xr
from scipy.stats import pearsonr  # scipy.stats.stats is deprecated
""" Loading location of where plastic sinking data """
fileObject = open('./data/raw/forDelphine.pickle', 'rb')
data = pickle.load(fileObject) # sinking data
lons = pickle.load(fileObject) # longitude
lats = pickle.load(fileObject) # latitude
""" Winter 2007 """ # 1st data point = Feb 2006
w07 = 0+12
w07_data0 = data[w07,:,:]
w07_data = (w07_data0- np.min(w07_data0))/(np.max(w07_data0)-np.min(w07_data0)) # To normalise the data between 0 and 1
""" Spring 2007 """
sp07 = 0+15
sp07_data0 = data[sp07,:,:]
sp07_data = (sp07_data0- np.min(sp07_data0))/(np.max(sp07_data0)-np.min(sp07_data0))
""" Summer 2007 """
su07 = 0+18
su07_data0 = data[su07,:,:]
su07_data = (su07_data0- np.min(su07_data0))/(np.max(su07_data0)-np.min(su07_data0))
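# A small helper could replace the three repeated min-max normalisations
# above (a sketch, not in the original script):
def min_max_normalise(a):
    """Scale an array linearly onto the [0, 1] range."""
    return (a - np.min(a)) / (np.max(a) - np.min(a))
# e.g. su07_data = min_max_normalise(data[su07, :, :])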
# Copyright (c) 2020-2021 The Center for Theoretical Biological Physics (CTBP) - Rice University
# This file is from the Open-MiChroM project, released under the MIT License.
R"""
The :class:`~.ChromDynamics` classes perform chromatin dynamics based on the compartment annotations sequence of chromosomes. The simulations can be performed either using the default parameters of MiChroM (Minimal Chromatin Model) or using custom values for the type-to-type and Ideal Chromosome parameters..
"""
from simtk.openmm.app import *
import simtk.openmm as openmm
import simtk.unit as units
from sys import stdout, argv
import numpy as np
from six import string_types
import os
import time
import random
import h5py
from scipy.spatial import distance
import scipy as sp
import itertools
from pandas import DataFrame
class MiChroM:
R"""
The :class:`~.MiChroM` class performs chromatin dynamics employing the default MiChroM energy function parameters for the type-to-type and Ideal Chromosome interactions.
    Details about the MiChroM (Minimal Chromatin Model) energy function and the default parameters are described in "Di Pierro, M., Zhang, B., Aiden, E.L., Wolynes, P.G. and Onuchic, J.N., 2016. Transferable model for chromosome architecture. Proceedings of the National Academy of Sciences, 113(43), pp.12168-12173."
The :class:`~.MiChroM` sets the environment to start the chromatin dynamics simulations.
Args:
time_step (float, required):
Simulation time step in units of :math:`\tau`. (Default value = 0.01).
collision_rate (float, required):
Friction/Damping constant in units of reciprocal time (:math:`1/\tau`). (Default value = 0.1).
temperature (float, required):
Temperature in reduced units. (Default value = 1.0).
verbose (bool, optional):
Whether to output the information in the screen during the simulation. (Default value: :code:`False`).
velocity_reinitialize (bool, optional):
Reset/Reinitialize velocities if :math:`E_{kin}` is greater than 5.0. (Default value: :code:`True`).
name (str):
Name used in the output files. (Default value: *Chromosome*).
length_scale (float, required):
Length scale used in the distances of the system in units of reduced length :math:`\sigma`. (Default value = 1.0).
mass_scale (float, required):
Mass scale used in units of :math:`\mu`. (Default value = 1.0).
"""
def __init__(
self, time_step=0.01, collision_rate=0.1, temperature=1.0,
verbose=False,
velocity_reinitialize=True,
name="Chromosome",
length_scale=1.0,
mass_scale=1.0):
self.name = name
self.timestep = time_step
self.collisionRate = collision_rate
self.temperature = temperature * 120.0
self.verbose = verbose
self.velocityReinitialize = velocity_reinitialize
self.loaded = False
self.forcesApplied = False
self.folder = "."
self.metadata = {}
self.length_scale = length_scale
self.mass_scale = mass_scale
self.eKcritical = 50000000
self.nm = units.meter * 1e-9
self.Sigma = 1.0
self.Epsilon = 1.0
##################### A1 A2 B1 B2 B3 B4 NA
self.inter_Chrom_types =[-0.268028,-0.274604,-0.262513,-0.258880,-0.266760,-0.266760,-0.225646, #A1
-0.274604,-0.299261,-0.286952,-0.281154,-0.301320,-0.301320,-0.245080, #A2
-0.262513,-0.286952,-0.342020,-0.321726,-0.336630,-0.336630,-0.209919, #B1
-0.258880,-0.281154,-0.321726,-0.330443,-0.329350,-0.329350,-0.282536, #B2
-0.266760,-0.301320,-0.336630,-0.329350,-0.341230,-0.341230,-0.349490, #B3
-0.266760,-0.301320,-0.336630,-0.329350,-0.341230,-0.341230,-0.349490, #B4
-0.225646,-0.245080,-0.209919,-0.282536,-0.349490,-0.349490,-0.255994] #NA
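        # Descriptive note (added): the 49 values above form the symmetric
        # 7x7 type-to-type interaction matrix over the chromatin types
        # [A1, A2, B1, B2, B3, B4, NA]; it can be inspected as a matrix via
        #   np.array(self.inter_Chrom_types).reshape(7, 7)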
def setup(self, platform="CUDA", PBC=False, PBCbox=None, GPU="default",
integrator="langevin", errorTol=None, precision="mixed",deviceIndex="0"):
R"""Sets up the parameters of the simulation OpenMM platform.
Args:
platform (str, optional):
Platform to use in the simulations. Opitions are *CUDA*, *OpenCL*, *HIP*, *CPU*, *Reference*. (Default value: *CUDA*).
            PBC (bool, optional):
Whether to use periodic boundary conditions. (Default value: :code:`False`).
PBCbox ([float,float,float], optional):
Define size of the bounding box for PBC. (Default value: :code:`None`).
GPU ( :math:`0` or :math:`1`, optional):
Switch to another GPU. Machines with one GPU automatically select the right GPU. Machines with two or more GPUs select GPU that is less used.
integrator (str):
Integrator to use in the simulations. Options are *langevin*, *variableLangevin*, *verlet*, *variableVerlet* and, *brownian*. (Default value: *langevin*).
verbose (bool, optional):
Whether to output the information in the screen during the simulation. (Default value: :code:`False`).
deviceIndex (str, optional):
Set of Platform device index IDs. Ex: 0,1,2 for the system to use the devices 0, 1 and 2. (Use only when GPU != default)
errorTol (float, required if **integrator** = *variableLangevin*):
Error tolerance parameter for *variableLangevin* integrator.
"""
self.step = 0
if PBC == True:
self.metadata["PBC"] = True
precision = precision.lower()
if precision not in ["mixed", "single", "double"]:
raise ValueError("Presision must be mixed, single or double")
self.kB = units.BOLTZMANN_CONSTANT_kB * units.AVOGADRO_CONSTANT_NA
self.kT = self.kB * self.temperature
self.mass = 10.0 * units.amu * self.mass_scale
self.bondsForException = []
self.mm = openmm
self.system = self.mm.System()
self.PBC = PBC
if self.PBC == True:
if PBCbox is None:
data = self.getPositions()
data -= np.min(data, axis=0)
datasize = 1.1 * (2 + (np.max(self.getPositions(), axis=0) - np.min(self.getPositions(), axis=0)))
self.SolventGridSize = (datasize / 1.1) - 2
print("density is ", self.N / (datasize[0]
* datasize[1] * datasize[2]))
else:
PBCbox = np.array(PBCbox)
datasize = PBCbox
self.metadata["PBCbox"] = PBCbox
self.system.setDefaultPeriodicBoxVectors([datasize[0], 0.,
0.], [0., datasize[1], 0.], [0., 0., datasize[2]])
self.BoxSizeReal = datasize
self.GPU = str(GPU)
properties = {}
if self.GPU.lower() != "default":
properties["DeviceIndex"] = deviceIndex
properties["Precision"] = precision
self.properties = properties
if platform.lower() == "opencl":
platformObject = self.mm.Platform.getPlatformByName('OpenCL')
elif platform.lower() == "reference":
platformObject = self.mm.Platform.getPlatformByName('Reference')
elif platform.lower() == "cuda":
platformObject = self.mm.Platform.getPlatformByName('CUDA')
elif platform.lower() == "cpu":
platformObject = self.mm.Platform.getPlatformByName('CPU')
elif platform.lower() == "hip":
platformObject = self.mm.Platform.getPlatformByName('HIP')
else:
self.exit("\n!!!! Unknown platform !!!!\n")
self.platform = platformObject
self.forceDict = {}
self.integrator_type = integrator
if isinstance(integrator, string_types):
integrator = str(integrator)
if integrator.lower() == "langevin":
self.integrator = self.mm.LangevinIntegrator(self.temperature,
self.collisionRate, self.timestep)
elif integrator.lower() == "variablelangevin":
self.integrator = self.mm.VariableLangevinIntegrator(self.temperature,
self.collisionRate, errorTol)
elif integrator.lower() == "verlet":
self.integrator = self.mm.VerletIntegrator(self.timestep)
elif integrator.lower() == "variableverlet":
self.integrator = self.mm.VariableVerletIntegrator(errorTol)
elif integrator.lower() == 'brownian':
self.integrator = self.mm.BrownianIntegrator(self.temperature,
self.collisionRate, self.timestep)
else:
raise ValueError('please select from "langevin", "variablelangevin", '
'"verlet", "variableverlet", '
'"brownian" or provide an integrator object')
else:
self.integrator = integrator
self.integrator_type = "UserDefined"
def saveFolder(self, folder):
R"""Sets the folder path to save data.
Args:
folder (str, optional):
Folder path to save the simulation data. If the folder path does not exist, the function will create the directory.
"""
if not os.path.exists(folder):
os.mkdir(folder)
self.folder = folder
def loadStructure(self, filename,center=True,masses=None):
R"""Loads the 3D position of each bead of the chromosome polymer in the OpenMM system platform.
Args:
filename (:math:`(N, 3)` array-like, required):
The 3D positions of the chromosome beads; a :math:`(3, N)` array is transposed automatically.
center (bool, optional):
Whether to move the center of mass of the chromosome to the 3D position ``[0, 0, 0]`` before starting the simulation. (Default value: :code:`True`).
masses (array, optional):
Masses of each chromosome bead measured in units of :math:`\mu`. (Default value: :code:`None`).
"""
data = filename
data = np.asarray(data, float)
if len(data) == 3:
data = np.transpose(data)
if len(data[0]) != 3:
self._exitProgram("Wrong file format")
if np.isnan(data).any():
self._exitProgram("\n!!!! The file contains NAN's !!!!\n")
if center is True:
av = np.mean(data, 0)
data -= av
if center == "zero":
minvalue = np.min(data, 0)
data -= minvalue
self.setPositions(data)
if masses is None:
self.masses = [1. for _ in range(self.N)]
else:
self.masses = masses
if not hasattr(self, "chains"):
self.setChains()
def setChains(self, chains=[(0, None, 0)]):
R"""Sets configuration of the chains in the system. This information is later used for adding Bonds and Angles of the Homopolymer potential.
Args:
chains (list of tuples, optional):
The list of chains in the format [(start, end, isRing)]. isRing is a boolean whether the chromosome chain is circular or not (Used to simulate bacteria genome, for example). The particle range should be semi-open, i.e., a chain :math:`(0,3,0)` links the particles :math:`0`, :math:`1`, and :math:`2`. If :code:`bool(isRing)` is :code:`True` , the first and last particles of the chain are linked, forming a ring. The default value links all particles of the system into one chain. (Default value: :code:`[(0, None, 0)]`).
"""
self.chains = [i for i in chains]
for i in range(len(self.chains)):
start, end, isRing = self.chains[i]
self.chains[i] = (start, end, isRing)
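# Example (hypothetical bead counts): two chains of 1000 beads each, the
# second one circular:
#
#   sim.setChains(chains=[(0, 1000, 0), (1000, 2000, 1)])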
def setPositions(self, beadsPos , random_offset = 1e-5):
R"""Sets the 3D position of each bead of the chromosome polymer in the OpenMM system platform.
Args:
beadsPos (:math:`(N, 3)` :class:`numpy.ndarray`):
Array of XYZ positions for each bead (locus) in the polymer model.
random_offset (float, optional):
A small increment in the positions to avoid numerical instability and to guarantee that *float* positions are used. (Default value = 1e-5).
"""
data = np.asarray(beadsPos, dtype="float")
if random_offset:
data = data + (np.random.random(data.shape) * 2 - 1) * random_offset
self.data = units.Quantity(data, self.nm)
self.N = len(self.data)
if hasattr(self, "context"):
self.initPositions()
def getPositions(self):
R"""
Returns:
:math:`(N, 3)` :class:`numpy.ndarray`:
Returns an array of positions.
"""
return np.asarray(self.data / self.nm, dtype=np.float32)
def randomizePositions(self):
R"""
Adds a small random offset to the positions; run automatically to avoid exactly overlapping (e.g., integer-valued) positions.
"""
data = self.getPositions()
data = data + np.random.randn(*data.shape) * 0.0001
self.setPositions(data)
def getLoops(self, looplists):
R"""
Get the loop position (CTCF anchor points) for each chromosome.
.. note:: For Multi-chain simulations, the ordering of the loop list files is important! The order of the files should be the same as used in the other functions.
Args:
looplists (text file):
A two-column text file containing the index *i* and *j* of a loci pair that form loop interactions.
"""
self.loopPosition = []
for file, chain in zip(looplists,self.chains):
with open(file, 'r') as aFile:
pos = aFile.read().splitlines()
m = int(chain[0])
for t in range(len(pos)):
pos[t] = pos[t].split()
pos[t][0] = int(pos[t][0]) +m
pos[t][1] = int(pos[t][1]) +m
self.loopPosition.append(pos[t])
def addFlatBottomHarmonic(self, kr=5*10**-3, n_rad=10.0):
R"""
Sets a Flat-Bottom Harmonic potential to collapse the chromosome chain inside the nucleus wall. The potential is defined as: :math:`step(r-r0) * (kr/2)*(r-r0)^2`.
Args:
kr (float, required):
Spring constant. (Default value = 5e-3).
n_rad (float, required):
Nucleus wall radius in units of :math:`\sigma`. (Default value = 10.0).
"""
restraintForce = self.mm.CustomExternalForce("step(r-r_res) * 0.5 * kr * (r-r_res)^2; r=sqrt(x*x+y*y+z*z)")
restraintForce.addGlobalParameter('r_res', n_rad)
restraintForce.addGlobalParameter('kr', kr)
for i in range(self.N):
restraintForce.addParticle(i, [])
self.forceDict["FlatBottomHarmonic"] = restraintForce
def addSphericalConfinementLJ(self, r="density", density=0.1):
R"""
Sets the nucleus wall potential according to MiChroM Energy function. The confinement potential describes the interaction between the chromosome and a spherical wall.
Args:
r (float or str="density", optional):
Radius of the nucleus wall. If **r="density"** requires a **density** value.
density (float, required if **r="density"**):
Density of the chromosome beads inside the nucleus. (Default value = 0.1).
"""
spherForce = self.mm.CustomExternalForce("(4 * GROSe * ((GROSs/r)^12 - (GROSs/r)^6) + GROSe) * step(GROScut - r);"
"r= R - sqrt(x^2 + y^2 + z^2) ")
self.forceDict["SphericalConfinementLJ"] = spherForce
for i in range(self.N):
spherForce.addParticle(i, [])
if r == "density":
r = (3 * self.N / (4 * 3.141592 * density)) ** (1 / 3.)
self.sphericalConfinementRadius = r
spherForce.addGlobalParameter('R', r)
spherForce.addGlobalParameter('GROSe', 1.0)
spherForce.addGlobalParameter('GROSs', 1.0)
spherForce.addGlobalParameter("GROScut", 2.**(1./6.))
return r
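# Sanity check of the density-derived radius r = (3N / (4*pi*rho))**(1/3):
# e.g. N=1000 beads at density rho=0.1 give r = (3000 / (4*pi*0.1))**(1/3)
# ~= 13.4 sigma.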
def addFENEBonds(self, kfb=30.0):
R"""
Adds FENE (Finite Extensible Nonlinear Elastic) bonds between neighbor loci :math:`i` and :math:`i+1` according to "<NAME>., <NAME>., <NAME>., <NAME>. and <NAME>., 2011. Molecular dynamics simulation study of nonconcatenated ring polymers in a melt. I. Statics. The Journal of chemical physics, 134(20), p.204904".
Args:
kfb (float, required):
Bond coefficient. (Default value = 30.0).
"""
for start, end, isRing in self.chains:
for j in range(start, end):
self.addBond(j, j + 1, kfb=kfb)
self.bondsForException.append((j, j + 1))
if isRing:
self.addBond(start, end, distance=1, kfb=kfb)
self.bondsForException.append((start, end ))
self.metadata["FENEBond"] = repr({"kfb": kfb})
def _initFENEBond(self, kfb=30):
R"""
Internal function that inits FENE bond force.
"""
if "FENEBond" not in list(self.forceDict.keys()):
force = ("- 0.5 * kfb * r0 * r0 * log(1-(r/r0)*(r/r0)) + (4 * e * ((s/r)^12 - (s/r)^6) + e) * step(cut - r)")
bondforceGr = self.mm.CustomBondForce(force)
bondforceGr.addGlobalParameter("kfb", kfb)
bondforceGr.addGlobalParameter("r0", 1.5)
bondforceGr.addGlobalParameter('e', 1.0)
bondforceGr.addGlobalParameter('s', 1.0)
bondforceGr.addGlobalParameter("cut", 2.**(1./6.))
self.forceDict["FENEBond"] = bondforceGr
def addBond(self, i, j, distance=None, kfb=30):
R"""
Adds bonds between loci :math:`i` and :math:`j`
Args:
kfb (float, required):
Bond coefficient. (Default value = 30.0).
i (int, required):
Locus index **i**.
j (int, required):
Locus index **j**
"""
if (i >= self.N) or (j >= self.N):
raise ValueError("\n Cannot add a bond between beads %d,%d that are beyond the chromosome length %d" % (i, j, self.N))
if distance is None:
distance = self.length_scale
else:
distance = self.length_scale * distance
distance = float(distance)
self._initFENEBond(kfb=kfb)
self.forceDict["FENEBond"].addBond(int(i), int(j), [])
def addAngles(self, ka=2.0):
R"""
Adds an angular potential between bonds connecting beads :math:`i-1, i` and :math:`i, i+1` according to "<NAME>., <NAME>., <NAME>., <NAME>. and <NAME>., 2011. Molecular dynamics simulation study of nonconcatenated ring polymers in a melt. I. Statics. The Journal of chemical physics, 134(20), p.204904".
Args:
ka (float, required):
Angle potential coefficient. (Default value = 2.0).
"""
try:
ka[0]
except TypeError:
ka = np.zeros(self.N, float) + ka
angles = self.mm.CustomAngleForce(
"ka * (1 - cos(theta - 3.141592))")
angles.addPerAngleParameter("ka")
for start, end, isRing in self.chains:
for j in range(start + 1, end):
angles.addAngle(j - 1, j, j + 1, [ka[j]])
if isRing:
angles.addAngle(end - 1, end , start, [ka[end]])
angles.addAngle(end , start, start + 1, [ka[start]])
self.metadata["AngleForce"] = repr({"stiffness": ka})
self.forceDict["AngleForce"] = angles
def addRepulsiveSoftCore(self, Ecut=4.0):
R"""
Adds a soft-core repulsive interaction that allows chain crossing, which represents the activity of topoisomerase II. Details can be found in the following publications:
- <NAME>., A.B., <NAME>., <NAME>. and <NAME>., 2021. A scalable computational approach for simulating complexes of multiple chromosomes. Journal of Molecular Biology, 433(6), p.166700.
- <NAME>., <NAME>., <NAME>., <NAME>. and <NAME>., 2016. Transferable model for chromosome architecture. Proceedings of the National Academy of Sciences, 113(43), pp.12168-12173.
- <NAME>., <NAME>., <NAME>., <NAME>., <NAME>., <NAME>. and <NAME>., 2013. Organization of the mitotic chromosome. Science, 342(6161), pp.948-953.
Args:
Ecut (float, required):
Energy cost for the chain passing in units of :math:`k_{b}T`. (Default value = 4.0).
"""
nbCutOffDist = self.Sigma * 2. ** (1. / 6.) #1.112
Ecut = Ecut*self.Epsilon
r_0 = self.Sigma*(((0.5*Ecut)/(4.0*self.Epsilon) - 0.25 +((0.5)**(2.0)))**(1.0/2.0) +0.5)**(-1.0/6.0)
repul_energy = ("LJ * step(r - r_0) * step(CutOff - r)"
" + step(r_0 - r)* 0.5 * Ecut * (1.0 + tanh( (2.0 * LJ/Ecut) - 1.0 ));"
"LJ = 4.0 * Epsi * ((Sig/r)^12 - (Sig/r)^6) + Epsi")
self.forceDict["RepulsiveSoftCore"] = self.mm.CustomNonbondedForce(
repul_energy)
repulforceGr = self.forceDict["RepulsiveSoftCore"]
repulforceGr.addGlobalParameter('Epsi', self.Epsilon)
repulforceGr.addGlobalParameter('Sig', self.Sigma)
repulforceGr.addGlobalParameter('Ecut', Ecut)
repulforceGr.addGlobalParameter('r_0', r_0)
repulforceGr.addGlobalParameter('CutOff', nbCutOffDist)
repulforceGr.setCutoffDistance(3.0)
for _ in range(self.N):
repulforceGr.addParticle(())
def addTypetoType(self, mu=3.22, rc = 1.78 ):
R"""
Adds the type-to-type interactions according to the MiChroM energy function parameters reported in "<NAME>., <NAME>., <NAME>., <NAME>. and <NAME>., 2016. Transferable model for chromosome architecture. Proceedings of the National Academy of Sciences, 113(43), pp.12168-12173".
The parameters :math:`\mu` (mu) and rc are part of the probability of crosslink function :math:`f(r_{i,j}) = \frac{1}{2}\left( 1 + tanh\left[\mu(r_c - r_{i,j})\right] \right)`, where :math:`r_{i,j}` is the spatial distance between loci (beads) *i* and *j*.
Args:
mu (float, required):
Parameter in the probability of crosslink function. (Default value = 3.22).
rc (float, required):
Parameter in the probability of crosslink function, :math:`f(rc) = 0.5`. (Default value = 1.78).
"""
self.metadata["TypetoType"] = repr({"mu": mu})
if not hasattr(self, "type_list"):
self.type_list = self.random_ChromSeq(self.N)
energy = "mapType(t1,t2)*0.5*(1. + tanh(mu*(rc - r)))*step(r-1.0)"
crossLP = self.mm.CustomNonbondedForce(energy)
crossLP.addGlobalParameter('mu', mu)
crossLP.addGlobalParameter('rc', rc)
crossLP.setCutoffDistance(3.0)
fTypes = self.mm.Discrete2DFunction(7,7,self.inter_Chrom_types)
crossLP.addTabulatedFunction('mapType', fTypes)
crossLP.addPerParticleParameter("t")
for i in range(self.N):
value = [float(self.type_list[i])]
crossLP.addParticle(value)
self.forceDict["TypetoType"] = crossLP
def addCustomTypes(self, mu=3.22, rc = 1.78, TypesTable=None):
R"""
Adds the type-to-type potential using custom values for interactions between the chromatin types. The parameters :math:`\mu` (mu) and rc are part of the probability of crosslink function :math:`f(r_{i,j}) = \frac{1}{2}\left( 1 + tanh\left[\mu(r_c - r_{i,j})\right] \right)`, where :math:`r_{i,j}` is the spatial distance between loci (beads) *i* and *j*.
The function receives a txt/TSV/CSV file containing the upper triangular matrix of the type-to-type interactions. A file example can be found `here <https://www.ndb.rice.edu>`__.
+---+------+-------+-------+
| | A | B | C |
+---+------+-------+-------+
| A | -0.2 | -0.25 | -0.15 |
+---+------+-------+-------+
| B | | -0.3 | -0.15 |
+---+------+-------+-------+
| C | | | -0.35 |
+---+------+-------+-------+
Args:
mu (float, required):
Parameter in the probability of crosslink function. (Default value = 3.22).
rc (float, required):
Parameter in the probability of crosslink function, :math:`f(rc) = 0.5`. (Default value = 1.78).
TypesTable (file, required):
A txt/TSV/CSV file containing the upper triangular matrix of the type-to-type interactions. (Default value: :code:`None`).
"""
self.metadata["CrossLink"] = repr({"mu": mu})
if not hasattr(self, "type_list"):
self.type_list = self.random_ChromSeq(self.N)
energy = "mapType(t1,t2)*0.5*(1. + tanh(mu*(rc - r)))*step(r-lim)"
crossLP = self.mm.CustomNonbondedForce(energy)
crossLP.addGlobalParameter('mu', mu)
crossLP.addGlobalParameter('rc', rc)
crossLP.addGlobalParameter('lim', 1.0)
crossLP.setCutoffDistance(3.0)
lambdas_full = np.loadtxt(TypesTable, delimiter=',')
lambdas = np.triu(lambdas_full) + np.triu(lambdas_full, k=1).T
diff_types = len(lambdas)
print("Number of chromatin types:", diff_types)
lambdas = list(np.ravel(lambdas))
fTypes = self.mm.Discrete2DFunction(diff_types,diff_types,lambdas)
crossLP.addTabulatedFunction('mapType', fTypes)
AB_types = self.changeType_list()
crossLP.addPerParticleParameter("t")
for i in range(self.N):
value = [float(AB_types[i])]
crossLP.addParticle(value)
self.forceDict["CustomTypes"] = crossLP
def changeType_list(self):
R"""
Internal function for indexing unique chromatin types.
"""
n = set(self.type_list)
lista = np.array(self.type_list)
k=0
for t in n:
lista[lista==t] = k
k += 1
return list(lista)
def addLoops(self, mu=3.22, rc = 1.78, X=-1.612990, looplists=None):
R"""
Adds the Loops interactions according to the MiChroM energy function parameters reported in "<NAME>., <NAME>., <NAME>., <NAME>. and <NAME>., 2016. Transferable model for chromosome architecture. Proceedings of the National Academy of Sciences, 113(43), pp.12168-12173".
The parameters :math:`\mu` (mu) and rc are part of the probability of crosslink function :math:`f(r_{i,j}) = \frac{1}{2}\left( 1 + tanh\left[\mu(r_c - r_{i,j})\right] \right)`, where :math:`r_{i,j}` is the spatial distance between loci (beads) *i* and *j*.
.. note:: For Multi-chain simulations, the ordering of the loop list files is important! The order of the files should be the same as used in the other functions.
Args:
mu (float, required):
Parameter in the probability of crosslink function. (Default value = 3.22).
rc (float, required):
Parameter in the probability of crosslink function, :math:`f(rc) = 0.5`. (Default value = 1.78).
X (float, required):
Loop interaction parameter. (Default value = -1.612990).
looplists (file, optional):
A two-column text file containing the index *i* and *j* of a loci pair that form loop interactions. (Default value: :code:`None`).
"""
ELoop = "qsi*0.5*(1. + tanh(mu*(rc - r)))"
Loop = self.mm.CustomBondForce(ELoop)
Loop.addGlobalParameter('mu', mu)
Loop.addGlobalParameter('rc', rc)
Loop.addGlobalParameter('qsi', X)
self.getLoops(looplists)
for p in self.loopPosition:
Loop.addBond(p[0]-1,p[1]-1)
self.forceDict["Loops"] = Loop
def addCustomIC(self, mu=3.22, rc = 1.78, dinit=3, dend=200, IClist=None):
R"""
Adds the Ideal Chromosome potential using custom values for interactions between beads separated by a genomic distance :math:`d`. The parameters :math:`\mu` (mu) and rc are part of the probability of crosslink function :math:`f(r_{i,j}) = \frac{1}{2}\left( 1 + tanh\left[\mu(r_c - r_{i,j})\right] \right)`, where :math:`r_{i,j}` is the spatial distance between loci (beads) *i* and *j*.
Args:
mu (float, required):
Parameter in the probability of crosslink function. (Default value = 3.22).
rc (float, required):
Parameter in the probability of crosslink function, :math:`f(rc) = 0.5`. (Default value = 1.78).
dinit (int, required):
The first neighbor in sequence separation (Genomic Distance) to be considered in the Ideal Chromosome potential. (Default value = 3).
dend (int, required):
The last neighbor in sequence separation (Genomic Distance) to be considered in the Ideal Chromosome potential. (Default value = 200).
IClist (file, optional):
A one-column text file containing the energy interaction values for loci *i* and *j* separated by a genomic distance :math:`d`. (Default value: :code:`None`).
"""
energyIC = ("step(d-dinit)*IClists(d)*step(dend -d)*f*step(r-lim);"
"f=0.5*(1. + tanh(mu*(rc - r)));"
"d=abs(idx2-idx1)")
IC = self.mm.CustomNonbondedForce(energyIC)
IClist = np.append(np.zeros(dinit),IClist)[:-dinit] # pad with dinit zeros so IClists(d) is indexed by the genomic distance d
tabIClist = self.mm.Discrete1DFunction(IClist)
IC.addTabulatedFunction('IClist', tabIClist)
IC.addGlobalParameter('dinit', dinit)
IC.addGlobalParameter('dend', dend)
IC.addGlobalParameter('mu', mu)
IC.addGlobalParameter('rc', rc)
IC.addGlobalParameter('lim', 1.0)
IC.setCutoffDistance(3.0)
IC.addPerParticleParameter("idx")
for i in range(self.N):
IC.addParticle([i])
self.forceDict["CustomIC"] = IC
def addIdealChromosome(self, mu=3.22, rc = 1.78, Gamma1=-0.030,Gamma2=-0.351,
Gamma3=-3.727, dinit=3, dend=500):
R"""
Adds the Ideal Chromosome potential for interactions between beads separated by a genomic distance :math:`d` according to the MiChroM energy function parameters reported in "<NAME>., <NAME>., <NAME>., <NAME>. and <NAME>., 2016. Transferable model for chromosome architecture. Proceedings of the National Academy of Sciences, 113(43), pp.12168-12173".
The set of parameters :math:`\{\gamma_d\}` of the Ideal Chromosome potential is fitted in a function: :math:`\gamma(d) = \frac{\gamma_1}{\log{(d)}} +\frac{\gamma_2}{d} +\frac{\gamma_3}{d^2}`.
The parameters :math:`\mu` (mu) and rc are part of the probability of crosslink function :math:`f(r_{i,j}) = \frac{1}{2}\left( 1 + tanh\left[\mu(r_c - r_{i,j})\right] \right)`, where :math:`r_{i,j}` is the spatial distance between loci (beads) *i* and *j*.
Args:
mu (float, required):
Parameter in the probability of crosslink function. (Default value = 3.22).
rc (float, required):
Parameter in the probability of crosslink function, :math:`f(rc) = 0.5`. (Default value = 1.78).
Gamma1 (float, required):
Ideal Chromosome parameter. (Default value = -0.030).
Gamma2 (float, required):
Ideal Chromosome parameter. (Default value = -0.351).
Gamma3 (float, required):
Ideal Chromosome parameter. (Default value = -3.727).
dinit (int, required):
The first neighbor in sequence separation (Genomic Distance) to be considered in the Ideal Chromosome potential. (Default value = 3).
dend (int, required):
The last neighbor in sequence separation (Genomic Distance) to be considered in the Ideal Chromosome potential. (Default value = 500).
"""
energyIC = ("step(d-dinit)*(gamma1/log(d) + gamma2/d + gamma3/d^2)*step(dend -d)*f;"
"f=0.5*(1. + tanh(mu*(rc - r)));"
"d=abs(idx1-idx2)")
IC = self.mm.CustomNonbondedForce(energyIC)
IC.addGlobalParameter('gamma1', Gamma1)
IC.addGlobalParameter('gamma2', Gamma2)
IC.addGlobalParameter('gamma3', Gamma3)
IC.addGlobalParameter('dinit', dinit)
IC.addGlobalParameter('dend', dend)
IC.addGlobalParameter('mu', mu)
IC.addGlobalParameter('rc', rc)
IC.setCutoffDistance(3.0)
IC.addPerParticleParameter("idx")
for i in range(self.N):
IC.addParticle([i])
self.forceDict["IdealChromosome"] = IC
def addMultiChainIC(self, mu=3.22, rc = 1.78, Gamma1=-0.030,Gamma2=-0.351,
Gamma3=-3.727, dinit=3, dend=500, chains=None):
R"""
Adds the Ideal Chromosome potential for multiple chromosome simulations. The interactions between beads separated by a genomic distance :math:`d` is applied according to the MiChroM energy function parameters reported in "<NAME>., <NAME>., <NAME>., <NAME>. and <NAME>., 2016. Transferable model for chromosome architecture. Proceedings of the National Academy of Sciences, 113(43), pp.12168-12173".
The set of parameters :math:`\{\gamma_d\}` of the Ideal Chromosome potential is fitted in a function: :math:`\gamma(d) = \frac{\gamma_1}{\log{(d)}} +\frac{\gamma_2}{d} +\frac{\gamma_3}{d^2}`.
The parameters :math:`\mu` (mu) and rc are part of the probability of crosslink function :math:`f(r_{i,j}) = \frac{1}{2}\left( 1 + tanh\left[\mu(r_c - r_{i,j})\right] \right)`, where :math:`r_{i,j}` is the spatial distance between loci (beads) *i* and *j*.
Args:
mu (float, required):
Parameter in the probability of crosslink function. (Default value = 3.22).
rc (float, required):
Parameter in the probability of crosslink function, :math:`f(rc) = 0.5`. (Default value = 1.78).
Gamma1 (float, required):
Ideal Chromosome parameter. (Default value = -0.030).
Gamma2 (float, required):
Ideal Chromosome parameter. (Default value = -0.351).
Gamma3 (float, required):
Ideal Chromosome parameter. (Default value = -3.727).
dinit (int, required):
The first neighbor in sequence separation (Genomic Distance) to be considered in the Ideal Chromosome potential. (Default value = 3).
dend (int, required):
The last neighbor in sequence separation (Genomic Distance) to be considered in the Ideal Chromosome potential. (Default value = 500).
chains (list of tuples, optional):
The list of chains in the format [(start, end, isRing)]. isRing is a boolean whether the chromosome chain is circular or not (Used to simulate bacteria genome, for example). The particle range should be semi-open, i.e., a chain :math:`(0,3,0)` links the particles :math:`0`, :math:`1`, and :math:`2`. If :code:`bool(isRing)` is :code:`True` , the first and last particles of the chain are linked, forming a ring. The default value links all particles of the system into one chain. (Default value: :code:`[(0, None, 0)]`).
"""
energyIC = ("step(d-dinit)*(gamma1/log(d) + gamma2/d + gamma3/d^2)*step(dend-d)*f;"
"f=0.5*(1. + tanh(mu*(rc - r)));"
"d=abs(idx1-idx2)")
IC = self.mm.CustomNonbondedForce(energyIC)
IC.addGlobalParameter('gamma1', Gamma1)
IC.addGlobalParameter('gamma2', Gamma2)
IC.addGlobalParameter('gamma3', Gamma3)
IC.addGlobalParameter('dinit', dinit)
IC.addGlobalParameter('dend', dend)
IC.addGlobalParameter('mu', mu)
IC.addGlobalParameter('rc', rc)
IC.setCutoffDistance(3)
groupList = list(range(chains[0],chains[1]+1))
IC.addInteractionGroup(groupList,groupList)
IC.addPerParticleParameter("idx")
for i in range(self.N):
IC.addParticle([i])
self.forceDict["IdealChromosome_chain_"+str(chains[0])] = IC
def _loadParticles(self):
R"""
Internal function that loads the chromosome beads into the simulations system.
"""
if not hasattr(self, "system"):
return
if not self.loaded:
for mass in self.masses:
self.system.addParticle(self.mass * mass)
if self.verbose == True:
print("%d particles loaded" % self.N)
self.loaded = True
def _applyForces(self):
R"""Internal function that adds all loci to the system and applies all the forces present in the forcedict."""
if self.forcesApplied == True:
return
self._loadParticles()
exc = self.bondsForException
print("Number of exceptions:", len(exc))
if len(exc) > 0:
exc = np.array(exc)
exc = | np.sort(exc, axis=1) | numpy.sort |
# TODO: Tests for features that are just called
# TODO: Test for trend='ctt'
from arch.compat.statsmodels import dataset_loader
import os
from typing import NamedTuple, Optional
import warnings
import numpy as np
from numpy import ceil, diff, log, polyval
from numpy.random import RandomState
from numpy.testing import assert_allclose, assert_almost_equal, assert_equal
import pandas as pd
import pytest
import scipy.stats as stats
from statsmodels.datasets import macrodata, modechoice, nile, randhie, sunspots
from statsmodels.regression.linear_model import OLS
from statsmodels.tsa.stattools import _autolag, lagmat
from arch.unitroot import ADF, DFGLS, KPSS, PhillipsPerron, VarianceRatio, ZivotAndrews
from arch.unitroot.critical_values.dickey_fuller import tau_2010
from arch.unitroot.unitroot import (
_autolag_ols,
_autolag_ols_low_memory,
_is_reduced_rank,
auto_bandwidth,
mackinnoncrit,
mackinnonp,
)
from arch.utility.exceptions import InfeasibleTestException
DECIMAL_5 = 5
DECIMAL_4 = 4
DECIMAL_3 = 3
DECIMAL_2 = 2
DECIMAL_1 = 1
BASE_PATH = os.path.split(os.path.abspath(__file__))[0]
DATA_PATH = os.path.join(BASE_PATH, "data")
ZIVOT_ANDREWS_DATA = pd.read_csv(
os.path.join(DATA_PATH, "zivot-andrews.csv"), index_col=0
)
# Time series to test the autobandwidth method against its implementation under R
REAL_TIME_SERIES = [8, 9, 2, 4, 8, 9, 9, 4, 4, 9, 7, 1, 1, 9, 4, 9, 3]
TRUE_BW_FROM_R_BA = 3.033886
TRUE_BW_FROM_R_PA = 7.75328
TRUE_BW_FROM_R_QS = 3.851586
class TestUnitRoot(object):
@classmethod
def setup_class(cls):
cls.rng = RandomState(12345)
data = dataset_loader(macrodata)
cls.cpi = log(data["cpi"])
cls.realgdp = data["realgdp"]
cls.inflation = diff(cls.cpi)
cls.inflation_change = diff(cls.inflation)
def test_adf_no_options(self):
adf = ADF(self.inflation)
assert_almost_equal(adf.stat, -3.09310, DECIMAL_4)
assert_equal(adf.lags, 2)
assert_almost_equal(adf.pvalue, 0.027067, DECIMAL_4)
adf.regression.summary()
adf2 = ADF(self.inflation, low_memory=True)
assert_equal(adf2.lags, 2)
def test_adf_no_lags(self):
adf = ADF(self.inflation, lags=0).stat
assert_almost_equal(adf, -6.56880, DECIMAL_4)
def test_adf_nc_no_lags(self):
adf = ADF(self.inflation, trend="n", lags=0)
assert_almost_equal(adf.stat, -3.88845, DECIMAL_4)
# 16.239
def test_adf_c_no_lags(self):
adf = ADF(self.inflation, trend="c", lags=0)
assert_almost_equal(adf.stat, -6.56880, DECIMAL_4)
assert_equal(adf.nobs, self.inflation.shape[0] - adf.lags - 1)
def test_adf_ct_no_lags(self):
adf = ADF(self.inflation, trend="ct", lags=0)
| assert_almost_equal(adf.stat, -6.66705, DECIMAL_4) | numpy.testing.assert_almost_equal |
#!/usr/bin/env python3
import rospy
import tf
import numpy as np
from geometry_msgs.msg import Point, PoseStamped, PoseWithCovarianceStamped, TwistStamped
from apriltag_ros.msg import AprilTagDetectionArray
"""
The KalmanFilter class subscribes to the lateral position of the tag and
estimates the tag's position with respect to the quad based on the camera
sensor; it also estimates how fast the tag is moving.
"""
class KalmanFilter():
def __init__(self, F = None, B = None, H = None, Q = None, R = None, P = None, x0 = None):
if(F is None or H is None):
raise ValueError("Set proper system dynamics.")
self.n = F.shape[1]
self.m = H.shape[1]
self.F = F
self.H = H
self.B = 0 if B is None else B
self.Q = np.eye(self.n) if Q is None else Q
self.R = np.eye(self.m) if R is None else R # measurement-noise covariance is m x m
self.P = np.eye(self.n) if P is None else P
self.x = np.zeros((self.n, 1)) if x0 is None else x0
self.z = [0] * len(self.H)
#this is the apriltag position subscriber
rospy.Subscriber("tag/pose", PoseStamped, self.tagpose_cb)
self.kf_pub = rospy.Publisher("kf_tag/pose", PoseStamped, queue_size=10)
self.kf_vel_pub = rospy.Publisher("kf_tag/vel", TwistStamped, queue_size=10)
def predict(self, u = 0):
self.x = np.dot(self.F, self.x) + np.dot(self.B, u)
self.publish_kf_est() #publish the kf estimates for position and vel of tag
self.P = np.dot(np.dot(self.F, self.P), self.F.T) + self.Q
return self.x
def update(self):
y = self.z - np.dot(self.H, self.x)
S = self.R + np.dot(self.H, np.dot(self.P, self.H.T))
K = np.dot(np.dot(self.P, self.H.T), np.linalg.inv(S))
self.x = self.x + np.dot(K, y) #update state estimate
I = | np.eye(self.n) | numpy.eye |
"""Processing data for criteo kaggle dataset"""
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from csv import DictReader
import os
import numpy as np
import mxnet as mx
def get_uci_criteo(data_dir, data_name):
"""Get preprocessed data to feed into model"""
data_file = os.path.join(data_dir, data_name)
if (not os.path.exists(data_file)):
print("Dataset " + data_file + " not present")
csr, dns, label = preprocess_uci_criteo(data_name)
return csr, dns, label
# Label - Target variable that indicates if an ad was clicked (1) or not (0).
# I1-I13 - A total of 13 columns of integer features (mostly count features).
# C1-C26 - A total of 26 columns of categorical features. The values of
# these features have been hashed onto 32 bits for anonymization purposes.
CONTINUOUS_COLUMNS = ["I"+str(i) for i in range(1, 14)] # 1-13 inclusive
CATEGORICAL_COLUMNS = ["C"+str(i) for i in range(1, 27)] # 1-26 inclusive
LABEL_COLUMN = ["clicked"]
TRAIN_DATA_COLUMNS = LABEL_COLUMN + CONTINUOUS_COLUMNS + CATEGORICAL_COLUMNS
FEATURE_COLUMNS = CONTINUOUS_COLUMNS + CATEGORICAL_COLUMNS
max_dict = {'I1': 1539, 'I2': 22066, 'I3': 65535, 'I4': 561, 'I5': 2655388, 'I6': 233523,
'I7': 26297, 'I8': 5106, 'I9': 24376, 'I10': 9, 'I11': 181, 'I12': 1807, 'I13': 6879}
min_dict = {'I1': 0, 'I2': -3, 'I3': 0, 'I4': 0, 'I5': 0, 'I6': 0, 'I7': 0, 'I8': 0,
'I9': 0, 'I10': 0, 'I11': 0, 'I12': 0, 'I13': 0}
def preprocess_uci_criteo(data_name):
"""Data preprocessing for criteo kaggle dataset"""
hash_bucket_size = 1000
#cont_defaults = [[0] for i in range(1, 14)]
#cate_defaults = [[" "] for i in range(1, 27)]
#label_defaults = [[0]]
#column_headers = TRAIN_DATA_COLUMNS
#record_defaults = label_defaults + cont_defaults + cate_defaults
label_list = []
csr_list = []
dns_list = []
#csr_ncols = len(CATEGORICAL_COLUMNS) * hash_bucket_size
dns_ncols = len(CONTINUOUS_COLUMNS) + len(CATEGORICAL_COLUMNS)
with open(data_name) as f:
for row in DictReader(f, fieldnames=TRAIN_DATA_COLUMNS):
label_list.append(row['clicked'])
# Sparse base columns.
for name in CATEGORICAL_COLUMNS:
csr_list.append((hash(row[name]) % hash_bucket_size, 1.0))
dns_row = [0] * dns_ncols
dns_dim = 0
# Embed wide columns into deep columns
for col in CATEGORICAL_COLUMNS:
dns_row[dns_dim] = hash(row[col].strip()) % hash_bucket_size
dns_dim += 1
# Continuous base columns.
scale = 1 #align with Google WnD paper
for col in CONTINUOUS_COLUMNS:
#dns_row[dns_dim] = float(row[col].strip())
orig_range = float(max_dict[col] - min_dict[col])
dns_row[dns_dim] = (float(row[col].strip()) - min_dict[col]) * scale / orig_range
dns_dim += 1
# No transformations.
dns_list.append(dns_row)
data_list = [item[1] for item in csr_list]
indices_list = [item[0] for item in csr_list]
indptr_list = range(0, len(indices_list) + 1, len(CATEGORICAL_COLUMNS))
csr = mx.nd.sparse.csr_matrix((data_list, indices_list, indptr_list),
shape=(len(label_list), hash_bucket_size * len(CATEGORICAL_COLUMNS)))
dns = | np.array(dns_list) | numpy.array |
#!/usr/bin/env python
import numpy as np
import matplotlib.pyplot as plt
import datetime as dt
from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter
import csv
import os.path
from download import download
import tensorflow as tf
import tensorflow_probability as tfp
from tensorflow_probability import distributions as tfd
def define_model(info: dict, level: str = "stock"):
"""
Define and return graphical model.
Parameters
----------
info: dict
Data information.
level: str
Level of the model; possible candidates are "stock", "industry", "sector" and "market".
"""
tt = info['tt']
order_scale = info['order_scale']
order = len(order_scale) - 1
num_sectors = info['num_sectors']
sec2ind_id = info['sector_industries_id']
ind_id = info['industries_id']
available_levels = ["market", "sector", "industry", "stock"]
if level not in available_levels:
raise Exception("Selected level is unknown. Please provide one of the following levels: {}.".format(available_levels))
m = [tfd.Normal(loc=tf.zeros([1, order + 1]), scale=4 * order_scale), # phi_m
tfd.Normal(loc=0, scale=4)] # psi_m
if level != "market":
m += [lambda psi_m, phi_m: tfd.Normal(loc=tf.repeat(phi_m, num_sectors, axis=0), scale=2 * order_scale), # phi_s
lambda phi_s, psi_m: tfd.Normal(loc=psi_m, scale=2 * tf.ones([num_sectors, 1]))] # psi_s
if level != "sector":
sec2ind_id = info['sector_industries_id']
m += [lambda psi_s, phi_s: tfd.Normal(loc=tf.gather(phi_s, sec2ind_id, axis=0), scale=order_scale), # phi_i
lambda phi_i, psi_s: tfd.Normal(loc=tf.gather(psi_s, sec2ind_id, axis=0), scale=1)] # psi_i
if level != "industry":
ind_id = info['industries_id']
m += [lambda psi_i, phi_i: tfd.Normal(loc=tf.gather(phi_i, ind_id, axis=0), scale=0.5 * order_scale), # phi
lambda phi, psi_i: tfd.Normal(loc=tf.gather(psi_i, ind_id, axis=0), scale=0.5)] # psi
if level == "market":
m += [lambda psi_m, phi_m: tfd.Normal(loc=tf.tensordot(phi_m, tt, axes=1), scale=tf.math.softplus(psi_m))] # y
if level == "sector":
m += [lambda psi_s, phi_s: tfd.Normal(loc=tf.tensordot(phi_s, tt, axes=1), scale=tf.math.softplus(psi_s))] # y
if level == "industry":
m += [lambda psi_i, phi_i: tfd.Normal(loc=tf.tensordot(phi_i, tt, axes=1), scale=tf.math.softplus(psi_i))] # y
if level == "stock":
m += [lambda psi, phi: tfd.Normal(loc=tf.tensordot(phi, tt, axes=1), scale=tf.math.softplus(psi))] # y
return tfd.JointDistributionSequentialAutoBatched(m)
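# Sampling from the joint prior returns one tensor per node in the list m, in
# order; training() below relies on exactly this layout, e.g.
# model.sample()[:2] yields draws shaped like (phi_m, psi_m).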
def training(logp: np.array, info: dict, learning_rate: float = 0.01, num_steps: int = 20000, plot_losses: bool = False):
"""
It performs sequential optimization over the model parameters via Adam optimizer, training at different levels to
provide sensible initial solutions at finer levels.
Parameters
----------
logp: np.array
Log-price at stock-level.
info: dict
Data information.
learning_rate: float
Adam's fixed learning rate.
num_steps: int
Adam's fixed number of iterations.
plot_losses: bool
If True, a losses decay plot is saved in the current directory.
Returns
-------
It returns trained parameters.
"""
optimizer = tf.optimizers.Adam(learning_rate=learning_rate)
num_steps_l = int(np.ceil(num_steps / 4))
# market
model = define_model(info, "market")
phi_m, psi_m = (tf.Variable(tf.zeros_like(model.sample()[:2][i])) for i in range(2))
loss_m = tfp.math.minimize(lambda: -model.log_prob([phi_m, psi_m, logp.mean(0, keepdims=1)]),
optimizer=optimizer, num_steps=num_steps_l)
# sector
model = define_model(info, "sector")
phi_m, psi_m = tf.constant(phi_m), tf.constant(psi_m)
phi_s, psi_s = (tf.Variable(tf.zeros_like(model.sample()[2:4][i])) for i in range(2))
logp_s = np.array([logp[np.where(np.array(info['sectors_id']) == k)[0]].mean(0) for k in range(info['num_sectors'])])
loss_s = tfp.math.minimize(lambda: -model.log_prob([phi_m, psi_m, phi_s, psi_s, logp_s]),
optimizer=optimizer, num_steps=num_steps_l)
# industry
model = define_model(info, "industry")
phi_s, psi_s = tf.constant(phi_s), tf.constant(psi_s)
phi_i, psi_i = (tf.Variable(tf.zeros_like(model.sample()[4:6][i])) for i in range(2))
logp_i = np.array([logp[np.where(np.array(info['industries_id']) == k)[0]].mean(0) for k in range(info['num_industries'])])
loss_i = tfp.math.minimize(lambda: -model.log_prob([phi_m, psi_m, phi_s, psi_s, phi_i, psi_i, logp_i]),
optimizer=optimizer, num_steps=num_steps_l)
# stock
model = define_model(info, "stock")
phi_i, psi_i = tf.constant(phi_i), tf.constant(psi_i)
phi, psi = (tf.Variable(tf.zeros_like(model.sample()[6:8][i])) for i in range(2))
loss = tfp.math.minimize(lambda: -model.log_prob([phi_m, psi_m, phi_s, psi_s, phi_i, psi_i, phi, psi, logp]),
optimizer=optimizer, num_steps=num_steps_l)
if plot_losses:
fig_name = 'losses_decay.png'
fig = plt.figure(figsize=(20, 3))
plt.subplot(141)
plt.title("market-level", fontsize=12)
plt.plot(loss_m)
plt.subplot(142)
plt.title("sector-level", fontsize=12)
plt.plot(loss_s)
plt.subplot(143)
plt.title("industry-level", fontsize=12)
plt.plot(loss_i)
plt.subplot(144)
plt.title("stock-level", fontsize=12)
plt.plot(loss)
plt.legend(["loss decay"], fontsize=12, loc="upper right")
plt.xlabel("iteration", fontsize=12)
fig.savefig(fig_name, dpi=fig.dpi)
print('Losses decay plot has been saved in this directory as {}.'.format(fig_name))
return phi_m, psi_m, phi_s, psi_s, phi_i, psi_i, phi, psi
def softplus(x: np.array):
"""
Softplus function, mapping real numbers to positive numbers via log(1 + exp(x)).
Parameters
----------
x: np.array
Real value.
"""
return np.log(1 + np.exp(x))
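# For large x, np.exp overflows before the log can tame it; np.logaddexp(0, x)
# computes log(1 + exp(x)) stably. Offered as an optional drop-in, not used by
# the rest of this file:
def softplus_stable(x: np.array):
return np.logaddexp(0, x)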
def order_selection(logp: np.array, info: dict, orders: np.array = np.arange(1, 14), horizon: int = 5):
"""
It selects the order of the polynomial model by backtesting: for each candidate order, the model is trained excluding the last `horizon` days, and the order whose standardized prediction errors have variance closest to one is kept.
Parameters
----------
logp: np.array
Log-prices at stock-level.
info: dict
Data information.
orders: np.array
Array of candidate orders.
horizon: int
Number of days to evaluate prediction.
"""
print("\nModel selection in progress. This can take a few minutes...")
t = logp[:, :-horizon].shape[1]
min_loss = np.inf
count = 0
for i, order in enumerate(orders):
info['tt'] = (np.linspace(1 / t, 1, t) ** np.arange(order + 1).reshape(-1, 1)).astype('float32')
info['order_scale'] = np.linspace(1 / (order + 1), 1, order + 1)[::-1].astype('float32')[None, :]
# training the model
phi_m, psi_m, phi_s, psi_s, phi_i, psi_i, phi, psi = training(logp[:, :-horizon], info)
# construct loss
tt_pred = ((1 + (np.arange(1, 1 + horizon) / t)) ** np.arange(order + 1).reshape(-1, 1)).astype('float32')
logp_pred = np.dot(phi.numpy(), tt_pred)
std_logp_pred = softplus(psi.numpy())
scores = (logp_pred - logp[:, -horizon:]) / std_logp_pred
loss = np.abs(np.mean(scores ** 2) - 1)
print("Loss value for backtested polynomial model of order {}: {}.".format(order, loss))
if i > 0 and loss > min_loss:
count += 1
else:
min_loss = loss
min_order = order
count = 0
if count == 3:
break
print("Model selection completed. Volatile will use a polynomial model of degree {}.".format(min_order))
return min_order
if __name__ == '__main__':
cli = ArgumentParser('Volatile: your day-to-day trading companion.',
formatter_class=ArgumentDefaultsHelpFormatter)
cli.add_argument('-s', '--symbols', type=str, nargs='+', help='List of symbols.')
cli.add_argument('--save-table', action='store_true',
help='Save prediction table in csv format.')
cli.add_argument('--no-plots', action='store_true',
help='Plot estimates with their uncertainty over time.')
cli.add_argument('--plot-losses', action='store_true',
help='Plot loss function decay over training iterations.')
args = cli.parse_args()
today = dt.date.today().strftime("%Y-%m-%d")
print('\nDownloading all available closing prices in the last year...')
if args.symbols is None:
with open("symbols_list.txt", "r") as my_file:
args.symbols = my_file.readlines()[0].split(" ")
data = download(args.symbols)
tickers = data["tickers"]
num_stocks, t = data['logp'].shape
# find unique names of sectors
usectors = np.unique(data['sectors'])
num_sectors = len(usectors)
# provide sector IDs at stock-level
sectors_id = [np.where(usectors == sector)[0][0] for sector in data['sectors']]
# find unique names of industries and store indices
uindustries, industries_idx = np.unique(data['industries'], return_index=True)
num_industries = len(uindustries)
# provide industry IDs at stock-level
industries_id = [np.where(uindustries == industry)[0][0] for industry in data['industries']]
# provide sector IDs at industry-level
sector_industries_id = np.array(sectors_id)[industries_idx].tolist()
# place relevant information in dictionary
info = dict(num_sectors=num_sectors, num_industries=num_industries, sector_industries_id=sector_industries_id,
industries_id=industries_id, sectors_id=sectors_id)
# how many days to look ahead when comparing the current price against a prediction
horizon = 5
# order of the polynomial
order = order_selection(data['logp'], info)
print("\nTraining the model...")
# times corresponding to trading dates in the data
info['tt'] = (np.linspace(1 / t, 1, t) ** np.arange(order + 1).reshape(-1, 1)).astype('float32')
# reweighing factors for parameters corresponding to different orders of the polynomial
info['order_scale'] = np.linspace(1 / (order + 1), 1, order + 1)[::-1].astype('float32')[None, :]
# training the model
phi_m, psi_m, phi_s, psi_s, phi_i, psi_i, phi, psi = training(data['logp'], info, plot_losses=args.plot_losses)
# calculate stock-level estimators of log-prices
logp_est = np.dot(phi.numpy(), info['tt'])
std_logp_est = softplus(psi.numpy())
# calculate stock-level estimators of prices
p_est = np.exp(logp_est + std_logp_est ** 2 / 2)
std_p_est = np.sqrt(np.exp(2 * logp_est + std_logp_est ** 2) * (np.exp(std_logp_est ** 2) - 1))
# calculate stock-level predictions of log-prices
tt_pred = ((1 + (np.arange(1 + horizon) / t)) ** np.arange(order + 1).reshape(-1, 1)).astype('float32')
logp_pred = np.dot(phi.numpy(), tt_pred)
std_logp_pred = softplus(psi.numpy())
# calculate stock-level prediction of prices
p_pred = | np.exp(logp_pred + std_logp_pred ** 2 / 2) | numpy.exp |
from numpy import isnan, take, any, all, logical_or, logical_and, logical_not, atleast_1d, \
asarray, argmin, argsort, abs, isfinite, dot#where
import numpy as np
# for PyPy
from openopt.kernel.nonOptMisc import where
from bisect import bisect_right
from FuncDesigner.Interval import splitDomainForDiscreteVariable
try:
from bottleneck import nanmin
except ImportError:
from numpy import nanmin
def getTruncatedArrays(ind, y, e, indT, _s):
# TODO: rework it when numpy has an appropriate in-place function
s = ind.size
y = take(y, ind, axis=0, out=y[:s])
e = take(e, ind, axis=0, out=e[:s])
_s = _s[ind]
if indT is not None:
indT = indT[ind]
return y, e, indT, _s#, nlh, nlh_0
def adjustDiscreteVarBounds(y, e, p):
# TODO: rework it
#n = p.n
# TODO: remove the cycle, use vectorization
for i in p._discreteVarsNumList:
v = p._freeVarsList[i]
y[:, i], e[:, i] = splitDomainForDiscreteVariable(y[:, i], e[:, i], v)
# ind = y>e
# assert not any(ind)
# y[ind], e[ind] = e[ind], y[ind]
# Ind = any(y>e, 1)
# trunc_ind = where(logical_not(Ind))[0]
# # TODO: is it triggered? // updated: can be from MOP or cons
# if any(Ind):
# ind = where(logical_not(Ind))[0]
# s = ind.size
# y = take(y, ind, axis=0, out=y[:s])
# e = take(e, ind, axis=0, out=e[:s])
# _s = _s[ind]
# if indT is not None:
# indT = indT[ind]
return y, e#, trunc_ind#_s, indT
def func7(y, e, o, a, _s, indT, nlhc, residual):
r10 = logical_and(all(isnan(o), 1), all(isnan(a), 1))
if any(r10):
j = where(logical_not(r10))[0]
lj = j.size
y = take(y, j, axis=0, out=y[:lj])
e = take(e, j, axis=0, out=e[:lj])
o = take(o, j, axis=0, out=o[:lj])
a = take(a, j, axis=0, out=a[:lj])
_s = _s[j]
if indT is not None:
indT = indT[j]
if nlhc is not None:
nlhc = take(nlhc, j, axis=0, out=nlhc[:lj])
if residual is not None:
residual = take(residual, j, axis=0, out=residual[:lj])
return y, e, o, a, _s, indT, nlhc, residual
def func9(an, fo, g, p):
#ind = searchsorted(ar, fo, side='right')
if p.probType in ('NLSP', 'SNLE') and p.maxSolutions != 1:
mino = atleast_1d([node.key for node in an])
ind = mino > 0
if not any(ind):
return an, g
else:
g = nanmin((g, nanmin(mino[ind])))
ind2 = where(logical_not(ind))[0]
#an = take(an, ind2, axis=0, out=an[:ind2.size])
#an = asarray(an[ind2])
an = [an[i] for i in ind2]
return an, g
elif p.solver.dataHandling == 'sorted':
#OLD
mino = [node.key for node in an]
ind = bisect_right(mino, fo)
if ind == len(mino):
return an, g
else:
g = nanmin((g, nanmin(atleast_1d(mino[ind]))))
return an[:ind], g
elif p.solver.dataHandling == 'raw':
#NEW
mino = atleast_1d([node.key for node in an])
r10 = mino > fo
if not any(r10):
return an, g
else:
ind = where(r10)[0]
g = nanmin((g, nanmin(atleast_1d(mino)[ind])))
#an = asarray(an)
ind2 = where(logical_not(r10))[0]
#an = take(an, ind2, axis=0, out=an[:ind2.size])
an = [an[i] for i in ind2]
return an, g
# NEW 2
# curr_tnlh = [node.tnlh_curr for node in an]
# import warnings
# warnings.warn('! fix g')
return an, g
else:
assert 0, 'incorrect nodes remove approach'
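# In the 'sorted' branch above, the node keys are kept in ascending order, so
# bisect_right(mino, fo) counts the nodes whose key <= fo and an[:ind] keeps
# exactly those. A one-line illustration:
#
#   from bisect import bisect_right
#   bisect_right([0.1, 0.5, 0.9], 0.5)  # -> 2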
def func5(an, nn, g, p):
m = len(an)
if m <= nn: return an, g
mino = np.array([node.key for node in an])
if nn == 1: # box-bound probs with exact interval analysis
ind = argmin(mino)
assert ind in (0, 1), 'error in interalg engine'
g = nanmin((mino[1-ind], g))
an = [an[ind]]
elif m > nn:
if p.solver.dataHandling == 'raw':
ind = argsort(mino)
th = mino[ind[nn]]
ind2 = where(mino < th)[0]
g = nanmin((th, g))
#an = take(an, ind2, axis=0, out=an[:ind2.size])
an = [an[i] for i in ind2]#an[ind2]
else:
g = nanmin((mino[nn], g))
an = an[:nn]
return an, g
def func4(p, y, e, o, a, fo, tnlhf_curr = None):
# TODO: simplifications for all-bool probs
if fo is None and tnlhf_curr is None: return False# used in IP
if y.size == 0: return False
cs = (y + e)/2
n = y.shape[1]
# TODO: a, o could be chenged to +/- inf instead of values duplication
if tnlhf_curr is not None:
tnlh_modL = tnlhf_curr[:, :n]
ind = logical_not(isfinite(tnlh_modL))
else:
s = o[:, :n]
ind = logical_or(s > fo, isnan(s)) # TODO: assert isnan(s) is same to isnan(a_modL)
# hasDiscreteVariables = len(p._discreteVarsNumList) != 0
indT = any(ind, 1)
if any(ind):
# if hasDiscreteVariables:
for j, v in enumerate(p._discreteVarsList):
i = p._discreteVarsNumList[j]
k = where(ind[:, i])[0]
if k.size == 0: continue
discr_mid1, discr_mid2 = splitDomainForDiscreteVariable(y[k, i], e[k, i], v)
cs[k, i] = discr_mid2
y[ind] = cs[ind]
# TODO: check is it ever called from MOP, implement if not
if p.probType != 'MOP':
a[:, :n][ind] = a[:, n:][ind]
o[:, :n][ind] = o[:, n:][ind]
if tnlhf_curr is not None:
tnlhf_curr[:, :n][ind] = tnlhf_curr[:, n:][ind]
if tnlhf_curr is not None:
tnlh_modU = tnlhf_curr[:, n:]
ind = logical_not( | isfinite(tnlh_modU) | numpy.isfinite |
import numpy as np
import cv2
lowerBound00 = (2,110,130)
upperBound00 = (7,250,210)
lowerBound01 = (1,1,1)
upperBound01 = (0,0,0)
lowerBound02 = (2,125,95)
upperBound02 = (7,250,240)
lowerBound10 = (2,100,125)
upperBound10 = (9,235,220)
lowerBound11 = (0,65,135)
upperBound11 = (9,235,230)
lowerBound12 = (2,105,95)
upperBound12 = (7,250,220)
lowerBound20 = (2,155,125)
upperBound20 = (7,250,220)
lowerBound21 = (2,125,120)
upperBound21 = (7,255,240)
lowerBound22 = (2,145,85)
upperBound22 = (7,255,200)
lowerBound30 = (2,160,115)
upperBound30 = (9,255,190)
lowerBound31 = (2,80,100)
upperBound31 = (7,250,210)
lowerBound32 = (2,125,70)
upperBound32 = (7,250,225)
lowerBound40 = (2,145,85)
upperBound40 = (7,255,195)
lowerBound41 = (2,120,115)
upperBound41 = (7,250,215)
lowerBound42 = (1,1,1)
upperBound42 = (0,0,0)
lowerBound50 = (1,1,1)
upperBound50 = (0,0,0)
lowerBound51 = (2,160,65)
upperBound51 = (7,255,205)
lowerBound52 = (1,1,1)
upperBound52 = (0,0,0)
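# Inverted pairs such as lowerBound01=(1,1,1)/upperBound01=(0,0,0) make
# cv2.inRange match nothing, effectively disabling that tile. The 1280x412
# frame below is split into a 6-row x 3-column grid of ~426x69-pixel tiles,
# each thresholded with its own HSV bounds.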
def filter_color(frame):
hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
frame00 = hsv[0:69, 0:426]
frame01 = hsv[0:69, 426:853]
frame02 = hsv[0:69, 853:1280]
frame10 = hsv[69:137, 0:426]
frame11 = hsv[69:137, 426:853]
frame12 = hsv[69:137, 853:1280]
frame20 = hsv[137:206, 0:426]
frame21 = hsv[137:206, 426:853]
frame22 = hsv[137:206, 853:1280]
frame30 = hsv[206:275, 0:426]
frame31 = hsv[206:275, 426:853]
frame32 = hsv[206:275, 853:1280]
frame40 = hsv[275:343, 0:426]
frame41 = hsv[275:343, 426:853]
frame42 = hsv[275:343, 853:1280]
frame50 = hsv[343:412, 0:426]
frame51 = hsv[343:412, 426:853]
frame52 = hsv[343:412, 853:1280]
colorMask00 = cv2.inRange(frame00, lowerBound00, upperBound00)
colorMask01 = cv2.inRange(frame01, lowerBound01, upperBound01)
colorMask02 = cv2.inRange(frame02, lowerBound02, upperBound02)
colorMask10 = cv2.inRange(frame10, lowerBound10, upperBound10)
colorMask11 = cv2.inRange(frame11, lowerBound11, upperBound11)
colorMask12 = cv2.inRange(frame12, lowerBound12, upperBound12)
colorMask20 = cv2.inRange(frame20, lowerBound20, upperBound20)
colorMask21 = cv2.inRange(frame21, lowerBound21, upperBound21)
colorMask22 = cv2.inRange(frame22, lowerBound22, upperBound22)
colorMask30 = cv2.inRange(frame30, lowerBound30, upperBound30)
colorMask31 = cv2.inRange(frame31, lowerBound31, upperBound31)
colorMask32 = cv2.inRange(frame32, lowerBound32, upperBound32)
colorMask40 = cv2.inRange(frame40, lowerBound40, upperBound40)
colorMask41 = cv2.inRange(frame41, lowerBound41, upperBound41)
colorMask42 = cv2.inRange(frame42, lowerBound42, upperBound42)
colorMask50 = cv2.inRange(frame50, lowerBound50, upperBound50)
colorMask51 = cv2.inRange(frame51, lowerBound51, upperBound51)
colorMask52 = cv2.inRange(frame52, lowerBound52, upperBound52)
leftMiddle0 = | np.concatenate((colorMask00, colorMask01), axis=1) | numpy.concatenate |
"""
This is used for visualizing -- not really needed otherwise
"""
import os
os.environ["DISPLAY"] = ""
import argparse
import h5py
import seaborn as sns
import matplotlib.pyplot as plt
from tqdm import tqdm
import numpy as np
import json
import pandas as pd
from matplotlib.animation import FuncAnimation
COLORS = np.array(
[0.000, 0.447, 0.741, 0.850, 0.325, 0.098, 0.929, 0.694, 0.125, 0.494, 0.184, 0.556, 0.466, 0.674, 0.188,
0.301, 0.745, 0.933, 0.635, 0.078, 0.184, 0.300, 0.300, 0.300, 0.600, 0.600, 0.600, 1.000, 0.000, 0.000,
1.000, 0.500, 0.000, 0.749, 0.749, 0.000, 0.000, 1.000, 0.000, 0.000, 0.000, 1.000, 0.667, 0.000, 1.000,
0.333, 0.333, 0.000, 0.333, 0.667, 0.000, 0.333, 1.000, 0.000, 0.667, 0.333, 0.000, 0.667, 0.667, 0.000,
0.667, 1.000, 0.000, 1.000, 0.333, 0.000, 1.000, 0.667, 0.000, 1.000, 1.000, 0.000, 0.000, 0.333, 0.500,
0.000, 0.667, 0.500, 0.000, 1.000, 0.500, 0.333, 0.000, 0.500, 0.333, 0.333, 0.500, 0.333, 0.667, 0.500,
0.333, 1.000, 0.500, 0.667, 0.000, 0.500, 0.667, 0.333, 0.500, 0.667, 0.667, 0.500, 0.667, 1.000, 0.500,
1.000, 0.000, 0.500, 1.000, 0.333, 0.500, 1.000, 0.667, 0.500, 1.000, 1.000, 0.500, 0.000, 0.333, 1.000,
0.000, 0.667, 1.000, 0.000, 1.000, 1.000, 0.333, 0.000, 1.000, 0.333, 0.333, 1.000, 0.333, 0.667, 1.000,
0.333, 1.000, 1.000, 0.667, 0.000, 1.000, 0.667, 0.333, 1.000, 0.667, 0.667, 1.000, 0.667, 1.000, 1.000,
1.000, 0.000, 1.000, 1.000, 0.333, 1.000, 1.000, 0.667, 1.000, 0.167, 0.000, 0.000, 0.333, 0.000, 0.000,
0.500, 0.000, 0.000, 0.667, 0.000, 0.000, 0.833, 0.000, 0.000, 1.000, 0.000, 0.000, 0.000, 0.167, 0.000,
0.000, 0.333, 0.000, 0.000, 0.500, 0.000, 0.000, 0.667, 0.000, 0.000, 0.833, 0.000, 0.000, 1.000, 0.000,
0.000, 0.000, 0.167, 0.000, 0.000, 0.333, 0.000, 0.000, 0.500, 0.000, 0.000, 0.667, 0.000, 0.000, 0.833,
0.000, 0.000, 1.000, 0.000, 0.000, 0.000, 0.143, 0.143, 0.143, 0.286, 0.286, 0.286, 0.429, 0.429, 0.429,
0.571, 0.571, 0.571, 0.714, 0.714, 0.714, 0.857, 0.857, 0.857, 1.000, 1.000, 1.000
]
).astype(np.float32).reshape((-1, 3))
parser = argparse.ArgumentParser(description='Visualize Trajectories')
parser.add_argument(
'-fn',
dest='fn',
type=str,
help='fn to use'
)
args = parser.parse_args()
data_h5 = h5py.File(args.fn, 'r')
meta_info = json.loads(data_h5['meta_info'][()].decode('utf-8'))
meta_info['task_name'] = meta_info['task_name'].strip('_')
output_actions = json.loads(data_h5['output_actions'][()].decode('utf-8'))
output_actions.append({'action': 'Done'})
aliases = json.loads(data_h5['alias_object_id_to_old_object_id'][()].decode('utf-8'))
object_id_to_states = json.loads(data_h5['object_id_to_states'][()].decode('utf-8'))
# Last action / next action
def action_txt(action_t):
action_str = [action_t['action']]
if 'objectId' in action_t:
action_str.append(action_t['objectId'].split('|')[0])
if 'receptacleObjectId' in action_t:
action_str.append(action_t['receptacleObjectId'].split('|')[0])
return '< {} >'.format(' , '.join(action_str))
action_txt = [action_txt(action_t) for action_t in output_actions]
goal_txt = '{}:\n{}'.format(meta_info['task_name'], meta_info['text'])
for i, mid in enumerate(meta_info['main_object_ids']):
goal_txt = goal_txt.replace(f'${i+1}', '[{}]'.format(mid.split('|')[0]))
# Cheap latex style alignment
goal_txt = goal_txt.replace('. ', '.\n')
for s2 in goal_txt.split('\n'):
if len(s2) < 30:
continue
for s1 in s2.split(', ')[:-1]:
goal_txt = goal_txt.replace(s1 + ', ', s1 + ',\n')
ims = []
num_frames = data_h5['frames'].shape[0]
frames = np.array(data_h5['frames'], dtype=np.int32)
IM_SIZE = (384, 640)
DPI = 128
fig = plt.figure(frameon=False)
fig.set_size_inches(IM_SIZE[1] / DPI, IM_SIZE[0] / DPI)
ax = plt.Axes(fig, [0., 0., 1., 1.])
ax.axis('off')
fig.add_axes(ax)
# Compute interesting state changes accross aliases
def _column_interestingness_score(col):
vcs = col.value_counts().values.astype(np.float32)
vcs_p = vcs / vcs.sum()
return -np.sum(vcs_p * | np.log(vcs_p) | numpy.log |
import os
from functools import reduce
from collections import deque
import numpy as np
import scipy as sp
from numpy import linalg as LA
from scipy.spatial import distance_matrix
from Transformations import rotation_matrix, superimposition_matrix
from SWCExtractor import Vertex
from Obj3D import Point3D, Sphere, Cone, calculateBound, calScaleRatio
from Utils import Timer
import Draw3DTools
import ImageUtils
def getRandChildNumber():
''' Random generate children number of a tree node
Input:
None
Output:
(int) : Children number
'''
return np.random.choice([1,2,3,4], p=[0.5, 0.35, 0.1, 0.05])
def getChildRadius(depth, max_depth):
if depth==0: # root
return np.random.choice([3,4,5], p=[0.25,0.5,0.25])
else:
return np.random.choice([1,2,3,4,5], p=[0.05, 0.2, 0.35, 0.35, 0.05])
def getChildLength(depth, max_depth):
''' Distance from a child node to its parent node.
'''
return 25 + (max_depth-depth) + np.random.randint(0,11)
def getNodeFromMark(mark, pos, MIN_DISTANCE, MAX_DISTANCE, mark_shape, use_parent=False, parent_pos=None):
# Calculate general search range
x,y,z = pos
bbox = [x-MAX_DISTANCE, y-MAX_DISTANCE, z-MAX_DISTANCE, x+MAX_DISTANCE+1, y+MAX_DISTANCE+1, z+MAX_DISTANCE+1] # xmin,ymin,zmin,xmax,ymax,zmax
for i in range(3):
j = i+3
if (bbox[i]<0):
bbox[i] = 0
if (bbox[j]>mark_shape[i]):
bbox[j] = mark_shape[i]
(xmin,ymin,zmin,xmax,ymax,zmax) = tuple(bbox)
(x_idxs,y_idxs,z_idxs)=np.where(mark[xmin:xmax,ymin:ymax,zmin:zmax]==0)
if not use_parent:
if len(x_idxs) > 0:
xs = np.asarray(xmin+x_idxs).reshape((len(x_idxs),1))
ys = np.asarray(ymin+y_idxs).reshape((len(y_idxs),1))
zs = np.asarray(zmin+z_idxs).reshape((len(z_idxs),1))
points=np.hstack((xs,ys,zs))
# Compute the distance from each candidate point to the center point
center_point = np.array([pos]) # 1*3
dis_mat = distance_matrix(points, center_point) # M*1
            # Keep points whose distance falls within the allowed radius band
res_idxs = np.where(np.logical_and(MIN_DISTANCE<dis_mat, dis_mat<MAX_DISTANCE))[0]
if len(res_idxs)>0:
child_choose = np.random.choice(res_idxs)
child_pos = (xmin+x_idxs[child_choose], ymin+y_idxs[child_choose], zmin+z_idxs[child_choose])
return child_pos
else:
return None
else:
return None
else:
if len(x_idxs) > 0:
xs = np.asarray(xmin+x_idxs-x).reshape((len(x_idxs),1))
ys = np.asarray(ymin+y_idxs-y).reshape((len(y_idxs),1))
zs = np.asarray(zmin+z_idxs-z).reshape((len(z_idxs),1))
points=np.hstack((xs,ys,zs)) # M*3
parent_vec = np.array([[parent_pos[0]-pos[0]],
[parent_pos[1]-pos[1]],
[parent_pos[2]-pos[2]]]) # 3*1
            # Compute the distance from every candidate point to the center
dis_mat = LA.norm(points, axis=1) # M*1
dis_mat = dis_mat.reshape((dis_mat.shape[0],1))
            # Check the angle against parent_vec to guarantee outward growth
angle_mat = np.matmul(points, parent_vec) # M*1
            # Keep points whose distance is within the maximum radius
res_idxs = np.where(np.logical_and(angle_mat<0, dis_mat<MAX_DISTANCE))[0]
if len(res_idxs)>0:
child_choose = np.random.choice(res_idxs)
child_pos = (xmin+x_idxs[child_choose], ymin+y_idxs[child_choose], zmin+z_idxs[child_choose])
return child_pos
else:
return None
else:
return None
def setMarkWithSphere(mark, sphere, mark_shape, value, use_bbox=False):
bbox = list(sphere.calBBox()) # xmin,ymin,zmin,xmax,ymax,zmax
for i in range(3):
j = i+3
if (bbox[i]<0):
bbox[i] = 0
if (bbox[j]>mark_shape[i]):
bbox[j] = mark_shape[i]
(xmin,ymin,zmin,xmax,ymax,zmax) = tuple(bbox)
(x_idxs,y_idxs,z_idxs)=np.where(mark[xmin:xmax,ymin:ymax,zmin:zmax]==0)
# points=img_idxs[:3, xmin+x_idxs, ymin+y_idxs, zmin+z_idxs] # 3*M
# points=points.T # M*3
if not use_bbox:
xs = np.asarray(xmin+x_idxs).reshape((len(x_idxs),1))
ys = np.asarray(ymin+y_idxs).reshape((len(y_idxs),1))
zs = np.asarray(zmin+z_idxs).reshape((len(z_idxs),1))
points=np.hstack((xs,ys,zs))
sphere_c_mat = np.array([sphere.center_point.toList()]) # 1*3
        # Compute the distance from every point to the sphere center
dis_mat = distance_matrix(points,sphere_c_mat) # M*1
        # Keep points whose distance is within the sphere radius
res_idxs = np.where(dis_mat<=sphere.radius)[0]
mark[xmin+x_idxs[res_idxs], ymin+y_idxs[res_idxs], zmin+z_idxs[res_idxs]] = value
else:
mark[xmin+x_idxs, ymin+y_idxs, zmin+z_idxs] = value
def setMarkWithCone(mark, cone, mark_shape, value, use_bbox=False):
bbox = list(cone.calBBox()) # xmin,ymin,zmin,xmax,ymax,zmax
for i in range(3):
j = i+3
if (bbox[i]<0):
bbox[i] = 0
if (bbox[j]>mark_shape[i]):
bbox[j] = mark_shape[i]
(xmin,ymin,zmin,xmax,ymax,zmax) = tuple(bbox)
(x_idxs,y_idxs,z_idxs)=np.where(mark[xmin:xmax,ymin:ymax,zmin:zmax]==0)
if not use_bbox:
xs = np.asarray(xmin+x_idxs).reshape((len(x_idxs),1))
ys = np.asarray(ymin+y_idxs).reshape((len(y_idxs),1))
zs = np.asarray(zmin+z_idxs).reshape((len(z_idxs),1))
ns = np.ones((len(z_idxs),1))
points=np.hstack((xs,ys,zs,ns))
        # Inverse (revert) transform matrix for the cone
r_min=cone.up_radius
r_max=cone.bottom_radius
height=cone.height
cone_revert_mat = cone.revertMat().T # 4*4
        # Point coordinates after reverting the cone transform
revert_coor_mat = np.matmul(points, cone_revert_mat) # M*4
revert_radius_list = LA.norm(revert_coor_mat[:,:2], axis=1) # M
# Local Indexs
M = points.shape[0]
l_idx = np.arange(M) # M (1-dim)
l_mark = np.ones((M,), dtype=bool)
        # Filter out points whose height lies outside the cone
res_idxs = np.logical_or(revert_coor_mat[l_idx[l_mark],2]<0, revert_coor_mat[l_idx[l_mark],2]>height)
l_mark[l_idx[l_mark][res_idxs]]=False
        # Filter out points whose radius lies outside the outer radius
res_idxs = revert_radius_list[l_idx[l_mark]]>r_max
l_mark[l_idx[l_mark][res_idxs]]=False
        # Mark points whose radius falls inside the inner radius
res_idxs = revert_radius_list[l_idx[l_mark]]<=r_min
mark[xmin+x_idxs[l_idx[l_mark][res_idxs]], ymin+y_idxs[l_idx[l_mark][res_idxs]], zmin+z_idxs[l_idx[l_mark][res_idxs]]] = value
l_mark[l_idx[l_mark][res_idxs]]=False
        # Handle the remaining points on the slanted cone surface
if r_max>r_min:
res_idxs = ((r_max-revert_radius_list[l_idx[l_mark]])*height/(r_max-r_min)) >= revert_coor_mat[l_idx[l_mark],2]
mark[xmin+x_idxs[l_idx[l_mark][res_idxs]], ymin+y_idxs[l_idx[l_mark][res_idxs]], zmin+z_idxs[l_idx[l_mark][res_idxs]]] = value
l_mark[l_idx[l_mark][res_idxs]]=False
else:
mark[xmin+x_idxs, ymin+y_idxs, zmin+z_idxs] = value
def simulate3DTree():
MAX_TREE_DEPTH = 4
MAX_RADIUS = 6
SAFE_DISTANCE = MAX_RADIUS + 2
MAX_DISTANCE = 16
mark_shape = (251,251,251)
# Init space
mark = np.zeros(mark_shape, dtype=np.uint8)
mark_shape = mark.shape
node_count = 0
# Create root node
root_r = getChildRadius(0,MAX_TREE_DEPTH)
root_pos = (125,125,125)
node_count += 1
root_node = Vertex(node_count,0,root_pos[0],root_pos[1],root_pos[2],root_r,-1)
setMarkWithSphere(mark, Sphere(Point3D(*root_node.pos), root_node.r), mark_shape, 255)
# setMarkWithSphere(mark, Sphere(Point3D(*root_node.pos), root_node.r + SAFE_DISTANCE), mark_shape, 1)
    # Create a deque and containers for the results
    dq = deque([(root_node, 0)])  # the second item is the node's depth
nodes = {}
graph = {}
while len(dq):
root_node = dq[0][0]
root_depth = dq[0][1]
dq.popleft()
# Add to nodes and graph
v1 = root_node.idx
v2 = root_node.p_idx
if root_node.idx not in nodes:
nodes[root_node.idx] = root_node
if v1>0 and v2>0:
if not v1 in graph:
graph[v1] = set([v2])
else:
graph[v1].add(v2)
if not v2 in graph:
graph[v2] = set([v1])
else:
graph[v2].add(v1)
if root_depth<MAX_TREE_DEPTH:
# Get children number
            if root_node.idx==1: # the root node is handled separately from the other nodes
child_num = 4
mask = np.array([[1,1,1],
[-1,1,1],
[1,1,-1],
[-1,1,-1]])
for i in range(4):
                    # Get branch radius and length
child_r = getChildRadius(root_depth+1,MAX_TREE_DEPTH)
child_length = getChildLength(root_depth+1,MAX_TREE_DEPTH)
#theta_z = np.random.uniform(30,60)
theta_y = 45
#A = rotation_matrix(theta_z/180*np.math.pi, [0,0,1])
B = rotation_matrix(-theta_y/180*np.math.pi, [0,1,0])
# rot_mat = np.matmul(A,B)
p0 = np.array([[child_length],[0],[0],[1]])
p1 = np.matmul(B, p0)
child_pos = (int(p1[0]*mask[i][0]+root_node.pos[0]), \
int(p1[1]*mask[i][1]+root_node.pos[1]), \
int(p1[2]*mask[i][2]+root_node.pos[2]))
if ImageUtils.bboxCheck3D(child_pos[0], child_pos[1], child_pos[2], child_r, mark_shape):
node_count += 1
#print('parent', root_node.idx, 'id', node_count, 'pos', child_pos, 'depth', root_depth+1)
child_node = Vertex(node_count, 0, child_pos[0], child_pos[1], child_pos[2], child_r, root_node.idx)
                        # Draw the child sphere and the connecting cone
setMarkWithSphere(mark, Sphere(Point3D(*child_node.pos), child_node.r), mark_shape, 255)
setMarkWithCone(mark, Cone(Point3D(*root_node.pos), root_node.r, \
Point3D(*child_node.pos), child_node.r), mark_shape, 255)
# Add to dequeue
dq.append((child_node, root_depth+1))
else:
child_num = getRandChildNumber()
child_angles_range = Draw3DTools.sliceRange(0, 360, child_num)
for i in range(child_num):
                    # Get branch radius and length
child_r = getChildRadius(root_depth+1,MAX_TREE_DEPTH)
child_length = getChildLength(root_depth+1,MAX_TREE_DEPTH)
                    # Get growth angles
if child_num==1:
theta_z = np.random.uniform(0,360)
theta_y = np.random.uniform(60,90)
else:
theta_z = np.random.uniform(child_angles_range[i][0],child_angles_range[i][1])
                        theta_y = np.random.uniform(30, 70)
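                    # Assumed continuation, mirroring the root-branch case
                    # above (the original is truncated here): rotate a length
                    # vector by theta_z about z and -theta_y about y, offset
                    # it from the parent node, and spawn the child if it fits.
                    A = rotation_matrix(theta_z / 180 * np.math.pi, [0, 0, 1])
                    B = rotation_matrix(-theta_y / 180 * np.math.pi, [0, 1, 0])
                    p0 = np.array([[child_length], [0], [0], [1]])
                    p1 = np.matmul(np.matmul(A, B), p0)
                    child_pos = (int(p1[0] + root_node.pos[0]),
                                 int(p1[1] + root_node.pos[1]),
                                 int(p1[2] + root_node.pos[2]))
                    if ImageUtils.bboxCheck3D(child_pos[0], child_pos[1], child_pos[2], child_r, mark_shape):
                        node_count += 1
                        child_node = Vertex(node_count, 0, child_pos[0], child_pos[1], child_pos[2], child_r, root_node.idx)
                        setMarkWithSphere(mark, Sphere(Point3D(*child_node.pos), child_node.r), mark_shape, 255)
                        setMarkWithCone(mark, Cone(Point3D(*root_node.pos), root_node.r, \
                                        Point3D(*child_node.pos), child_node.r), mark_shape, 255)
                        dq.append((child_node, root_depth + 1))
    # Assumed return: the simulated tree as nodes, adjacency graph and volume.
    return nodes, graph, mark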
from __future__ import print_function
import time
import numpy as np
import numpy.random as rnd
from pymanopt import Problem
from pymanopt.solvers.steepest_descent import SteepestDescent
from pymanopt.solvers.solver import Solver
def compute_centroid(man, x):
"""
Compute the centroid as Karcher mean of points x belonging to the manifold
man.
"""
n = len(x)
def objective(y): # weighted Frechet variance
acc = 0
for i in range(n):
acc += man.dist(y, x[i]) ** 2
return acc / 2
def gradient(y):
g = man.zerovec(y)
for i in range(n):
g -= man.log(y, x[i])
return g
# XXX: manopt runs a few TR iterations here. For us to do this, we either
# need to work out the Hessian of the Frechet variance by hand or
# implement approximations for the Hessian to use in the TR solver.
# This is because we cannot implement the Frechet variance with theano
# and compute the Hessian automatically due to dependency on the
# manifold-dependent distance function.
solver = SteepestDescent(maxiter=15)
problem = Problem(man, cost=objective, grad=gradient, verbosity=0)
return solver.solve(problem)
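# Minimal usage sketch (illustrative; assumes a pymanopt manifold such as
# Sphere is available):
#
#   from pymanopt.manifolds import Sphere
#   man = Sphere(3)
#   pts = [man.rand() for _ in range(5)]
#   karcher_mean = compute_centroid(man, pts)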
class NelderMead(Solver):
"""
    Nelder-Mead minimization algorithm for derivative-free minimization
based on neldermead.m and centroid.m from the manopt MATLAB package.
"""
def __init__(self, maxcostevals=None, maxiter=None, reflection=1,
expansion=2, contraction=0.5, *args, **kwargs):
"""
Instantiate Nelder-Mead method solver class.
Variable attributes (defaults in brackets):
        - maxcostevals (max(1000, 2 * dim))
            Maximum number of allowed cost evaluations
        - maxiter (max(2000, 4 * dim))
            Maximum number of allowed iterations
- reflection (1)
Determines how far to reflect away from the worst vertex;
stretched (reflection > 1), compressed (0 < reflection < 1),
or exact (reflection = 1)
- expansion (2)
Factor by which to expand the reflected simplex
- contraction (0.5)
Factor by which to contract the reflected simplex
"""
super(NelderMead, self).__init__(*args, **kwargs)
self._maxcostevals = maxcostevals
self._maxiter = maxiter
self._reflection = reflection
self._expansion = expansion
self._contraction = contraction
def solve(self, problem, x=None):
"""
Perform optimization using a Nelder-Mead minimization algorithm.
Arguments:
- problem
Pymanopt problem setup using the Problem class, this must
have a .manifold attribute specifying the manifold to optimize
over, as well as a cost and enough information to compute
the gradient of that cost.
- x=None
Optional parameter. Initial population of elements on the
manifold. If None then an initial population will be randomly
generated
Returns:
- x
Local minimum of obj, or if algorithm terminated before
convergence x will be the point at which it terminated
"""
man = problem.manifold
verbosity = problem.verbosity
objective = problem.cost
# Choose proper default algorithm parameters. We need to know about the
# dimension of the manifold to limit the parameter range, so we have to
# defer proper initialization until this point.
dim = man.dim
if self._maxcostevals is None:
self._maxcostevals = max(1000, 2 * dim)
if self._maxiter is None:
self._maxiter = max(2000, 4 * dim)
# If no initial simplex x is given by the user, generate one at random.
if x is None:
x = [man.rand() for i in range(int(dim + 1))]
elif not hasattr(x, "__iter__"):
raise ValueError("The initial simplex x must be iterable")
else:
# XXX: Is this necessary?
if len(x) != dim + 1:
print("The simplex size was adapted to the dimension "
"of the manifold")
x = x[:dim + 1]
# Compute objective-related quantities for x, and setup a function
# evaluations counter.
costs = np.array([objective(xi) for xi in x])
fy = list(costs)
costevals = dim + 1
# Sort simplex points by cost.
order = np.argsort(costs)
costs = costs[order]
x = [x[i] for i in order] # XXX: Probably inefficient
# Iteration counter (at any point, iter is the number of fully executed
# iterations so far).
iter = 0
time0 = time.time()
self._start_optlog()
while True:
iter += 1
if verbosity >= 2:
print("Cost evals: %7d\t"
"Best cost: %+.8e" % (costevals, costs[0]))
# Sort simplex points by cost.
order = np.argsort(costs)
costs = costs[order]
x = [x[i] for i in order] # XXX: Probably inefficient
stop_reason = self._check_stopping_criterion(
time0, iter=iter, costevals=costevals)
if stop_reason:
if verbosity >= 1:
print(stop_reason)
print('')
break
# Compute a centroid for the dim best points.
xbar = compute_centroid(man, x[:-1])
# Compute the direction for moving along the axis xbar - worst x.
vec = man.log(xbar, x[-1])
# Reflection step
xr = man.exp(xbar, -self._reflection * vec)
costr = objective(xr)
costevals += 1
# If the reflected point is honorable, drop the worst point,
# replace it by the reflected point and start a new iteration.
if costr >= costs[0] and costr < costs[-2]:
if verbosity >= 2:
print("Reflection")
costs[-1] = costr
x[-1] = xr
continue
# If the reflected point is better than the best point, expand.
if costr < costs[0]:
xe = man.exp(xbar, -self._expansion * vec)
coste = objective(xe)
costevals += 1
if coste < costr:
if verbosity >= 2:
print("Expansion")
costs[-1] = coste
x[-1] = xe
continue
else:
if verbosity >= 2:
print("Reflection (failed expansion)")
costs[-1] = costr
x[-1] = xr
continue
# If the reflected point is worse than the second to worst point,
# contract.
if costr >= costs[-2]:
if costr < costs[-1]:
# do an outside contraction
xoc = man.exp(xbar, -self._contraction * vec)
costoc = objective(xoc)
costevals += 1
if costoc <= costr:
if verbosity >= 2:
print("Outside contraction")
costs[-1] = costoc
x[-1] = xoc
continue
else:
# do an inside contraction
xic = man.exp(xbar, self._contraction * vec)
costic = objective(xic)
costevals += 1
if costic <= costs[-1]:
if verbosity >= 2:
print("Inside contraction")
costs[-1] = costic
x[-1] = xic
continue
# If we get here, shrink the simplex around x[0].
if verbosity >= 2:
print("Shrinkage")
x0 = x[0]
            for i in np.arange(1, dim + 1):
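                # Assumed shrinkage body (standard Nelder-Mead): pull every
                # vertex toward the best point x0 via the geodesic midpoint
                # and re-evaluate its cost. `pairmean` is assumed to be
                # provided by the manifold, as in pymanopt's built-ins.
                x[i] = man.pairmean(x0, x[i])
                costs[i] = objective(x[i])
            costevals += dim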
# !/usr/bin/env python
# Created by "Thieu" at 22:08, 01/03/2021 ----------%
# Email: <EMAIL> %
# Github: https://github.com/thieu1995 %
# --------------------------------------------------%
import numpy as np
from copy import deepcopy
from mealpy.optimizer import Optimizer
class BaseSA(Optimizer):
"""
The original version of: Simulated Annealing (SA)
    Hyper-parameters should be fine-tuned within an approximate range to get faster convergence toward the global optimum:
+ max_sub_iter (int): [5, 10, 15], Maximum Number of Sub-Iteration (within fixed temperature), default=5
+ t0 (int): Fixed parameter, Initial Temperature, default=1000
+ t1 (int): Fixed parameter, Final Temperature, default=1
+ move_count (int): [5, 20], Move Count per Individual Solution, default=5
+ mutation_rate (float): [0.01, 0.2], Mutation Rate, default=0.1
+ mutation_step_size (float): [0.05, 0.1, 0.15], Mutation Step Size, default=0.1
+ mutation_step_size_damp (float): [0.8, 0.99], Mutation Step Size Damp, default=0.99
Examples
~~~~~~~~
>>> import numpy as np
>>> from mealpy.physics_based.SA import BaseSA
>>>
>>> def fitness_function(solution):
>>> return np.sum(solution**2)
>>>
>>> problem_dict1 = {
>>> "fit_func": fitness_function,
>>> "lb": [-10, -15, -4, -2, -8],
>>> "ub": [10, 15, 12, 8, 20],
>>> "minmax": "min",
>>> }
>>>
>>> epoch = 1000
>>> pop_size = 50
>>> max_sub_iter = 5
>>> t0 = 1000
>>> t1 = 1
>>> move_count = 5
>>> mutation_rate = 0.1
>>> mutation_step_size = 0.1
>>> mutation_step_size_damp = 0.99
>>> model = BaseSA(problem_dict1, epoch, pop_size, max_sub_iter, t0, t1, move_count, mutation_rate, mutation_step_size, mutation_step_size_damp)
>>> best_position, best_fitness = model.solve()
>>> print(f"Solution: {best_position}, Fitness: {best_fitness}")
References
~~~~~~~~~~
[1] <NAME>. and <NAME>., 1987. Simulated annealing. In Simulated
annealing: Theory and applications (pp. 7-15). Springer, Dordrecht.
"""
def __init__(self, problem, epoch=10000, pop_size=100, max_sub_iter=5, t0=1000, t1=1, move_count=5,
mutation_rate=0.1, mutation_step_size=0.1, mutation_step_size_damp=0.99, **kwargs):
"""
Args:
problem (dict): The problem dictionary
epoch (int): maximum number of iterations, default = 10000
pop_size (int): number of population size, default = 100
max_sub_iter (int): Maximum Number of Sub-Iteration (within fixed temperature), default=5
t0 (int): Initial Temperature, default=1000
t1 (int): Final Temperature, default=1
move_count (int): Move Count per Individual Solution, default=5
mutation_rate (float): Mutation Rate, default=0.1
mutation_step_size (float): Mutation Step Size, default=0.1
mutation_step_size_damp (float): Mutation Step Size Damp, default=0.99
"""
super().__init__(problem, kwargs)
self.epoch = self.validator.check_int("epoch", epoch, [1, 100000])
self.pop_size = self.validator.check_int("pop_size", pop_size, [10, 10000])
self.max_sub_iter = self.validator.check_int("max_sub_iter", max_sub_iter, [1, 100000])
self.t0 = self.validator.check_int("t0", t0, [500, 2000])
self.t1 = self.validator.check_int("t1", t1, [1, 100])
self.move_count = self.validator.check_int("move_count", move_count, [2, int(self.pop_size/2)])
self.mutation_rate = self.validator.check_float("mutation_rate", mutation_rate, (0, 1.0))
self.mutation_step_size = self.validator.check_float("mutation_step_size", mutation_step_size, (0, 1.0))
self.mutation_step_size_damp = self.validator.check_float("mutation_step_size_damp", mutation_step_size_damp, (0, 1.0))
self.nfe_per_epoch = self.pop_size * self.max_sub_iter * self.move_count
self.sort_flag = True
self.dyn_t, self.t_damp, self.dyn_sigma = None, None, None
def _mutate(self, position, sigma):
# Select Mutating Variables
pos_new = position + sigma * np.random.uniform(self.problem.lb, self.problem.ub)
pos_new = np.where(np.random.uniform(0, 1, self.problem.n_dims) < self.mutation_rate, position, pos_new)
        if np.all(pos_new == position):
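            # Assumed completion: force at least one randomly chosen dimension
            # to mutate so the candidate differs from its parent (the index
            # and bound handling here are illustrative, not mealpy's exact code).
            jdx = np.random.randint(0, self.problem.n_dims)
            pos_new[jdx] = np.random.uniform(self.problem.lb[jdx], self.problem.ub[jdx])
        return pos_new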
#!/usr/bin/env python
from problem import Problem
import rospy
import numpy as np
from scipy.stats import rice
from scipy.special import jv as besseli
from tools import compute_distance
# sigma=12.551, rice_b=0.009, rice_loc=-7.001
class WLANLocalization(Problem):
def __init__(self, locations, neighbours, Ptx=12.0, Gtx=1.5, Ltx=0.0, Grx=1.5, Lrx=0.0, v=2.4e9, mu=4.0, sigma=12.551, rice_b=0.009, rice_loc=-7.001):
super(WLANLocalization, self).__init__(locations, neighbours)
self.RSS_base = 147.55 - 20*np.log10(v)
self.sigma = sigma
self.rice_b = rice_b
self.rice_loc = rice_loc
def rss_noiseless(self, distances):
return self.RSS_base - 20.0 * np.log10(distances)
def single_likelihood(self, source_locations, l, z):
d = compute_distance(source_locations.reshape(-1,2), l.reshape(-1,2))
rss = self.rss_noiseless(d)
fading = rss-z
        if np.all(fading < 0):
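            # Assumed completion: fading entirely below the support of the
            # Rician model carries essentially no probability mass.
            return 0.0
        # Hypothetical likelihood model: Rician-distributed fading around the
        # noiseless RSS prediction, with the parameters set in __init__.
        return np.prod(rice.pdf(fading, self.rice_b, loc=self.rice_loc, scale=self.sigma))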
import numpy as np
import pdb
# import spiking function
from spiking_ulils import label_encoder
from spiking_ulils import Conv2d, BatchNorm2d, Relu
from spiking_ulils import Flatten
from spiking_ulils import Linear
import argparse
parser = argparse.ArgumentParser()
# quantization level
parser.add_argument('--k', type=int, default=1)
args = parser.parse_args()
print(args.k)
class MyNet():
def __init__(self):
self.conv0 = Conv2d(in_channels=3, n_filter=128, filter_size=(3, 3), padding=1, stride=1, use_ternary=False)
self.bn0 = BatchNorm2d(n_channel=128, momentum=0.1)
self.relu0 = Relu()
self.conv1 = Conv2d(in_channels=128, n_filter=256, filter_size=(3, 3), padding=1, stride=1, use_ternary=False)
self.bn1 = BatchNorm2d(n_channel=256, momentum=0.1)
self.relu1 = Relu()
self.conv2 = Conv2d(in_channels=256, n_filter=256, filter_size=(2, 2), padding=0, stride=2, use_ternary=False)
self.bn2 = BatchNorm2d(n_channel=256, momentum=0.1)
self.relu2 = Relu()
self.conv3 = Conv2d(in_channels=256, n_filter=512, filter_size=(3, 3), padding=1, stride=1, use_ternary=False)
self.bn3 = BatchNorm2d(n_channel=512, momentum=0.1)
self.relu3 = Relu()
self.conv4 = Conv2d(in_channels=512, n_filter=512, filter_size=(2, 2), padding=0, stride=2, use_ternary=False)
self.bn4 = BatchNorm2d(n_channel=512, momentum=0.1)
self.relu4 = Relu()
self.conv5 = Conv2d(in_channels=512, n_filter=1024, filter_size=(3, 3), padding=1, stride=1, use_ternary=False)
self.bn5 = BatchNorm2d(n_channel=1024, momentum=0.1)
self.relu5 = Relu()
self.conv6 = Conv2d(in_channels=1024, n_filter=512, filter_size=(3, 3), padding=1, stride=1, use_ternary=False)
self.bn6 = BatchNorm2d(n_channel=512, momentum=0.1)
self.relu6 = Relu()
self.conv7 = Conv2d(in_channels=512, n_filter=512, filter_size=(2, 2), padding=0, stride=2, use_ternary=False)
self.bn7 = BatchNorm2d(n_channel=512, momentum=0.1)
self.relu7 = Relu()
self.conv8 = Conv2d(in_channels=512, n_filter=1024, filter_size=(3, 3), padding=0, stride=1, use_ternary=False)
self.bn8 = BatchNorm2d(n_channel=1024, momentum=0.1)
self.relu8 = Relu()
self.conv9 = Conv2d(in_channels=1024, n_filter=512, filter_size=(1, 1), padding=0, stride=1, use_ternary=False)
self.bn9 = BatchNorm2d(n_channel=512, momentum=0.1)
self.relu9 = Relu()
self.flatten = Flatten()
        # Fully connected layer
self.fc1 = Linear(dim_in=512, dim_out=10, use_ternary=False)
self.parameters = self.conv0.params + self.bn0.params + self.conv1.params + self.bn1.params + self.conv2.params + self.bn2.params + \
self.conv3.params + self.bn3.params + self.conv4.params + self.bn4.params + self.conv5.params + self.bn5.params + self.conv6.params + self.bn6.params + \
self.conv7.params + self.bn7.params + self.conv8.params + self.bn8.params + self.conv9.params + self.bn9.params + \
self.fc1.params
self.dummy_layers = [self.conv0, self.bn0, self.conv1, self.bn1, self.conv2, self.bn2, self.conv3, self.bn3, self.conv4, self.bn4, \
self.conv5, self.bn5, self.conv6, self.bn6, self.conv7, self.bn7, self.conv8, self.bn8, self.conv9, self.bn9, \
self.fc1]
def __call__(self, X, t, mode='train'):
"""
mode: train or test
"""
return self.forward(X, t, mode)
# spiking network inference during multiple time steps
def forward(self, X, t, mode):
# the first layer is usually a pixel-to-spike encoding layer
conv0_out, conv0_spike_collect, conv0_spike_num, conv0_sop_num = self.conv0(X, t)
conv1_out, conv1_spike_collect, conv1_spike_num, conv1_sop_num = self.conv1(conv0_out, t)
conv2_out, conv2_spike_collect, conv2_spike_num, conv2_sop_num = self.conv2(conv1_out, t)
conv3_out, conv3_spike_collect, conv3_spike_num, conv3_sop_num = self.conv3(conv2_out, t)
conv4_out, conv4_spike_collect, conv4_spike_num, conv4_sop_num = self.conv4(conv3_out, t)
conv5_out, conv5_spike_collect, conv5_spike_num, conv5_sop_num = self.conv5(conv4_out, t)
conv6_out, conv6_spike_collect, conv6_spike_num, conv6_sop_num = self.conv6(conv5_out, t)
conv7_out, conv7_spike_collect, conv7_spike_num, conv7_sop_num = self.conv7(conv6_out, t)
conv8_out, conv8_spike_collect, conv8_spike_num, conv8_sop_num = self.conv8(conv7_out, t)
conv9_out, conv9_spike_collect, conv9_spike_num, conv9_sop_num = self.conv9(conv8_out, t)
flat_out = self.flatten(conv9_spike_collect, t)
# the last layer output the membrane potential value indexing category
fc1_out = self.fc1(flat_out, t)
# spike number
spike_num = conv0_spike_num + conv1_spike_num + conv2_spike_num + conv3_spike_num + conv4_spike_num + \
conv5_spike_num + conv6_spike_num + conv7_spike_num + conv8_spike_num + conv9_spike_num
# spike collector
spike_collect = np.sum(conv0_spike_collect) + np.sum(conv1_spike_collect) + np.sum(conv2_spike_collect) + np.sum(conv3_spike_collect) + np.sum(conv4_spike_collect) + \
            np.sum(conv5_spike_collect) + np.sum(conv6_spike_collect) + np.sum(conv7_spike_collect) + np.sum(conv8_spike_collect) + np.sum(conv9_spike_collect)
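        # Assumed return signature: output membrane potentials plus spike
        # statistics (the exact tuple of the original is not recoverable here).
        return fc1_out, spike_num, spike_collect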
# code for avoiding Tensorflow to use the GPU
import tensorflow as tf
from keras import backend as K
num_cores = 4
num_CPU = 1
num_GPU = 0
config = tf.ConfigProto(intra_op_parallelism_threads=num_cores,\
inter_op_parallelism_threads=num_cores, allow_soft_placement=True,\
device_count = {'CPU' : num_CPU, 'GPU' : num_GPU})
session = tf.Session(config=config)
K.set_session(session)
import numpy as np
import cv2
import time
from Util import Util
class FacialEmotion:
def __init__(self, facial_emotion, confidence):
self.facial_emotion = facial_emotion
self.confidence = confidence
class FacialEmotionClassifierResult:
def __init__(self, facial_emotions=[]):
self.facial_emotions = facial_emotions
def convert_to_list(self):
emotions = [emotion.facial_emotion for emotion in self.facial_emotions]
confidences = [emotion.confidence for emotion in self.facial_emotions]
return emotions, confidences
class FacialEmotionClassifier:
def __init__(self, model_name='CNN', input_size=(64, 64, 3), threshold=0.20, do_timing=False):
self.threshold = threshold
self.input_size = input_size
self.image_size = None
self.do_timing = do_timing
self.result = None
self.emotion_labels = {0:'angry', 1:'disgust', 2:'fear', 3:'happy', 4:'sad', 5:'surprise', 6:'neutral'}
self.set_model(model_name)
def get_result(self):
return self.result
def predict(self, image, face_bounding_boxes, confidences):
# pre-process
pre_process_runtime_start = time.time()
model_input = self.pre_process(image, face_bounding_boxes)
pre_process_runtime_end = time.time()
# model prediction
model_predict_runtime_start = time.time()
model_output = self.model_predict(model_input)
model_predict_runtime_end = time.time()
# postprocess
post_process_runtime_start = time.time()
facial_emotions, confidences = self.post_process(model_output)
post_process_runtime_end = time.time()
self.result = FacialEmotionClassifierResult([FacialEmotion(facial_emotion, confidence) for facial_emotion, confidence in zip(facial_emotions, confidences)])
if self.do_timing:
print('facial emotion classifier preprocessing time (ms):', (pre_process_runtime_end - pre_process_runtime_start) * 1e3)
print('facial emotion classifier prediction time (ms):', (model_predict_runtime_end - model_predict_runtime_start) * 1e3)
print('facial emotion classifier post-processing time (ms):', (post_process_runtime_end - post_process_runtime_start) * 1e3)
return facial_emotions, confidences
def set_model(self, model_name):
if model_name == 'CNN':
from keras.models import load_model
emotion_model_path = './models/emotion_classification/cnn_mxnet/emotion_model.hdf5'
self.model = load_model(emotion_model_path)
def pre_process(self, image, face_bounding_boxes):
self.image_size = np.shape(image)
gray_image = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
emotion_offsets = (20, 40)
if len(face_bounding_boxes) > 0:
# upscale bounding boxes
upscale_bounding_boxes = Util.upscale(face_bounding_boxes, self.image_size, offsets=emotion_offsets)
# crop and resize each previously detected humans
model_input = Util.crop_resize(gray_image, upscale_bounding_boxes, self.input_size)
# normalize the resulting images
model_input = np.array(model_input)
model_input = model_input.astype('float32')
model_input /= 255.
model_input = (model_input - 0.5) * 2.
# expand
model_input = np.expand_dims(model_input, -1)
else:
model_input = None
upscale_bounding_boxes = None
return model_input
def model_predict(self, model_input):
if model_input is not None:
model_output = self.model.predict(model_input)
else:
            model_output = np.empty((0, 0))
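        # Assumed completion: hand the raw network output to post_process.
        return model_output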
"""
This module provides the python implementation of the functions for each
mathematical nodes used in Agraph
Attributes
----------
FORWARD_EVAL_MAP : dictionary {int: function}
A map of node number to evaluation function
REVERSE_EVAL_MAP : dictionary {int: function}
A map of node number to derivative evaluation function
"""
import numpy as np
from bingo.symbolic_regression.agraph.operator_definitions \
import INTEGER, VARIABLE, CONSTANT, ADDITION, SUBTRACTION, MULTIPLICATION, \
DIVISION, SIN, COS, SINH, COSH, EXPONENTIAL, LOGARITHM, POWER, ABS, \
SQRT, SAFE_POWER
np.seterr(divide='ignore', invalid='ignore')
# Integer value
def _integer_forward_eval(param1, _param2, _x, _constants, _forwardeval):
return float(param1)
def _integer_reverse_eval(_reverseindex, _param1, _param2, _forwardeval,
_reverseeval):
pass
# Load x column
def _loadx_forward_eval(param1, _param2, x, _constants, _forwardeval):
return x[:, param1].reshape((-1, 1))
def _loadx_reverse_eval(_reverseindex, _param1, _param2, _forwardeval,
_reverseeval):
pass
# Load constant
def _loadc_forward_eval(param1, _param2, _x, constants, _forwardeval):
return constants[param1]
def _loadc_reverse_eval(_reverseindex, _param1, _param2, _forwardeval,
_reverseeval):
pass
# Addition
def _add_forward_eval(param1, param2, _x, _constants, forward_eval):
return forward_eval[param1] + forward_eval[param2]
def _add_reverse_eval(reverse_index, param1, param2, _forwardeval,
reverse_eval):
reverse_eval[param1] += reverse_eval[reverse_index]
reverse_eval[param2] += reverse_eval[reverse_index]
# Subtraction
def _subtract_forward_eval(param1, param2, _x, _constants, forward_eval):
return forward_eval[param1] - forward_eval[param2]
def _subtract_reverse_eval(reverse_index, param1, param2, _forwardeval,
reverse_eval):
reverse_eval[param1] += reverse_eval[reverse_index]
reverse_eval[param2] -= reverse_eval[reverse_index]
# Multiplication
def _multiply_forward_eval(param1, param2, _x, _constants, forward_eval):
return forward_eval[param1] * forward_eval[param2]
def _multiply_reverse_eval(reverse_index, param1, param2, forward_eval,
reverse_eval):
reverse_eval[param1] += reverse_eval[reverse_index]*forward_eval[param2]
reverse_eval[param2] += reverse_eval[reverse_index]*forward_eval[param1]
# Division
def _divide_forward_eval(param1, param2, _x, _constants, forward_eval):
return forward_eval[param1] / forward_eval[param2]
def _divide_reverse_eval(reverse_index, param1, param2, forward_eval,
reverse_eval):
reverse_eval[param1] += reverse_eval[reverse_index] / forward_eval[param2]
reverse_eval[param2] -= reverse_eval[reverse_index] *\
forward_eval[reverse_index] / forward_eval[param2]
# Sine
def _sin_forward_eval(param1, _param2, _x, _constants, forward_eval):
return np.sin(forward_eval[param1])
def _sin_reverse_eval(reverse_index, param1, _param2, forward_eval,
reverse_eval):
reverse_eval[param1] += \
reverse_eval[reverse_index] * np.cos(forward_eval[param1])
# Cosine
def _cos_forward_eval(param1, _param2, _x, _constants, forward_eval):
return np.cos(forward_eval[param1])
def _cos_reverse_eval(reverse_index, param1, _param2, forward_eval,
reverse_eval):
reverse_eval[param1] -= \
reverse_eval[reverse_index] * np.sin(forward_eval[param1])
# Hyperbolic Sine
def _sinh_forward_eval(param1, _param2, _x, _constants, forward_eval):
return np.sinh(forward_eval[param1])
def _sinh_reverse_eval(reverse_index, param1, _param2, forward_eval,
reverse_eval):
reverse_eval[param1] += \
reverse_eval[reverse_index] * np.cosh(forward_eval[param1])
# Hyperbolic Cosine
def _cosh_forward_eval(param1, _param2, _x, _constants, forward_eval):
return np.cosh(forward_eval[param1])
def _cosh_reverse_eval(reverse_index, param1, _param2, forward_eval,
reverse_eval):
reverse_eval[param1] += \
reverse_eval[reverse_index] * np.sinh(forward_eval[param1])
# Exponential
def _exp_forward_eval(param1, _param2, _x, _constants, forward_eval):
return np.exp(forward_eval[param1])
def _exp_reverse_eval(reverse_index, param1, _param2, forward_eval,
reverse_eval):
reverse_eval[param1] += reverse_eval[reverse_index] *\
forward_eval[reverse_index]
# Natural logarithm
def _log_forward_eval(param1, _param2, _x, _constants, forward_eval):
return np.log(np.abs(forward_eval[param1]))
def _log_reverse_eval(reverse_index, param1, _param2, forward_eval,
reverse_eval):
reverse_eval[param1] += reverse_eval[reverse_index] /\
forward_eval[param1]
# Power
def _pow_forward_eval(param1, param2, _x, _constants, forward_eval):
return np.power(forward_eval[param1], forward_eval[param2])
def _pow_reverse_eval(reverse_index, param1, param2, forward_eval,
reverse_eval):
reverse_eval[param1] += reverse_eval[reverse_index] *\
forward_eval[reverse_index] *\
forward_eval[param2] / forward_eval[param1]
reverse_eval[param2] += reverse_eval[reverse_index] *\
forward_eval[reverse_index] *\
np.log(forward_eval[param1])
# Safe Power
def _safe_pow_forward_eval(param1, param2, _x, _constants, forward_eval):
return np.power(np.abs(forward_eval[param1]), forward_eval[param2])
def _safe_pow_reverse_eval(reverse_index, param1, param2, forward_eval,
reverse_eval):
reverse_eval[param1] += reverse_eval[reverse_index] *\
forward_eval[reverse_index] *\
forward_eval[param2] / forward_eval[param1]
reverse_eval[param2] += reverse_eval[reverse_index] *\
forward_eval[reverse_index] *\
np.log(np.abs(forward_eval[param1]))
# Absolute value
def _abs_forward_eval(param1, _param2, _x, _constants, forward_eval):
return np.abs(forward_eval[param1])
def _abs_reverse_eval(reverse_index, param1, _param2, forward_eval,
reverse_eval):
reverse_eval[param1] += reverse_eval[reverse_index] *\
np.sign(forward_eval[param1])
# Square root
def _sqrt_forward_eval(param1, _param2, _x, _constants, forward_eval):
    return np.sqrt(np.abs(forward_eval[param1]))
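def _sqrt_reverse_eval(reverse_index, param1, _param2, forward_eval,
                       reverse_eval):
    # Assumed counterpart to _sqrt_forward_eval, following the pattern of the
    # other operators: d/dx sqrt(|x|) = sign(x) / (2 * sqrt(|x|)), where
    # forward_eval[reverse_index] already holds sqrt(|x|).
    reverse_eval[param1] += reverse_eval[reverse_index] *\
        np.sign(forward_eval[param1]) / (2.0 * forward_eval[reverse_index])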
"""
Created on 2020
@author: <NAME>
"""
###############################################################################
#
# November 2020, Paris
#
# This file contains the main functions concerning the angular transformations,
# sky projections and spherical trigonometry.
#
# Documentation is provided in Vitral (2021).
# If you have any further questions please email <EMAIL>
#
###############################################################################
import numpy as np
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# ------------------------------------------------------------------------------
"Global variables"
# ------------------------------------------------------------------------------
# Right ascention of the north galactic pole, in radians
a_NGP = 192.85947789 * (np.pi / 180)
# Declination of the north galactic pole, in radians
d_NGP = 27.12825241 * (np.pi / 180)
# Longitude of the north celestial pole, in radians
l_NCP = 122.93192526 * (np.pi / 180)
# Vertical waves in the solar neighbourhood in Gaia DR2
# Bennett & Bovy, 2019, MNRAS
# --> Sun Z position (kpc), in galactocentric coordinates
z_sun = 0.0208
# Sun Y position (kpc), in galactocentric coordinates, by definition.
y_sun = 0
# A geometric distance measurement to the Galactic center black hole
# with 0.3% uncertainty
# Gracity Collaboration, 2019, A&A
# --> Sun distance from the Galactic center, in kpc.
d_sun = 8.178
# Sun X position (kpc), in galactocentric coordinates
x_sun = np.sqrt(d_sun * d_sun - z_sun * z_sun)
# On the Solar Velocity
# <NAME> and <NAME>, 2018, RNAAS
# --> Sun X velocity (km/s), in galactocentric coordinates
vx_sun = -12.9
# --> Sun Y velocity (km/s), in galactocentric coordinates
vy_sun = 245.6
# --> Sun Z velocity (km/s), in galactocentric coordinates
vz_sun = 7.78
# Multuplying factor to pass from kpc to km
kpc_to_km = 3.086 * 10 ** 16
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# ------------------------------------------------------------------------------
"Angles handling"
# ------------------------------------------------------------------------------
def sky_distance_deg(RA, Dec, RA0, Dec0):
"""
Computes the sky distance (in degrees) between two sets of
sky coordinates, given also in degrees.
Parameters
----------
RA : array_like, float
Right ascension (in degrees) of object 1.
Dec : array_like (same shape as RA), float
Declination (in degrees) of object 1.
RA0 : array_like (same shape as RA), float
Right ascension (in degrees) of object 2.
Dec0 : array_like (same shape as RA), float
Declination (in degrees) of object 2.
Returns
-------
R : array_like, float
Sky distance (in degrees) between object 1 and object 2.
"""
RA = RA * np.pi / 180
Dec = Dec * np.pi / 180
RA0 = RA0 * np.pi / 180
Dec0 = Dec0 * np.pi / 180
R = (180 / np.pi) * np.arccos(
np.sin(Dec) * np.sin(Dec0) + np.cos(Dec) * np.cos(Dec0) * np.cos((RA - RA0))
)
return np.asarray(R)
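# Illustrative sanity check: two points at the same right ascension differ
# only in declination, so the separation equals the declination difference.
#
#   sky_distance_deg(192.86, 27.13, 192.86, 0.0)  # -> ~27.13 degrees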
def get_circle_sph_trig(r, a0, d0, nbins=500):
"""
Generates a circle in spherical coordinates.
Parameters
----------
r : float
Distance from the center, in degrees.
a0 : float
        Right ascension from origin, in degrees.
d0 : float
        Declination from origin, in degrees.
nbins : int, optional
Number of circle points. The default is 500.
Returns
-------
ra : array_like
        Right ascension in degrees.
dec : array_like
Declination in degrees.
"""
# Converts angles to radians
r = r * np.pi / 180
a0 = a0 * np.pi / 180
d0 = d0 * np.pi / 180
phi = np.linspace(0, 2 * np.pi, nbins)
a = np.zeros(nbins)
d = np.zeros(nbins)
for i in range(0, nbins):
a[i], d[i] = polar_to_sky(r, phi[i], a0, d0)
return a, d
def polar_to_sky(r, phi, a0, d0):
"""
Transforms spherical polar coordinates (r,phi) into sky coordinates,
in degrees (RA,Dec).
Parameters
----------
r : array_like
Radial distance from center.
phi : array_like
Angle between increasing declination and the projected radius
(pointing towards the source).
a0 : float
        Right ascension from origin, in radians.
d0 : float
Declination from origin in radians.
Returns
-------
ra : array_like
        Right ascension in degrees.
dec : array_like
Declination in degrees.
"""
d = np.arcsin(np.cos(r) * np.sin(d0) + np.cos(d0) * np.cos(phi) * np.sin(r))
if phi < np.pi:
if (np.cos(r) - np.sin(d) * np.sin(d0)) / (np.cos(d) * np.cos(d0)) > 0:
a = a0 + np.arccos(np.sqrt(1 - (np.sin(phi) * np.sin(r) / np.cos(d)) ** 2))
else:
a = a0 + np.arccos(-np.sqrt(1 - (np.sin(phi) * np.sin(r) / np.cos(d)) ** 2))
if phi >= np.pi:
if (np.cos(r) - np.sin(d) * np.sin(d0)) / (np.cos(d) * np.cos(d0)) > 0:
a = a0 - np.arccos(np.sqrt(1 - (np.sin(phi) * np.sin(r) / np.cos(d)) ** 2))
else:
a = a0 - np.arccos(-np.sqrt(1 - (np.sin(phi) * np.sin(r) / np.cos(d)) ** 2))
ra = a * 180 / np.pi
dec = d * 180 / np.pi
return ra, dec
def sky_to_polar(a, d, a0, d0):
"""
Transforms sky coordinates, in degrees (RA,Dec), into
spherical polar coordinates (r,phi).
Parameters
----------
a : array_like
        Right ascension in degrees.
d : array_like
Declination in degrees.
a0 : float
        Right ascension from origin, in degrees.
    d0 : float
        Declination from origin, in degrees.
Returns
-------
r : array_like
Radial distance from center.
p : array_like
Angle between increasing declination and the projected radius
(pointing towards the source), in radians.
"""
r = sky_distance_deg(a, d, a0, d0) * np.pi / 180
a = a * np.pi / 180
d = d * np.pi / 180
sp = np.cos(d) * np.sin(a - (a0 * np.pi / 180)) / np.sin(r)
p = np.zeros(len(sp))
spp = np.where(sp > 0)
spm = np.where(sp <= 0)
dp = np.where(d > (d0 * np.pi / 180))
dm = np.where(d <= (d0 * np.pi / 180))
p[np.intersect1d(spp, dp)] = np.arcsin(sp[np.intersect1d(spp, dp)])
p[np.intersect1d(spp, dm)] = np.pi - np.arcsin(sp[np.intersect1d(spp, dm)])
p[np.intersect1d(spm, dp)] = 2 * np.pi + np.arcsin(sp[np.intersect1d(spm, dp)])
p[np.intersect1d(spm, dm)] = np.pi - np.arcsin(sp[np.intersect1d(spm, dm)])
return r, p
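# Illustrative round trip (mind the unit conventions: sky_to_polar takes the
# origin in degrees, polar_to_sky takes it in radians):
#
#   r, p = sky_to_polar(np.array([11.0]), np.array([41.5]), 10.0, 41.0)
#   polar_to_sky(r[0], p[0], 10.0 * np.pi / 180, 41.0 * np.pi / 180)
#   # -> approximately (11.0, 41.5)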
def angular_sep_vector(v0, v):
"""
Returns separation angle in radians between two 3D arrays.
Parameters
----------
v0 : 3D array
Vector 1.
v : 3D array
Vector 2.
Returns
-------
R : array_like
Separation between vector 1 and vector 2 in radians.
"""
try:
v0.shape
    except AttributeError:
print("You did not give a valid input.")
return
v = v / np.linalg.norm(v)
B = np.arcsin(v[2])
cosA = v[0] / np.cos(B)
sinA = v[1] / np.cos(B)
A = np.arctan2(sinA, cosA)
v0 = v0 / np.linalg.norm(v0)
B0 = np.arcsin(v0[2])
cosA0 = v0[0] / np.cos(B0)
sinA0 = v0[1] / np.cos(B0)
A0 = np.arctan2(sinA0, cosA0)
cosR = np.sin(B0) * np.sin(B) + np.cos(B0) * np.cos(B) * np.cos(A - A0)
R = np.arccos(cosR)
return R
def rodrigues_formula(k, v, theta, debug=False):
"""
    Returns the rotation of v by an angle theta about the axis k.
Applies the Rodrigues formula from:
https://en.wikipedia.org/wiki/Rodrigues%27_rotation_formula
Parameters
----------
k : array_like
Vector with respect to which v will be rotated.
v : array_like
Vector to be rotated.
theta : float
Angle to rotate v.
debug : boolean, optional
        True to print debug diagnostics.
The default is False.
Returns
-------
v_rot : array_like
Rotated vector.
"""
try:
v.shape
    except AttributeError:
print("You did not give a valid input for the Rodrigues formula.")
return
if len(np.shape(v)) == 1 and len(v) == 3:
v_rot = (
v * np.cos(theta)
+ np.cross(k, v) * np.sin(theta)
+ k * np.dot(k, v) * (1 - np.cos(theta))
)
elif len(np.shape(v)) == 2 and np.shape(v)[0] == 3:
v_rot = np.zeros((np.shape(v)[1], 3))
for i in range(0, len(v_rot)):
v0 = np.asarray([v[0][i], v[1][i], v[2][i]])
v_rot[i] = (
v0 * np.cos(theta)
+ np.cross(k, v0) * np.sin(theta)
+ k * np.dot(v0, k) * (1 - np.cos(theta))
)
if debug is True and i < 10:
print("v0 :", v0)
print("v_rot:", v_rot[i])
else:
print("You did not give a valid input for the Rodrigues formula.")
return
return v_rot
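# Illustrative check: rotating the x-axis by pi/2 about the z-axis yields the
# y-axis.
#
#   rodrigues_formula(np.array([0, 0, 1]), np.array([1, 0, 0]), np.pi / 2)
#   # -> approximately [0., 1., 0.]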
def sky_coord_rotate(v_i, v0_i, v0_f, theta=0, debug=False):
"""
Gets new angles (RA,Dec) in degrees of a rotated vector.
Parameters
----------
v_i : array_like
Vectors to be rotated.
v0_i : array_like
Vector pointing to the initial centroid position.
v0_f : array_like
Vector pointing to the final centroid position.
theta : float, optional
Angle to rotate (for no translation), in radians. The default is 0.
debug : boolean, optional
        True to print debug diagnostics.
The default is False.
Returns
-------
array_like, array_like
Arrays containing the new rotated (RA,Dec) positions.
"""
if (v0_i == v0_f).all():
if debug is True:
print("Pure rotation")
k = v0_i / np.linalg.norm(v0_i)
else:
if debug is True:
print("Translation in spherical geometry")
k = np.cross(v0_i, v0_f) / np.linalg.norm(np.cross(v0_i, v0_f))
theta = angular_sep_vector(v0_i, v0_f)
if debug is True:
print("Vector k:", k)
print("Angle of separation [degrees]:", theta * 180 / np.pi)
v_f = rodrigues_formula(k, v_i, theta)
try:
v_f.shape
    except AttributeError:
print("You did not give a valid input for the Rodrigues formula.")
return
if len(np.shape(v_f)) == 1 and len(v_f) == 3:
v_f = v_f / np.linalg.norm(v_f)
B = np.arcsin(v_f[2])
cosA = v_f[0] / np.cos(B)
sinA = v_f[1] / np.cos(B)
A = np.arctan2(sinA, cosA)
elif len(np.shape(v_f)) == 2:
A = np.zeros(len(v_f))
B = np.zeros(len(v_f))
for i in range(0, len(v_f)):
v_f[i] = v_f[i] / np.linalg.norm(v_f[i])
            B[i] = np.arcsin(v_f[i][2])
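            # Completion mirroring the single-vector branch above (the
            # original is truncated here):
            cosA = v_f[i][0] / np.cos(B[i])
            sinA = v_f[i][1] / np.cos(B[i])
            A[i] = np.arctan2(sinA, cosA)
    # Assumed return, per the docstring: the rotated (RA, Dec) in degrees.
    return A * 180 / np.pi, B * 180 / np.pi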
import numpy as np
from numpy.core.fromnumeric import var
import scipy as sp
from scipy import stats
import matplotlib.pyplot as plt
from commondata import CommonData
from NPV_calc import discrete_cdf
import unittest
from bisect import bisect_left
import time
#import other modules
from RobotScaling import Robots
from Solar_Position_Optimization import PVArrays
from structural_calculation import Structure
from life_support import Life_Support
from Safehouse import Safehouse
from structural_v2 import StressRelated
from thermal_calculations import ThermalControl
from Power import PowerRelated
def name_cleaner(entry):
out = entry.replace("_distro", "")
out = out.replace("_", " ")
out = out.capitalize()
return out
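# Example: name_cleaner('habitat__safehouse_mass_distro')
# -> 'Habitat  safehouse mass' (a double underscore leaves a double space).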
class Anal_Sensitivity():
def __init__(self):
self.data = CommonData()
self.df = self.data.df
self.get_vars()
self.var_list = ['gas_storage__airlock_cycles' , 'total_mass__ls_mass_excluding_water_and_gas', 'total_mass__total_ls_power', 'habitat__day_peak_power','habitat__night_avg_power','habitat__airlock_volume','habitat__safehouse_mass','habitat__safehouse_volume','habitat__extra_cargo_volume','habitat__extra_cargo_mass','rassor__power_draw','athlete__power_draw','robotarm__power_draw','bagging__power_draw', 'nipper__power_draw','solar__average_cell_weight', 'power_storage__life_support_h2_needed', 'power_storage__h2_tank_ref_propellant_mass', 'power_storage__h2_tank_ref_mass', 'power_storage__o2_tank_ref_propellant_mass', 'power_storage__o2_tank_ref', 'habitat__cylinders_mass', 'all_logistics__docking_station', 'all_logistics__internal_transporter_mass', 'habitat__cylinders_volume' , 'all_logistics__internal_transporter_volume']
self.mass_var_list = ['total_mass__total_ls_mass', 'habitat__inflatable_mass', 'all_logistics__total_mass', 'solar__total_mass', 'power_storage__total_mass', 'total_mass__total_ls_mass', 'habitat__safehouse_mass', 'habitat__extra_cargo_mass', 'habitat__cylinders_mass', 'all_logistics__docking_station', 'all_logistics__internal_transporter_mass']
self.volume_var_list = ['habitat__airlock_volume', 'habitat__safehouse_volume', 'habitat__inflatable_volume', 'all_logistics__total_volume', 'solar__total_volume', 'power_storage__total_volume', 'total_mass__total_volume', 'habitat__extra_cargo_volume', 'habitat__cylinders_volume' , 'all_logistics__internal_transporter_volume']
self.power_var_list = ['all_logistics__power_draw']
self.trials = 5000
self.vars_to_pdf(self.var_list)
self.print_all_distros(self.trials)
# self.total_calc()
def get_vars(self, key_list=None):
keys = list(self.data.__dict__.keys())
filtered = list(filter(lambda item: item not in ['df', 'tab_names', 'subtabs', 'missing_keys'] ,keys))
if key_list != None:
filtered = list(filter(lambda item: item in key_list ,filtered))
# print(filtered)
return filtered
def vars_to_pdf(self, vars_list, uncertainty=0.2):
for var in vars_list:
value = getattr(self.data, var)
distro_name = f"{var}_distro"
distro = stats.norm(loc=value, scale=uncertainty*value)
setattr(self, distro_name, distro)
def sample_from_pdfs(self):
keys = list(self.__dict__.keys())
for key in keys:
distro = getattr(self, key)
if type(distro) == stats.distributions.rv_frozen:
non_distro_var_name = key.replace("_distro", "")
setattr(self.data, non_distro_var_name, distro.rvs(size=1)[0])
# print(str(non_distro_var_name),": ",getattr(self, non_distro_var_name))
def sample_calc(self, var_list=None):
self.vars_to_pdf(self.get_vars(var_list))
self.sample_from_pdfs()
# self.print_all_distros(self.trials)
self.total_runner()
return self.outputs_calc()
def outputs_calc(self):
total_mass = sum([float(getattr(self.data, key)) for key in self.mass_var_list])
total_volume = sum(list(float(getattr(self.data, key)) for key in self.volume_var_list))
# max_power = sum(list(float(getattr(self.data, key)) for key in self.power_var_list))
# print("Total Mass: ",total_mass)
# print("Total volume: ",total_volume)
# print("Construction Power Draw: ",max_power)
return total_mass, total_volume
def converger(self, var_list=None):
self.attributes_to_converge_arr(var_list=None, arr_name="init_value_array", local=False)
# self.total_runner(arr_name="constant_filter_array", var_list=var_list)
# print("Init values: ", self.init_value_array[:,1])
# print("Variables: " ,self.constant_filter_array[:,1])
# bool_mask = np.not_equal(self.init_value_array[:,1], self.constant_filter_array[:,1])
# print(bool_mask)
# non_constants = self.init_value_array[:,0]
max_delta = 1
while max_delta > 0.01:
self.attributes_to_converge_arr(var_list=var_list, arr_name="before_arr", local=False)
self.total_runner(arr_name="after_arr", var_list=var_list)
max_delta = self.arr_delta(self.before_arr, self.after_arr)
print("Maximum Delta: ", max_delta)
def arr_delta(self, arr1, arr2):
# print(arr1[:5,0])
# print(arr2[:5,0])
# names_same = np.equal(arr1[:,0], arr2[:,0])
vals1 = arr1[:,1][arr1[:,0] == arr2[:,0]].astype(float)
vals2 = arr2[:,1][arr1[:,0] == arr2[:,0]].astype(float)
ratios = np.abs(vals1/vals2)
print(ratios)
largest_delta = np.amax((ratios/vals1))
return largest_delta
def total_runner(self):
Structure()
StressRelated()
ThermalControl()
Robots(50,317,350,10,277)
Life_Support()
PVArrays()
Safehouse()
PowerRelated()
def attributes_to_converge_arr(self, var_list, arr_name, local=False):
keys = self.get_vars(var_list)
arr_out = np.array(keys)
if local:
values = np.array([float(getattr(self, key)) for key in keys])
elif not local:
values = np.array([float(getattr(self.data, key)) for key in keys])
length = len(list(arr_out))
arr_out = np.reshape(arr_out, (length,1))
values = np.reshape(values, (length,1))
values = values.astype(float)
arr_out = np.hstack((arr_out, values))
setattr(self, arr_name, arr_out)
def print_all_distros(self, trials):
keys = list(self.__dict__.keys())
for key in keys:
distro = getattr(self, key)
if type(distro) == stats.distributions.rv_frozen:
self.print_distro(trials, distro, title=str(key), x_axis="test", y_axis="Relative Frequency")
def print_distro(self, trials, distro=None, title=None, x_axis=None, y_axis=None):
r = np.sort(distro.rvs(size=trials))
fig = plt.figure()
ax = fig.add_subplot(111)
# Adjust the subplots region to leave some space for the sliders and buttons
# fig.subplots_adjust(left=0.25, bottom=0.25)
#calculate interquartile range, mean and standard deviations
# Q25 = np.percentile(np.sort(r), 25, interpolation = 'midpoint').round(2)
# Q75 = np.percentile(np.sort(r), 75, interpolation = 'midpoint').round(2)
# IQR = Q75 - Q25
mean = np.sort(r).mean().round(2)
std = np.sort(r).std().round(2)
# Define an axes area and draw a slider in it
# amp_slider_ax = fig.add_axes([0.25, 0.15, 0.65, 0.03], facecolor=axis_color)
# amp_slider = Slider(amp_slider_ax, 'Amp', 0.1, 10.0, valinit=amp_0)
# Draw another slider
# freq_slider_ax = fig.add_axes([0.25, 0.1, 0.65, 0.03], facecolor=axis_color)
# freq_slider = Slider(freq_slider_ax, 'Freq', 0.1, 30.0, valinit=freq_0)
#if plotting pure distribution, calculate CDF of negative values and plot PDF
neg = distro.cdf(0).round(5)
        # Assumed completion, per the comment above: plot the PDF over the
        # sorted samples.
        ax.plot(np.sort(r), distro.pdf(np.sort(r)))
import matplotlib.pyplot as plt
import numpy as np
SAMPLE_RATE = 44100.0 # hertz
def plot_show(t, data, title='Data'):
fig, ax = plt.subplots()
ax.plot(t, data)
ax.set(xlabel='time (s)', ylabel='data (wave)', title=title)
ax.grid()
#fig.savefig("test.png")
plt.show()
def fft_plot(t, vals, sample_rate=SAMPLE_RATE):
    fourier = np.fft.fft(vals)
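    # Assumed completion: show the one-sided magnitude spectrum with the
    # module's own plot_show helper.
    n = len(vals)
    freqs = np.fft.fftfreq(n, d=1.0 / sample_rate)
    plot_show(freqs[:n // 2], np.abs(fourier)[:n // 2],
              title='Magnitude spectrum')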
import matplotlib.pyplot as plt
import urllib.request as request
import torch
import tarfile
import os
import numpy as np
from torch.utils.data import TensorDataset
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder
import matplotlib
matplotlib.use('agg')
class NotMNISTLoader:
"""
Convenience class to load the notMNIST (letters) dataset and to create
`Pytorch` train and test `dataloaders`
"""
def __init__(self, folder_path='.'):
"""
:param folder_path: str, path to the notMNIST data set or folder
"""
self.train_dataloader = None
self.test_dataloader = None
self.train_tragets = None
self.test_targets = None
self.folder_path = folder_path
def create_dataloader(self, batch_size=32, standardize=True, test_size=0.3,
train_size=None, save=False, **kwargs):
"""
Creates train and test `Pytorch` dataloaders
:param batch_size: int, size of the batch
:param standardize: bool, indicates if the train and test set should
normalized by :math:`\\frac{x - \\mu}{\\sigma}`
:param test_size: float or int, If float: Percentage to split the
dataset to train and test, should be in [0,1)
If int: Absolute number for the test set size
:param train_size: float or int, float or int, If float: Percentage to
split the dataset to train and test, should be in [0,1)
If int: Absolute number of the train set size
:param save: bool, If the created dataloaders should be saved as a dictionary,
path filename should be given in kwargs as `filename`
See also `save_to_disk`
:param kwargs: `filename`: str, indicates where to save the dataloader,
to be used in combination with `save`
:return: Pytorch train and test dataloader
"""
letters = os.listdir(self.folder_path)
# Retrieve pictures files names
picture_files = {}
n_pictures = 0
for letter in letters:
fn = [name for name in os.listdir(os.path.join(self.folder_path, letter))
if name.endswith('.png')]
picture_files[letter] = fn
# Get the actual pictures
data = {}
for key in picture_files:
files = picture_files[key]
data[key] = []
for f in files:
n_pictures += 1
try:
data[key].append(plt.imread(
os.path.join(self.folder_path, key, f)))
except Exception as e:
print(f, e)
# Merge all data to one list
X = []
Y = []
X_nd = np.zeros(shape=(n_pictures, 28, 28))
for key, list_ in data.items():
for img in list_:
X.append(img)
Y.append(key)
for i in range(len(X)):
X_nd[i, :, :] = X[i]
lbl_enc = LabelEncoder()
labels = np.array(['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J'])
lbl_enc.fit(labels)
Y = lbl_enc.transform(Y)
        # there are inconsistencies in the length of the dataset;
        # the dataset is truncated to the length of the labels
X_nd = X_nd[:len(Y)]
X_train, X_test, y_train, y_test = train_test_split(X_nd, Y,
train_size=train_size,
test_size=test_size)
X_train = X_train.astype(np.float32)
X_test = X_test.astype(np.float32)
if standardize:
mean = kwargs.get('mean', 0.5)
std = kwargs.get('std', 0.5)
X_train = np.divide(X_train - mean, std)
X_test = np.divide(X_test - mean, std)
X_train = torch.from_numpy(X_train).view(-1, 1, 28, 28)
X_test = torch.from_numpy(X_test).view(-1, 1, 28, 28)
train_set = TensorDataset(X_train, torch.from_numpy(y_train))
self.train_dataloader = torch.utils.data.DataLoader(
train_set, batch_size=batch_size, shuffle=True)
test_set = TensorDataset(X_test, torch.from_numpy(y_test))
self.test_dataloader = torch.utils.data.DataLoader(
test_set, batch_size=batch_size, shuffle=True)
if save:
self.save_to_disk(kwargs['filename'])
return self.train_dataloader, self.test_dataloader
def load_from_file(self, path):
"""
Loads the dataloader dictionary from disk. The variables
`self.train_dataloader` and `self.test_dataloader` contain
the dictionary contents. Returns also the dictionary.
:param path: str, path to the dictionary
:return dict, Dictionary containing the dataloaders
"""
        dataloaders = np.load(path, allow_pickle=True)  # allow_pickle assumed: the file stores a pickled dict
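        # Assumed completion: the saved object is a dict holding the two
        # dataloaders (these keys are hypothetical; they must match whatever
        # save_to_disk wrote).
        dataloaders = dataloaders.item()
        self.train_dataloader = dataloaders['train_dataloader']
        self.test_dataloader = dataloaders['test_dataloader']
        return dataloaders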
import h5py
import os, sys, glob
import numpy as np
import plotly.offline as offline
from preprocessing import analysis_pp
from analysis.general_utils import aqua_utils, saving_utils, plotly_utils, general_utils, compare_astro_utils, correlation_utils, stat_utils
from scipy.stats.stats import power_divergence
from scipy.stats import ttest_ind_from_stats
import csv
import scipy.signal as ss
import math
import time
from pandas import DataFrame
from scipy import optimize
import pandas as pd
import matplotlib.pyplot as plt
from collections import deque
class AstrocytePlotter():
def __init__(self, output_folder):
self.output_folder = output_folder
#For correlation plots
self.filter_probs = [0.05, 0.10, 0.25]
self.n_samples_corr_fake = 20
self.num_frames_splits_l = [250, 500, 1000, 3000, 6000, 12000, 24000, 100000]
self.num_frames_splits_m_l = [0.5, 1, 2, 5, 10, 15, 20, 25, 30, 35, 40, 45, 50, 60, 70, 80]
self.num_frames_splits_splits_m_l = [10, 15, 20, 25, 30, 35, 40]
self.max_split_comparison_samples = 100
self.behaviours_list_a = ['default', 'rest', 'running',
'running_start', 'running_before', 'stick',
'stick_start', 'stick_end', 'stick_expect',
'stick_rest', 'whisker_rest_stick', 'whisker_stick']
self.behaviours_list_small = ['whisker_rest_stick', 'default', 'rest', 'running', 'stick']
def setup_plot_folders(self, output_experiment_path):
paths = ['borders', 'behaviour_heatmaps', 'behaviours_basic',
'signal_delays', 'signal_durations', 'triplet', 'behaviour_activity',
'behaviour_areas', 'signal_basic_samples', 'signal_behaviour_samples',
'correlations', 'random_events', 'splits', 'splits_self', 'signal_amplitudes',
'signal_proportion_delays', 'signal_stick_run_samples', 'splits_split_split',
'triplet_bar', 'size_v_time_corr',
'behaviour_heatmaps_threshold_with_random',
'split_behaviour_grids',
'size_histogram_bh_comparison_individual', 'amplitude_histogram_bh_comparison_individual', 'duration_histogram_bh_comparison_individual',]
for p in paths:
try:
os.makedirs(os.path.join(output_experiment_path, 'plots' , p))
except:
pass
def setup_file_folders(self, output_experiment_path):
paths = ['correlations', 'csv']
for p in paths:
try:
print(os.path.join(output_experiment_path, 'files', p))
os.makedirs(os.path.join(output_experiment_path, 'files', p))
except:
print('Folder structure exists?')
def setup_plot_folders_comparison(self, output_experiment_path_comparison):
paths = ['behaviour_heatmaps', 'triplet', 'intersection', 'correlations', 'align',
'intersection_border_xcorr_aligned',]
for p in paths:
try:
os.makedirs(os.path.join(output_experiment_path_comparison, 'plots', p))
except:
print('Folder structure exists?')
def setup_file_folders_comparison(self, output_experiment_path_comparison):
paths = ['correlations', 'csv']
for p in paths:
try:
print(os.path.join(output_experiment_path_comparison, 'files', p))
os.makedirs(os.path.join(output_experiment_path_comparison, 'files', p))
except:
print('Folder structure exists?')
def setup_plot_folders_all_comparison(self, output_experiment_path_all_comparison):
#print(output_experiment_path_all_comparison)
paths = ['size_histogram_comparison', 'amplitude_histogram_comparison', 'duration_histogram_comparison',
'size_histogram_bh_comparison', 'amplitude_histogram_bh_comparison', 'duration_histogram_bh_comparison',
'activity_all', 'activity_all_number_minute', 'waterfall_together', 'signal_proportion_delays',
'signal_proportion_delays_alt_average_proportions',
'behaviour_heatmaps_V2_comparison_scale',
'bar_rest_run_all',
'bar_rest_rest_stick_all',
'bar_run_run_stick_all',
'dot_rest_run_pair_all',
'bar_run_stick_run_transition_all',
'rest_to_run_proportions_alt',
'run_to_rest_proportions_alt',
'run_stick_run_proportions_alt',
'run_stick_run_proportions_alt_filter_max_3_frames',
'run_stick_run_proportions_alt_filter_max_5_frames',
'rest_to_run_amplitudes_default_alt',
'rest_to_run_amplitudes_alt',
'rest_to_run_durations_alt',
'rest_to_run_sizes_alt',
'rest_to_run_speed_alt',
'rest_to_run_pupil_alt',
'run_to_rest_amplitudes_default_alt',
'run_to_rest_amplitudes_alt',
'run_to_rest_durations_alt',
'run_to_rest_sizes_alt',
'rest_to_run_amplitudes_default_outlier_alt',
'rest_to_run_amplitudes_outlier_alt',
'rest_to_run_durations_outlier_alt',
'rest_to_run_sizes_outlier_alt',
'run_to_rest_amplitudes_default_outlier_alt',
'run_to_rest_amplitudes_outlier_alt',
'run_to_rest_durations_outlier_alt',
'run_to_rest_sizes_outlier_alt',
'run_to_rest_speed_alt',
'run_to_rest_pupil_alt',
'run_stick_run_amplitudes_default_alt',
'run_stick_run_amplitudes_alt',
'run_stick_run_durations_alt',
'run_stick_run_sizes_alt',
'run_stick_run_amplitudes_default_outlier_alt',
'run_stick_run_amplitudes_outlier_alt',
'run_stick_run_durations_outlier_alt',
'run_stick_run_sizes_outlier_alt',
'run_stick_run_speed_alt',
'run_stick_run_pupil_alt',
'run_stick_run_amplitudes_default_alt_filter_max_3_frames',
'run_stick_run_amplitudes_alt_filter_max_3_frames',
'run_stick_run_durations_alt_filter_max_3_frames',
'run_stick_run_sizes_alt_filter_max_3_frames',
'run_stick_run_speed_alt_filter_max_3_frames',
'run_stick_run_pupil_alt_filter_max_3_frames',
'run_stick_run_amplitudes_default_alt_filter_max_5_frames',
'run_stick_run_amplitudes_alt_filter_max_5_frames',
'run_stick_run_durations_alt_filter_max_5_frames',
'run_stick_run_sizes_alt_filter_max_5_frames',
'run_stick_run_speed_alt_filter_max_5_frames',
'run_stick_run_pupil_alt_filter_max_5_frames',
'all_amplitudes', 'all_durations', 'all_sizes',
'all_amplitudes_filt_bh', 'all_durations_filt_bh', 'all_sizes_filt_bh',
'correlations',
'correlations_long_events',
'correlations_short_events',
'correlations_no_align',
'correlations_no_align_long_events',
'correlations_no_align_short_events',
'correlations_csv',
'correlations_long_events_csv',
'correlations_short_events_csv',
'correlations_no_align_csv',
'correlations_no_align_long_events_csv',
'correlations_no_align_short_events_csv',
'control',
'outliers',
'triplet_dot_all',
'size_v_time_corr_ALL',
'speed_v_events_ALL',
'split_correlation_all',
'behaviour_over_recording',
'pixel_distribution',
'splits_self_all',
]
data_paths = [
'correlations',
'correlations_long_events',
'correlations_short_events',
'correlations_no_align',
'correlations_no_align_long_events',
'correlations_no_align_short_events',
'control',
'outliers',
'behaviour_ratios',
'top_average_values',
'split_correlation_all',
'splits_self_all'
]
for p in paths:
#print('Trying...', p)
try:
os.makedirs(os.path.join(output_experiment_path_all_comparison, 'plots', p))
except:
print('Folder structure exists?')
for p in data_paths:
try:
os.makedirs(os.path.join(output_experiment_path_all_comparison, 'data', p))
except:
print('Folder structure exists?')
def get_output_experiment_path(self, astroA, output_folder):
experiment_id = '/'.join(astroA.experiment_path.split('/')[-2:])
output_experiment_path = os.path.join(output_folder, experiment_id)
return output_experiment_path
def plot_all_single(self, astroA):
output_experiment_path = self.get_output_experiment_path(astroA, self.output_folder)
print('Making dirs', output_experiment_path)
self.setup_plot_folders(output_experiment_path)
print('Plotting behaviours basic...')
#Behaviour basic
figs_basic_plots = self.get_behaviour_basic_plots(astroA)
for fig_k in figs_basic_plots.keys():
saving_utils.save_plotly_fig(figs_basic_plots[fig_k], os.path.join(output_experiment_path, 'plots', 'behaviours_basic', '{}'.format(fig_k)), width=1000, height=400)
print('Plotting random samples of signals...')
fig_signals = self.get_signal_figs_samples(astroA, 20)
for i, fig_signal in enumerate(fig_signals):
fig_signal_path = os.path.join(output_experiment_path, 'plots', 'signal_basic_samples', 'signal_{}'.format(i))
saving_utils.save_plotly_fig(fig_signal, fig_signal_path)
print('Plotting borders...')
#Borders plot
fig_border = self.get_border_plot(astroA)
saving_utils.save_plotly_fig(fig_border, os.path.join(output_experiment_path, 'plots' , 'borders', 'border'))
print('Plotting behaviour heatmaps...')
#Behaviour heatmaps
fig_heatmap_grids, fig_heatmap_dff_grids = self.get_behaviour_contour_plots(astroA)
heatmap_grid_base_path = os.path.join(output_experiment_path, 'plots', 'behaviour_heatmaps')
for k in fig_heatmap_grids.keys():
saving_utils.save_plotly_fig(fig_heatmap_grids[k], os.path.join(heatmap_grid_base_path, k))
saving_utils.save_plotly_fig(fig_heatmap_dff_grids[k], os.path.join(heatmap_grid_base_path, k + 'dff'))
print('Plotting behaviour activity bar plot...')
behaviour_activity_path = os.path.join(output_experiment_path, 'plots', 'behaviour_activity', 'activity')
fig_behaviour_activity = self.get_behaviour_activity_plot(astroA)
        print('Behaviour activity path:', behaviour_activity_path)
saving_utils.save_plotly_fig(fig_behaviour_activity, behaviour_activity_path, width=1200, height=800)
print('Plotting behaviour event size bar plot...')
behaviour_area_path = os.path.join(output_experiment_path, 'plots', 'behaviour_areas', 'areas')
fig_behaviour_area = self.get_behaviour_area_plot(astroA)
saving_utils.save_plotly_fig(fig_behaviour_area, behaviour_area_path)
print('Plotting behaviour amplitude size bar plot...')
behaviour_amplitude_path = os.path.join(output_experiment_path, 'plots', 'signal_amplitudes', 'amplitudes')
fig_behaviour_amplitude = self.get_behaviour_amplitude_bar_plot(astroA)
saving_utils.save_plotly_fig(fig_behaviour_amplitude, behaviour_amplitude_path)
print('Plotting random samples of signals on different behaviours...')
fig_bk_signals = self.get_signal_bk_figs_samples(astroA, 3)
for bk in fig_bk_signals.keys():
for i, fig_bk_signal in enumerate(fig_bk_signals[bk]):
fig_bk_signal_path = os.path.join(output_experiment_path, 'plots', 'signal_behaviour_samples', 'signal_{}-{}'.format(bk, i))
saving_utils.save_plotly_fig(fig_bk_signal, fig_bk_signal_path)
print('Plotting local signal samples with stick and running...')
stick_run_sample_path = os.path.join(output_experiment_path, 'plots', 'signal_stick_run_samples')
fig_stick_run_samples_l = self.get_stick_run_sample_figs(astroA)
for i, sample_figs in enumerate(fig_stick_run_samples_l):
saving_utils.save_plotly_fig(sample_figs[0], os.path.join(stick_run_sample_path, '{}-running'.format(i)))
saving_utils.save_plotly_fig(sample_figs[1], os.path.join(stick_run_sample_path, '{}-stick'.format(i)))
for j in range(min(10, len(sample_figs[2]))):
saving_utils.save_plotly_fig(sample_figs[2][j], os.path.join(stick_run_sample_path, '{}-signal_{}'.format(i, j)))
bh_l = ['rest', 'stick_rest', 'running', 'stick_run_ind_15']
#Area: None, 60, num_bins = 10
#Duration: None, 30, num_bins = 10
#dff : 0.6, 5, num_bins = 20
print('Comparing behaviour distribution plots for SINGLE...')
for n_bins in [10, 20]:
print('NUM BINS:', n_bins)
for behaviour_l in [bh_l]: #, ['rest', 'running'], ['running', 'stick'], ['rest', 'stick_rest'], ['running', 'stick_run_ind_15']]:
for measure, min_measure, max_measure in [
['area', None, 60],
['dffMax2', 0.6, 5],
['duration', None, 30],
]:
for confidence in [True]:
measure_name = aqua_utils.get_measure_names(measure)
path = os.path.join(output_experiment_path, 'plots', '{}_histogram_bh_comparison_individual'.format(measure_name), 'behaviours-{}-nbins={}-min={}-max={}-conf={}'.format('_'.join(behaviour_l), n_bins, min_measure, max_measure, confidence))
plot, stats_d = self.measure_distribution_bh_compare_plot([astroA], behaviour_l, measure=measure, num_bins=n_bins, min_measure=min_measure, max_measure=max_measure, measure_name=measure_name, confidence=confidence, with_stats=True, mode='MOA')
if measure == 'duration':
plotly_utils.apply_fun_axis_fig(plot, lambda x : x / astroA.fr, axis='x')
saving_utils.save_pth_plt_l_log([plot], [path], axis='x')
saving_utils.save_pth_plt_l_log([plot], [path], axis='y')
#Save results in text file
for i, name in enumerate(stats_d['names']):
#Create folder
data_folder_path = path
try:
os.makedirs(path)
except:
pass
temp_d = {k : stats_d[k][i] for k in stats_d.keys()}
saving_utils.save_csv_dict(temp_d, os.path.join(data_folder_path, '{}.csv'.format(name)), key_order=['names', 'x', 'mean', 'conf_95', 'std'])
np.savetxt(os.path.join(data_folder_path, '{}-data.csv'.format(name)), np.array(temp_d['data']).transpose(), delimiter=",")
'''
for confidence in [True]:
for with_log in [False, True]:
try:
measure_name = aqua_utils.get_measure_names(measure)
plot, stats_d = self.measure_distribution_bh_compare_plot_exponential_fit([astroA], behaviour_l, measure=measure, num_bins=n_bins, min_measure=min_measure, max_measure=max_measure, measure_name=measure_name, confidence=False, with_stats=True, with_log=with_log)
path = os.path.join(output_experiment_path, 'plots', '{}_histogram_bh_comparison_individual'.format(measure_name), 'behaviours-{}-nbins={}-min={}-max={}-conf={}_EXPFIT-withlog={}'.format('_'.join(behaviour_l), n_bins, min_measure, max_measure, confidence, with_log))
if measure == 'duration':
plotly_utils.apply_fun_axis_fig(plot, lambda x : x / astroA.fr, axis='x')
#Save results in text file
for i, name in enumerate(stats_d['names']):
#Create folder
data_folder_path = path
try:
os.makedirs(path)
except:
pass
temp_d = {k : stats_d[k][i] for k in stats_d.keys()}
if len(name.split('__')) == 2:
tx_name = name.split('__')[0] + '_expfit'
else:
tx_name = name
print('TX NAME', name)
saving_utils.save_csv_dict(temp_d, os.path.join(data_folder_path, '{}.csv'.format(tx_name)), key_order=['names', 'x', 'mean', 'conf_95', 'std'])
np.savetxt(os.path.join(data_folder_path, '{}-data.csv'.format(tx_name)), np.array(temp_d['data']).transpose(), delimiter=",")
saving_utils.save_plotly_fig(plot, path)
print('THE STAT HERE?', stats_d)
except Exception as e:
print('EXCEPTION\n\n\n', 'CONF', confidence, 'LOG', with_log, 'measure' ,measure)
'''
print('Plotting signal durations...')
#Signal durations plot
durations_base_path = os.path.join(output_experiment_path, 'plots', 'signal_durations')
fig_durations = self.get_signal_durations_plot(astroA)
for k in fig_durations.keys():
saving_utils.save_plotly_fig(fig_durations[k], os.path.join(durations_base_path, k + '-durations'))
'''
if astroA.aqua_bound == True:
print('Plotting triplet plot...')
#Triplet plot
triplet_base_path = os.path.join(output_experiment_path, 'plots' , 'triplet')
radii_path = os.path.join(output_experiment_path, 'plots', 'triplet', 'radii')
fig_triplets, fig_radii_border = self.get_triplet_plots(astroA, n_bins=8)
for k in fig_triplets.keys():
saving_utils.save_plotly_fig(fig_triplets[k], os.path.join(triplet_base_path, k + '-triplet'))
saving_utils.save_plotly_fig(fig_radii_border, radii_path)
print('Plotting bar plots (triplet plot bands) num_events, duration, amplitude, ')
measure_names = [None, 'Area', 'Amplitude', 'Time (s)']
for bh in ['default', 'rest', 'running', 'stick', 'stick_rest', 'stick_run_ind_15']:
for i, measure in enumerate([None, 'area', 'dffMax2', 'time_s']):
path = os.path.join(output_experiment_path, 'plots', 'triplet_bar', '{}_{}'.format(bh, measure))
if bh in astroA.event_subsets:
fig = self.triplet_bar_plot(astroA, bh=bh, measure=measure, n_bins=8, y_title=measure_names[i])
print('SAVING TRIPLET BAR')
saving_utils.save_plotly_fig(fig, path)
'''
'''
print('Plotting Signal duration split relative differences...')
duration_split_differences_path = os.path.join(output_experiment_path, 'plots', 'signal_durations', 'duration_splits_relative_differences')
fig_duration_split_differences = self.get_duration_split_differences_from_default(astroA)
saving_utils.save_plotly_fig(fig_duration_split_differences, duration_split_differences_path)
'''
'''
#Signal delays plot
signal_delays_path = os.path.join(output_experiment_path, 'plots' , 'signal_delays')
print('Plotting signal delays')
fig_delays_waterfall_d, fig_delays_waterfall_interpolate_d = self.get_waterfall_delays_plot_all(astroA)
for fig_k in fig_delays_waterfall_d.keys():
print('FIG K', fig_k)
saving_utils.save_plotly_fig(fig_delays_waterfall_d[fig_k], os.path.join(signal_delays_path, fig_k + '-delays_waterfall'))
saving_utils.save_plotly_fig(fig_delays_waterfall_interpolate_d[fig_k], os.path.join(signal_delays_path, fig_k + '-delays_waterfall_interpolate'))
        print('Plotting signal proportion delays...')
fig_proportion_delays_path = os.path.join(output_experiment_path, 'plots', 'signal_proportion_delays')
fig_proportion_delays_d = self.get_proportion_delays_plot_all([astroA])
for fig_k in fig_proportion_delays_d.keys():
saving_utils.save_plotly_fig(fig_proportion_delays_d[fig_k], os.path.join(fig_proportion_delays_path, fig_k))
print('Plotting sample frame split examples...')
figs_frame_split_examples = self.get_frame_split_example_plots(astroA)
for pk in figs_frame_split_examples.keys():
for frame_split in figs_frame_split_examples[pk].keys():
figs_frame_split_example_path = os.path.join(output_experiment_path, 'plots', 'correlations', 'frame_split_pair_example_frames_{}_p={}'.format(frame_split, pk))
saving_utils.save_plotly_fig(figs_frame_split_examples[pk][frame_split], figs_frame_split_example_path)
print('Plotting random astrocyte FULL sample plots...')
figs_random_event_path = os.path.join(output_experiment_path, 'plots', 'random_events')
fig_l = self.get_random_astrocyte_plot(astroA)
for i, fig in enumerate(fig_l):
saving_utils.save_plotly_fig(fig, os.path.join(figs_random_event_path, 'sample_{}'.format(i)))
'''
'''
print('Plotting split counter')
figs_frame_split = self.get_compare_frame_split_plots(astroA)
for pk in figs_frame_split.keys():
figs_frame_split_path = os.path.join(output_experiment_path, 'plots', 'splits', 'splits_p={}'.format(pk))
saving_utils.save_plotly_fig(figs_frame_split[pk], figs_frame_split_path)
#TODO RUN THIS
print('Plotting frame split xcorr value to full self (self<->split)')
fig_frame_split_self_path_a = os.path.join(output_experiment_path, 'plots', 'splits_self', 'splits_self_a')
fig_frame_split_self_path_b = os.path.join(output_experiment_path, 'plots', 'splits_self', 'splits_self_b')
fig_frame_split_self_a, fig_frame_split_self_b = self.get_compare_full_self_frame_split_plot_xcorr(astroA)
saving_utils.save_plotly_fig(fig_frame_split_self_a, fig_frame_split_self_path_a)
saving_utils.save_plotly_fig(fig_frame_split_self_b, fig_frame_split_self_path_b)
'''
'''
print('Plotting frame split xcorr value to splits splits (split<->split)')
fig_frame_split_self_path_a = os.path.join(output_experiment_path, 'plots', 'splits_split_split', 'splits_self_a')
fig_frame_split_self_path_b = os.path.join(output_experiment_path, 'plots', 'splits_split_split', 'splits_self_b')
fig_frame_split_self_a, fig_frame_split_self_b = self.get_compare_full_self_frame_split_split_plot_xcorr(astroA)
saving_utils.save_plotly_fig(fig_frame_split_self_a, fig_frame_split_self_path_a)
saving_utils.save_plotly_fig(fig_frame_split_self_b, fig_frame_split_self_path_b)
'''
'''
print('Plotting first last 20 min of rest heatmap comparison...')
fig_20min_rest_path = os.path.join(output_experiment_path, 'plots', 'splits_self', 'splits_first_last_rest_20min')
fig_20min_rest = self.get_plot_first_last_x_min_behaviour(astroA, num_min=20, behaviour_ind='rest')
if fig_20min_rest is not None:
saving_utils.save_plotly_fig(fig_20min_rest, fig_20min_rest_path)
print('Plotting continuous 20 min rest heatmaps compared to start...')
fig_20min_cont_rest_path = os.path.join(output_experiment_path, 'plots', 'splits_self', 'cont_splits_first_last_rest_20min')
fig_20min_cont_rest = self.get_plot_x_min_rest_relative(astroA, num_min=20, behaviour_ind='rest')
if fig_20min_cont_rest is not None:
saving_utils.save_plotly_fig(fig_20min_cont_rest, fig_20min_cont_rest_path)
'''
'''
plt.ioff()
print('Plotting Size vs Time correlation plot...')
path = os.path.join(output_experiment_path, 'plots', 'size_v_time_corr')
areas = np.log(astroA.res_d['area'])
times = astroA.res_d['time_s']
r, p = stat_utils.get_pearsonr(times, areas)
df = pd.DataFrame({'Size': areas, 'Time': times})
title ='Size vs Time correlation plot'
text = 'r = {}, p < {}'.format(general_utils.truncate(r, 2), p)
for kind in ['reg', 'hex', 'kde']:
plotly_utils.seaborn_joint_grid(df, 'Size', 'Time', kind=kind, text=text)
plt.savefig(os.path.join(path, '{}.svg'.format(kind)))
plt.savefig(os.path.join(path, '{}.png'.format(kind)))
'''
'''
print('Split BEHAVIOUR GRIDS...')
n_chunks = 3
for bh in ['default', 'running', 'rest']:
event_grid_splits = aqua_utils.split_n_event_grids(astroA, bh=bh, n=n_chunks)
path = os.path.join(output_experiment_path, 'plots', 'split_behaviour_grids')
for i, event_grid_split in enumerate(event_grid_splits):
plot = plotly_utils.plot_contour(event_grid_split, title='{}-split {}/{}'.format(bh, i+1, len(event_grid_splits)))
saving_utils.save_plotly_fig(plot, os.path.join(path, 'bh_{}-split_{}-chunks_{}'.format(bh,i,n_chunks)))
'''
'''
print('HEATMAPS V2_2... (each astro day scaled with random)')
for dff_mode in ['False']:
#for bh in ['default', 'running', 'rest', 'stick_run_ind_15', 'stick_rest']:
for bh in ['default']:
print('THIS REPETITION LOOP MUST BE ONCE')
path = os.path.join(output_experiment_path, 'plots', 'behaviour_heatmaps_threshold_with_random')
d = self.get_individual_heatmaps_threshold_scaled(astroA, bh=bh, threshold=0.7, num_samples=3, dff_mode=dff_mode)
if d is None:
continue
saving_utils.save_plotly_fig(d['contour'], os.path.join(path, 'bh_{}-dff_{}'.format(bh, dff_mode)))
for i, contour_random in enumerate(d['contour_random']):
saving_utils.save_plotly_fig(contour_random, os.path.join(path, 'bh_{}-dff_{}-random_{}'.format(bh, dff_mode, i)))
'''
'''
#Every 60 seconds, whole vid
        with_downsample = True
downsample_length = int(astroA.fr * 60)
second_length = astroA.fr
bh_l = ['default', 'rest', 'running']
end_t = -1
start_t = 0
for bh in bh_l:
save_base_path = os.path.join(output_experiment_path, 'plots', 'video_plots-{}-d{}-e{}'.format(bh, downsample_length, end_t))
try:
os.makedirs(save_base_path)
except:
print('Folder exists')
self.make_event_appended_video(astroA,
bh=bh,
start_t=start_t,
end_t=end_t,
downsample_length=downsample_length,
save_base_path=save_base_path)
'''
'''
#Every 2 seconds, first 120 seconds
        with_downsample = True
downsample_length = int(astroA.fr * 2)
end_t = int(1200 * astroA.fr)
start_t = 0
second_length = astroA.fr
#bh_l = ['default', 'rest', 'running']
bh_l = ['default', 'rest', 'running']
for bh in bh_l:
save_base_path = os.path.join(output_experiment_path, 'plots', 'video_plots-{}-d{}-e{}'.format(bh, downsample_length, end_t))
try:
os.makedirs(save_base_path)
except:
print('Folder exists')
self.make_event_appended_video(astroA,
bh=bh,
start_t=start_t,
end_t=end_t,
downsample_length=downsample_length,
save_base_path=save_base_path)
'''
'''
bh_l = ['default', 'rest', 'running']
for bh in bh_l:
end_t = int(120*astroA.fr)
            downsample_length = int(astroA.fr * 2)
            time_sorted_events_trunc = sorted((i for i,e in enumerate(astroA.res_d['tEnd']) if (e < end_t)))
            save_base_path = os.path.join(output_experiment_path, 'plots', 'video_plots_precise-{}-d{}-e{}'.format(bh, downsample_length, end_t))
self.make_event_appended_video_precise(astroA,
event_l=time_sorted_events_trunc,
end_t=end_t,
downsample_length=downsample_length,
save_base_path=save_base_path)
'''
'''
bh_l = ['rest', 'running']
for bh in bh_l:
start_t = 0
end_t = int(1200 * astroA.fr)
downsample_length = int(astroA.fr * 2)
save_base_path = os.path.join(output_experiment_path, 'plots', 'video_plots_bh_frames-{}-d{}-e{}'.format(bh, downsample_length, end_t))
try:
os.makedirs(save_base_path)
except:
print('Folder exists')
self.make_event_appended_video_bh_frames(astroA,
bh=bh,
start_t=start_t,
end_t=end_t,
downsample_length=downsample_length,
save_base_path=save_base_path)
'''
def make_event_appended_video_bh_frames(self, astro, bh, start_t=0, end_t=-1, downsample_length=60, save_base_path=''):
curr_indices = astro.indices_d[bh][start_t:end_t]
if len(curr_indices) % downsample_length != 0:
curr_indices_fix = curr_indices[:-(len(curr_indices) % downsample_length)]
else:
curr_indices_fix = curr_indices
num_splits = len(curr_indices_fix) // downsample_length
curr_indices_split = {i : curr_indices_fix[i*downsample_length:(i+1)*downsample_length] for i in range(num_splits)}
curr_indices_split['default'] = astro.indices_d['default']
bh_event_subsets = aqua_utils.get_event_subsets(curr_indices_split, astro.res_d)
x2d_all = np.zeros([astro.input_shape[0], astro.input_shape[1]])
for i in range(num_splits):
print(i, '/', num_splits)
x2d = aqua_utils.get_event_grid_from_x2D(astro.res_d['x2D'][bh_event_subsets[i]], (astro.input_shape[0], astro.input_shape[1]))
x2d_all = x2d_all + x2d
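            # running-rate normalization: divide the cumulative event counts by the
            # number of frames accumulated so far, then scale by frames-per-minute so
            # the map reads as events per minute before the [0, 1] rescale below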
x2d_all_normalized = np.copy(x2d_all) / ((i+1) * (downsample_length)) * astro.minute_frames
#Linearly rescale 0-1
x2d_all_normalized = (x2d_all_normalized - np.min(x2d_all_normalized)) / (np.max(x2d_all_normalized) - np.min(x2d_all_normalized))
fig = plotly_utils.plot_contour(x2d_all_normalized, title='', tick_x=[0.2, 0.4, 0.6, 0.8])
saving_utils.save_plotly_fig(fig, os.path.join(save_base_path, '{:05d}'.format(i)), save_svg=False)
def make_event_appended_video(self, astro, bh='default', start_t=0, end_t=-1, downsample_length=60, save_base_path=''):
# Create array of (end_t - start_t) values consisting of event indices (lists) inside each frame
#Time sorted events [[time, event_id], ..] sorted by time
with_downsample = False if downsample_length == 1 else True
if end_t == -1:
end_t = astro.total_indices
time_sorted_events = deque(sorted((e,i) for i,e in enumerate(astro.res_d['tBegin'][astro.event_subsets[bh]])))
#Populate events over time: for each frame we have a list of event indices starting then
events_ot_l = []
for t in range(start_t, end_t):
events_ot_l.append([])
#As long as first element has same time, we pop to add to our list
while(len(time_sorted_events) != 0 and t == time_sorted_events[0][0]):
events_ot_l[t].append(time_sorted_events.popleft()[1])
#################################################################
#Downsample
if with_downsample:
new_events_ot_l = general_utils.merge_l_l(events_ot_l, downsample_length)
else:
# copy it, not really need to
new_events_ot_l = [ev for ev in events_ot_l]
# Generate plots over time
x2d_all = np.zeros([astro.input_shape[0], astro.input_shape[1]])
for i, segment_events_l in enumerate(new_events_ot_l):
x2d = aqua_utils.get_event_grid_from_x2D(astro.res_d['x2D'][segment_events_l], (astro.input_shape[0], astro.input_shape[1]))
x2d_all = x2d_all + x2d
#Normalize
x2d_all_normalized = np.copy(x2d_all) / ((i+1) * (downsample_length if with_downsample else 1)) * astro.minute_frames
#Linearly rescale 0-1
x2d_all_normalized = (x2d_all_normalized - np.min(x2d_all_normalized)) / (np.max(x2d_all_normalized) - np.min(x2d_all_normalized))
fig = plotly_utils.plot_contour(x2d_all_normalized, title='', tick_x=[0.2, 0.4, 0.6, 0.8])
saving_utils.save_plotly_fig(fig, os.path.join(save_base_path, '{:05d}'.format(i)), save_svg=False)
#Pass event list to choose which events. E.g. events in first 2 minutes
#Slow but potentially prettier method. You can see each individual event its duration
def make_event_appended_video_precise(self, astro_curr, event_l, end_t, downsample_length, save_base_path):
dim_1 = astro_curr.input_shape[0]
dim_2 = astro_curr.input_shape[1]
#dim_3 = np.sum([x[2] for x in astro_curr.input_shape_l])
dim_3 = end_t
a = np.zeros([dim_1, dim_2, dim_3])
for i, event in enumerate(astro_curr.res_d['x3D'][event_l]):
print(i)
unraveled = np.unravel_index(event, [dim_1, dim_2, dim_3], order='F')
begin_time = np.min(unraveled[2])
end_time = np.max(unraveled[2])
added_arr = np.zeros([dim_1, dim_2])
for u_i in range(len(unraveled[0])):
c_0 = unraveled[0][u_i]
c_1 = unraveled[1][u_i]
t = unraveled[2][u_i]
#print('begin {} end {}'.format(begin_time, end_time))
if added_arr[c_0, c_1] == 1:
continue
a[c_0, c_1, t:] += 1
added_arr[c_0, c_1] = 1
        with_downsample = False if downsample_length == 1 else True
        for i in range(a.shape[2] // (downsample_length if with_downsample else 1)):
            print(i)
            x2d = np.sum(a[:, :, i*downsample_length:(i+1)*downsample_length], axis=2)
            #Normalize
            x2d_all_normalized = np.copy(x2d) / ((i+1) * (downsample_length if with_downsample else 1)) * astro_curr.minute_frames
            #Linearly rescale 0-1
            x2d_all_normalized = (x2d_all_normalized - np.min(x2d_all_normalized)) / (np.max(x2d_all_normalized) - np.min(x2d_all_normalized))
            fig = plotly_utils.plot_contour(x2d_all_normalized, title='', tick_x=[0.2, 0.4, 0.6, 0.8])
            saving_utils.save_plotly_fig(fig, os.path.join(save_base_path, '{:05d}'.format(i)), save_svg=False)
        return a
from __future__ import division, print_function
import contextlib
from itertools import product
import sys
import warnings
import numpy as np
from numba import unittest_support as unittest
from numba import jit, errors
from .support import TestCase, tag
from .matmul_usecase import matmul_usecase, needs_matmul, needs_blas
try:
import scipy.linalg.cython_lapack
has_lapack = True
except ImportError:
has_lapack = False
needs_lapack = unittest.skipUnless(has_lapack,
"LAPACK needs Scipy 0.16+")
def dot2(a, b):
return np.dot(a, b)
def dot3(a, b, out):
return np.dot(a, b, out=out)
def vdot(a, b):
    return np.vdot(a, b)
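# A minimal sketch (not part of the original test module) of how the usecases
# above are typically exercised: compile with numba's jit and compare against
# the NumPy reference. The array shapes here are illustrative only.
#
#     cdot2 = jit(nopython=True)(dot2)
#     a = np.ones((3, 4)); b = np.ones((4, 5))
#     np.testing.assert_allclose(cdot2(a, b), np.dot(a, b))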
import h5py
import numpy as np
import datetime
import matplotlib.pyplot as plt
from matplotlib import dates
import pyresample as pr
from scipy.spatial import cKDTree
from pyproj import Proj
from scipy.interpolate import interp1d
import scipy
import pandas as pd
import netCDF4
def apr3tocit(apr3filename,fl,sphere_size,psd_filename_2ds,psd_filename_HVPS,query_k = 1,plotson=False,QC=False,slimfast=True,cit_aver=False,cit_aver2=False,
attenuation_correct=False,O2H2O={},per_for_atten = 50,
return_indices=False,BB=True,bbguess=500,
cal_adj_bool = False,cal_adj=0,
cloudtop=True,rollfix=True):
"""
=================
This function finds either the closest gate or averages over a number of gates (query_k) nearest to
the citation aircraft in the radar volume of the APR3. It can return a dict of the original full length
arrays and the matched arrays.
=====
Vars:
=====
apr3filename = str, filename of the apr hdf file
fl = awot object, the citation awot object
sphere_size = int, maximum distance allowed in the kdTree search
psd_filename_2ds = str, filename of the processed 2DS file
psd_filename_HVPS = str, filename of the processed HVPS3 file
query_k = int, number of gates considered in the average (if 1, use closest)
plotson = boolean, will create some premade plots that describe the matched data
QC = boolean, will apply a simple QC method: eliminates any gate within 0.5 km to the surface and the outliers
    (plus/minus 1.5 IQR)
slimfast = boolean, will not save original data. Cuts down on output file size by only outputting the matched data and the citation data.
    cit_aver = boolean, averages the citation data variables using a 5 second moving average (there is overlap)
    cit_aver2 = boolean, averages the citation data variables using a 5 second discrete average (there is NO overlap)
    O2H2O = dict, data from sounding to correct for attenuation from O2 and H2O vapor
attenuation_correct = boolean, corrects for attenuation using LWC prof and Sounding. Uses 50th percentile of LWC Prof
per_for_atten = int, the percentile for the supercooled liquid water profile used in the attenuation correction.
    return_indices = boolean, returns the matched gates in 1d coords
BB = boolean, mask gates from the BB and lower. Masks data using the BB_alt algorithm
bbguess = int, give your first guess of where the Bright Band is to assist the BB_alt algorithm
cal_adj_bool = bool, turn on calibration adjustment or not.
cal_adj = array, array of the adjustment needed for correct calibration between frequencies. [ka_adj, w_adj]
    cloudtop = bool, eliminates sensitivity issues with the Ku-band data (~ < 10 dBZ) by masking out the cloudtop noise using a gaussian filter
    rollfix = bool, turn on or off the masking of data where the plane is rolling more than 10 degrees (the 10-degree threshold can be changed).
=================
"""
#get citation times (datetimes)
cit_time = fl['time']['data']
#Eliminate BB?
if BB:
#Get rid of anything below the melting level + 250 m
apr = apr3read(apr3filename)
#there are two methods to this. One is more conservative (using mean Ku) the other more intense with LDR Ku
#apr = BB_alt(apr,bbguess) #old
if cloudtop:
print('Removing cloudtop noise..')
apr = cloudtopmask(apr)
###new BB tech 2/27/18 RJC
print('Removing BB and below')
apr = mask_surf(apr)
apr['ldr'] = np.ma.masked_where(apr['Ku'].mask,apr['ldr'])
#find bb profs
bb = precip_echo_filt3D(apr['ldr'],thresh=7)
ind1 = np.where(bb[12,:] == 1) #BB profiles based on LDR
top_a = find_bb(apr,ind1)
bb_long = extend_bb(ind1,apr['timedates'][12,:],top_a)
apr['Ku'][:,:,:] = np.ma.masked_where(apr['alt_gate'][:,:,:] <= bb_long,apr['Ku'][:,:,:])
apr['Ka'] = np.ma.masked_where(apr['Ku'].mask,apr['Ka'])
apr['W'] = np.ma.masked_where(apr['Ku'].mask,apr['W'])
###
#correct for attenuation using SLW and Ku
if attenuation_correct:
print('correcting for attenuation...')
apr = atten_cor3(apr,fl,per_for_atten,O2H2O,lwc_alt=False)
print('corrected.')
maxchange = apr['maxchange']
elif attenuation_correct:
print('correcting for attenuation...')
apr = atten_cor2(apr3filename,fl,per_for_atten,O2H2O,lwc_alt=False)
print('corrected.')
maxchange = apr['maxchange']
else:
apr = apr3read(apr3filename)
if cloudtop:
print('Removing cloudtop noise..')
apr = cloudtopmask(apr)
if cal_adj_bool:
print('adding calibration means...')
        # These values come from the analysis performed by 3 research groups: NASA JPL, the University of Leicester, and the University of Illinois. Techniques use sigma_0 of the ocean surface, comparison of frequencies at low Z, and numerical simulations of particles. (error/uncertainty: +- 0.5 dB)
apr['Ku'] = apr['Ku'] + 0.8
apr['Ka'] = apr['Ka'] + 1
        #Whh is the only one with a time-variant calibration adjustment
apr['W'] = apr['W'] + cal_adj
#While calibrating the data, radar artifacts showed up when the roll of the aircraft was > 10degrees.
if rollfix:
roll = apr['roll']
roll3d = np.zeros(apr['Ku'].shape)
for i in np.arange(0,apr['Ku'].shape[1]):
for j in np.arange(0,apr['Ku'].shape[2]):
roll3d[:,i,j] = roll[i,j]
apr['Ku'] = np.ma.masked_where(np.abs(roll3d) > 10, apr['Ku'])
apr['Ka'] = np.ma.masked_where(np.abs(roll3d) > 10, apr['Ka'])
apr['W'] = np.ma.masked_where(np.abs(roll3d) > 10, apr['W'])
#Get APR3 times (datetimes)
time_dates = apr['timedates'][:,:]
    #fix a few radar files where w-band disappears
if time_dates[12,0] >= datetime.datetime(2015,12,18,6,58):
for i in np.arange(0,time_dates.shape[0]):
for j in np.arange(0,550):
temp = np.ma.masked_where(time_dates[12,:] >= datetime.datetime(2015,12,18,7,6),apr['W'][j,i,:])
apr['W'][j,i,:] = temp
if time_dates[12,0] >= datetime.datetime(2015,12,1,23,43,48) and time_dates[12,0] <=datetime.datetime(2015,12,1,23,43,49):
for i in np.arange(0,time_dates.shape[0]):
for j in np.arange(0,550):
temp = np.ma.masked_where(time_dates[12,:] >= datetime.datetime(2015,12,2,0,1,40),apr['W'][j,i,:])
apr['W'][j,i,:] = temp
    #Check if radar file is large enough to use (50 gates is arbitrary)
if time_dates[12,:].shape[0] < 50:
print('Limited radar gates in time')
#return
#
#Load PSD
dtime_psd,ND,dD,midpoints = PSD_load(psd_filename_2ds,psd_filename_HVPS,day = time_dates[0,0].day,month=time_dates[0,0].month)
#
#Make ND a masked array (i.e. get rid of nans from loading it in)
ind = np.isnan(ND)
ND = np.ma.masked_where(ind,ND)
#for plotting routine
fontsize=14
#
    #Variables needed for the kdtree
leafsize = 16
query_eps = 0
query_p=2
query_distance_upper_bound = sphere_size
query_n_jobs =1
Barnes = True
K_d = sphere_size
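    # Barnes/K_d appear to select distance-weighted (Barnes-style) averaging of
    # the query_k matched gates, with K_d acting as the weighting length scale.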
#
#Pre-Determine arrays
Ku_gate = np.ma.array([])
Ka_gate = np.ma.array([])
W_gate = np.ma.array([])
DFR_gate = np.ma.array([])
DFR2_gate = np.ma.array([])
DFR3_gate = np.ma.array([])
lon_c = np.ma.array([])
lat_c = np.ma.array([])
alt_c = np.ma.array([])
t_c = np.ma.array([])
lon_r = np.ma.array([])
lat_r = np.ma.array([])
alt_r = np.ma.array([])
t_r = np.ma.array([])
dis_r = np.ma.array([])
ind_r = np.ma.array([])
conc_hvps3 = np.ma.array([])
T_c = np.ma.array([])
lwc_c = np.ma.array([])
ice_c = np.ma.array([])
cdp_c = np.ma.array([])
twc_c = np.ma.array([])
iwc_c = np.ma.array([])
#
#Set reference point (Currently Mount Olympus, Washington)
lat_0 = 47.7998
lon_0 = -123.7066
#
#Set up map projection to calculate cartesian distances
p = Proj(proj='laea', zone=10, ellps='WGS84',
lat_0=lat_0,
lon_0=lon_0)
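    # note: 'zone' is a UTM parameter and is not used by the 'laea' projection;
    # the azimuthal equal-area projection is defined here by lat_0/lon_0 alone.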
#
#make a 1d array of times and find radar start and end times
td = np.ravel(time_dates)
datestart = td[0]
dateend = td[td.shape[0]-1]
#
#Expand apr3 time to plus/minus 4 mins (added 11/8/17) 4 minutes is arbitrary, but what I used for 'good' matches.
datestart = datestart - datetime.timedelta(minutes=4)
dateend = dateend + datetime.timedelta(minutes=4)
#
#Constrain Citation data to radar time
ind = np.where(cit_time > datestart)
ind2 = np.where(cit_time < dateend)
ind3 = np.intersect1d(ind,ind2)
cit_time2 = fl['time']['data'][ind3]
cit_lon = fl['longitude']['data'][ind3]
cit_lat = fl['latitude']['data'][ind3]
cit_alt = fl['altitude']['data'][ind3]
bigins = 0
#
#Average Citation data
if cit_aver:
#Moving average tech.
temp1 = fl['temperature']['data']
temp2 = fl['lwc1']['data']
temp3 = fl['mso_frequency']['data']
temp4 = fl['Conc_CDP']['data']
temp5 = fl['twc']['data']
temp6 = fl['Nev_IWC']['data']
temp7 = fl['dewpoint_temperature1']['data']
temp8 = fl['Wwind']['data']
temp9 = fl['static_pressure']['data']
temp10 = fl['mixing_ratio']['data']
temp11 = fl['Uwind']['data']
temp12 = fl['Vwind']['data']
nsecs = 2
indarray1 = ind3 - nsecs
indarray2 = ind3 + nsecs + 1
temperature_1 = np.ma.zeros(len(ind3))
lwc = np.ma.zeros(len(ind3))
ice = np.ma.zeros(len(ind3))
cdp = np.ma.zeros(len(ind3))
twc = np.ma.zeros(len(ind3))
iwc = np.ma.zeros(len(ind3))
td = np.ma.zeros(len(ind3))
w = np.ma.zeros(len(ind3))
P = np.ma.zeros(len(ind3))
mix = np.ma.zeros(len(ind3))
U = np.ma.zeros(len(ind3))
V = np.ma.zeros(len(ind3))
for i in np.arange(0,len(ind3)):
temperature_1[i] = np.ma.mean(temp1[indarray1[i]:indarray2[i]])
lwc[i] = np.ma.mean(temp2[indarray1[i]:indarray2[i]])
ice[i] = np.ma.mean(temp3[indarray1[i]:indarray2[i]])
cdp[i] = np.ma.mean(temp4[indarray1[i]:indarray2[i]])
twc[i] = np.ma.mean(temp5[indarray1[i]:indarray2[i]])
iwc[i] = np.ma.mean(temp6[indarray1[i]:indarray2[i]])
td[i] = np.ma.mean(temp7[indarray1[i]:indarray2[i]])
w[i] = np.ma.mean(temp8[indarray1[i]:indarray2[i]])
P[i] = np.ma.mean(temp9[indarray1[i]:indarray2[i]])
mix[i] = np.ma.mean(temp10[indarray1[i]:indarray2[i]])
U[i] = np.ma.mean(temp11[indarray1[i]:indarray2[i]])
V[i] = np.ma.mean(temp12[indarray1[i]:indarray2[i]])
#Find average N(D)
ND_sub_a = np.ma.zeros(ND[0,:].shape)
ND_aver = np.ma.zeros([ind3.shape[0],ND[0,:].shape[0]])
for i in np.arange(0,ind3.shape[0]):
if indarray2[i] > ND.shape[0]:
print('indarray4 is too big')
break
ND_sub = ND[indarray1[i]:indarray2[i],:]
ind = np.where(ND_sub < 0)
ND_sub[ind] = np.ma.masked
for j in np.arange(ND.shape[1]):
ND_sub_a[j] = np.ma.mean(ND_sub[:,j])
ND_aver[i,:] = ND_sub_a
elif cit_aver2:
#Discrete average tech.
temp1 = fl['temperature']['data'][ind3]
temp2 = fl['lwc1']['data'][ind3]
temp3 = fl['mso_frequency']['data'][ind3]
temp4 = fl['Conc_CDP']['data'][ind3]
temp5 = fl['twc']['data'][ind3]
temp6 = fl['Nev_IWC']['data'][ind3]
temp7 = fl['dewpoint_temperature1']['data'][ind3]
temp8 = fl['Wwind']['data'][ind3]
temp9 = fl['static_pressure']['data'][ind3]
temp10 = fl['mixing_ratio']['data'][ind3]
temp11 = fl['Uwind']['data'][ind3]
temp12 = fl['Vwind']['data'][ind3]
ND = ND[ind3,:]
max_dtime = cit_time2.max()
min_dtime = cit_time2.min()
total_seconds = max_dtime-min_dtime
total_seconds = total_seconds.total_seconds()
dtime_1s = np.zeros(int(total_seconds)-1,dtype=object)
its = dtime_1s.shape[0]/5.
dtime_5s = np.zeros(int(its),dtype=object)
array = np.ma.zeros(int(its))
array2 = np.ma.zeros(int(its))
array3 = np.ma.zeros(int(its))
array4 = np.ma.zeros(int(its))
array5 = np.ma.zeros(int(its))
array6 = np.ma.zeros(int(its))
array7 = np.ma.zeros(int(its))
array8 = np.ma.zeros(int(its))
array9 = np.ma.zeros(int(its))
array10 = np.ma.zeros(int(its))
array11 = np.ma.zeros(int(its))
array12 = np.ma.zeros(int(its))
array13 = np.ma.zeros(int(its))
array14 = np.ma.zeros(int(its))
array15 = np.ma.zeros(int(its))
#create dtime_array monotonic increase but 5 seconds
for i in np.arange(0,int(its)):
dtime_5s[i] = min_dtime + datetime.timedelta(seconds = i*5)
print('time averaging into 5 second averages...')
for i in np.arange(1,dtime_5s.shape[0]):
time_left = dtime_5s[i-1]
time_right = dtime_5s[i]
ind = np.where(cit_time2 >= time_left)
ind2 = np.where(cit_time2 < time_right)
ind3 = np.intersect1d(ind,ind2)
if len(ind3) >= 1:
temp = temp1[ind3]
array[i-1] = np.ma.mean(temp)
temp = temp2[ind3]
array2[i-1] = np.ma.mean(temp)
temp = temp3[ind3]
array3[i-1] = np.ma.mean(temp)
temp = temp4[ind3]
array4[i-1] = np.ma.mean(temp)
temp = temp5[ind3]
array5[i-1] = np.ma.mean(temp)
temp = temp6[ind3]
array6[i-1] = np.ma.mean(temp)
temp = temp7[ind3]
array7[i-1] = np.ma.mean(temp)
temp = temp8[ind3]
array8[i-1] = np.ma.mean(temp)
temp = temp9[ind3]
array9[i-1] = np.ma.mean(temp)
temp = temp10[ind3]
array10[i-1] = np.ma.mean(temp)
temp = temp11[ind3]
array11[i-1] = np.ma.mean(temp)
temp = temp12[ind3]
array12[i-1] = np.ma.mean(temp)
temp = cit_lat[ind3]
array13[i-1] = np.ma.mean(temp)
temp = cit_lon[ind3]
array14[i-1] = np.ma.mean(temp)
                temp = cit_alt[ind3]
array15[i-1] = np.ma.mean(temp)
else:
array[i-1] = np.ma.masked
array2[i-1] = np.ma.masked
array3[i-1] = np.ma.masked
array4[i-1] = np.ma.masked
array5[i-1] = np.ma.masked
array6[i-1] =np.ma.masked
array7[i-1] = np.ma.masked
array8[i-1] = np.ma.masked
array9[i-1] = np.ma.masked
array10[i-1] = np.ma.masked
array11[i-1] = np.ma.masked
array12[i-1] = np.ma.masked
array13[i-1] = np.ma.masked
array14[i-1] = np.ma.masked
array15[i-1] = np.ma.masked
continue
#pre-allocate arrays
ND_sub_a = np.ma.zeros(ND[0,:].shape)
ND_aver = np.ma.zeros([dtime_5s.shape[0],ND[0,:].shape[0]])
#
ind = np.where(ND < 0)
ND[ind] = np.ma.masked
for i in np.arange(1,dtime_5s.shape[0]):
time_left = dtime_5s[i-1]
time_right = dtime_5s[i]
ind = np.where(cit_time2 >= time_left)
ind2 = np.where(cit_time2 < time_right)
ind3 = np.intersect1d(ind,ind2)
if len(ind3) >= 1:
ND_sub = ND[ind3,:]
for j in np.arange(ND.shape[1]):
ND_sub_a[j] = np.ma.mean(ND_sub[:,j])
ND_aver[i-1,:] = ND_sub_a
else:
ND_aver[i-1,:] = np.ma.masked
#get rid of last point (less than 5 obs needed for average)
temperature_1 = array[:-1]
lwc = array2[:-1]
ice = array3[:-1]
cdp = array4[:-1]
twc = array5[:-1]
iwc = array6[:-1]
td = array7[:-1]
w = array8[:-1]
P = array9[:-1]
mix = array10[:-1]
U = array11[:-1]
V = array12[:-1]
cit_lat = array13[:-1]
cit_lon = array14[:-1]
cit_alt = array15[:-1]
ND_aver = ND_aver[:-1,:]
#In reality our time should be the midpoint of each time interval. I will add 2.5 seconds to the 5s array
cit_time2 = dtime_5s[:-1] + datetime.timedelta(seconds=2.5)
#get rid of masked spatial cit data. Kd tree doesnt liked masked values (i.e. fill_values sneak in)
ind = cit_lon.mask
cit_lon = cit_lon[~ind]
cit_lat = cit_lat[~ind]
cit_alt = cit_alt[~ind]
cit_time2 = cit_time2[~ind]
temperature_1 = temperature_1[~ind]
lwc = lwc[~ind]
ice = ice[~ind]
cdp = cdp[~ind]
twc = twc[~ind]
iwc = iwc[~ind]
td = td[~ind]
w = w[~ind]
P = P[~ind]
mix = mix[~ind]
U = U[~ind]
V = V[~ind]
ND_aver = ND_aver[~ind,:]
ind = cit_lat.mask
cit_lon = cit_lon[~ind]
cit_lat = cit_lat[~ind]
cit_alt = cit_alt[~ind]
cit_time2 = cit_time2[~ind]
temperature_1 = temperature_1[~ind]
lwc = lwc[~ind]
ice = ice[~ind]
cdp = cdp[~ind]
twc = twc[~ind]
iwc = iwc[~ind]
td = td[~ind]
w = w[~ind]
P = P[~ind]
mix = mix[~ind]
U = U[~ind]
V = V[~ind]
ND_aver = ND_aver[~ind,:]
ind = cit_alt.mask
cit_lon = cit_lon[~ind]
cit_lat = cit_lat[~ind]
cit_alt = cit_alt[~ind]
cit_time2 = cit_time2[~ind]
temperature_1 = temperature_1[~ind]
lwc = lwc[~ind]
ice = ice[~ind]
cdp = cdp[~ind]
twc = twc[~ind]
iwc = iwc[~ind]
td = td[~ind]
w = w[~ind]
P = P[~ind]
mix = mix[~ind]
U = U[~ind]
V = V[~ind]
ND_aver = ND_aver[~ind,:]
else:
#no averaging tech.
temperature_1 = fl['temperature']['data'][ind3]
lwc = fl['lwc1']['data'][ind3]
ice = fl['mso_frequency']['data'][ind3]
cdp = fl['Conc_CDP']['data'][ind3]
twc = fl['twc']['data'][ind3]
iwc = fl['Nev_IWC']['data'][ind3]
td = fl['dewpoint_temperature1']['data'][ind3]
w = fl['Wwind']['data'][ind3]
P = fl['static_pressure']['data'][ind3]
mix = fl['mixing_ratio']['data'][ind3]
U = fl['Uwind']['data'][ind3]
V = fl['Vwind']['data'][ind3]
ND = ND[ind3,:]
#
# ND is in cm**-4 and dD+midpoints is in mm
#Find the echotop of Ku at near nadir
print('finding Ku echotop and constraining Cit...')
precip_yn = precip_echo_filt(apr['Ku'][:,12,:])
ind = np.where(precip_yn ==1)
ku_filt = np.squeeze(apr['Ku'][:,12,ind])
alt_filt = np.squeeze(apr['alt_gate'][:,12,ind])
echo = find_echo(ku_filt,alt_filt)
scan = 12
lat_0 = apr['lat'][scan,0]
lon_0 = apr['lon'][scan,0]
p2 = Proj(proj='laea', zone=10, ellps='WGS84',
lat_0=lat_0,
lon_0=lon_0)
x = apr['lon_gate'][:,scan,:]
y = apr['lat_gate'][:,scan,:]
x2,y2 = p2(x,y)
x3,y3 = p2(lon_0,lat_0)
x_c,y_c = p2(cit_lon,cit_lat)
alt_c = cit_alt
x4 = np.array([])
y4 = np.array([])
x2_c = np.array([])
y2_c = np.array([])
for j in np.arange(0,x2.shape[1]):
x4 = np.append(x4,x2[0,j]-x3)
y4 = np.append(y4,y2[0,j]-y3)
for j in np.arange(0,x_c.shape[0]):
x2_c = np.append(x2_c,x_c[j]-x3)
y2_c = np.append(y2_c,y_c[j]-y3)
R = np.sqrt(x4**2+y4**2)/1000.
R_c = np.sqrt(x2_c**2+y2_c**2)/1000.
R_echo = R[ind]
echo_func = interp1d(R_echo,echo,kind='cubic',bounds_error=False)
echo_c = echo_func(R_c)
ind = np.where(alt_c <= echo_c + 50) #can change this threshold, just arbitrary
cit_lon = cit_lon[ind]
cit_lat = cit_lat[ind]
cit_alt = cit_alt[ind]
cit_time2 = cit_time2[ind]
temperature_1 = temperature_1[ind]
lwc = lwc[ind]
ice = ice[ind]
cdp = cdp[ind]
twc = twc[ind]
iwc = iwc[ind]
td = td[ind]
w = w[ind]
P = P[ind]
mix = mix[ind]
U = U[ind]
V = V[ind]
ND_aver = np.squeeze(ND_aver[ind,:])
R_c = R_c[ind]
echo_c = echo_c[ind]
#
if BB:
print('Constraining Cit above BB..')
bb_func = interp1d(R,bb_long,kind='cubic',bounds_error=False)
bb_c = bb_func(R_c)
ind = np.where(cit_alt >= bb_c - 100) #can change this threshold, just arbitrary
cit_lon = cit_lon[ind]
cit_lat = cit_lat[ind]
cit_alt = cit_alt[ind]
cit_time2 = cit_time2[ind]
temperature_1 = temperature_1[ind]
lwc = lwc[ind]
ice = ice[ind]
cdp = cdp[ind]
twc = twc[ind]
iwc = iwc[ind]
td = td[ind]
w = w[ind]
P = P[ind]
mix = mix[ind]
U = U[ind]
V = V[ind]
ND_aver = np.squeeze(ND_aver[ind,:])
R_c = R_c[ind]
echo_c = echo_c[ind]
#
#Mask out warmer than 0 (i.e. when particles melt)
ind = np.where(temperature_1 > 0)
ND_aver[ind,:] = np.ma.masked
#
#Calculate some PSD parameters (could add other things here, i.e. running IGF for Mu,lambda and N0)
rho_tot2,iwc_HY = rho_e(midpoints/10.,dD/10.,ND_aver,np.zeros(ND_aver.shape),2,2,twc,return_ice=True) #HYs
rho_tot3,iwc_BF = rho_e(midpoints/10.,dD/10.,ND_aver,np.zeros(ND_aver.shape),2,3,twc,return_ice=True) #BF
rho_tot4 = rho_e(midpoints/10.,dD/10.,ND_aver,np.zeros(ND_aver.shape),2,4,twc) #BF
dmm_BF = Dmm(ND_aver*1e8,midpoints/1000.,dD/1000.,0)
dmm_HY = Dmm(ND_aver*1e8,midpoints/1000.,dD/1000.,1)
# rho_tot2 = 0
# rho_tot3 =0
# dmm_BF = Dmm(ND_aver/1e8,midpoints/1000.,dD/1000.,0)
# dmm_HY = Dmm(ND_aver/1e8,midpoints/1000.,dD/1000.,1)
#
#Print out number of potential match points
print(cit_lon.shape)
#
#Make 1-D arrays of radar spatial data
apr_x = np.ravel(apr['lon_gate'][:,:,:])
apr_y = np.ravel(apr['lat_gate'][:,:,:])
apr_alt = np.ravel(apr['alt_gate'][:,:,:])
apr_t = np.ravel(apr['time_gate'][:,:,:])
#
#Make 1-D arrays of radar data
apr_ku = np.ma.ravel(apr['Ku'][:,:,:])
apr_ka = np.ma.ravel(apr['Ka'][:,:,:])
apr_w = np.ma.ravel(apr['W'][:,:,:])
#
#If you want to neglect masked gates throw them out here (Speeds things up and gives better results)
#ku
ind = apr_ku.mask
apr_x = apr_x[~ind]
apr_y = apr_y[~ind]
apr_alt = apr_alt[~ind]
apr_t = apr_t[~ind]
apr_ku = apr_ku[~ind]
apr_ka = apr_ka[~ind]
apr_w = apr_w[~ind]
#ka
ind = apr_ka.mask
apr_x = apr_x[~ind]
apr_y = apr_y[~ind]
apr_alt = apr_alt[~ind]
apr_t = apr_t[~ind]
apr_ku = apr_ku[~ind]
apr_ka = apr_ka[~ind]
apr_w = apr_w[~ind]
#w
ind = apr_w.mask
apr_x = apr_x[~ind]
apr_y = apr_y[~ind]
apr_alt = apr_alt[~ind]
apr_t = apr_t[~ind]
apr_ku = apr_ku[~ind]
apr_ka = apr_ka[~ind]
apr_w = apr_w[~ind]
#
    #Use projection to get cartesian distances
apr_x2,apr_y2 = p(apr_x,apr_y)
cit_x2,cit_y2 = p(cit_lon,cit_lat)
#
#Kdtree things (this is where the matchups are found)
    kdt = cKDTree(list(zip(apr_x2, apr_y2, apr_alt)), leafsize=leafsize)
    prdistance, prind1d = kdt.query(list(zip(cit_x2, cit_y2, cit_alt)), k=query_k, eps=query_eps, p=query_p,
                                    distance_upper_bound=query_distance_upper_bound, n_jobs=query_n_jobs)
#
#if query_k >1 means you are considering more than one gate and an average is needed
if query_k > 1:
        #Issue with prind1d being the size of apr_ku... that means that it is outside your allowed upper bound (sphere_size)
ind = np.where(prind1d == apr_ku.shape[0])
if len(ind[0]) > 0 or len(ind[1]) > 0:
print('gate was outside distance upper bound, eliminating those instances')
#mask values outside search area. Actually setting values to 0?
# prind1d = np.ma.masked_where(prind1d == apr_ku.shape[0],prind1d)
# prdistance = np.ma.masked_where(prind1d == apr_ku.shape[0],prdistance)
prind1d[ind] = np.ma.masked
prdistance[ind] = np.ma.masked
if QC:
#Eliminate observations that are outliers before averaging the data (i.e. get rid of skin paints)
Ku_sub = apr_ku[prind1d]
Ku_sub = np.ma.masked_where(prind1d == 0,Ku_sub)
Q_med = np.array([])
Q_max = np.array([])
Q_min = np.array([])
Q_1 = np.array([])
Q_2 = np.array([])
n_1 = np.array([])
            for i in np.arange(Ku_sub.shape[0]):
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
import torch.utils.data as Data
import torchvision.utils
import torch.nn.functional as F
from torchvision import models
from customized_utils import (
if_violate_constraints,
customized_standardize,
customized_inverse_standardize,
recover_fields_not_changing,
decode_fields,
is_distinct_vectorized
)
class VanillaDataset(Data.Dataset):
def __init__(self, X, y, one_hot=False, to_tensor=False):
self.X = X
self.y = y
self.one_hot = one_hot
self.to_tensor = to_tensor
def __len__(self):
return len(self.y)
def __getitem__(self, idx):
if self.to_tensor:
return (
torch.from_numpy(np.array(self.X[idx])),
torch.from_numpy(np.array(self.y[idx])),
)
else:
return (self.X[idx], self.y[idx])
class BNN(nn.Module):
def __init__(self, input_size, output_size, device=None):
super(BNN, self).__init__()
# self.layers = nn.Sequential(
# nn.Linear(input_size, 150),
# nn.Dropout(0.5),
# nn.ReLU(),
# # nn.Linear(20, 20),
# # nn.Dropout(0.5),
# # nn.ReLU(),
# nn.Linear(150, output_size),
# nn.Sigmoid())
self.fc1 = nn.Linear(input_size, 150)
self.dropout = nn.Dropout(0.5)
self.relu1 = nn.ReLU()
self.fc_end = nn.Linear(150, output_size)
self.sigmoid = nn.Sigmoid()
if not device:
self.device = torch.device("cuda")
else:
self.device = device
def forward(self, x, return_logits=False):
# x = torch.flatten(x, 1)
# logits = self.layers(x.float())
x = self.fc1(x)
x = self.dropout(x)
x = self.relu1(x)
x = self.fc_end(x)
logits = self.sigmoid(x)
return logits
def predict(self, x):
x = torch.from_numpy(x).to(self.device).float()
out = self.forward(x)
out = torch.round(out)
return out.cpu().detach().numpy()
def predict_proba(self, x):
if isinstance(x, np.ndarray):
is_numpy = True
else:
is_numpy = False
if is_numpy:
x = torch.from_numpy(x).to(self.device).float()
out = self.forward(x)
out = torch.stack([1 - out, out], dim=1).squeeze()
# print(out.cpu().detach().numpy().shape)
if is_numpy:
return out.cpu().detach().numpy()
else:
return out
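# A minimal sketch (assumptions: the BNN above is intended as an MC-dropout
# model, and `model`/`x` are a trained BNN and a float32 numpy batch). Keeping
# the module in train() mode leaves nn.Dropout active, so repeated forward
# passes give a Monte Carlo estimate of predictive uncertainty. The name below
# is illustrative, not part of the original API.
def mc_dropout_predict(model, x, n_samples=20):
    model.train()  # keep dropout stochastic at inference time
    with torch.no_grad():
        xt = torch.from_numpy(x).to(model.device).float()
        # n_samples independent stochastic forward passes
        draws = torch.stack([model(xt) for _ in range(n_samples)], dim=0)
    return draws.mean(dim=0).cpu().numpy(), draws.std(dim=0).cpu().numpy()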
class SimpleNet(nn.Module):
def __init__(self, input_size, hidden_size, num_classes, device=None):
super(SimpleNet, self).__init__()
self.fc1 = nn.Linear(input_size, hidden_size)
self.relu1 = nn.ReLU()
self.fc2 = nn.Linear(hidden_size, hidden_size)
self.relu2 = nn.ReLU()
self.fc_end = nn.Linear(hidden_size, num_classes)
self.sigmoid = nn.Sigmoid()
if not device:
self.device = torch.device("cuda")
else:
self.device = device
def extract_embed(self, x):
x = torch.from_numpy(x).to(self.device).float()
out = self.fc1(x)
out = self.relu1(out)
out = self.fc2(out)
out = self.relu2(out)
return out.cpu().detach().numpy()
def forward(self, x):
out = self.fc1(x)
out = self.relu1(out)
out = self.fc2(out)
out = self.relu2(out)
out = self.fc_end(out)
out = self.sigmoid(out)
return out
def predict(self, x):
x = torch.from_numpy(x).to(self.device).float()
out = self.forward(x)
out = torch.round(out)
return out.cpu().detach().numpy()
def predict_proba(self, x):
if isinstance(x, np.ndarray):
is_numpy = True
else:
is_numpy = False
if is_numpy:
x = torch.from_numpy(x).to(self.device).float()
out = self.forward(x)
out = torch.stack([1 - out, out], dim=1).squeeze()
# print(out.cpu().detach().numpy().shape)
if is_numpy:
return out.cpu().detach().numpy()
else:
return out
# class SimpleNet(nn.Module):
# def __init__(self, input_size, hidden_size, num_classes, device=None):
# super(SimpleNet, self).__init__()
# self.fc1 = nn.Linear(input_size, hidden_size)
# self.tanh = nn.Tanh()
# self.fc_last = nn.Linear(hidden_size, num_classes)
# self.sigmoid = nn.Sigmoid()
#
# if not device:
# self.device = torch.device("cuda")
# else:
# self.device = device
# def extract_embed(self, x):
# x = torch.from_numpy(x).to(self.device).float()
# out = self.fc1(x)
# out = self.tanh(out)
# return out.cpu().detach().numpy()
# def forward(self, x):
# out = self.fc1(x)
# out = self.tanh(out)
# out = self.fc_last(out)
# out = self.sigmoid(out)
# return out
# def predict(self, x):
# x = torch.from_numpy(x).to(self.device).float()
# out = self.forward(x)
# out = torch.round(out)
# return out.cpu().detach().numpy()
# def predict_proba(self, x):
# x = torch.from_numpy(x).to(self.device).float()
# out = self.forward(x)
#
# out = torch.stack([1 - out, out], dim=1).squeeze()
# # print(out.cpu().detach().numpy().shape)
# return out.cpu().detach().numpy()
class SimpleNetMulti(SimpleNet):
def predict(self, x):
x = torch.from_numpy(x).to(self.device).float()
out = self.forward(x)
out = torch.argmax(out)
return out.cpu().detach().numpy()
def predict_proba(self, x):
x = torch.from_numpy(x).to(self.device).float()
out = self.forward(x)
return out.cpu().detach().numpy()
class SimpleRegressionNet(nn.Module):
def __init__(self, input_size, hidden_size, num_classes, device=None):
super(SimpleRegressionNet, self).__init__()
self.fc1 = nn.Linear(input_size, hidden_size)
self.tanh = nn.Tanh()
self.fc2 = nn.Linear(hidden_size, num_classes)
if not device:
self.device = torch.device("cuda")
else:
self.device = device
def extract_embed(self, x):
out = self.fc1(x)
out = self.tanh(out)
return out
def forward(self, x):
out = self.fc1(x)
out = self.tanh(out)
out = self.fc2(out)
return out
def predict(self, x):
x = torch.from_numpy(x).to(self.device).float()
out = self.forward(x)
return out.cpu().detach().numpy()
def predict_proba(self, x):
if isinstance(x, np.ndarray):
is_numpy = True
else:
is_numpy = False
if is_numpy:
x = torch.from_numpy(x).to(self.device).float()
out = self.forward(x)
out = torch.stack([out, 20-out], dim=1).squeeze()
# print(out.cpu().detach().numpy().shape)
if is_numpy:
return out.cpu().detach().numpy()
else:
return out
def extract_embed(model, X, device_name=None):
    if not device_name:
        device = torch.device("cuda")
    else:
        device = torch.device(device_name)
X_torch = torch.from_numpy(X).to(device).float()
output = model.extract_embed(X_torch)
return output.cpu().detach().numpy()
def validation(model, test_loader, device, one_hot=True, regression=False):
mean_loss = []
mean_acc = []
model.eval()
if regression:
one_hot = False
criterion = nn.MSELoss()
else:
criterion = nn.BCELoss()
for i, (x_batch, y_batch) in enumerate(test_loader):
x_batch = x_batch.to(device).float()
y_batch = y_batch.to(device).float()
if one_hot:
y_batch = y_batch.long()
y_pred_batch = model(x_batch).squeeze()
loss = criterion(y_pred_batch, y_batch)
diff_np = torch.abs(y_pred_batch - y_batch).cpu().detach().numpy()
loss_np = loss.cpu().detach().numpy()
y_batch_np = y_batch.cpu().detach().numpy()
y_pred_batch_np = y_pred_batch.cpu().detach().numpy()
acc = np.mean(np.round(y_pred_batch_np) == y_batch_np)
mean_loss.append(loss_np)
mean_acc.append(acc)
# print('test', y_pred_batch, y_batch, loss_np, acc)
mean_loss = np.mean(mean_loss)
mean_acc = np.mean(mean_acc)
return mean_loss, mean_acc, diff_np
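# A minimal training-loop sketch tying the pieces above together (assumed
# data: X_train/y_train and X_test/y_test are float32 numpy arrays for binary
# classification; the hyperparameters are illustrative, not taken from the
# original experiments).
def train_simple_net(X_train, y_train, X_test, y_test, epochs=50, batch_size=64):
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model = SimpleNet(X_train.shape[1], 100, 1, device=device).to(device)
    optimizer = optim.Adam(model.parameters(), lr=1e-3)
    criterion = nn.BCELoss()
    train_loader = Data.DataLoader(
        VanillaDataset(X_train, y_train, to_tensor=True), batch_size=batch_size, shuffle=True)
    test_loader = Data.DataLoader(
        VanillaDataset(X_test, y_test, to_tensor=True), batch_size=batch_size)
    for epoch in range(epochs):
        model.train()
        for x_batch, y_batch in train_loader:
            x_batch = x_batch.to(device).float()
            y_batch = y_batch.to(device).float()
            optimizer.zero_grad()
            loss = criterion(model(x_batch).squeeze(), y_batch)
            loss.backward()
            optimizer.step()
        # evaluate with the validation helper defined above
        test_loss, test_acc, _ = validation(model, test_loader, device, one_hot=False)
        print(epoch, test_loss, test_acc)
    return model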
def pgd_attack(
model,
images,
labels,
xl,
xu,
encoded_fields,
labels_used,
customized_constraints,
standardize,
prev_X=[],
base_ind=0,
unique_coeff=None,
mask=None,
param_for_recover_and_decode=None,
device_name=None,
eps=1.01,
adv_conf_th=0,
attack_stop_conf=1,
alpha=1 / 255,
iters=255,
max_projections_steps=3,
associated_clf_id=[],
X_test_pgd_ori=[],
consider_uniqueness=False
):
if len(associated_clf_id) > 0:
print(len(model))
print(associated_clf_id)
multiple_models = True
else:
multiple_models = False
if not device_name:
device = torch.device("cuda")
else:
device = torch.device(device_name)
n = len(images)
encoded_fields_len = int(np.sum(encoded_fields))
images_all = torch.from_numpy(images).to(device).float()
labels_all = torch.from_numpy(labels).to(device).float()
ori_images_all = torch.clone(images_all)
xl = torch.from_numpy(xl).to(device).float()
xu = torch.from_numpy(xu).to(device).float()
loss = nn.BCELoss()
new_images_all = []
new_outputs_all = []
prev_x_all = []
initial_outputs_all = []
if consider_uniqueness:
(
X_removed,
kept_fields,
removed_fields,
enc,
inds_to_encode,
inds_non_encode,
encoded_fields,
xl_ori,
xu_ori,
unique_bugs_len,
) = param_for_recover_and_decode
p, c, th = unique_coeff
if_low_conf_examples = np.zeros(n)
# we deal with images sequentially
for j in range(n):
images = torch.unsqueeze(images_all[j], 0)
labels = labels_all[j]
ori_images = torch.unsqueeze(ori_images_all[j], 0)
if encoded_fields_len > 0:
ori_images_encode = ori_images[:, :encoded_fields_len]
ori_images_non_encode = ori_images[:, encoded_fields_len:]
prev_outputs = torch.zeros(1).to(device).float()
prev_images = []
current_x = []
prev_x = []
max_violate_times = 10
violate_times = 0
if multiple_models:
model_id = associated_clf_id[j]
cur_model = model[model_id]
print("adv_conf_th", adv_conf_th)
if type(adv_conf_th) == type([]) and len(adv_conf_th) > 0:
cur_adv_conf_th = adv_conf_th[model_id]
print("cur_adv_conf_th 1", cur_adv_conf_th)
else:
cur_adv_conf_th = adv_conf_th
print("cur_adv_conf_th 2", cur_adv_conf_th)
if type(attack_stop_conf) == type([]) and len(attack_stop_conf) > 0:
cur_attack_stop_conf = attack_stop_conf[model_id]
else:
cur_attack_stop_conf = attack_stop_conf
print("model_id", model_id)
else:
cur_model = model
cur_adv_conf_th = adv_conf_th
cur_attack_stop_conf = attack_stop_conf
for i in range(iters):
images.requires_grad = True
outputs = cur_model(images).squeeze()
cur_model.zero_grad()
cost = loss(outputs, labels).to(device)
cost.backward()
outputs_np = outputs.squeeze().cpu().detach().numpy()
# print('\n'*2)
# print(i, outputs_np)
# print('\n'*2)
if i == 0:
initial_outputs_all.append(outputs_np)
print("\n", j, "initial outputs", outputs_np, "\n")
# check uniqueness of new x
distinct = True
if consider_uniqueness:
ind = base_ind + j
current_x = images.squeeze().cpu().detach().numpy()
current_x = customized_inverse_standardize(
np.array([current_x]), standardize, encoded_fields_len, True
)[0]
current_x = recover_fields_not_changing(
np.array([current_x]),
np.array(X_removed[ind]),
kept_fields,
removed_fields,
)[0]
current_x = decode_fields(
np.array([current_x]),
enc,
inds_to_encode,
inds_non_encode,
encoded_fields,
adv=True,
)[0]
if len(prev_x_all) > 0:
prev_X_and_prev_x_all = np.concatenate([prev_X, prev_x_all])
else:
prev_X_and_prev_x_all = prev_X
if len(X_test_pgd_ori) > 1 and j < n-1:
prev_X_and_prev_x_all = np.concatenate([prev_X_and_prev_x_all, X_test_pgd_ori[j+1:]])
remaining_inds = is_distinct_vectorized(
[current_x], prev_X_and_prev_x_all, mask, xl_ori, xu_ori, p, c, th, verbose=False
)
if len(remaining_inds) == 1:
distinct = True
else:
distinct = False
# if new x is close to previous X or forward prob not improving, break
cond1 = not distinct and i > 0
cond2 = (outputs - prev_outputs) < 1e-3
cond4 = (
i > 0 and prev_outputs.cpu().detach().numpy() >= cur_attack_stop_conf
)
# print('prev_outputs.cpu().detach().numpy()', prev_outputs.cpu().detach().numpy())
if cond1 or cond2 or cond4:
if cond1:
print("cond1 with step", i)
elif cond2:
print("cond2 with step", i)
elif cond4:
print("cond4 with step", i)
break
else:
# print('update x with the current one')
prev_images = torch.clone(images)
prev_outputs = torch.clone(outputs)
prev_x = current_x
if i == 0 and prev_outputs.cpu().detach().numpy() > cur_adv_conf_th:
print("cond3 with step", i)
if_low_conf_examples[j] = 1
print(
"num_of_high_conf_examples",
np.sum(if_low_conf_examples),
"/",
j + 1,
)
break
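            # PGD step: ascend along the sign of the input gradient, clip the
            # perturbation eta to the eps-ball, rescale it by the per-feature
            # range (xu - xl), and project back into the feasible box [xl, xu]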
adv_images = images + alpha * images.grad.sign()
# print('images.grad', images.grad.cpu().detach().numpy(), '\n'*2)
eta = adv_images - ori_images
# print('ori_images', ori_images.cpu().detach().numpy(), '\n'*2)
# print('\n'*2, 'eta', eta.cpu().detach().numpy(), '\n'*2)
# eta[:, :encoded_fields_len] = torch.clip(eta[:, :encoded_fields_len], min=-eps, max=eps)
# print('eps', eps)
# print('\n'*2, 'eta clipped', eta.cpu().detach().numpy(), '\n'*2)
eta = torch.clip(eta, min=-eps, max=eps)
# print('\n'*2, 'eta clipped 2', eta.cpu().detach().numpy(), '\n'*2)
# print('\n'*2, 'xl', xl.cpu().detach().numpy(), '\n'*2)
# print('\n'*2, 'xu', xu.cpu().detach().numpy(), '\n'*2)
eta = eta * (xu - xl)
# print('\n'*2, 'eta * (xu - xl)', eta.cpu().detach().numpy(), '\n'*2)
images = torch.max(torch.min(ori_images + eta, xu), xl).detach_()
one_hotezed_images_embed = torch.zeros(
[images.shape[0], encoded_fields_len]
)
s = 0
for field_len in encoded_fields:
max_inds = torch.argmax(images[:, s : s + field_len], axis=1)
one_hotezed_images_embed[
torch.arange(images.shape[0]), s + max_inds
] = 1
# print(images.cpu().detach().numpy())
# print(field_len, max_inds.cpu().detach().numpy())
# print(one_hotezed_images_embed.cpu().detach().numpy())
s += field_len
images[:, :encoded_fields_len] = one_hotezed_images_embed
images_non_encode = images[:, encoded_fields_len:]
images_delta_non_encode = images_non_encode - ori_images_non_encode
xl_non_encode_np = xl[encoded_fields_len:].squeeze().cpu().numpy()
xu_non_encode_np = xu[encoded_fields_len:].squeeze().cpu().numpy()
# keep checking violation, exit only when satisfying
ever_violate = False
images_non_encode_np = images_non_encode.squeeze().cpu().numpy()
ori_images_non_encode_np = ori_images_non_encode.squeeze().cpu().numpy()
images_delta_non_encode_np = images_delta_non_encode.squeeze().cpu().numpy()
satisfy_constraints = False
for k in range(max_projections_steps):
# print('images_non_encode_np', images_non_encode_np.shape)
images_non_encode_np_inv_std = customized_inverse_standardize(
np.array([images_non_encode_np]),
standardize,
encoded_fields_len,
False,
)[0]
if_violate, [
violated_constraints,
involved_labels,
] = if_violate_constraints(
images_non_encode_np_inv_std,
customized_constraints,
labels_used,
verbose=False,
)
# if violate, pick violated constraints, project perturbation back to linear constraints via LR
if if_violate:
ever_violate = True
# print(len(images_delta_non_encode_np), m)
# print(images_delta_non_encode_np)
images_delta_non_encode_np_inv_std = customized_inverse_standardize(
np.array([images_delta_non_encode_np]),
standardize,
encoded_fields_len,
False,
True,
)
new_images_delta_non_encode_np_inv_std = project_into_constraints(
images_delta_non_encode_np_inv_std[0],
violated_constraints,
labels_used,
involved_labels,
device=device
)
# print(ori_images.squeeze().cpu().numpy())
# print(images_delta_non_encode_np_inv_std[0])
# print(new_images_delta_non_encode_np_inv_std)
else:
satisfy_constraints = True
break
new_images_delta_non_encode_np = customized_standardize(
np.array([new_images_delta_non_encode_np_inv_std]),
standardize,
encoded_fields_len,
False,
True,
)[0]
# print(new_images_delta_non_encode_np.shape, new_images_delta_non_encode_np.shape)
images_non_encode_np = (
ori_images_non_encode_np + new_images_delta_non_encode_np
)
# print('-- check violation before clip')
# images_non_encode_np_inv_std_tmp = customized_inverse_standardize(np.array([images_non_encode_np]), standardize, m, False)[0]
# _, _ = if_violate_constraints(images_non_encode_np_inv_std_tmp, customized_constraints, labels_used, verbose=True)
# print(images_non_encode_np_inv_std_tmp)
# print('++ check violation before clip')
eta = np.clip(
images_non_encode_np - ori_images_non_encode_np, -eps, eps
)
# eta *= (1/(violate_times+1))
images_non_encode_np = np.maximum(
np.minimum(ori_images_non_encode_np + eta, xu_non_encode_np),
xl_non_encode_np,
)
# print('-- check violation after clip')
images_non_encode_np_inv_std_tmp = customized_inverse_standardize(
np.array([images_non_encode_np]),
standardize,
encoded_fields_len,
False,
)[0]
if_violate_after_clip, _ = if_violate_constraints(
images_non_encode_np_inv_std_tmp,
customized_constraints,
labels_used,
verbose=False,
)
# print(images_non_encode_np_inv_std_tmp)
# print('++ check violation after clip')
if if_violate_after_clip:
satisfy_constraints = False
# print(ori_images_non_encode_np)
# print(new_images_delta_non_encode_np)
# print(images_non_encode_np)
# ori_images_non_encode_np_inv_std = customized_inverse_standardize(np.array([ori_images_non_encode_np]), standardize, m, False)[0]
# images_non_encode_np_inv_std = customized_inverse_standardize(np.array([images_non_encode_np]), standardize, m, False)[0]
# print(standardize.mean_, standardize.scale_)
# print(ori_images_non_encode_np_inv_std)
# print(new_images_delta_non_encode_np_inv_std)
# print(images_non_encode_np_inv_std)
if not satisfy_constraints or violate_times > max_violate_times:
break
if ever_violate:
violate_times += 1
# print('ever_violate', ever_violate, m)
images_non_encode = torch.from_numpy(images_non_encode_np).to(device)
images[:, encoded_fields_len:] = images_non_encode
# if i == iters - 1:
# print('iter', i, ':', 'cost :', cost.cpu().detach().numpy(), 'outputs :', outputs.cpu().detach().numpy())
print(
"\n", "final outputs", prev_outputs.squeeze().cpu().detach().numpy(), "\n"
)
if len(prev_images) > 0:
new_images_all.append(prev_images.squeeze().cpu().detach().numpy())
new_outputs_all.append(prev_outputs.squeeze().cpu().detach().numpy())
prev_x_all.append(prev_x)
print("\n" * 2)
print("num_of_high_conf_examples", | np.sum(if_low_conf_examples) | numpy.sum |
import glob
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
from PIL import Image
import time
from scipy.stats import norm
SSD_GRAPH_FILE = 'frozen_models/frozen_sim_mobile/frozen_inference_graph.pb'
tl = {'1': 'Green', '2': 'Red', '3': 'Yellow' , '4' : 'OFF' }
class inference():
def init(self):
"""Loads a frozen inference graph"""
self.confidence_cutoff = None
self.graph= None
self.image_tensor = None
self.detection_boxes = None
self.detection_scores = None
self.detection_classes = None
def init2(self, graph_file= SSD_GRAPH_FILE):
self.graph = self.load_graph(graph_file)
self.init_tensors()
self.confidence_cutoff = 0.8
    def init_tensors(self):
        # only load the default graph if none is set yet; reloading here
        # unconditionally would silently discard a custom graph_file
        # passed to init2()
        if self.graph is None:
            self.graph = self.load_graph()
        self.image_tensor = self.graph.get_tensor_by_name('image_tensor:0')
self.detection_boxes = self.graph.get_tensor_by_name('detection_boxes:0')
self.detection_scores = self.graph.get_tensor_by_name('detection_scores:0')
# The classification of the object (integer id).
self.detection_classes = self.graph.get_tensor_by_name('detection_classes:0')
def load_graph(self, graph_file = SSD_GRAPH_FILE):
"""Loads a frozen inference graph"""
graph = tf.Graph()
with graph.as_default():
od_graph_def = tf.GraphDef()
with tf.gfile.GFile(graph_file, 'rb') as fid:
serialized_graph = fid.read()
od_graph_def.ParseFromString(serialized_graph)
tf.import_graph_def(od_graph_def, name='')
return graph
def filter_boxes(self, min_score, boxes, scores, classes):
"""Return boxes with a confidence >= `min_score`"""
n = len(classes)
idxs = []
for i in range(n):
if scores[i] >= min_score:
idxs.append(i)
filtered_boxes = boxes[idxs, ...]
filtered_scores = scores[idxs, ...]
filtered_classes = classes[idxs, ...]
return filtered_boxes, filtered_scores, filtered_classes
def to_image_coords(self, boxes, height, width):
"""
The original box coordinate output is normalized, i.e [0, 1].
This converts it back to the original coordinate based on the image
size.
"""
        box_coords = np.zeros_like(boxes)
        box_coords[:, 0] = boxes[:, 0] * height
        box_coords[:, 1] = boxes[:, 1] * width
        box_coords[:, 2] = boxes[:, 2] * height
        box_coords[:, 3] = boxes[:, 3] * width
        return box_coords
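# Hedged usage sketch (an assumption, not part of the original file; `det` is
# an `inference` instance after init()/init2()): run one RGB image through
# the frozen graph with a TF1 session, keep confident detections, and convert
# their boxes to pixel coordinates.
def detect_traffic_lights(det, image_np):
    with tf.Session(graph=det.graph) as sess:
        boxes, scores, classes = sess.run(
            [det.detection_boxes, det.detection_scores, det.detection_classes],
            feed_dict={det.image_tensor: np.expand_dims(image_np, 0)})
    boxes, scores, classes = det.filter_boxes(
        det.confidence_cutoff, boxes[0], scores[0], classes[0])
    height, width = image_np.shape[:2]
    return det.to_image_coords(boxes, height, width), scores, classes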
from bayes_filter.filters import FreeTransitionSAEM
import tensorflow as tf
import tensorflow_probability as tfp
import os
from bayes_filter.misc import load_array_file
from bayes_filter import float_type
import sys
from bayes_filter.feeds import IndexFeed,TimeFeed,CoordinateFeed, DataFeed, init_feed, ContinueFeed
from bayes_filter.coord_transforms import tf_coord_transform, itrs_to_enu_with_references
from bayes_filter.kernels import DTECIsotropicTimeGeneralODE, DTECIsotropicTimeGeneral
import astropy.time as at
import astropy.coordinates as ac
import astropy.units as au
from bayes_filter.frames import ENU
import numpy as np
import pylab as plt
from scipy.spatial import cKDTree
import seaborn as sns
from timeit import default_timer
from bayes_filter.settings import angle_type, dist_type
def arrays():
return os.path.dirname(sys.modules["bayes_filter"].__file__)
def lofar_array(arrays):
lofar_array = os.path.join(arrays, 'arrays/lofar.hba.antenna.cfg')
return load_array_file(lofar_array)
def lofar_array2(arrays):
lofar_array = os.path.join(arrays, 'arrays/lofar.hba.antenna.cfg')
res = load_array_file(lofar_array)
return res[0][[0,48,49,50, 51]], res[1][[0,48,49,50,51],:]
def simulated_ddtec(tf_session, lofar_array):
class Simulated:
def __init__(self):
ref_ant = lofar_array[1][0,:]
Nt, Nd, Na, Nf = 1, 20, len(lofar_array[0])-1, 6
with tf_session.graph.as_default():
index_feed = IndexFeed(Nt)
obstime_init = at.Time("2018-01-01T00:00:00.000", format='isot')
times = obstime_init.mjd*86400. + tf.cast(tf.linspace(0., Nt*30., Nt)[:, None],float_type)
time_feed = TimeFeed(index_feed, times)
cont_feed = ContinueFeed(time_feed)
enu = ENU(location=ac.ITRS(*ref_ant * au.m), obstime=obstime_init)
up = ac.SkyCoord(east=0., north=0., up=1., frame=enu).transform_to('icrs')
M = 20
self.M = M
ra_vec = np.linspace(up.ra.rad - 2. * np.pi / 180., up.ra.rad + 0. * np.pi / 180., M)
dec_vec = np.linspace(up.dec.rad - 2. * np.pi / 180., up.dec.rad + 2. * np.pi / 180., M)
ra, dec = np.meshgrid(ra_vec, dec_vec, indexing='ij')
ra = ra.flatten()[:, None]
dec = dec.flatten()[:, None]
Nd = ra.shape[0]
Xd = tf.concat([ra, dec], axis=1)
Xa = tf.constant(lofar_array[1][1:,:], dtype=float_type)
coord_feed = CoordinateFeed(time_feed, Xd, Xa,
coord_map=tf_coord_transform(itrs_to_enu_with_references(ref_ant, [up.ra.rad, up.dec.rad], ref_ant)))
ra_vec = np.linspace(up.ra.rad - 2. * np.pi / 180., up.ra.rad + 2. * np.pi / 180., M)
dec_vec = np.linspace(up.dec.rad - 2. * np.pi / 180., up.dec.rad + 2. * np.pi / 180., M)
ra, dec = np.meshgrid(ra_vec, dec_vec, indexing='ij')
ra = ra.flatten()[:, None]
dec = dec.flatten()[:, None]
Nd_screen = ra.shape[0]
Xd_screen = tf.concat([ra, dec], axis=1)
star_coord_feed = CoordinateFeed(time_feed, Xd_screen, Xa,
coord_map=tf_coord_transform(itrs_to_enu_with_references(ref_ant, [up.ra.rad, up.dec.rad], ref_ant)))
init, next = init_feed(coord_feed)
init_star, next_star = init_feed(star_coord_feed)
init_cont, cont = init_feed(cont_feed)
Xd_screen, Xd, _,_,_ = tf_session.run([Xd_screen, Xd, init, init_cont, init_star])
kern = DTECIsotropicTimeGeneral(variance=1e-4,timescale=45.,lengthscales=5., a=500., b=60.,
fed_kernel='RBF',obs_type='DDTEC', squeeze=True, kernel_params={'resolution':3})
# kern = tfp.positive_semidefinite_kernels.ExponentiatedQuadratic(tf.convert_to_tensor(0.04,float_type), tf.convert_to_tensor(10.,float_type))
self.slice_size = Nt * Xd_screen.shape[0] * Xa.shape[0] + Nt * Xd.shape[0] * Xa.shape[0]
kd = cKDTree(Xd)
self.nearest, idx = kd.query(Xd_screen, k=1)
self.nearest *= 180./np.pi
from timeit import default_timer
t0 = default_timer()
Y_real, Y_imag = [],[]
Y_real_star, Y_imag_star = [], []
ddtec_true, ddtec_star = [],[]
while True:
K,N = tf_session.run([kern.K(tf.concat([next,next_star],axis=0)),tf.shape(next)[0]])
s = np.mean(np.diag(K))
L = np.sqrt(s)*np.linalg.cholesky(K/s+1e-6*np.eye(K.shape[-1]))
np.random.seed(0)
ddtec = np.einsum('ab,b->a',L, np.random.normal(size=L.shape[1]))
ddtec_true.append(ddtec[:N])
ddtec_star.append(ddtec[N:])
freqs = np.linspace(110.e6, 160.e6, Nf)
Y_real.append(np.cos(-8.448e9 * ddtec[:N,None]/freqs))
Y_imag.append(np.sin(-8.448e9 * ddtec[:N, None] / freqs))
Y_real_star.append(np.cos(-8.448e9 * ddtec[N:, None] / freqs))
Y_imag_star.append(np.sin(-8.448e9 * ddtec[N:, None] / freqs))
if not tf_session.run(cont):
break
self.Y_real_star = np.concatenate(Y_real_star,axis=0).reshape((Nt, Nd_screen, Na, Nf))
self.Y_imag_star = np.concatenate(Y_imag_star, axis=0).reshape((Nt, Nd_screen, Na, Nf))
Y_real_true = np.concatenate(Y_real,axis=0).reshape((Nt, Nd, Na, Nf))
Y_real = Y_real_true + 0.26*np.random.normal(size=Y_real_true.shape)
# Y_real[Nt//2:Nt//2 + 5, ...] *= 0.5
Y_imag_true = np.concatenate(Y_imag, axis=0).reshape((Nt, Nd, Na, Nf))
Y_imag = Y_imag_true + 0.26 * np.random.normal(size=Y_imag_true.shape)
# Y_imag[Nt // 2:Nt // 2 + 5, ...] *= 0.5
self.freqs = freqs
self.ddtec_true = np.concatenate(ddtec_true,axis=0).reshape((Nt, Nd, Na))
self.ddtec_star = np.concatenate(ddtec_star, axis=0).reshape((Nt, Nd_screen, Na))
self.Y_real = Y_real
self.Y_imag = Y_imag
self.Y_real_true = Y_real_true
self.Y_imag_true = Y_imag_true
# self.np_freqs = tf_session.run(freqs)
self.np_times = tf_session.run(times)
self.ddtec = ddtec
self.coord_feed = coord_feed
self.star_coord_feed = star_coord_feed
self.data_feed = DataFeed(index_feed, Y_real, Y_imag, event_size=1)
return Simulated()
if __name__ == '__main__':
from tensorflow.python import debug as tf_debug
sess = tf.Session(graph=tf.Graph())
# sess = tf_debug.LocalCLIDebugWrapperSession(sess)
with sess.graph.as_default():
simulated_ddtec = simulated_ddtec(sess, lofar_array2(arrays()))
free_transition = FreeTransitionSAEM(
simulated_ddtec.freqs,
simulated_ddtec.data_feed,
simulated_ddtec.coord_feed,
simulated_ddtec.star_coord_feed)
filtered_res, inits = free_transition.filter_step(
num_samples=2000, num_chains=2,parallel_iterations=10, num_leapfrog_steps=3,target_rate=0.6,
num_burnin_steps=1000,num_saem_samples=2000,saem_maxsteps=0,initial_stepsize=7e-3,
init_kern_params={'y_sigma':0.5,'variance':1e-4,'timescale':45.,'lengthscales':5., 'a':500., 'b':60.},
which_kernel=0, kernel_params={'resolution':3}, saem_batchsize=500,
slice_size=simulated_ddtec.slice_size)
sess.run(inits[0])
sess.run(inits[1])
sess.run(inits[2])
cont = True
while cont:
res = sess.run(filtered_res)
# print("post_logp", res.post_logp,"test_logp", res.test_logp)
print("rhat:",np.percentile(res.rhat,[10,50,90]), res.rhat)
plt.hist(res.rhat, bins = int(np.sqrt(len(res.rhat))))
plt.show()
# plt.plot(res.step_sizes)
# plt.show()
# plt.hist(res.ess.flatten(),bins=100)
# plt.show()
times = simulated_ddtec.np_times[:,0]
ddtec_true = simulated_ddtec.ddtec_true
ddtec_star = simulated_ddtec.ddtec_star
Y_real_star = simulated_ddtec.Y_real_star
Y_imag_star = simulated_ddtec.Y_imag_star
# plt.plot(times, res.Y_imag[1,:,0,1,0],c='black',lw=2.)
# plt.fill_between(times, res.Y_imag[0,:,0,1,0], res.Y_imag[2,:,0,1,0],alpha=0.5)
# plt.plot(times, res.extra.Y_imag_data[:, 0, 1, 0], c='red', lw=1.)
# plt.plot(times, simulated_ddtec.Y_imag_true[:, 0, 1, 0], c='green', lw=1.)
# plt.show()
vmin, vmax = np.percentile(res.dtec_star[1, ...], [5, 95])
plt.style.use('ggplot')
fig, axs = plt.subplots(1+(simulated_ddtec.Y_imag_true.shape[2]), 2, figsize=(8,4*(simulated_ddtec.Y_imag_true.shape[2])+4))
ax1,ax2 = axs[0]
ax1.imshow(res.dtec[1, 0, :, 1].reshape((simulated_ddtec.M,simulated_ddtec.M)),vmin=vmin,vmax=vmax)
ax1.set_title("Model space solution")
ax2.imshow(res.dtec[1, 0, :, 1].reshape((simulated_ddtec.M,simulated_ddtec.M)),vmin=vmin,vmax=vmax)
ax2.set_title("Data space solution")
ax2.legend()
for i in range(simulated_ddtec.Y_imag_true.shape[2]):
ax3,ax4 = axs[i+1]
ax3.imshow(res.dtec_star[1, 0, :, i].reshape((simulated_ddtec.M,simulated_ddtec.M)),vmin=vmin,vmax=vmax)
ax3.set_title("Model space solution*")
ax4.imshow((ddtec_star[0, :, i]).reshape((simulated_ddtec.M,simulated_ddtec.M)),vmin=vmin,vmax=vmax)
ax4.set_title("True model*")
plt.show()
error = np.sqrt(np.square(res.Y_imag_star[1, :, :, :, :]-simulated_ddtec.Y_imag_star[:, :, :, :]).mean(3).mean(2).mean(0))
plt.scatter(simulated_ddtec.nearest,error)
x = simulated_ddtec.nearest[:, None]
a, _, _, _ = np.linalg.lstsq(x, error)
plt.plot(x, a * x, 'r-')
plt.show()
error = np.sqrt(
np.square(res.Y_real_star[1, :, :, :, :] - simulated_ddtec.Y_real_star[:, :, :, :]).mean(3).mean(
2).mean(0))
plt.scatter(simulated_ddtec.nearest, error)
x = simulated_ddtec.nearest[:, None]
            a, _, _, _ = np.linalg.lstsq(x, error)
            plt.plot(x, a * x, 'r-')
            plt.show()
#! /usr/bin/env python3
import numpy as np
import argparse
from scipy.sparse.linalg import svds
from sklearn.metrics import adjusted_rand_score as ari
from scipy.sparse import coo_matrix
import dcsbm
## Parser to give parameter values
parser = argparse.ArgumentParser()
parser.add_argument("-M", type=int, dest="M", default=25, const=True, nargs="?",\
help="Integer: number of simulations, default M.")
parser.add_argument("-s", type=int, dest="s", default=171171, const=True, nargs="?",\
help="Integer: seed, default 171171.")
## Parse arguments
args = parser.parse_args()
#############################################################
## Reproduces results in Section 6.1 for bipartite DCScBMs ##
#############################################################
## Arguments
ns = [100, 200, 500, 1000, 2000]
ns_prime = [150, 300, 750, 1500, 3000]
M_sim = args.M
K = 2
K_prime = 3
m = 10
## Set maximum number of nodes
n = int(np.max(ns))
n_max = int(np.max(ns))
n_prime = int(np.max(ns_prime))
n_max_prime = int(np.max(ns_prime))
## Summary
print('Number of nodes:', str(n_max))
print('Number of communities:', str(K))
## Obtain maximum
def find_max(x):
qq = np.where(x == np.max(x))
qq0 = qq[0][0] + 1
qq1 = qq[1][0] + 2
return np.array([qq0, qq1])
## Set seed to repeat the simulation
np.random.seed(111)
## Community allocation cut points
q = np.array([int(x) for x in np.linspace(0,n,num=K,endpoint=False)])
q_prime = np.array([int(x) for x in np.linspace(0,n_prime,num=K_prime,endpoint=False)])
z = np.zeros(n,dtype=int)
z_prime = np.zeros(n_prime,dtype=int)
for k in range(K):
z[q[k]:] = k
for k in range(K_prime):
z_prime[q_prime[k]:] = k
## Randomly shuffle
np.random.seed(171171)
np.random.shuffle(z)
np.random.shuffle(z_prime)
## BICs and ARIs
bics = {}
aris = {}
bics_prime = {}
aris_prime = {}
for t in [None, 'normalised', 'theta']:
for s in range(M_sim):
for n in ns:
            bics[t,s,n] = np.zeros(shape=(5,5))
from __future__ import division
from __future__ import print_function
import numpy as np
import math
import random
import re
import os
import glob
import sys
from time import time
from helpers import evaluation
class MFBase(object):
'''Base class for methods based on matrix factorization
'''
def __init__(self, reg=0.0025, learning_rate=0.05, annealing=1., init_sigma=1):
self.name = 'Base for matrix factorization'
self.reg = reg
self.learning_rate = learning_rate # self.learning_rate will change due to annealing.
self.init_learning_rate = learning_rate # self.init_learning_rate keeps the original value (for filename)
self.annealing_rate = annealing
self.init_sigma = init_sigma
self.max_length = np.inf # For compatibility with the RNNs
self.metrics = {'recall': {'direction': 1},
'sps': {'direction': 1},
'user_coverage': {'direction': 1},
'item_coverage': {'direction': 1},
'ndcg': {'direction': 1},
'blockbuster_share': {'direction': -1}
}
def prepare_model(self, dataset):
'''Must be called before using train, load or top_k_recommendations
'''
self.dataset = dataset
self.n_items = dataset.n_items
self.n_users = dataset.n_users
# print('self.dataset:', self.dataset)
# print('self.n_items:', self.n_items)
# print('self.n_users:', self.n_users)
def change_data_format(self, dataset):
'''Gets a generator of data in the sequence format and save data in the csr format
'''
self.users = np.zeros((self.n_users, 2), dtype=np.int32)
self.items = np.zeros(dataset.training_set.n_interactions, dtype=np.int32)
# print('self.users:', self.users.shape, self.users)
# print('self.items:', self.items.shape, self.items)
# print('dataset.training_set.filename:', dataset.training_set.filename)
cursor = 0
with open(dataset.training_set.filename, 'r') as f:
for sequence in f:
sequence = sequence.split()
# print('sequence:', sequence)
# print('sequence[1::3]:', sequence[1::3])
                items = list(map(int, sequence[1::3]))
self.users[int(sequence[0]), :] = [cursor, len(items)]
self.items[cursor:cursor + len(items)] = items
cursor += len(items)
def get_pareto_front(self, metrics, metrics_names):
costs = np.zeros((len(metrics[metrics_names[0]]), len(metrics_names)))
for i, m in enumerate(metrics_names):
costs[:, i] = np.array(metrics[m]) * self.metrics[m]['direction']
        is_efficient = np.ones(costs.shape[0], dtype=bool)
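        # A standard non-dominated (Pareto) filter for maximization, added as
        # an illustrative completion (an assumption, not necessarily the
        # original logic of this method).
        for i, c in enumerate(costs):
            if is_efficient[i]:
                # keep points with at least one metric strictly better than c
                is_efficient[is_efficient] = np.any(costs[is_efficient] > c,
                                                    axis=1)
                is_efficient[i] = True  # keep c itself
        return is_efficient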
# -*- coding: utf-8 -*-
"""
Unit tests for the spike_train_correlation module.
:copyright: Copyright 2014 by the Elephant team, see AUTHORS.txt.
:license: Modified BSD, see LICENSE.txt for details.
"""
import unittest
import numpy as np
from numpy.testing.utils import assert_array_equal
import quantities as pq
import neo
import elephant.conversion as conv
import elephant.spike_train_correlation as sc
class corrcoeff_TestCase(unittest.TestCase):
def setUp(self):
# These two arrays must be such that they do not have coincidences
# spanning across two neighbor bins assuming ms bins [0,1),[1,2),...
self.test_array_1d_0 = [
1.3, 7.56, 15.87, 28.23, 30.9, 34.2, 38.2, 43.2]
self.test_array_1d_1 = [
1.02, 2.71, 18.82, 28.46, 28.79, 43.6]
# Build spike trains
self.st_0 = neo.SpikeTrain(
self.test_array_1d_0, units='ms', t_stop=50.)
self.st_1 = neo.SpikeTrain(
self.test_array_1d_1, units='ms', t_stop=50.)
# And binned counterparts
self.binned_st = conv.BinnedSpikeTrain(
[self.st_0, self.st_1], t_start=0 * pq.ms, t_stop=50. * pq.ms,
binsize=1 * pq.ms)
def test_corrcoef_binned(self):
'''
Test result of a correlation coefficient between two binned spike
trains.
'''
# Calculate clipped and unclipped
res_clipped = sc.corrcoef(
self.binned_st, binary=True)
res_unclipped = sc.corrcoef(
self.binned_st, binary=False)
# Check dimensions
self.assertEqual(len(res_clipped), 2)
self.assertEqual(len(res_unclipped), 2)
# Check result unclipped against result calculated from scratch for
# the off-diagonal element
mat = self.binned_st.to_array()
mean_0 = np.mean(mat[0])
mean_1 = np.mean(mat[1])
target_from_scratch = \
np.dot(mat[0] - mean_0, mat[1] - mean_1) / \
np.sqrt(
np.dot(mat[0] - mean_0, mat[0] - mean_0) *
np.dot(mat[1] - mean_1, mat[1] - mean_1))
# Check result unclipped against result calculated by numpy.corrcoef
target_numpy = np.corrcoef(mat)
self.assertAlmostEqual(target_from_scratch, target_numpy[0][1])
self.assertAlmostEqual(res_unclipped[0][1], target_from_scratch)
self.assertAlmostEqual(res_unclipped[1][0], target_from_scratch)
# Check result clipped against result calculated from scratch for
# the off-diagonal elemant
mat = self.binned_st.to_bool_array()
mean_0 = np.mean(mat[0])
mean_1 = np.mean(mat[1])
target_from_scratch = \
np.dot(mat[0] - mean_0, mat[1] - mean_1) / \
np.sqrt(
np.dot(mat[0] - mean_0, mat[0] - mean_0) *
np.dot(mat[1] - mean_1, mat[1] - mean_1))
# Check result unclipped against result calculated by numpy.corrcoef
target_numpy = np.corrcoef(mat)
self.assertAlmostEqual(target_from_scratch, target_numpy[0][1])
self.assertAlmostEqual(res_clipped[0][1], target_from_scratch)
self.assertAlmostEqual(res_clipped[1][0], target_from_scratch)
def test_corrcoef_binned_same_spiketrains(self):
'''
Test if the correlation coefficient between two identical binned spike
trains evaluates to a 2x2 matrix of ones.
'''
# Calculate correlation
binned_st = conv.BinnedSpikeTrain(
[self.st_0, self.st_0], t_start=0 * pq.ms, t_stop=50. * pq.ms,
binsize=1 * pq.ms)
target = sc.corrcoef(binned_st)
# Check dimensions
self.assertEqual(len(target), 2)
# Check result
        assert_array_equal(target, 1.)
import os
import copy
import math
import numpy as np
import gurobipy as gp
from gurobipy import GRB
def save_checkpoint(model, where):
try:
model_check_point = np.array([abs(var.x) for var in model.getVars()])
np.save(os.path.join("sol", model.ModelName), model_check_point)
except:
pass
class Model:
def __init__(self, model_name, input, output, problem_cnt):
print("Creating model: {}".format(model_name))
self.m = gp.Model(name=model_name)
self.problem = problem_cnt.split(".")[0]
self.data = copy.deepcopy(input)
self.L_cnt = len(self.data.sets.L)
self.N_cnt = len(self.data.sets.N)
self.M_cnt = max(self.data.sets.M)
self.T_cnt = self.data.parameters.T
self.output = copy.deepcopy(output)
def __cal_obj_numpy(self, x):
return (
np.max(np.max(x + self.data.parameters.D - 1, axis=1))
+ np.sum(
self.data.parameters.W * np.max(x + self.data.parameters.D - 1, axis=1)
)
* self.window
)
def __get_sol_result_params(self, path):
try:
saved_model_params = np.load(path)
x_saved = np.empty((self.N_cnt, self.M_cnt)).astype("int")
y_saved = np.zeros((self.N_cnt, self.M_cnt, self.T_cnt)).astype("int")
tmp_T_cnt = (
int(
(len(saved_model_params) - (1 + self.N_cnt))
/ self.N_cnt
/ self.M_cnt
)
- 1
)
npy_idx = 1 + self.N_cnt
for n in range(self.N_cnt):
for m in range(self.M_cnt):
x_saved[n][m] = saved_model_params[npy_idx]
npy_idx += 1
for n in range(self.N_cnt):
for m in range(self.M_cnt):
for t in range(tmp_T_cnt):
y_saved[n][m][t] = saved_model_params[npy_idx]
npy_idx += 1
return x_saved, y_saved
except:
return None, None
def gen_operations_order(self, problem_prefix):
x_saved, _ = self.__get_sol_result_params(
os.path.join("sol", "{}.sol.npy".format(problem_prefix))
)
results = []
for n in range(self.N_cnt):
for m in range(self.M_cnt):
if self.data.parameters.S[n][m]:
results.append(
[
n,
m,
x_saved[n][m],
self.data.parameters.S[n][m],
self.data.parameters.D[n][m],
[],
]
)
return results
def pre_solve(self, window, sort_num=1):
print("Running presolve...")
print("window = {}".format(window))
self.window = window
self.data.parameters.D = (
np.ceil(np.array(self.data.parameters.D) / window).astype("int").tolist()
)
"""
選所有 jobs 中 W 最大的
選沒被 block 且 S 夠的當中 D 最小的 operation
"""
dp = np.zeros((self.T_cnt, self.L_cnt)).astype("int")
        x = np.empty((self.N_cnt, self.M_cnt))
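        # Illustrative sketch of the greedy rule described in the docstring
        # (an assumption; only self.data.parameters.{W,S,D} are taken from
        # the code above): visit jobs in decreasing weight W, and within a
        # job try operations in increasing duration D.
        order = sorted(range(self.N_cnt),
                       key=lambda n: -self.data.parameters.W[n])
        for n in order:
            ops = [m for m in range(self.M_cnt)
                   if self.data.parameters.S[n][m]]
            ops.sort(key=lambda m: self.data.parameters.D[n][m])
            # placement of each operation against dp / resource capacities
            # is omitted in this sketch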
import cvxpy as cvx
import numpy as np
import proximal as px
from proximal.algorithms import admm, pc, hqs, ladmm, absorb_offset
from proximal.tests.base_test import BaseTest
class TestAlgs(BaseTest):
def test_admm(self):
"""Test ADMM algorithm.
"""
X = px.Variable((10, 5))
B = np.reshape(np.arange(50), (10, 5)) * 1.
prox_fns = [px.sum_squares(X, b=B)]
sltn = admm.solve(prox_fns, [], 1.0, eps_abs=1e-4, eps_rel=1e-4)
self.assertItemsAlmostEqual(X.value, B, places=2)
self.assertAlmostEqual(sltn, 0)
prox_fns = [px.norm1(X, b=B, beta=2)]
sltn = admm.solve(prox_fns, [], 1.0)
self.assertItemsAlmostEqual(X.value, B / 2., places=2)
self.assertAlmostEqual(sltn, 0)
prox_fns = [px.norm1(X), px.sum_squares(X, b=B)]
sltn = admm.solve(prox_fns, [], 1.0, eps_rel=1e-5, eps_abs=1e-5)
cvx_X = cvx.Variable(10, 5)
cost = cvx.sum_squares(cvx_X - B) + cvx.norm(cvx_X, 1)
prob = cvx.Problem(cvx.Minimize(cost))
prob.solve()
self.assertItemsAlmostEqual(X.value, cvx_X.value, places=2)
self.assertAlmostEqual(sltn, prob.value)
psi_fns, omega_fns = admm.partition(prox_fns)
sltn = admm.solve(psi_fns, omega_fns, 1.0, eps_rel=1e-5, eps_abs=1e-5)
self.assertItemsAlmostEqual(X.value, cvx_X.value, places=2)
self.assertAlmostEqual(sltn, prob.value)
prox_fns = [px.norm1(X)]
quad_funcs = [px.sum_squares(X, b=B)]
sltn = admm.solve(prox_fns, quad_funcs, 1.0, eps_rel=1e-5, eps_abs=1e-5)
self.assertItemsAlmostEqual(X.value, cvx_X.value, places=2)
self.assertAlmostEqual(sltn, prob.value)
# With parameters for px.sum_squares
prox_fns = [px.norm1(X)]
quad_funcs = [px.sum_squares(X, b=B, alpha=0.1, beta=2., gamma=1, c=B)]
sltn = admm.solve(prox_fns, quad_funcs, 1.0, eps_rel=1e-5, eps_abs=1e-5)
cvx_X = cvx.Variable(10, 5)
cost = 0.1 * cvx.sum_squares(2 * cvx_X - B) + cvx.sum_squares(cvx_X) + \
cvx.norm(cvx_X, 1) + cvx.trace(B.T * cvx_X)
prob = cvx.Problem(cvx.Minimize(cost))
prob.solve()
self.assertItemsAlmostEqual(X.value, cvx_X.value, places=2)
self.assertAlmostEqual(sltn, prob.value, places=3)
prox_fns = [px.norm1(X)]
quad_funcs = [px.sum_squares(X - B, alpha=0.1, beta=2., gamma=1, c=B)]
quad_funcs[0] = absorb_offset(quad_funcs[0])
sltn = admm.solve(prox_fns, quad_funcs, 1.0, eps_rel=1e-5, eps_abs=1e-5)
self.assertItemsAlmostEqual(X.value, cvx_X.value, places=2)
self.assertAlmostEqual(sltn, prob.value, places=3)
prox_fns = [px.norm1(X)]
cvx_X = cvx.Variable(10, 5)
# With linear operators.
kernel = np.array([1, 2, 3])
x = px.Variable(3)
b = np.array([-41, 413, 2])
prox_fns = [px.nonneg(x), px.sum_squares(px.conv(kernel, x), b=b)]
sltn = admm.solve(prox_fns, [], 1.0, eps_abs=1e-5, eps_rel=1e-5)
kernel_mat = np.matrix("2 1 3; 3 2 1; 1 3 2")
cvx_X = cvx.Variable(3)
cost = cvx.norm(kernel_mat * cvx_X - b)
prob = cvx.Problem(cvx.Minimize(cost), [cvx_X >= 0])
prob.solve()
self.assertItemsAlmostEqual(x.value, cvx_X.value, places=2)
self.assertAlmostEqual(np.sqrt(sltn), prob.value, places=2)
prox_fns = [px.nonneg(x)]
quad_funcs = [px.sum_squares(px.conv(kernel, x), b=b)]
sltn = admm.solve(prox_fns, quad_funcs, 1.0, eps_abs=1e-5, eps_rel=1e-5)
self.assertItemsAlmostEqual(x.value, cvx_X.value, places=2)
self.assertAlmostEqual(np.sqrt(sltn), prob.value, places=2)
def test_pock_chambolle(self):
"""Test pock chambolle algorithm.
"""
X = px.Variable((10, 5))
B = np.reshape(np.arange(50), (10, 5))
prox_fns = [px.sum_squares(X, b=B)]
sltn = pc.solve(prox_fns, [], 1.0, 1.0, 1.0, eps_rel=1e-5, eps_abs=1e-5)
self.assertItemsAlmostEqual(X.value, B, places=2)
self.assertAlmostEqual(sltn, 0)
prox_fns = [px.norm1(X, b=B, beta=2)]
sltn = pc.solve(prox_fns, [], 1.0, 1.0, 1.0, eps_rel=1e-5, eps_abs=1e-5)
self.assertItemsAlmostEqual(X.value, B / 2., places=2)
self.assertAlmostEqual(sltn, 0, places=2)
prox_fns = [px.norm1(X), px.sum_squares(X, b=B)]
sltn = pc.solve(prox_fns, [], 0.5, 1.0, 1.0, eps_rel=1e-5, eps_abs=1e-5)
cvx_X = cvx.Variable(10, 5)
cost = cvx.sum_squares(cvx_X - B) + cvx.norm(cvx_X, 1)
prob = cvx.Problem(cvx.Minimize(cost))
prob.solve()
self.assertItemsAlmostEqual(X.value, cvx_X.value, places=2)
self.assertAlmostEqual(sltn, prob.value)
psi_fns, omega_fns = pc.partition(prox_fns)
sltn = pc.solve(psi_fns, omega_fns, 0.5, 1.0, 1.0,
eps_abs=1e-5, eps_rel=1e-5)
self.assertItemsAlmostEqual(X.value, cvx_X.value, places=2)
self.assertAlmostEqual(sltn, prob.value)
# With linear operators.
kernel = np.array([1, 2, 3])
        kernel_mat = np.matrix("2 1 3; 3 2 1; 1 3 2")
from cho_util.math.common import *
import numpy as np
def from_matrix(x, out=None):
x = np.asarray(x)
if out is None:
out = np.empty(shape=np.shape(x)[:-2] + (4,))
m00, m01, m02 = [x[..., 0, i] for i in range(3)]
m10, m11, m12 = [x[..., 1, i] for i in range(3)]
m20, m21, m22 = [x[..., 2, i] for i in range(3)]
np.subtract(m21, m12, out=out[..., 0])
np.subtract(m02, m20, out=out[..., 1])
np.subtract(m10, m01, out=out[..., 2])
out[..., :3] = uvec(out[..., :3])
out[..., 3] = np.arccos(np.clip((m00 + m11 + m22 - 1)*0.5, -1.0, 1.0))
return out
def from_quaternion(x, out=None):
x = np.asarray(x)
if out is None:
out = np.empty(shape=np.shape(x)[:-1] + (4,))
qw = x[..., 3:]
out[..., :3] = uvec(x[..., :3])
out[..., 3:] = 2 * np.arccos(np.clip(qw, -1.0, 1.0))
return out
def from_euler(x, out=None):
x = np.asarray(x)
if out is None:
out = np.empty(shape=np.shape(x)[:-1] + (4,))
x, y, z = [x[..., i] for i in range(3)]
x0 = np.cos(x)
x6 = np.sin(x)
x3 = np.cos(y)
x4 = np.sin(y)
x5 = np.cos(z)
x1 = np.sin(z)
x2 = x0*x1
x10 = x1*x6
x7 = x5*x6
x11 = x0*x5
x8 = x1*x3 + x2 - x4*x7
x9 = -x2*x4 + x3*x6 + x7
x12 = x10 + x11*x4 + x4
out[..., 0] = x9
out[..., 1] = x12
out[..., 2] = x8
out[..., :3] = uvec(out[..., :3])
out[..., 3] = np.arccos(
np.clip(0.5 * (x0*x3 + x10*x4 + x11 + x3*x5 - 1), -1.0, 1.0))
return out
def from_axis_angle(x, out=None):
x = np.asarray(x)
if out is None:
out = np.empty(shape=np.shape(x)[:-1] + (4,))
np.copyto(out, x)
return out
def rotate(r, x, out=None):
    """Rotate vectors x by the axis-angle rotation r (Rodrigues' formula)."""
    x = np.asarray(x)
    r = np.asarray(r)
    axis, angle = r[..., :3], r[..., 3:]
    c, s = np.cos(angle), np.sin(angle)
    if out is None:
        out = np.empty(shape=np.shape(x))
    out[...] = (x * c + np.cross(axis, x) * s
                + axis * np.sum(axis * x, axis=-1, keepdims=True) * (1.0 - c))
    return out
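# Quick sanity check for rotate() (illustrative, not part of the original
# module): a quarter turn about the z-axis maps the x-axis to the y-axis.
if __name__ == '__main__':
    r = np.array([0.0, 0.0, 1.0, np.pi / 2.0])  # unit axis (0, 0, 1), angle pi/2
    print(rotate(r, np.array([1.0, 0.0, 0.0])))  # approximately [0. 1. 0.]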
#! -*- coding:utf-8 -*-
# Triple (SPO) extraction task: a TPLinker-style design based on GlobalPointer
# Write-up: https://kexue.fm/archives/8888
# Dataset: http://ai.baidu.com/broad/download?dataset=sked
import json
from bert4torch.layers import GlobalPointer
from bert4torch.tokenizers import Tokenizer
from bert4torch.models import build_transformer_model, BaseModel
from bert4torch.snippets import sequence_padding, Callback, ListDataset
from bert4torch.losses import SparseMultilabelCategoricalCrossentropy
from tqdm import tqdm
import torch
import torch.nn as nn
from torch.utils.data import DataLoader
import torch.optim as optim
import numpy as np
maxlen = 128
batch_size = 24
config_path = 'F:/Projects/pretrain_ckpt/bert/[google_tf_base]--chinese_L-12_H-768_A-12/bert_config.json'
checkpoint_path = 'F:/Projects/pretrain_ckpt/bert/[google_tf_base]--chinese_L-12_H-768_A-12/pytorch_model.bin'
dict_path = 'F:/Projects/pretrain_ckpt/bert/[google_tf_base]--chinese_L-12_H-768_A-12/vocab.txt'
device = 'cuda' if torch.cuda.is_available() else 'cpu'
# Load the label dictionary
predicate2id, id2predicate = {}, {}
with open('F:/Projects/data/corpus/relation_extraction/BD_Knowledge_Extraction/all_50_schemas', encoding='utf-8') as f:
for l in f:
l = json.loads(l)
if l['predicate'] not in predicate2id:
id2predicate[len(predicate2id)] = l['predicate']
predicate2id[l['predicate']] = len(predicate2id)
# Build the tokenizer
tokenizer = Tokenizer(dict_path, do_lower_case=True)
# Load the dataset
class MyDataset(ListDataset):
@staticmethod
def load_data(filename):
"""加载数据
单条格式:{'text': text, 'spo_list': [(s, p, o)]}
"""
D = []
with open(filename, encoding='utf-8') as f:
for l in f:
l = json.loads(l)
D.append({'text': l['text'],
'spo_list': [(spo['subject'], spo['predicate'], spo['object']) for spo in l['spo_list']]})
return D
def collate_fn(batch):
def search(pattern, sequence):
"""从sequence中寻找子串pattern
如果找到,返回第一个下标;否则返回-1。
"""
n = len(pattern)
for i in range(len(sequence)):
if sequence[i:i + n] == pattern:
return i
return -1
batch_token_ids, batch_segment_ids = [], []
batch_entity_labels, batch_head_labels, batch_tail_labels = [], [], []
for d in batch:
token_ids, segment_ids = tokenizer.encode(d['text'], maxlen=maxlen)
        # Organize the triples {s: [(o, p)]}
spoes = set()
for s, p, o in d['spo_list']:
s = tokenizer.encode(s)[0][1:-1]
p = predicate2id[p]
o = tokenizer.encode(o)[0][1:-1]
sh = search(s, token_ids)
oh = search(o, token_ids)
if sh != -1 and oh != -1:
spoes.add((sh, sh + len(s) - 1, p, oh, oh + len(o) - 1))
        # Build the labels
entity_labels = [set() for _ in range(2)]
head_labels = [set() for _ in range(len(predicate2id))]
tail_labels = [set() for _ in range(len(predicate2id))]
for sh, st, p, oh, ot in spoes:
entity_labels[0].add((sh, st))
entity_labels[1].add((oh, ot))
head_labels[p].add((sh, oh))
tail_labels[p].add((st, ot))
for label in entity_labels + head_labels + tail_labels:
            if not label:  # each label slot needs at least one entry
                label.add((0, 0))  # pad with (0, 0) if empty
        entity_labels = sequence_padding([list(l) for l in entity_labels])  # [subject/object=2, num entities, entity start/end]
        head_labels = sequence_padding([list(l) for l in head_labels])  # [num relations, num subject/object pairs per relation, subject/object starts]
        tail_labels = sequence_padding([list(l) for l in tail_labels])  # [num relations, num subject/object pairs per relation, subject/object ends]
        # Build the batch
batch_token_ids.append(token_ids)
batch_segment_ids.append(segment_ids)
batch_entity_labels.append(entity_labels)
batch_head_labels.append(head_labels)
batch_tail_labels.append(tail_labels)
batch_token_ids = torch.tensor(sequence_padding(batch_token_ids), dtype=torch.long, device=device)
batch_segment_ids = torch.tensor(sequence_padding(batch_segment_ids), dtype=torch.long, device=device)
    # batch_entity_labels: [btz, subject/object=2, num entities, entity start/end]
    # batch_head_labels: [btz, num relations, num subject/object pairs per relation, subject/object starts]
    # batch_tail_labels: [btz, num relations, num subject/object pairs per relation, subject/object ends]
batch_entity_labels = torch.tensor(sequence_padding(batch_entity_labels, seq_dims=2), dtype=torch.float, device=device)
batch_head_labels = torch.tensor(sequence_padding(batch_head_labels, seq_dims=2), dtype=torch.float, device=device)
batch_tail_labels = torch.tensor(sequence_padding(batch_tail_labels, seq_dims=2), dtype=torch.float, device=device)
return [batch_token_ids, batch_segment_ids], [batch_entity_labels, batch_head_labels, batch_tail_labels]
train_dataloader = DataLoader(MyDataset('F:/Projects/data/corpus/relation_extraction/BD_Knowledge_Extraction/train_data.json'),
batch_size=batch_size, shuffle=True, collate_fn=collate_fn)
valid_dataset = MyDataset('F:/Projects/data/corpus/relation_extraction/BD_Knowledge_Extraction/dev_data.json')
valid_dataloader = DataLoader(valid_dataset, batch_size=batch_size, collate_fn=collate_fn)
# Define the model structure on top of BERT
class Model(BaseModel):
def __init__(self) -> None:
super().__init__()
self.bert = build_transformer_model(config_path, checkpoint_path)
self.entity_output = GlobalPointer(hidden_size=768, heads=2, head_size=64)
self.head_output = GlobalPointer(hidden_size=768, heads=len(predicate2id), head_size=64, RoPE=False, tril_mask=False)
self.tail_output = GlobalPointer(hidden_size=768, heads=len(predicate2id), head_size=64, RoPE=False, tril_mask=False)
def forward(self, inputs):
hidden_states = self.bert(inputs) # [btz, seq_len, hdsz]
mask = inputs[0].gt(0).long()
entity_output = self.entity_output(hidden_states, mask) # [btz, heads, seq_len, seq_len]
head_output = self.head_output(hidden_states, mask) # [btz, heads, seq_len, seq_len]
tail_output = self.tail_output(hidden_states, mask) # [btz, heads, seq_len, seq_len]
return entity_output, head_output, tail_output
model = Model().to(device)
class MyLoss(SparseMultilabelCategoricalCrossentropy):
def __init__(self, **kwargs):
super().__init__(**kwargs)
def forward(self, y_preds, y_trues):
        ''' y_preds: [Tensor], each of shape [btz, heads, seq_len, seq_len]
'''
loss_list = []
for y_pred, y_true in zip(y_preds, y_trues):
shape = y_pred.shape
            # multiply by seq_len because (i, j) flattened into the seq_len*seq_len dimension has index i*seq_len + j
            y_true = y_true[..., 0] * shape[2] + y_true[..., 1]  # [btz, heads, flat indices of entity start/end]
y_pred = y_pred.reshape(shape[0], -1, np.prod(shape[2:])) # [btz, heads, seq_len*seq_len]
loss = super().forward(y_pred, y_true.long())
loss = torch.mean(torch.sum(loss, dim=1))
loss_list.append(loss)
return {'loss': sum(loss_list)/3, 'entity_loss': loss_list[0], 'head_loss': loss_list[1], 'tail_loss': loss_list[2]}
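# Worked example of the flattening used in MyLoss above (illustrative numbers
# only): with seq_len = 128, a span label (head=3, tail=7) becomes the flat
# index 3 * 128 + 7 = 391 into the seq_len * seq_len score matrix, which is
# the index format SparseMultilabelCategoricalCrossentropy consumes.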
model.compile(loss=MyLoss(mask_zero=True), optimizer=optim.Adam(model.parameters(), 1e-5), metrics=['entity_loss', 'head_loss', 'tail_loss'])
def extract_spoes(text, threshold=0):
"""抽取输入text所包含的三元组
"""
tokens = tokenizer.tokenize(text, maxlen=maxlen)
mapping = tokenizer.rematch(text, tokens)
token_ids, segment_ids = tokenizer.encode(text, maxlen=maxlen)
token_ids = torch.tensor([token_ids], dtype=torch.long, device=device)
segment_ids = torch.tensor([segment_ids], dtype=torch.long, device=device)
outputs = model.predict([token_ids, segment_ids])
outputs = [o[0].cpu().numpy() for o in outputs] # [heads, seq_len, seq_len]
    # Extract subjects and objects
subjects, objects = set(), set()
outputs[0][:, [0, -1]] -= float('inf')
outputs[0][:, :, [0, -1]] -= float('inf')
for l, h, t in zip(*np.where(outputs[0] > threshold)):
if l == 0:
subjects.add((h, t))
else:
objects.add((h, t))
    # Identify the corresponding predicates
spoes = set()
for sh, st in subjects:
for oh, ot in objects:
            p1s = np.where(outputs[1][:, sh, oh] > threshold)[0]
            p2s = np.where(outputs[2][:, st, ot] > threshold)[0]
            # a predicate counts only if both its head and tail scores fire
            ps = set(p1s) & set(p2s)
            for p in ps:
                spoes.add((
                    text[mapping[sh][0]:mapping[st][-1] + 1], id2predicate[p],
                    text[mapping[oh][0]:mapping[ot][-1] + 1]
                ))
    return list(spoes)
import json
from collections import Counter
import numpy as np
from sentence_retrieval.sent_tfidf import OnlineTfidfDocRanker
from utils import fever_db, check_sentences, text_clean
import config
import drqa_yixin.tokenizers
from drqa_yixin.tokenizers import CoreNLPTokenizer
from tqdm import tqdm
from utils import c_scorer
import math
import utils
from collections import namedtuple
from pathlib import Path
from utils import common
path_stanford_corenlp_full_2017_06_09 = str(config.PRO_ROOT / 'dep_packages/stanford-corenlp-full-2017-06-09/*')
print(path_stanford_corenlp_full_2017_06_09)
drqa_yixin.tokenizers.set_default('corenlp_classpath', path_stanford_corenlp_full_2017_06_09)
tok = CoreNLPTokenizer(annotators=['pos', 'lemma'])
def easy_tokenize(text):
return tok.tokenize(text_clean.normalize(text)).words()
def load_data(file):
d_list = []
with open(file, encoding='utf-8', mode='r') as in_f:
for line in in_f:
item = json.loads(line.strip())
d_list.append(item)
return d_list
def utest_for_ground_truth(d_list):
nei_c = 0
support_c = 0
refute_c = 0
for item in tqdm(d_list):
e_list = check_sentences.check_and_clean_evidence(item)
evidence_sent_id = []
gt_evidence = []
if item['verifiable'] == "VERIFIABLE":
for doc_id, ln in list(e_list)[0]:
evidence_sent_id.append(doc_id + c_scorer.SENT_LINE + str(ln))
gt_evidence.append([doc_id, ln])
elif item['verifiable'] == "NOT VERIFIABLE":
evidence_sent_id = []
item["predicted_sentids"] = evidence_sent_id
# item['predicted_evidence'] = []
item['predicted_evidence'] = gt_evidence
item['predicted_label'] = item["label"]
if item["label"] == 'NOT ENOUGH INFO':
nei_c += 1
elif item["label"] == 'SUPPORTS':
support_c += 1
elif item["label"] == 'REFUTES':
refute_c += 1
print(support_c, refute_c, nei_c)
# if len(evidence_sent_id) >= 2:
# print(evidence_sent_id)
def utest_score_ground_truth():
d_list = load_data(config.FEVER_DEV_JSONL)
utest_for_ground_truth(d_list)
eval_mode = {'check_sent_id_correct': True, 'standard': True}
print(c_scorer.fever_score(d_list, d_list, mode=eval_mode, verbose=False))
def utest_check_sentence_lines():
sent_number_coutner = Counter()
number_list = []
db_cursor = fever_db.get_cursor()
# d_list = load_data("/Users/Eason/RA/FunEver/results/doc_retri/2018_07_04_21:56:49_r/dev.jsonl")
d_list = load_data("/Users/Eason/RA/FunEver/results/doc_retri/2018_07_04_21:56:49_r/train.jsonl")
for item in tqdm(d_list):
p_docids = item['predicted_docids']
current_sent_list = []
for doc_id in p_docids:
r_list = fever_db.get_all_sent_by_doc_id(db_cursor, doc_id)
current_sent_list.extend(r_list)
sent_number_coutner.update([len(current_sent_list)])
number_list.append(len(current_sent_list))
# print(current_sent_list)
print(len(number_list))
print('Mean:', np.mean(number_list))
print('Max:', np.max(number_list))
print('Min:', np.min(number_list))
    print('Std:', np.std(number_list))
#%%
import torch
import torchvision
import torchvision.transforms as transforms
import matplotlib.pyplot as plt
import numpy as np
batch_size = 100
train_dataset = torchvision.datasets.MNIST(
root = './data'
,train = True
,transform = transforms.ToTensor()
,download = True
)
test_dataset = torchvision.datasets.MNIST(
root = './data'
,train = False
,transform = transforms.ToTensor()
)
train_loader = torch.utils.data.DataLoader(
dataset = train_dataset
,batch_size = batch_size
,shuffle = True
)
test_loader = torch.utils.data.DataLoader(
dataset = test_dataset
,batch_size = batch_size
,shuffle = False
)
print("load data done")
# define parameters
# w1 is the weight matrix for hidden layer 1, layer1_size x input_size
# (128 hidden neurons, 784 input pixels)
# biases are kept as separate column vectors b1, b2, b3
learning_rate = 0.001
num_epoch = 10
input_size = 28*28 # 784
layer1_size = 128
layer2_size = 64
output_size = 10
# w1 = np.random.uniform(low = 0,high=1,size=(layer1_size,input_size))#.astype(np.float32)*np.sqrt(1. / layer1_size)
# w2 = np.random.uniform(low = 0,high=1,size=(layer2_size,layer1_size))#.astype(np.float32)*np.sqrt(1. / layer2_size)
# w3 = np.random.uniform(low = 0,high=1,size=(output_size,layer2_size))#.astype(np.float32)*np.sqrt(1. / output_size)
w1 = np.random.randn(layer1_size,input_size) * np.sqrt(1./layer1_size)
b1 = np.random.randn(layer1_size,1) * np.sqrt(1./layer1_size)
w2 = np.random.randn(layer2_size,layer1_size) * np.sqrt(1./layer2_size)
b2 = np.random.randn(layer2_size,1) * np.sqrt(1./layer2_size)
w3 = np.random.randn(output_size,layer2_size) * np.sqrt(1./output_size)
b3 = np.random.randn(output_size,1) * np.sqrt(1./output_size)
def acc():
total = 0
correct = 0
for i,(images,labels) in enumerate(test_loader):
images = images.numpy()
labels = labels.numpy()
for i,image in enumerate(images):
total +=1
# prepare raw image data
image = image[0].flatten() # channel 0
image = np.reshape(image,(input_size,-1))
# forward to hidden layer 1
y_pred_layer1_z = forward(w1,image,b1)
y_pred_layer1_a = sigmoid(y_pred_layer1_z)
# forward to hidden layer 2
y_pred_layer2_z = forward(w2,y_pred_layer1_a,b2)
y_pred_layer2_a = sigmoid(y_pred_layer2_z)
#y_pred_layer2 = y_pred_layer2*down_rate
# output
y_pred_z = forward(w3,y_pred_layer2_a,b3)
y_pred = softmax(y_pred_z).flatten()
y_pred = np.argmax(y_pred)
y = labels[i]
#print(f"y_pred:{y_pred} | y: {y}")
if y_pred == y : correct +=1
return correct / total
def forward(w,x,b):
return np.dot(w,x) + b
def sigmoid(x, derivative=False):
if derivative:
return (np.exp(-x))/((np.exp(-x)+1)**2)
return 1/(1 + np.exp(-x))
def softmax(x, derivative=False):
# Numerically stable with large exponentials
exps = np.exp(x - x.max())
if derivative:
return exps / np.sum(exps, axis=0) * (1 - exps / np.sum(exps, axis=0))
    return exps / np.sum(exps, axis=0)
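# Hedged sketch of a single SGD training step (an assumption: the training
# loop itself is not shown above). With a cross-entropy loss on the softmax
# output, the output-layer error is simply (y_pred - y_onehot); the hidden
# errors are backpropagated through the sigmoid derivative.
def train_step(image, label):
    global w1, b1, w2, b2, w3, b3
    # forward pass
    z1 = forward(w1, image, b1)
    a1 = sigmoid(z1)
    z2 = forward(w2, a1, b2)
    a2 = sigmoid(z2)
    z3 = forward(w3, a2, b3)
    y_pred = softmax(z3)
    y = np.zeros((output_size, 1))
    y[label] = 1
    # backward pass
    d3 = y_pred - y
    d2 = np.dot(w3.T, d3) * sigmoid(z2, derivative=True)
    d1 = np.dot(w2.T, d2) * sigmoid(z1, derivative=True)
    # gradient descent update
    w3 -= learning_rate * np.dot(d3, a2.T)
    b3 -= learning_rate * d3
    w2 -= learning_rate * np.dot(d2, a1.T)
    b2 -= learning_rate * d2
    w1 -= learning_rate * np.dot(d1, image.T)
    b1 -= learning_rate * d1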
# coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Runs eval metrics for the shilling attack experiment in Section 4."""
# pylint: disable=missing-docstring
# pylint: disable=redefined-outer-name
# pylint: disable=dangerous-default-value
# pylint: disable=invalid-name
# pylint: disable=C6204
import collections
import copy
import json
import os
import matplotlib
matplotlib.use('Agg')
from matplotlib.lines import Line2D
import matplotlib.pyplot as plt
import numpy
import pandas as pd
import seaborn as sns
sns.set_style('whitegrid')
# User-defined hyperparameters for the experiment. These should match the first
# three user parameters in polblogs_experiment.py
SAVE_DIR = 'experiment_data/shilling'
NUMBER_OF_EXPERIMENTS = 10
# Copy line 739 and 742 from shilling_experiment.py
methods = ['deepwalk', 'glove', 'monet0', 'monet', 'random', 'nlp']
DB_LEVELS = [v / 100.0 for v in list(range(75, 100, 5)) + [50, 25]]
################################################################################
# Register results saving directory
EVAL_SAVE_DIR = os.path.join(SAVE_DIR, 'exp_results')
if not os.path.isdir(EVAL_SAVE_DIR):
os.mkdir(EVAL_SAVE_DIR)
# Helper function to get method name from debias (DB) level
monet_alpha_encoder = lambda x: 'monet%0.2f' % x
# Register names of methods and display names
methods.extend([monet_alpha_encoder(db_level) for db_level in DB_LEVELS])
replace_dict = {
'deepwalk': 'DeepWalk',
'monet0': 'GloVe_meta',
'monet': 'MONET_G',
'random': 'Random',
'glove': 'GloVe',
'nlp': 'NLP'
}
def movielens_result_2d( # pylint: disable=dangerous-default-value, missing-docstring
df,
cpalette,
ppalette,
figsize=(13, 10),
title='Attacked Vids in Top-20 vs MRR-Lift, k=20',
xtitle=None,
ytitle=None,
ignore_methods=['Random', 'Adversary', 'MONET_G-0.75', 'MONET_G-0.25'],
x_col='MRR@k / random-MRR@k',
x_subtitle='(higher better)',
y_col='Attacked Vids in Top-20',
y_subtitle='(lower better)',
method_col='Method',
annotate_size=26.0,
title_size=40.0,
ax_label_size=28.0,
ax_tick_size=26.0,
legend_text_size=26.0,
xlim=(3.0, 8.0),
ylim=(-0.5, 11.0),
markersize=300,
legend_markersize=18,
text_loff1=0.7,
text_uoff1=0.1,
text_loff2=0.35,
text_uoff2=0.25,
legpos='lower right',
filename=None):
if xtitle is None:
xtitle = x_col
if ytitle is None:
ytitle = y_col
  method_names = cpalette.keys()
# General figure specs
_ = plt.figure(figsize=figsize)
plt.rc('axes', titlesize=title_size) # fontsize of the axes title
plt.rc('axes', labelsize=ax_label_size) # fontsize of the x and y labels
plt.rc('xtick', labelsize=ax_tick_size) # fontsize of the tick labels
plt.rc('ytick', labelsize=ax_tick_size) # fontsize of the tick labels
plt.rc('legend', fontsize=legend_text_size) # legend fontsize
plt.suptitle(title, fontsize=title_size)
plt.title('')
plt.xlim(xlim)
plt.ylim(ylim)
plt.xlabel(xtitle)
custom_points = []
# Plotting individual results
for m in method_names:
if m not in ignore_methods:
x_mean = numpy.mean(df[df[method_col] == m][x_col])
y_mean = numpy.mean(df[df[method_col] == m][y_col])
plt.scatter(
x=x_mean,
y=y_mean,
marker=ppalette[m],
color=cpalette[m],
s=markersize)
plt.xlabel('%s\n%s' % (xtitle, x_subtitle))
plt.ylabel('%s\n%s' % (ytitle, y_subtitle))
if 'MONET' in m:
if m == 'MONET_G':
text = r'$\lambda$=1.00'
custom_points.append(
Line2D([0], [0],
color='w',
marker=ppalette[m],
markerfacecolor=cpalette[m],
label=m,
markersize=legend_markersize))
else:
text = r'$\lambda$=%s' % m[-4:]
if m[-2:] == '50':
plt.annotate(
text, (x_mean - text_loff2, y_mean + text_uoff2),
size=annotate_size)
else:
plt.annotate(
text, (x_mean - text_loff1, y_mean + text_uoff1),
size=annotate_size)
else:
custom_points.append(
Line2D([0], [0],
color='w',
marker=ppalette[m],
markerfacecolor=cpalette[m],
label=m,
markersize=legend_markersize))
# Plot GloVe_meta again
m = 'GloVe_meta'
x_mean = numpy.mean(df[df[method_col] == m][x_col])
y_mean = numpy.mean(df[df[method_col] == m][y_col])
plt.scatter(
x=x_mean,
y=y_mean,
marker=ppalette[m],
color=cpalette[m],
s=markersize)
plt.legend(
handles=custom_points,
loc=legpos,
numpoints=1,
shadow=True,
fancybox=False)
if filename is not None:
plt.savefig(filename, bbox_inches='tight')
# Load results and create master list
exp_result_list = []
for experiment_number in range(NUMBER_OF_EXPERIMENTS):
exp_save_dir = os.path.join(SAVE_DIR, 'experiment%d' % experiment_number)
with open(os.path.join(exp_save_dir, '%d.txt' % experiment_number)) as f:
exp_result = json.loads(f.read())
exp_result_list.append(exp_result)
result_df = pd.DataFrame(exp_result_list)
# Create timing and embedding distance CIs
distcorr_dict = collections.defaultdict(list)
time_dict = collections.defaultdict(list)
for exp_result in exp_result_list:
for method in methods:
if '.' not in method:
distcorr_dict[method].append(exp_result['%s_vs_glove_distcorr' % method])
if method not in ['nlp', 'random']:
time_dict[method].append(exp_result['%s_time' % method])
# Change dict names to display names
for method in methods:
if method in time_dict:
time_dict[replace_dict[method]] = time_dict[method]
del time_dict[method]
if method in distcorr_dict:
distcorr_dict[replace_dict[method]] = distcorr_dict[method]
del distcorr_dict[method]
def m_pm_s3(m, ss):
return '%0.3f $\pm$ %0.3f' % (m, ss) # pylint: disable=anomalous-backslash-in-string
def m_pm_sint(m, ss):
return '%d $\pm$ %d' % (m, ss) # pylint: disable=anomalous-backslash-in-string
def two_col_float_with_std(name, mm1, ss1, mm2, ss2):
if numpy.isnan(mm2):
string2 = 'N/A'
else:
string2 = m_pm_sint(round(mm2), round(ss2))
return '%s & %s & %s \\\\' % (name, m_pm_s3(mm1, ss1), string2)
flines = []
for method in methods:
if '.' not in method:
m1 = s1 = m2 = s2 = numpy.nan
if replace_dict[method] in distcorr_dict:
m1 = numpy.mean(distcorr_dict[replace_dict[method]])
s1 = numpy.std(distcorr_dict[replace_dict[method]])
if replace_dict[method] in time_dict:
      m2 = numpy.mean(time_dict[replace_dict[method]])
      s2 = numpy.std(time_dict[replace_dict[method]])
    flines.append(two_col_float_with_std(replace_dict[method], m1, s1, m2, s2))
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
import numpy as np
a = 0.1
I1 = 1
h = 0.025
monte_carlo = 1000
th = np.linspace(0, 2*np.pi, monte_carlo)
def Ax_aux(x, z, th):
    # distance from the field point (x, 0, z) to a point on the loop of radius a
    f1 = 1/(np.sqrt(x**2 + a**2 - 2*a*x*np.cos(th) + z**2))
    return f1
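# Hedged sketch of how the theta grid above might be used (an assumption; the
# script is incomplete at this point): approximate the loop integral by
# averaging the integrand over the uniform theta samples.
def loop_average(f, x, z):
    """Approximate (1/(2*pi)) * integral of f(x, z, theta) d(theta)."""
    return np.mean(f(x, z, th))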
import sys
import numpy as np
inf = float('inf')
h, n, *ab = map(int, sys.stdin.read().split())
a, b = np.array(ab, dtype=np.int64).reshape(n, 2).T
def main():
dp = np.zeros(h+1, dtype=np.int64)
for i in range(1, h+1):
        dp[i] = np.amin(dp[np.maximum(i-a, 0)] + b)
    print(dp[h])


if __name__ == '__main__':
    main()
import os
import numpy as np
import vtk
from vtk.util.numpy_support import vtk_to_numpy, numpy_to_vtk
import SimpleITK as sitk
import sys
sys.path.append(os.path.join(os.path.dirname(__file__), "vtk_utils"))
from vtk_utils import *
import csv
def extract_surface(poly):
connectivity = vtk.vtkPolyDataConnectivityFilter()
connectivity.SetInputData(poly)
connectivity.ColorRegionsOn()
connectivity.SetExtractionModeToAllRegions()
connectivity.Update()
poly = connectivity.GetOutput()
return poly
def surface_distance(p_surf, g_surf):
dist_fltr = vtk.vtkDistancePolyDataFilter()
dist_fltr.SetInputData(1, p_surf)
dist_fltr.SetInputData(0, g_surf)
dist_fltr.SignedDistanceOff()
dist_fltr.Update()
distance = vtk_to_numpy(dist_fltr.GetOutput().GetPointData().GetArray('Distance'))
return distance, dist_fltr.GetOutput()
def evaluate_poly_distances(poly, gt, NUM):
# compute assd and hausdorff distances
assd_list, haus_list, poly_list = [], [], []
poly =extract_surface(poly)
for i in range(NUM):
poly_i = thresholdPolyData(poly, 'Scalars_', (i+1, i+1),'cell')
if poly_i.GetNumberOfPoints() == 0:
print("Mesh based methods.")
poly_i = thresholdPolyData(poly, 'RegionId', (i, i), 'point')
gt_i = thresholdPolyData(gt, 'Scalars_', (i+1, i+1),'cell')
print("DEBUG: ", poly_i.GetNumberOfPoints(), gt_i.GetNumberOfPoints())
pred2gt_dist, pred2gt = surface_distance(gt_i, poly_i)
gt2pred_dist, gt2pred = surface_distance(poly_i, gt_i)
assd = (np.mean(pred2gt_dist)+np.mean(gt2pred_dist))/2
haus = max(np.max(pred2gt_dist), np.max(gt2pred_dist))
assd_list.append(assd)
haus_list.append(haus)
poly_list.append(pred2gt)
poly_dist = appendPolyData(poly_list)
# whole heart
pred2gt_dist, pred2gt = surface_distance(gt, poly)
gt2pred_dist, gt2pred = surface_distance(poly, gt)
assd = (np.mean(pred2gt_dist)+np.mean(gt2pred_dist))/2
haus = max(np.max(pred2gt_dist), np.max(gt2pred_dist))
assd_list.insert(0, assd)
haus_list.insert(0, haus)
print(assd_list)
print(haus_list)
return assd_list, haus_list, poly_dist
def dice_score(pred, true):
    pred = pred.astype(int)
    true = true.astype(int)
num_class = np.unique(true)
#change to one hot
dice_out = [None]*len(num_class)
for i in range(1, len(num_class)):
pred_c = pred == num_class[i]
true_c = true == num_class[i]
dice_out[i] = np.sum(pred_c*true_c)*2.0 / (np.sum(pred_c) + np.sum(true_c))
mask =( pred > 0 )+ (true > 0)
dice_out[0] = np.sum((pred==true)[mask]) * 2. / (np.sum(pred>0) + np.sum(true>0))
return dice_out
def jaccard_score(pred, true):
    pred = pred.astype(int)
    true = true.astype(int)
num_class = np.unique(true)
#change to one hot
jac_out = [None]*len(num_class)
for i in range(1, len(num_class)):
pred_c = pred == num_class[i]
true_c = true == num_class[i]
jac_out[i] = np.sum(pred_c*true_c) / (np.sum(pred_c) + np.sum(true_c)-np.sum(pred_c*true_c))
mask =( pred > 0 )+ (true > 0)
    jac_out[0] = np.sum((pred==true)[mask]) / (np.sum(pred>0) + np.sum(true>0) - np.sum((pred==true)[mask]))
    return jac_out
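# Quick usage example (illustrative arrays, not part of the original script):
if __name__ == '__main__':
    pred = np.array([0, 1, 1, 2, 2, 0])
    true = np.array([0, 1, 2, 2, 2, 0])
    print(dice_score(pred, true))     # [0.75, 0.667, 0.8]: whole foreground, class 1, class 2
    print(jaccard_score(pred, true))  # [0.6, 0.5, 0.667], same layout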
# Licensed under a 3-clause BSD style license - see LICENSE.rst
from __future__ import absolute_import, division, print_function, unicode_literals
import copy
import numpy as np
from astropy.io import fits
from astropy.units import Quantity
from collections import OrderedDict
from .utils import unpack_seq
from .geom import pix_tuple_to_idx, axes_pix_to_coord
from .utils import interp_to_order
from .wcsmap import WcsGeom
from .wcsmap import WcsMap
from .reproject import reproject_car_to_hpx, reproject_car_to_wcs
__all__ = [
'WcsNDMap',
]
class WcsNDMap(WcsMap):
"""Representation of a N+2D map using WCS with two spatial dimensions
and N non-spatial dimensions.
This class uses an ND numpy array to store map values. For maps with
non-spatial dimensions and variable pixel size it will allocate an
array with dimensions commensurate with the largest image plane.
Parameters
----------
geom : `~gammapy.maps.WcsGeom`
WCS geometry object.
data : `~numpy.ndarray`
Data array. If none then an empty array will be allocated.
dtype : str, optional
Data type, default is float32
meta : `~collections.OrderedDict`
Dictionary to store meta data.
unit : str or `~astropy.units.Unit`
The map unit
"""
def __init__(self, geom, data=None, dtype='float32', meta=None, unit=''):
# TODO: Figure out how to mask pixels for integer data types
# Shape in WCS or FITS order is `shape`, in Numpy axis order is `shape_np`
shape = tuple([np.max(geom.npix[0]), np.max(geom.npix[1])] +
[ax.nbin for ax in geom.axes])
shape_np = shape[::-1]
if data is None:
data = self._make_default_data(geom, shape_np, dtype)
elif data.shape != shape_np:
raise ValueError('Wrong shape for input data array. Expected {} '
'but got {}'.format(shape_np, data.shape))
super(WcsNDMap, self).__init__(geom, data, meta, unit)
@staticmethod
def _make_default_data(geom, shape_np, dtype):
# Check whether corners of each image plane are valid
coords = []
if not geom.is_regular:
for idx in np.ndindex(geom.shape):
pix = (np.array([0.0, float(geom.npix[0][idx] - 1)]),
np.array([0.0, float(geom.npix[1][idx] - 1)]))
pix += tuple([np.array(2 * [t]) for t in idx])
coords += geom.pix_to_coord(pix)
else:
pix = (np.array([0.0, float(geom.npix[0] - 1)]),
np.array([0.0, float(geom.npix[1] - 1)]))
pix += tuple([np.array(2 * [0.0]) for i in range(geom.ndim - 2)])
coords += geom.pix_to_coord(pix)
if np.all(np.isfinite(np.vstack(coords))):
if geom.is_regular:
data = np.zeros(shape_np, dtype=dtype)
else:
                data = np.full(shape_np, np.nan, dtype=dtype)
        return data
"""Test cases for the blocks environment."""
import numpy as np
from predicators.src import utils
from predicators.src.envs.blocks import BlocksEnv
def test_blocks():
"""Tests for BlocksEnv class."""
utils.reset_config({"env": "blocks"})
env = BlocksEnv()
clear = env._block_is_clear # pylint: disable=protected-access
for task in env.get_train_tasks():
for obj in task.init:
assert len(obj.type.feature_names) == len(task.init[obj])
for task in env.get_test_tasks():
for obj in task.init:
assert len(obj.type.feature_names) == len(task.init[obj])
assert len(env.predicates) == 5
assert {pred.name for pred in env.goal_predicates} == {"On", "OnTable"}
assert len(env.options) == 3
assert len(env.types) == 2
block_type = [t for t in env.types if t.name == "block"][0]
assert env.action_space.shape == (4, )
assert abs(env.action_space.low[0] - BlocksEnv.x_lb) < 1e-3
assert abs(env.action_space.high[0] - BlocksEnv.x_ub) < 1e-3
assert abs(env.action_space.low[1] - BlocksEnv.y_lb) < 1e-3
assert abs(env.action_space.high[1] - BlocksEnv.y_ub) < 1e-3
assert abs(env.action_space.low[2]) < 1e-3
assert abs(env.action_space.low[3]) < 1e-3
assert abs(env.action_space.high[3] - 1) < 1e-3
for i, task in enumerate(env.get_test_tasks()):
state = task.init
robot = None
for item in state:
if item.type != block_type:
robot = item
continue
assert not (state.get(item, "held") and clear(item, state))
assert robot is not None
if i == 0:
# Force initial pick to test rendering with holding
Pick = [o for o in env.options if o.name == "Pick"][0]
block = sorted([o for o in state if o.type.name == "block" and \
clear(o, state)])[0]
act = Pick.ground([robot, block], np.zeros(0)).policy(state)
state = env.simulate(state, act)
env.render_state(state, task, caption="caption")
def test_blocks_failure_cases():
"""Tests for the cases where simulate() is a no-op."""
utils.reset_config({"env": "blocks"})
env = BlocksEnv()
Pick = [o for o in env.options if o.name == "Pick"][0]
Stack = [o for o in env.options if o.name == "Stack"][0]
PutOnTable = [o for o in env.options if o.name == "PutOnTable"][0]
On = [o for o in env.predicates if o.name == "On"][0]
OnTable = [o for o in env.predicates if o.name == "OnTable"][0]
block_type = [t for t in env.types if t.name == "block"][0]
robot_type = [t for t in env.types if t.name == "robot"][0]
block0 = block_type("block0")
block1 = block_type("block1")
block2 = block_type("block2")
robot = robot_type("robot")
task = env.get_train_tasks()[0]
state = task.init
atoms = utils.abstract(state, env.predicates)
assert OnTable([block0]) in atoms
assert OnTable([block1]) in atoms
assert OnTable([block2]) not in atoms
assert On([block2, block1]) in atoms
# No block at this pose, pick fails
act = Pick.ground([robot, block0], np.zeros(0)).policy(state)
fake_state = state.copy()
fake_state.set(block0, "pose_y", state.get(block0, "pose_y") - 1)
next_state = env.simulate(fake_state, act)
assert fake_state.allclose(next_state)
# Object not clear, pick fails
act = Pick.ground([robot, block1], np.zeros(0)).policy(state)
next_state = env.simulate(state, act)
assert state.allclose(next_state)
# Cannot putontable or stack without picking first
act = Stack.ground([robot, block1], np.zeros(0)).policy(state)
next_state = env.simulate(state, act)
assert state.allclose(next_state)
act = PutOnTable.ground([robot], np.array([0.5, 0.5],
dtype=np.float32)).policy(state)
next_state = env.simulate(state, act)
assert state.allclose(next_state)
# Perform valid pick
    act = Pick.ground([robot, block0], np.zeros(0)).policy(state)
# TODO: shapelet transform
# TODO: shapelet tree
# TODO: shapelet isolation tree for anomaly detection
from sklearn.base import BaseEstimator, ClassifierMixin, TransformerMixin
from sklearn.utils import check_array, check_X_y
from sklearn.utils.validation import check_is_fitted
import extractors.extractor as extractors
from util import sdist
import numpy as np
#from dtaidistance import dtw
from collections import Counter
import operator
import sys
import os
sys.path.append(os.path.dirname(os.path.abspath(__file__)))
import util
class ShapeletTransformer(BaseEstimator, TransformerMixin):
""" An example transformer that returns the element-wise square root..
Parameters
----------
demo_param : str, optional
A parameter used for demonstation of how to pass and store paramters.
Attributes
----------
input_shape : tuple
The shape the data passed to :meth:`fit`
"""
def __init__(self, method=None, min_len=None, max_len=None,
nr_shapelets=1, metric='ig'):
if method is None:
method = 'fast'
if type(method) == str:
self.extractor = {
'fast': extractors.FastExtractor(),
'brute': extractors.BruteForceExtractor(),
'sax': extractors.SAXExtractor(),
'learn': extractors.LearningExtractor(),
'genetic': extractors.GeneticExtractor(),
'pso': extractors.ParticleSwarmExtractor()
}[method]
else:
self.extractor = method
self.shapelets = []
self.min_len = min_len
self.max_len = max_len
self.nr_shapelets = nr_shapelets
self.metric = metric
def fit(self, X, y=None):
"""A reference implementation of a fitting function for a transformer.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The training input samples.
        y : array-like, shape = [n_samples]
            Class labels; unlike a generic transformer, these are used to
            score candidate shapelets during extraction.
Returns
-------
self : object
Returns self.
"""
X = check_array(X)
self.input_shape_ = X.shape
self.shapelets = self.extractor.extract(
X, y,
min_len=self.min_len,
max_len=self.max_len,
nr_shapelets=self.nr_shapelets,
metric=self.metric
)
# Return the transformer
return self
def transform(self, X):
""" A reference implementation of a transform function.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
        feature_vectors : array of float, shape = [n_samples, n_shapelets]
            For each input series, its distance to each extracted shapelet.
"""
# Check is fit had been called
check_is_fitted(self, ['shapelets'])
# Input validation
X = check_array(X)
feature_vectors = np.zeros((len(X), len(self.shapelets)))
for smpl_idx, sample in enumerate(X):
for shap_idx, shapelet in enumerate(self.shapelets):
feature_vectors[smpl_idx, shap_idx] = util.sdist_no_norm(shapelet.flatten(), sample)
return feature_vectors
class ShapeletTree(object):
    def __init__(self, right=None, left=None, shapelet=None, threshold=None, class_probabilities=None):
        self.right = right
        self.left = left
        self.shapelet = shapelet
        self.threshold = threshold
        # Avoid a shared mutable default argument for the probability dict
        self.class_probabilities = class_probabilities if class_probabilities is not None else {}
def evaluate(self, time_serie, proba=True):
if self.is_leaf():
if proba:
return self.class_probabilities
else:
return max(self.class_probabilities.items(), key=operator.itemgetter(1))[0]
else:
dist = util.sdist(self.shapelet, time_serie)
if dist <= self.threshold:
return self.left.evaluate(time_serie, proba=proba)
else:
return self.right.evaluate(time_serie, proba=proba)
def predict(self, X):
return [ self.evaluate(ts, proba=False) for ts in X ]
def predict_proba(self, X):
return [ self.evaluate(ts, proba=True) for ts in X ]
def is_leaf(self):
return self.threshold is None
def extract_all_shapelets(self):
if self.is_leaf():
return None
else:
left_shap = self.left.extract_all_shapelets()
right_shap = self.right.extract_all_shapelets()
all_shapelets = [self.shapelet]
if left_shap is not None:
all_shapelets += left_shap
if right_shap is not None:
all_shapelets += right_shap
return all_shapelets
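# A minimal sketch (hypothetical values) of how a fitted ShapeletTree routes a
# series: internal nodes compare util.sdist(shapelet, ts) against a threshold,
# and leaves return their stored class probabilities.
#   left = ShapeletTree(class_probabilities={0: 0.9, 1: 0.1})
#   right = ShapeletTree(class_probabilities={0: 0.2, 1: 0.8})
#   root = ShapeletTree(left=left, right=right,
#                       shapelet=np.array([0., 1., 0.]), threshold=0.5)
#   root.predict([ts])  # -> [0] if sdist(shapelet, ts) <= 0.5, else [1]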
class ShapeletTreeClassifier(BaseEstimator, ClassifierMixin):
""" An example classifier which implements a 1-NN algorithm.
Parameters
----------
demo_param : str, optional
A parameter used for demonstation of how to pass and store paramters.
Attributes
----------
X_ : array, shape = [n_samples, n_features]
The input passed during :meth:`fit`
y_ : array, shape = [n_samples]
The labels passed during :meth:`fit`
"""
def __init__(self, method=None, max_depth=None, min_samples_split=1, min_len=None, max_len=None, metric='ig'):
if method is None:
method = 'fast'
if type(method) == str:
self.extractor = {
'fast': extractors.FastExtractor(),
'brute': extractors.BruteForceExtractor(),
'sax': extractors.SAXExtractor(),
'learn': extractors.LearningExtractor(),
'genetic': extractors.GeneticExtractor(),
'pso': extractors.ParticleSwarmExtractor()
}[method]
else:
self.extractor = method
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.metric = metric
self.min_len = min_len
self.max_len = max_len
self.tree = None
def _calc_probs(self, y):
probs = Counter(y)
total = sum(probs.values())
for k in probs: probs[k] /= total
return probs
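    # e.g. _calc_probs([0, 0, 1]) returns {0: 0.667, 1: 0.333} (as a Counter).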
def _extract_tree(self, X, y, depth=0):
        # Split this node only while the tree is still below max_depth and the
        # node is impure (the original comparison was inverted).
        if (self.max_depth is None or depth < self.max_depth) and len(np.unique(y)) > 1:
# Extract 1 shapelet, using the specified `extractor`
map_dict = {}
            for j, c in enumerate(np.unique(y)):
                map_dict[c] = j
# -*- coding: utf-8 -*-
"""
Created on Tue Dec 5 09:25:46 2017
@author: ben
"""
import numpy as np
import scipy.sparse as sp
from LSsurf.fd_grid import fd_grid
class lin_op:
def __init__(self, grid=None, row_0=0, col_N=None, col_0=None, name=None):
# a lin_op is an operator that represents a set of linear equations applied
# to the nodes of a grid (defined in fd_grid.py)
if col_0 is not None:
self.col_0=col_0
elif grid is not None:
self.col_0=grid.col_0
self.col_N=None
if col_N is not None:
self.col_N=col_N
elif grid is not None:
self.col_N=grid.col_N
self.row_0=row_0
self.N_eq=0
self.name=name
self.id=None
self.r=np.array([], dtype=int)
self.c=np.array([], dtype=int)
self.v=np.array([], dtype=float)
self.ind0=np.zeros([0], dtype=int)
self.TOC={'rows':dict(),'cols':dict()}
self.grid=grid
self.dst_grid=None
self.dst_ind0=None
self.expected=None
self.shape=None
self.size=None
def __update_size_and_shape__(self):
self.shape = (self.N_eq, self.col_N)
def diff_op(self, delta_subs, vals, which_nodes=None, valid_equations_only=True):
        # build an operator that calculates a linear combination of the
        # surrounding values at each node of a grid.
        # A template, given by delta_subs and vals, contains a list of offsets
        # in each direction of the grid and a list of values corresponding
        # to each offset.  Only those nodes for which the template falls
        # entirely inside the grid are included in the operator
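        # For example (hypothetical 1-D grid g), a centered second difference
        # corresponds to the template
        #     lin_op(g).diff_op((np.array([-1, 0, 1]),),
        #                       [np.array([1.]), np.array([-2.]), np.array([1.])])
        # which writes one equation per interior node of g.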
if valid_equations_only:
# compute the maximum and minimum offset in each dimension. These
# will be used to eliminate equations that extend outside the model
# domain
max_deltas=[np.max(delta_sub) for delta_sub in delta_subs]
min_deltas=[np.min(delta_sub) for delta_sub in delta_subs]
else:
# treat the maximum and minimum offset in each dimension as zero,
# so no equations are truncated
max_deltas=[0 for delta_sub in delta_subs]
min_deltas=[0 for delta_sub in delta_subs]
#generate the center-node indices for each calculation
# if in dimension k, min_delta=-a and max_delta = +b, the number of indices is N,
# then the first valid center is a and the last is N-b
sub0s=np.meshgrid(*[np.arange(np.maximum(0, -min_delta), np.minimum(Ni, Ni-max_delta)) for Ni, min_delta, max_delta in zip(self.grid.shape, min_deltas, max_deltas)], indexing='ij')
sub0s=[sub.ravel() for sub in sub0s]
if which_nodes is not None:
temp_mask=np.in1d(self.grid.global_ind(sub0s), which_nodes)
sub0s=[temp[temp_mask] for temp in sub0s]
self.r, self.c=[np.zeros((len(sub0s[0]), len(delta_subs[0])), dtype=int) for _ in range(2)]
self.v=np.zeros_like(self.r, dtype=float)
self.N_eq=len(sub0s[0])
# loop over offsets
for ii in range(len(delta_subs[0])):
# build a list of subscripts over dimensions
this_sub=[sub0+delta[ii] for sub0, delta in zip(sub0s, delta_subs)]
self.r[:,ii]=self.row_0+np.arange(0, self.N_eq, dtype=int)
if valid_equations_only:
self.c[:,ii]=self.grid.global_ind(this_sub)
self.v[:,ii]=vals[ii].ravel()
else:
# need to remove out-of-bound subscripts
self.c[:,ii], valid_ind=self.grid.global_ind(this_sub, return_valid=True)
self.v[:,ii]=vals[ii].ravel()*valid_ind.ravel()
#if not valid_equations_only: [Leave this commented until it causes a problem]
# # remove the elements that have v=0
# nonzero_v = self.v.ravel() != 0
# self.r = self.r.ravel()[nonzero_v]
# self.c = self.c.ravel()[nonzero_v]
# self.v = self.v.ravel()[nonzero_v]
self.ind0 = self.grid.global_ind(sub0s).ravel()
self.TOC['rows'] = {self.name:range(self.N_eq)}
self.TOC['cols'] = {self.grid.name:np.arange(self.grid.col_0, self.grid.col_0+self.grid.N_nodes)}
self.__update_size_and_shape__()
return self
def add(self, op):
# combine a set of operators into a composite operator by adding them.
# the same thing could be accomplished by converting the operators to
# sparse arrays and adding the arrays, but this method keeps track of the
# table of contents for the operators.
# if a list of operators is provided, all are added together, or a single
# operator can be added to an existing operator.
if isinstance(op, list) or isinstance(op, tuple):
for this_op in op:
                self.add(this_op)
return self
if self.r is not None:
self.r=np.append(self.r, op.r)
self.c=np.append(self.c, op.c)
self.v=np.append(self.v, op.v)
self.ind0=np.append(self.ind0, op.ind0)
else:
self.r=op.r
self.c=op.c
self.v=op.v
self.ind0=op.ind0
# assume that the new op may have columns that aren't in self.cols, and
# add any new columns to the table of contents
for key in op.TOC['cols'].keys():
self.TOC['cols'][key]=op.TOC['cols'][key]
self.col_N=np.maximum(self.col_N, op.col_N)
self.__update_size_and_shape__()
return self
def interp_mtx(self, pts):
# create a matrix that, when it multiplies a set of nodal values,
# gives the bilinear interpolation between those nodes at a set of
# data points
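        # For a point with fractional cell offsets (fx, fy), the four bilinear
        # weights are (1-fx)(1-fy), fx(1-fy), (1-fx)fy and fx*fy, so each row
        # of the matrix has up to 2**N_dims nonzeros that sum to 1.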
pts=[pp.ravel() for pp in pts]
# Identify the nodes surrounding each data point
# The floating-point subscript expresses the point locations in terms
# of their grid positions
ii=self.grid.float_sub(pts)
cell_sub=self.grid.cell_sub_for_pts(pts)
# calculate the fractional part of each cell_sub
i_local=[a-b for a, b in zip(ii,cell_sub)]
# find the index of the node below each data point
global_ind=self.grid.global_ind(cell_sub)
# make a list of dimensions based on the dimensions of the grid
if self.grid.N_dims==1:
list_of_dims=np.mgrid[0:2]
elif self.grid.N_dims==2:
list_of_dims=np.mgrid[0:2, 0:2]
elif self.grid.N_dims==3:
list_of_dims=np.mgrid[0:2, 0:2, 0:2]
delta_ind=np.c_[[kk.ravel() for kk in list_of_dims]]
n_neighbors=delta_ind.shape[1]
Npts=len(pts[0])
rr=np.zeros([Npts, n_neighbors], dtype=int)
cc=np.zeros([Npts, n_neighbors], dtype=int)
        vv = np.ones([Npts, n_neighbors], dtype=float)
# Copyright (c) 2003-2019 by <NAME>
#
# TreeCorr is free software: redistribution and use in source and binary forms,
# with or without modification, are permitted provided that the following
# conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions, and the disclaimer given in the accompanying LICENSE
# file.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions, and the disclaimer given in the documentation
# and/or other materials provided with the distribution.
from __future__ import print_function
import numpy as np
import os
import coord
import time
import fitsio
import treecorr
from test_helper import assert_raises, do_pickle, timer, get_from_wiki, CaptureLog, clear_save
from test_helper import profile
def generate_shear_field(npos, nhalo, rng=None):
# We do something completely different here than we did for 2pt patch tests.
# A straight Gaussian field with a given power spectrum has no significant 3pt power,
# so it's not a great choice for simulating a field for 3pt tests.
# Instead we place N SIS "halos" randomly in the grid.
    # Then we compute the shear at each point analytically from the SIS profiles.
if rng is None:
rng = np.random.RandomState()
# Generate x,y values for the real-space field
x = rng.uniform(0,1000, size=npos)
y = rng.uniform(0,1000, size=npos)
nh = rng.poisson(nhalo)
# Fill the kappa values with SIS halo profiles.
xc = rng.uniform(0,1000, size=nh)
yc = rng.uniform(0,1000, size=nh)
scale = rng.uniform(20,50, size=nh)
mass = rng.uniform(0.01, 0.05, size=nh)
# Avoid making huge nhalo * nsource arrays. Loop in blocks of 64 halos
nblock = (nh-1) // 64 + 1
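    # e.g. with nsource = 5000, each dx/dy block is at most 5000 x 64 doubles
    # (~2.5 MB), rather than a full nsource x nhalo array.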
kappa = np.zeros_like(x)
gamma = np.zeros_like(x, dtype=complex)
for iblock in range(nblock):
i = iblock*64
j = (iblock+1)*64
dx = x[:,np.newaxis]-xc[np.newaxis,i:j]
dy = y[:,np.newaxis]-yc[np.newaxis,i:j]
dx[dx==0] = 1 # Avoid division by zero.
dy[dy==0] = 1
dx /= scale[i:j]
dy /= scale[i:j]
rsq = dx**2 + dy**2
r = rsq**0.5
k = mass[i:j] / r # "Mass" here is really just a dimensionless normalization propto mass.
kappa += np.sum(k, axis=1)
# gamma_t = kappa for SIS.
g = -k * (dx + 1j*dy)**2 / rsq
gamma += np.sum(g, axis=1)
return x, y, np.real(gamma), np.imag(gamma), kappa
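# Quick sanity sketch (hypothetical parameters): the SIS shear is tangential
# around each halo, so kappa is strictly positive while the mean shear should
# be consistent with zero:
#   x, y, g1, g2, kappa = generate_shear_field(1000, 50)
#   assert np.all(kappa > 0) and abs(np.mean(g1)) < np.std(g1)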
@timer
def test_kkk_jk():
# Test jackknife and other covariance estimates for kkk correlations.
# Note: This test takes a while!
# The main version I think is a pretty decent test of the code correctness.
# It shows that bootstrap in particular easily gets to within 50% of the right variance.
# Sometimes within 20%, but because of the randomness there, it varies a bit.
# Jackknife isn't much worse. Just a little below 50%. But still pretty good.
# Sample and Marked are not great for this test. I think they will work ok when the
# triangles of interest are mostly within single patches, but that's not the case we
# have here, and it would take a lot more points to get to that regime. So the
# accuracy tests for those two are pretty loose.
if __name__ == '__main__':
# This setup takes about 740 sec to run.
nhalo = 3000
nsource = 5000
npatch = 32
tol_factor = 1
elif False:
# This setup takes about 180 sec to run.
nhalo = 2000
nsource = 2000
npatch = 16
tol_factor = 2
elif False:
# This setup takes about 51 sec to run.
nhalo = 1000
nsource = 1000
npatch = 16
tol_factor = 3
else:
# This setup takes about 20 sec to run.
# So we use this one for regular unit test runs.
# It's pretty terrible in terms of testing the accuracy, but it works for code coverage.
# But whenever actually working on this part of the code, definitely need to switch
# to one of the above setups. Preferably run the name==main version to get a good
# test of the code correctness.
nhalo = 500
nsource = 500
npatch = 16
tol_factor = 4
file_name = 'data/test_kkk_jk_{}.npz'.format(nsource)
print(file_name)
if not os.path.isfile(file_name):
nruns = 1000
all_kkks = []
rng1 = np.random.RandomState()
for run in range(nruns):
x, y, _, _, k = generate_shear_field(nsource, nhalo, rng1)
print(run,': ',np.mean(k),np.std(k))
cat = treecorr.Catalog(x=x, y=y, k=k)
kkk = treecorr.KKKCorrelation(nbins=3, min_sep=30., max_sep=100.,
min_u=0.9, max_u=1.0, nubins=1,
min_v=0.0, max_v=0.1, nvbins=1)
kkk.process(cat)
print(kkk.ntri.ravel().tolist())
print(kkk.zeta.ravel().tolist())
all_kkks.append(kkk)
mean_kkk = np.mean([kkk.zeta.ravel() for kkk in all_kkks], axis=0)
var_kkk = np.var([kkk.zeta.ravel() for kkk in all_kkks], axis=0)
np.savez(file_name, all_kkk=np.array([kkk.zeta.ravel() for kkk in all_kkks]),
mean_kkk=mean_kkk, var_kkk=var_kkk)
data = np.load(file_name)
mean_kkk = data['mean_kkk']
var_kkk = data['var_kkk']
print('mean = ',mean_kkk)
print('var = ',var_kkk)
rng = np.random.RandomState(12345)
x, y, _, _, k = generate_shear_field(nsource, nhalo, rng)
cat = treecorr.Catalog(x=x, y=y, k=k)
kkk = treecorr.KKKCorrelation(nbins=3, min_sep=30., max_sep=100.,
min_u=0.9, max_u=1.0, nubins=1,
min_v=0.0, max_v=0.1, nvbins=1, rng=rng)
kkk.process(cat)
print(kkk.ntri.ravel())
print(kkk.zeta.ravel())
print(kkk.varzeta.ravel())
kkkp = kkk.copy()
catp = treecorr.Catalog(x=x, y=y, k=k, npatch=npatch)
# Do the same thing with patches.
kkkp.process(catp)
print('with patches:')
print(kkkp.ntri.ravel())
print(kkkp.zeta.ravel())
print(kkkp.varzeta.ravel())
np.testing.assert_allclose(kkkp.ntri, kkk.ntri, rtol=0.05 * tol_factor)
np.testing.assert_allclose(kkkp.zeta, kkk.zeta, rtol=0.1 * tol_factor, atol=1e-3 * tol_factor)
np.testing.assert_allclose(kkkp.varzeta, kkk.varzeta, rtol=0.05 * tol_factor, atol=3.e-6)
print('jackknife:')
cov = kkkp.estimate_cov('jackknife')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_kkk))))
np.testing.assert_allclose(np.diagonal(cov), var_kkk, rtol=0.6 * tol_factor)
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_kkk), atol=0.5*tol_factor)
print('sample:')
cov = kkkp.estimate_cov('sample')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_kkk))))
np.testing.assert_allclose(np.diagonal(cov), var_kkk, rtol=0.7 * tol_factor)
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_kkk), atol=0.7*tol_factor)
print('marked:')
cov = kkkp.estimate_cov('marked_bootstrap')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_kkk))))
np.testing.assert_allclose(np.diagonal(cov), var_kkk, rtol=0.7 * tol_factor)
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_kkk), atol=0.7*tol_factor)
print('bootstrap:')
cov = kkkp.estimate_cov('bootstrap')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_kkk))))
np.testing.assert_allclose(np.diagonal(cov), var_kkk, rtol=0.5 * tol_factor)
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_kkk), atol=0.3*tol_factor)
# Now as a cross correlation with all 3 using the same patch catalog.
print('with 3 patched catalogs:')
kkkp.process(catp, catp, catp)
print(kkkp.zeta.ravel())
np.testing.assert_allclose(kkkp.zeta, kkk.zeta, rtol=0.1 * tol_factor, atol=1e-3 * tol_factor)
print('jackknife:')
cov = kkkp.estimate_cov('jackknife')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_kkk))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_kkk), atol=0.5*tol_factor)
print('sample:')
cov = kkkp.estimate_cov('sample')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_kkk))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_kkk), atol=0.8*tol_factor)
print('marked:')
cov = kkkp.estimate_cov('marked_bootstrap')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_kkk))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_kkk), atol=0.8*tol_factor)
print('bootstrap:')
cov = kkkp.estimate_cov('bootstrap')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_kkk))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_kkk), atol=0.3*tol_factor)
# Repeat this test with different combinations of patch with non-patch catalogs:
# All the methods work best when the patches are used for all 3 catalogs. But there
# are probably cases where this kind of cross correlation with only some catalogs having
# patches could be desired. So this mostly just checks that the code runs properly.
# Patch on 1 only:
print('with patches on 1 only:')
kkkp.process(catp, cat)
print(kkkp.zeta.ravel())
np.testing.assert_allclose(kkkp.zeta, kkk.zeta, rtol=0.1 * tol_factor, atol=1e-3 * tol_factor)
print('jackknife:')
cov = kkkp.estimate_cov('jackknife')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_kkk))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_kkk), atol=0.8*tol_factor)
print('sample:')
cov = kkkp.estimate_cov('sample')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_kkk))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_kkk), atol=0.8*tol_factor)
print('marked:')
cov = kkkp.estimate_cov('marked_bootstrap')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_kkk))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_kkk), atol=0.8*tol_factor)
print('bootstrap:')
cov = kkkp.estimate_cov('bootstrap')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_kkk))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_kkk), atol=0.8*tol_factor)
# Patch on 2 only:
print('with patches on 2 only:')
kkkp.process(cat, catp, cat)
print(kkkp.zeta.ravel())
np.testing.assert_allclose(kkkp.zeta, kkk.zeta, rtol=0.1 * tol_factor, atol=1e-3 * tol_factor)
print('jackknife:')
cov = kkkp.estimate_cov('jackknife')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_kkk))))
np.testing.assert_allclose(np.diagonal(cov), var_kkk, rtol=0.9 * tol_factor)
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_kkk), atol=0.8*tol_factor)
print('sample:')
cov = kkkp.estimate_cov('sample')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_kkk))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_kkk), atol=0.8*tol_factor)
print('marked:')
cov = kkkp.estimate_cov('marked_bootstrap')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_kkk))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_kkk), atol=0.8*tol_factor)
print('bootstrap:')
cov = kkkp.estimate_cov('bootstrap')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_kkk))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_kkk), atol=0.8*tol_factor)
# Patch on 3 only:
print('with patches on 3 only:')
kkkp.process(cat, cat, catp)
print(kkkp.zeta.ravel())
np.testing.assert_allclose(kkkp.zeta, kkk.zeta, rtol=0.1 * tol_factor, atol=1e-3 * tol_factor)
print('jackknife:')
cov = kkkp.estimate_cov('jackknife')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_kkk))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_kkk), atol=0.8*tol_factor)
print('sample:')
cov = kkkp.estimate_cov('sample')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_kkk))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_kkk), atol=0.8*tol_factor)
print('marked:')
cov = kkkp.estimate_cov('marked_bootstrap')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_kkk))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_kkk), atol=0.8*tol_factor)
print('bootstrap:')
cov = kkkp.estimate_cov('bootstrap')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_kkk))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_kkk), atol=0.8*tol_factor)
# Patch on 1,2
print('with patches on 1,2:')
kkkp.process(catp, catp, cat)
print(kkkp.zeta.ravel())
np.testing.assert_allclose(kkkp.zeta, kkk.zeta, rtol=0.1 * tol_factor, atol=1e-3 * tol_factor)
print('jackknife:')
cov = kkkp.estimate_cov('jackknife')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_kkk))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_kkk), atol=0.3*tol_factor)
print('sample:')
cov = kkkp.estimate_cov('sample')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_kkk))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_kkk), atol=0.8*tol_factor)
print('marked:')
cov = kkkp.estimate_cov('marked_bootstrap')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_kkk))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_kkk), atol=0.8*tol_factor)
print('bootstrap:')
cov = kkkp.estimate_cov('bootstrap')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_kkk))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_kkk), atol=0.4*tol_factor)
# Patch on 2,3
print('with patches on 2,3:')
kkkp.process(cat, catp)
print(kkkp.zeta.ravel())
np.testing.assert_allclose(kkkp.zeta, kkk.zeta, rtol=0.1 * tol_factor, atol=1e-3 * tol_factor)
print('jackknife:')
cov = kkkp.estimate_cov('jackknife')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_kkk))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_kkk), atol=0.3*tol_factor)
print('sample:')
cov = kkkp.estimate_cov('sample')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_kkk))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_kkk), atol=0.7*tol_factor)
print('marked:')
cov = kkkp.estimate_cov('marked_bootstrap')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_kkk))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_kkk), atol=0.8*tol_factor)
print('bootstrap:')
cov = kkkp.estimate_cov('bootstrap')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_kkk))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_kkk), atol=0.3*tol_factor)
# Patch on 1,3
print('with patches on 1,3:')
kkkp.process(catp, cat, catp)
print(kkkp.zeta.ravel())
np.testing.assert_allclose(kkkp.zeta, kkk.zeta, rtol=0.1 * tol_factor, atol=1e-3 * tol_factor)
print('jackknife:')
cov = kkkp.estimate_cov('jackknife')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_kkk))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_kkk), atol=0.3*tol_factor)
print('sample:')
cov = kkkp.estimate_cov('sample')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_kkk))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_kkk), atol=0.8*tol_factor)
print('marked:')
cov = kkkp.estimate_cov('marked_bootstrap')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_kkk))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_kkk), atol=0.8*tol_factor)
print('bootstrap:')
cov = kkkp.estimate_cov('bootstrap')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_kkk))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_kkk), atol=0.3*tol_factor)
# Finally a set (with all patches) using the KKKCrossCorrelation class.
kkkc = treecorr.KKKCrossCorrelation(nbins=3, min_sep=30., max_sep=100.,
min_u=0.9, max_u=1.0, nubins=1,
min_v=0.0, max_v=0.1, nvbins=1, rng=rng)
print('CrossCorrelation:')
kkkc.process(catp, catp, catp)
for k1 in kkkc._all:
print(k1.ntri.ravel())
print(k1.zeta.ravel())
print(k1.varzeta.ravel())
np.testing.assert_allclose(k1.ntri, kkk.ntri, rtol=0.05 * tol_factor)
np.testing.assert_allclose(k1.zeta, kkk.zeta, rtol=0.1 * tol_factor, atol=1e-3 * tol_factor)
np.testing.assert_allclose(k1.varzeta, kkk.varzeta, rtol=0.05 * tol_factor, atol=3.e-6)
print('jackknife:')
cov = kkkc.estimate_cov('jackknife')
print(np.diagonal(cov))
for i in range(6):
v = np.diagonal(cov)[i*6:(i+1)*6]
print('max log(ratio) = ',np.max(np.abs(np.log(v)-np.log(var_kkk))))
np.testing.assert_allclose(np.log(v), np.log(var_kkk), atol=0.5*tol_factor)
print('sample:')
cov = kkkc.estimate_cov('sample')
print(np.diagonal(cov))
for i in range(6):
v = np.diagonal(cov)[i*6:(i+1)*6]
print('max log(ratio) = ',np.max(np.abs(np.log(v)-np.log(var_kkk))))
np.testing.assert_allclose(np.log(v), np.log(var_kkk), atol=0.8*tol_factor)
print('marked:')
cov = kkkc.estimate_cov('marked_bootstrap')
print(np.diagonal(cov))
for i in range(6):
v = np.diagonal(cov)[i*6:(i+1)*6]
print('max log(ratio) = ',np.max(np.abs(np.log(v)-np.log(var_kkk))))
np.testing.assert_allclose(np.log(v), np.log(var_kkk), atol=0.8*tol_factor)
print('bootstrap:')
cov = kkkc.estimate_cov('bootstrap')
print(np.diagonal(cov))
for i in range(6):
v = np.diagonal(cov)[i*6:(i+1)*6]
print('max log(ratio) = ',np.max(np.abs(np.log(v)-np.log(var_kkk))))
np.testing.assert_allclose(np.log(v), np.log(var_kkk), atol=0.5*tol_factor)
# All catalogs need to have the same number of patches
catq = treecorr.Catalog(x=x, y=y, k=k, npatch=2*npatch)
with assert_raises(RuntimeError):
kkkp.process(catp, catq)
with assert_raises(RuntimeError):
kkkp.process(catp, catq, catq)
with assert_raises(RuntimeError):
kkkp.process(catq, catp, catq)
with assert_raises(RuntimeError):
kkkp.process(catq, catq, catp)
@timer
def test_ggg_jk():
# Test jackknife and other covariance estimates for ggg correlations.
if __name__ == '__main__':
# This setup takes about 590 sec to run.
nhalo = 5000
nsource = 5000
npatch = 32
tol_factor = 1
elif False:
# This setup takes about 160 sec to run.
nhalo = 2000
nsource = 2000
npatch = 16
tol_factor = 2
elif False:
# This setup takes about 50 sec to run.
nhalo = 1000
nsource = 1000
npatch = 16
tol_factor = 3
else:
# This setup takes about 13 sec to run.
nhalo = 500
nsource = 500
npatch = 8
tol_factor = 3
# I couldn't figure out a way to get reasonable S/N in the shear field. I thought doing
# discrete halos would give some significant 3pt shear pattern, at least for equilateral
# triangles, but the signal here is still consistent with zero. :(
    # The point is the variance, which is still calculated ok, but I would rather
    # have had something with S/N > 0.
# For these tests, I set up the binning to just accumulate all roughly equilateral triangles
# in a small separation range. The binning always uses two bins for each to get + and - v
# bins. So this function averages these two values to produce 1 value for each gamma.
f = lambda g: np.array([np.mean(g.gam0), np.mean(g.gam1), np.mean(g.gam2), np.mean(g.gam3)])
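    # f reduces a GGGCorrelation to a length-4 vector (one averaged value per
    # natural component gam0..gam3), so the covariance matrices estimated below
    # are 4x4 and their diagonals compare directly against var_ggg.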
file_name = 'data/test_ggg_jk_{}.npz'.format(nsource)
print(file_name)
if not os.path.isfile(file_name):
nruns = 1000
all_gggs = []
rng1 = np.random.RandomState()
for run in range(nruns):
x, y, g1, g2, _ = generate_shear_field(nsource, nhalo, rng1)
# For some reason std(g2) is coming out about 1.5x larger than std(g1).
# Probably a sign of some error in the generate function, but I don't see it.
# For this purpose I think it doesn't really matter, but it's a bit odd.
print(run,': ',np.mean(g1),np.std(g1),np.mean(g2),np.std(g2))
cat = treecorr.Catalog(x=x, y=y, g1=g1, g2=g2)
ggg = treecorr.GGGCorrelation(nbins=1, min_sep=20., max_sep=40.,
min_u=0.6, max_u=1.0, nubins=1,
min_v=0.0, max_v=0.6, nvbins=1)
ggg.process(cat)
print(ggg.ntri.ravel())
print(f(ggg))
all_gggs.append(ggg)
all_ggg = np.array([f(ggg) for ggg in all_gggs])
mean_ggg = np.mean(all_ggg, axis=0)
var_ggg = np.var(all_ggg, axis=0)
np.savez(file_name, mean_ggg=mean_ggg, var_ggg=var_ggg)
data = np.load(file_name)
mean_ggg = data['mean_ggg']
var_ggg = data['var_ggg']
print('mean = ',mean_ggg)
print('var = ',var_ggg)
rng = np.random.RandomState(12345)
x, y, g1, g2, _ = generate_shear_field(nsource, nhalo, rng)
cat = treecorr.Catalog(x=x, y=y, g1=g1, g2=g2)
ggg = treecorr.GGGCorrelation(nbins=1, min_sep=20., max_sep=40.,
min_u=0.6, max_u=1.0, nubins=1,
min_v=0.0, max_v=0.6, nvbins=1, rng=rng)
ggg.process(cat)
print(ggg.ntri.ravel())
print(ggg.gam0.ravel())
print(ggg.gam1.ravel())
print(ggg.gam2.ravel())
print(ggg.gam3.ravel())
gggp = ggg.copy()
catp = treecorr.Catalog(x=x, y=y, g1=g1, g2=g2, npatch=npatch)
# Do the same thing with patches.
gggp.process(catp)
print('with patches:')
print(gggp.ntri.ravel())
print(gggp.vargam0.ravel())
print(gggp.vargam1.ravel())
print(gggp.vargam2.ravel())
print(gggp.vargam3.ravel())
print(gggp.gam0.ravel())
print(gggp.gam1.ravel())
print(gggp.gam2.ravel())
print(gggp.gam3.ravel())
np.testing.assert_allclose(gggp.ntri, ggg.ntri, rtol=0.05 * tol_factor)
np.testing.assert_allclose(gggp.gam0, ggg.gam0, rtol=0.3 * tol_factor, atol=0.3 * tol_factor)
np.testing.assert_allclose(gggp.gam1, ggg.gam1, rtol=0.3 * tol_factor, atol=0.3 * tol_factor)
np.testing.assert_allclose(gggp.gam2, ggg.gam2, rtol=0.3 * tol_factor, atol=0.3 * tol_factor)
np.testing.assert_allclose(gggp.gam3, ggg.gam3, rtol=0.3 * tol_factor, atol=0.3 * tol_factor)
np.testing.assert_allclose(gggp.vargam0, ggg.vargam0, rtol=0.1 * tol_factor)
np.testing.assert_allclose(gggp.vargam1, ggg.vargam1, rtol=0.1 * tol_factor)
np.testing.assert_allclose(gggp.vargam2, ggg.vargam2, rtol=0.1 * tol_factor)
np.testing.assert_allclose(gggp.vargam3, ggg.vargam3, rtol=0.1 * tol_factor)
print('jackknife:')
cov = gggp.estimate_cov('jackknife', func=f)
print(np.diagonal(cov).real)
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_ggg))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_ggg), atol=0.4*tol_factor)
print('sample:')
cov = gggp.estimate_cov('sample', func=f)
print(np.diagonal(cov).real)
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_ggg))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_ggg), atol=0.8*tol_factor)
print('marked:')
cov = gggp.estimate_cov('marked_bootstrap', func=f)
print(np.diagonal(cov).real)
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_ggg))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_ggg), atol=0.9*tol_factor)
print('bootstrap:')
cov = gggp.estimate_cov('bootstrap', func=f)
print(np.diagonal(cov).real)
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_ggg))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_ggg), atol=0.3*tol_factor)
# Now as a cross correlation with all 3 using the same patch catalog.
print('with 3 patched catalogs:')
gggp.process(catp, catp, catp)
print(gggp.gam0.ravel())
np.testing.assert_allclose(gggp.gam0, ggg.gam0, rtol=0.3 * tol_factor, atol=0.3 * tol_factor)
np.testing.assert_allclose(gggp.gam1, ggg.gam1, rtol=0.3 * tol_factor, atol=0.3 * tol_factor)
np.testing.assert_allclose(gggp.gam2, ggg.gam2, rtol=0.3 * tol_factor, atol=0.3 * tol_factor)
np.testing.assert_allclose(gggp.gam3, ggg.gam3, rtol=0.3 * tol_factor, atol=0.3 * tol_factor)
print('jackknife:')
cov = gggp.estimate_cov('jackknife', func=f)
print(np.diagonal(cov).real)
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_ggg))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_ggg), atol=0.4*tol_factor)
print('sample:')
cov = gggp.estimate_cov('sample', func=f)
print(np.diagonal(cov).real)
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_ggg))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_ggg), atol=0.6*tol_factor)
print('marked:')
cov = gggp.estimate_cov('marked_bootstrap', func=f)
print(np.diagonal(cov).real)
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_ggg))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_ggg), atol=0.8*tol_factor)
print('bootstrap:')
cov = gggp.estimate_cov('bootstrap', func=f)
print(np.diagonal(cov).real)
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_ggg))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_ggg), atol=0.4*tol_factor)
# The separate patch/non-patch combinations aren't that interesting, so skip them
# for GGG unless running from main.
if __name__ == '__main__':
# Patch on 1 only:
print('with patches on 1 only:')
gggp.process(catp, cat)
print('jackknife:')
cov = gggp.estimate_cov('jackknife', func=f)
print(np.diagonal(cov).real)
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_ggg))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_ggg), atol=0.8*tol_factor)
print('sample:')
cov = gggp.estimate_cov('sample', func=f)
print(np.diagonal(cov).real)
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_ggg))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_ggg), atol=0.7*tol_factor)
print('marked:')
cov = gggp.estimate_cov('marked_bootstrap', func=f)
print(np.diagonal(cov).real)
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_ggg))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_ggg), atol=0.8*tol_factor)
print('bootstrap:')
cov = gggp.estimate_cov('bootstrap', func=f)
print(np.diagonal(cov).real)
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_ggg))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_ggg), atol=0.8*tol_factor)
# Patch on 2 only:
print('with patches on 2 only:')
gggp.process(cat, catp, cat)
print('jackknife:')
cov = gggp.estimate_cov('jackknife', func=f)
print(np.diagonal(cov).real)
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_ggg))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_ggg), atol=0.8*tol_factor)
print('sample:')
cov = gggp.estimate_cov('sample', func=f)
print(np.diagonal(cov).real)
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_ggg))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_ggg), atol=0.7*tol_factor)
print('marked:')
cov = gggp.estimate_cov('marked_bootstrap', func=f)
print(np.diagonal(cov).real)
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_ggg))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_ggg), atol=0.8*tol_factor)
print('bootstrap:')
cov = gggp.estimate_cov('bootstrap', func=f)
print(np.diagonal(cov).real)
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_ggg))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_ggg), atol=0.8*tol_factor)
# Patch on 3 only:
print('with patches on 3 only:')
gggp.process(cat, cat, catp)
print('jackknife:')
cov = gggp.estimate_cov('jackknife', func=f)
print(np.diagonal(cov).real)
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_ggg))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_ggg), atol=0.8*tol_factor)
print('sample:')
cov = gggp.estimate_cov('sample', func=f)
print(np.diagonal(cov).real)
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_ggg))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_ggg), atol=0.7*tol_factor)
print('marked:')
cov = gggp.estimate_cov('marked_bootstrap', func=f)
print(np.diagonal(cov).real)
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_ggg))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_ggg), atol=0.8*tol_factor)
print('bootstrap:')
cov = gggp.estimate_cov('bootstrap', func=f)
print(np.diagonal(cov).real)
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_ggg))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_ggg), atol=0.9*tol_factor)
# Patch on 1,2
print('with patches on 1,2:')
gggp.process(catp, catp, cat)
print('jackknife:')
cov = gggp.estimate_cov('jackknife', func=f)
print(np.diagonal(cov).real)
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_ggg))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_ggg), atol=0.6*tol_factor)
print('sample:')
cov = gggp.estimate_cov('sample', func=f)
print(np.diagonal(cov).real)
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_ggg))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_ggg), atol=0.6*tol_factor)
print('marked:')
cov = gggp.estimate_cov('marked_bootstrap', func=f)
print(np.diagonal(cov).real)
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_ggg))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_ggg), atol=0.8*tol_factor)
print('bootstrap:')
cov = gggp.estimate_cov('bootstrap', func=f)
print(np.diagonal(cov).real)
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_ggg))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_ggg), atol=0.5*tol_factor)
# Patch on 2,3
print('with patches on 2,3:')
gggp.process(cat, catp)
print('jackknife:')
cov = gggp.estimate_cov('jackknife', func=f)
print(np.diagonal(cov).real)
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_ggg))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_ggg), atol=0.6*tol_factor)
print('sample:')
cov = gggp.estimate_cov('sample', func=f)
print(np.diagonal(cov).real)
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_ggg))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_ggg), atol=0.8*tol_factor)
print('marked:')
cov = gggp.estimate_cov('marked_bootstrap', func=f)
print(np.diagonal(cov).real)
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_ggg))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_ggg), atol=1.0*tol_factor)
print('bootstrap:')
cov = gggp.estimate_cov('bootstrap', func=f)
print(np.diagonal(cov).real)
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_ggg))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_ggg), atol=0.3*tol_factor)
# Patch on 1,3
print('with patches on 1,3:')
gggp.process(catp, cat, catp)
print('jackknife:')
cov = gggp.estimate_cov('jackknife', func=f)
print(np.diagonal(cov).real)
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_ggg))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_ggg), atol=0.6*tol_factor)
print('sample:')
cov = gggp.estimate_cov('sample', func=f)
print(np.diagonal(cov).real)
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_ggg))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_ggg), atol=0.6*tol_factor)
print('marked:')
cov = gggp.estimate_cov('marked_bootstrap', func=f)
print(np.diagonal(cov).real)
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_ggg))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_ggg), atol=0.7*tol_factor)
print('bootstrap:')
cov = gggp.estimate_cov('bootstrap', func=f)
print(np.diagonal(cov).real)
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_ggg))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_ggg), atol=0.5*tol_factor)
# Finally a set (with all patches) using the GGGCrossCorrelation class.
gggc = treecorr.GGGCrossCorrelation(nbins=1, min_sep=20., max_sep=40.,
min_u=0.6, max_u=1.0, nubins=1,
min_v=0.0, max_v=0.6, nvbins=1, rng=rng)
print('CrossCorrelation:')
gggc.process(catp, catp, catp)
for g in gggc._all:
print(g.ntri.ravel())
print(g.gam0.ravel())
print(g.vargam0.ravel())
np.testing.assert_allclose(g.ntri, ggg.ntri, rtol=0.05 * tol_factor)
np.testing.assert_allclose(g.gam0, ggg.gam0, rtol=0.3 * tol_factor, atol=0.3 * tol_factor)
np.testing.assert_allclose(g.vargam0, ggg.vargam0, rtol=0.05 * tol_factor)
np.testing.assert_allclose(g.gam1, ggg.gam1, rtol=0.3 * tol_factor, atol=0.3 * tol_factor)
np.testing.assert_allclose(g.vargam1, ggg.vargam1, rtol=0.05 * tol_factor)
np.testing.assert_allclose(g.gam2, ggg.gam2, rtol=0.3 * tol_factor, atol=0.3 * tol_factor)
np.testing.assert_allclose(g.vargam2, ggg.vargam2, rtol=0.05 * tol_factor)
np.testing.assert_allclose(g.gam3, ggg.gam3, rtol=0.3 * tol_factor, atol=0.3 * tol_factor)
np.testing.assert_allclose(g.vargam3, ggg.vargam3, rtol=0.05 * tol_factor)
fc = lambda gggc: np.concatenate([
[np.mean(g.gam0), np.mean(g.gam1), np.mean(g.gam2), np.mean(g.gam3)]
for g in gggc._all])
print('jackknife:')
cov = gggc.estimate_cov('jackknife', func=fc)
print(np.diagonal(cov).real)
for i in range(6):
v = np.diagonal(cov)[i*4:(i+1)*4]
print('max log(ratio) = ',np.max(np.abs(np.log(v)-np.log(var_ggg))))
np.testing.assert_allclose(np.log(v), np.log(var_ggg), atol=0.4*tol_factor)
print('sample:')
cov = gggc.estimate_cov('sample', func=fc)
print(np.diagonal(cov).real)
for i in range(6):
v = np.diagonal(cov)[i*4:(i+1)*4]
print('max log(ratio) = ',np.max(np.abs(np.log(v)-np.log(var_ggg))))
np.testing.assert_allclose(np.log(v), np.log(var_ggg), atol=0.6*tol_factor)
print('marked:')
cov = gggc.estimate_cov('marked_bootstrap', func=fc)
print(np.diagonal(cov).real)
for i in range(6):
v = np.diagonal(cov)[i*4:(i+1)*4]
print('max log(ratio) = ',np.max(np.abs(np.log(v)-np.log(var_ggg))))
np.testing.assert_allclose(np.log(v), np.log(var_ggg), atol=0.8*tol_factor)
print('bootstrap:')
cov = gggc.estimate_cov('bootstrap', func=fc)
print(np.diagonal(cov).real)
for i in range(6):
v = np.diagonal(cov)[i*4:(i+1)*4]
print('max log(ratio) = ',np.max(np.abs(np.log(v)-np.log(var_ggg))))
np.testing.assert_allclose(np.log(v), np.log(var_ggg), atol=0.3*tol_factor)
# Without func, don't check the accuracy, but make sure it returns something the right shape.
cov = gggc.estimate_cov('jackknife')
assert cov.shape == (48, 48)
@timer
def test_nnn_jk():
# Test jackknife and other covariance estimates for nnn correlations.
if __name__ == '__main__':
# This setup takes about 1200 sec to run.
nhalo = 300
nsource = 2000
npatch = 16
source_factor = 50
rand_factor = 3
tol_factor = 1
elif False:
# This setup takes about 250 sec to run.
nhalo = 200
nsource = 1000
npatch = 16
source_factor = 50
rand_factor = 2
tol_factor = 2
else:
# This setup takes about 44 sec to run.
nhalo = 100
nsource = 500
npatch = 8
source_factor = 30
rand_factor = 1
tol_factor = 3
file_name = 'data/test_nnn_jk_{}.npz'.format(nsource)
print(file_name)
if not os.path.isfile(file_name):
rng = np.random.RandomState()
nruns = 1000
all_nnns = []
all_nnnc = []
t0 = time.time()
for run in range(nruns):
t2 = time.time()
x, y, _, _, k = generate_shear_field(nsource * source_factor, nhalo, rng)
p = k**3
p /= np.sum(p)
ns = rng.poisson(nsource)
select = rng.choice(range(len(x)), size=ns, replace=False, p=p)
print(run,': ',np.mean(k),np.std(k),np.min(k),np.max(k))
cat = treecorr.Catalog(x=x[select], y=y[select])
ddd = treecorr.NNNCorrelation(nbins=3, min_sep=50., max_sep=100., bin_slop=0.2,
min_u=0.8, max_u=1.0, nubins=1,
min_v=0.0, max_v=0.2, nvbins=1)
rx = rng.uniform(0,1000, rand_factor*nsource)
ry = rng.uniform(0,1000, rand_factor*nsource)
rand_cat = treecorr.Catalog(x=rx, y=ry)
rrr = treecorr.NNNCorrelation(nbins=3, min_sep=50., max_sep=100., bin_slop=0.2,
min_u=0.8, max_u=1.0, nubins=1,
min_v=0.0, max_v=0.2, nvbins=1)
rrr.process(rand_cat)
rdd = ddd.copy()
drr = ddd.copy()
ddd.process(cat)
rdd.process(rand_cat, cat)
drr.process(cat, rand_cat)
zeta_s, _ = ddd.calculateZeta(rrr)
zeta_c, _ = ddd.calculateZeta(rrr, drr, rdd)
print('simple: ',zeta_s.ravel())
print('compensated: ',zeta_c.ravel())
all_nnns.append(zeta_s.ravel())
all_nnnc.append(zeta_c.ravel())
t3 = time.time()
print('time: ',round(t3-t2),round((t3-t0)/60),round((t3-t0)*(nruns/(run+1)-1)/60))
mean_nnns = np.mean(all_nnns, axis=0)
var_nnns = np.var(all_nnns, axis=0)
mean_nnnc = np.mean(all_nnnc, axis=0)
var_nnnc = np.var(all_nnnc, axis=0)
np.savez(file_name, mean_nnns=mean_nnns, var_nnns=var_nnns,
mean_nnnc=mean_nnnc, var_nnnc=var_nnnc)
data = np.load(file_name)
mean_nnns = data['mean_nnns']
var_nnns = data['var_nnns']
mean_nnnc = data['mean_nnnc']
var_nnnc = data['var_nnnc']
print('mean simple = ',mean_nnns)
print('var simple = ',var_nnns)
print('mean compensated = ',mean_nnnc)
print('var compensated = ',var_nnnc)
    # Make a random catalog with rand_factor times as many sources, randomly distributed.
rng = np.random.RandomState(1234)
rx = rng.uniform(0,1000, rand_factor*nsource)
ry = rng.uniform(0,1000, rand_factor*nsource)
rand_cat = treecorr.Catalog(x=rx, y=ry)
rrr = treecorr.NNNCorrelation(nbins=3, min_sep=50., max_sep=100., bin_slop=0.2,
min_u=0.8, max_u=1.0, nubins=1,
min_v=0.0, max_v=0.2, nvbins=1)
t0 = time.time()
rrr.process(rand_cat)
t1 = time.time()
print('Time to process rand cat = ',t1-t0)
print('RRR:',rrr.tot)
print(rrr.ntri.ravel())
# Make the data catalog
x, y, _, _, k = generate_shear_field(nsource * source_factor, nhalo, rng=rng)
print('mean k = ',np.mean(k))
print('min,max = ',np.min(k),np.max(k))
p = k**3
p /= np.sum(p)
select = rng.choice(range(len(x)), size=nsource, replace=False, p=p)
cat = treecorr.Catalog(x=x[select], y=y[select])
ddd = treecorr.NNNCorrelation(nbins=3, min_sep=50., max_sep=100., bin_slop=0.2,
min_u=0.8, max_u=1.0, nubins=1,
min_v=0.0, max_v=0.2, nvbins=1, rng=rng)
rdd = ddd.copy()
drr = ddd.copy()
ddd.process(cat)
rdd.process(rand_cat, cat)
drr.process(cat, rand_cat)
zeta_s1, var_zeta_s1 = ddd.calculateZeta(rrr)
zeta_c1, var_zeta_c1 = ddd.calculateZeta(rrr, drr, rdd)
print('DDD:',ddd.tot)
print(ddd.ntri.ravel())
print('simple: ')
print(zeta_s1.ravel())
print(var_zeta_s1.ravel())
print('DRR:',drr.tot)
print(drr.ntri.ravel())
print('RDD:',rdd.tot)
print(rdd.ntri.ravel())
print('compensated: ')
print(zeta_c1.ravel())
print(var_zeta_c1.ravel())
# Make the patches with a large random catalog to make sure the patches are uniform area.
big_rx = rng.uniform(0,1000, 100*nsource)
big_ry = rng.uniform(0,1000, 100*nsource)
big_catp = treecorr.Catalog(x=big_rx, y=big_ry, npatch=npatch, rng=rng)
patch_centers = big_catp.patch_centers
# Do the same thing with patches on D, but not yet on R.
dddp = treecorr.NNNCorrelation(nbins=3, min_sep=50., max_sep=100., bin_slop=0.2,
min_u=0.8, max_u=1.0, nubins=1,
min_v=0.0, max_v=0.2, nvbins=1, rng=rng)
rddp = dddp.copy()
drrp = dddp.copy()
catp = treecorr.Catalog(x=x[select], y=y[select], patch_centers=patch_centers)
print('Patch\tNtot')
for p in catp.patches:
print(p.patch,'\t',p.ntot,'\t',patch_centers[p.patch])
print('with patches on D:')
dddp.process(catp)
rddp.process(rand_cat, catp)
drrp.process(catp, rand_cat)
# Need to run calculateZeta to get patch-based covariance
with assert_raises(RuntimeError):
dddp.estimate_cov('jackknife')
zeta_s2, var_zeta_s2 = dddp.calculateZeta(rrr)
print('DDD:',dddp.tot)
print(dddp.ntri.ravel())
print('simple: ')
print(zeta_s2.ravel())
print(var_zeta_s2.ravel())
np.testing.assert_allclose(zeta_s2, zeta_s1, rtol=0.05 * tol_factor)
np.testing.assert_allclose(var_zeta_s2, var_zeta_s1, rtol=0.05 * tol_factor)
# Check the _calculate_xi_from_pairs function. Using all pairs, should get total xi.
ddd1 = dddp.copy()
ddd1._calculate_xi_from_pairs(dddp.results.keys())
np.testing.assert_allclose(ddd1.zeta, dddp.zeta)
    # None of these are very good without the random catalog using patches.
# I think this is basically just that the approximations used for estimating the area_frac
# to figure out the appropriate altered RRR counts isn't accurate enough when the total
# counts are as low as this. I think (hope) that it should be semi-ok when N is much larger,
# but this is probably saying that for 3pt using patches for R is even more important than
# for 2pt.
# Ofc, it could also be that this is telling me I still have a bug somewhere that I haven't
# managed to find... :(
print('jackknife:')
cov = dddp.estimate_cov('jackknife')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_nnns))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_nnns), atol=2.3*tol_factor)
print('sample:')
cov = dddp.estimate_cov('sample')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_nnns))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_nnns), atol=1.2*tol_factor)
print('marked:')
cov = dddp.estimate_cov('marked_bootstrap')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_nnns))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_nnns), atol=1.3*tol_factor)
print('bootstrap:')
cov = dddp.estimate_cov('bootstrap')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_nnns))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_nnns), atol=2.2*tol_factor)
zeta_c2, var_zeta_c2 = dddp.calculateZeta(rrr, drrp, rddp)
print('compensated: ')
print('DRR:',drrp.tot)
print(drrp.ntri.ravel())
print('RDD:',rddp.tot)
print(rddp.ntri.ravel())
print(zeta_c2.ravel())
print(var_zeta_c2.ravel())
np.testing.assert_allclose(zeta_c2, zeta_c1, rtol=0.05 * tol_factor, atol=1.e-3 * tol_factor)
np.testing.assert_allclose(var_zeta_c2, var_zeta_c1, rtol=0.05 * tol_factor)
print('jackknife:')
cov = dddp.estimate_cov('jackknife')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_nnnc))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_nnnc), atol=2.6*tol_factor)
print('sample:')
cov = dddp.estimate_cov('sample')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_nnnc))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_nnnc), atol=3.8*tol_factor)
print('marked:')
cov = dddp.estimate_cov('marked_bootstrap')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_nnnc))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_nnnc), atol=2.3*tol_factor)
print('bootstrap:')
cov = dddp.estimate_cov('bootstrap')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_nnnc))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_nnnc), atol=2.6*tol_factor)
# Now with the random also using patches
# These are a lot better than the above tests. But still not nearly as good as we were able
# to get in 2pt. I'm pretty sure this is just due to the fact that we need to have much
# smaller catalogs to make it feasible to run this in a reasonable amount of time. I don't
# think this is a sign of any bug in the code.
print('with patched random catalog:')
rand_catp = treecorr.Catalog(x=rx, y=ry, patch_centers=patch_centers)
rrrp = rrr.copy()
rrrp.process(rand_catp)
drrp.process(catp, rand_catp)
rddp.process(rand_catp, catp)
print('simple: ')
zeta_s2, var_zeta_s2 = dddp.calculateZeta(rrrp)
print('DDD:',dddp.tot)
print(dddp.ntri.ravel())
print(zeta_s2.ravel())
print(var_zeta_s2.ravel())
np.testing.assert_allclose(zeta_s2, zeta_s1, rtol=0.05 * tol_factor)
np.testing.assert_allclose(var_zeta_s2, var_zeta_s1, rtol=0.05 * tol_factor)
ddd1 = dddp.copy()
ddd1._calculate_xi_from_pairs(dddp.results.keys())
np.testing.assert_allclose(ddd1.zeta, dddp.zeta)
t0 = time.time()
print('jackknife:')
cov = dddp.estimate_cov('jackknife')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_nnns))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_nnns), atol=0.9*tol_factor)
t1 = time.time()
print('t = ',t1-t0)
t0 = time.time()
print('sample:')
cov = dddp.estimate_cov('sample')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_nnns))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_nnns), atol=0.7*tol_factor)
t1 = time.time()
print('t = ',t1-t0)
t0 = time.time()
print('marked:')
cov = dddp.estimate_cov('marked_bootstrap')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_nnns))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_nnns), atol=0.8*tol_factor)
t1 = time.time()
print('t = ',t1-t0)
t0 = time.time()
print('bootstrap:')
cov = dddp.estimate_cov('bootstrap')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_nnns))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_nnns), atol=1.0*tol_factor)
t1 = time.time()
print('t = ',t1-t0)
t0 = time.time()
print('compensated: ')
zeta_c2, var_zeta_c2 = dddp.calculateZeta(rrrp, drrp, rddp)
print('DRR:',drrp.tot)
print(drrp.ntri.ravel())
print('RDD:',rddp.tot)
print(rddp.ntri.ravel())
print(zeta_c2.ravel())
print(var_zeta_c2.ravel())
np.testing.assert_allclose(zeta_c2, zeta_c1, rtol=0.05 * tol_factor, atol=1.e-3 * tol_factor)
np.testing.assert_allclose(var_zeta_c2, var_zeta_c1, rtol=0.05 * tol_factor)
t0 = time.time()
print('jackknife:')
cov = dddp.estimate_cov('jackknife')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_nnnc))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_nnnc), atol=0.8*tol_factor)
t1 = time.time()
print('t = ',t1-t0)
t0 = time.time()
print('sample:')
cov = dddp.estimate_cov('sample')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_nnnc))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_nnnc), atol=0.8*tol_factor)
t1 = time.time()
print('t = ',t1-t0)
t0 = time.time()
print('marked:')
cov = dddp.estimate_cov('marked_bootstrap')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_nnnc))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_nnnc), atol=0.8*tol_factor)
t1 = time.time()
print('t = ',t1-t0)
t0 = time.time()
print('bootstrap:')
cov = dddp.estimate_cov('bootstrap')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_nnnc))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_nnnc), atol=0.8*tol_factor)
t1 = time.time()
print('t = ',t1-t0)
t0 = time.time()
# I haven't implemented calculateZeta for the NNNCrossCorrelation class, because I'm not
# actually sure what the right thing to do here is for calculating a single zeta vector.
# Do we do a different one for each of the 6 permutations? Or one overall one?
# So rather than just do something, I'll wait until someone has a coherent use case where
# they want this and can explain exactly what the right thing to compute is.
# So to just exercise the machinery with NNNCrossCorrelation, I'm using a func parameter
# to compute something equivalent to the simple zeta calculation.
dddc = treecorr.NNNCrossCorrelation(nbins=3, min_sep=50., max_sep=100., bin_slop=0.2,
min_u=0.8, max_u=1.0, nubins=1,
min_v=0.0, max_v=0.2, nvbins=1, rng=rng)
rrrc = treecorr.NNNCrossCorrelation(nbins=3, min_sep=50., max_sep=100., bin_slop=0.2,
min_u=0.8, max_u=1.0, nubins=1,
min_v=0.0, max_v=0.2, nvbins=1)
print('CrossCorrelation:')
dddc.process(catp, catp, catp)
rrrc.process(rand_catp, rand_catp, rand_catp)
def cc_zeta(corrs):
d, r = corrs
d1 = d.n1n2n3.copy()
d1._sum(d._all)
r1 = r.n1n2n3.copy()
r1._sum(r._all)
zeta, _ = d1.calculateZeta(r1)
return zeta.ravel()
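# (Comment added for clarity) In cc_zeta above, d._all is the list of the six permutation
# sub-correlations of the NNNCrossCorrelation, and _sum accumulates them into a single
# NNNCorrelation (the copy of n1n2n3) before computing the simple zeta estimate.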
print('simple: ')
zeta_s3 = cc_zeta([dddc, rrrc])
print(zeta_s3)
np.testing.assert_allclose(zeta_s3, zeta_s1.ravel(), rtol=0.05 * tol_factor)
print('jackknife:')
cov = treecorr.estimate_multi_cov([dddc,rrrc], 'jackknife', cc_zeta)
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_nnns))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_nnns), atol=0.9*tol_factor)
print('sample:')
cov = treecorr.estimate_multi_cov([dddc,rrrc], 'sample', cc_zeta)
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_nnns))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_nnns), atol=1.2*tol_factor)
print('marked:')
cov = treecorr.estimate_multi_cov([dddc,rrrc], 'marked_bootstrap', cc_zeta)
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_nnns))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_nnns), atol=1.5*tol_factor)
print('bootstrap:')
cov = treecorr.estimate_multi_cov([dddc,rrrc], 'bootstrap', cc_zeta)
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_nnns))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_nnns), atol=0.6*tol_factor)
# Repeat with a 1-2 cross-correlation
print('CrossCorrelation 1-2:')
dddc.process(catp, catp)
rrrc.process(rand_catp, rand_catp)
print('simple: ')
zeta_s3 = cc_zeta([dddc, rrrc])
print(zeta_s3)
np.testing.assert_allclose(zeta_s3, zeta_s1.ravel(), rtol=0.05 * tol_factor)
print('jackknife:')
cov = treecorr.estimate_multi_cov([dddc,rrrc], 'jackknife', cc_zeta)
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_nnns))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_nnns), atol=0.9*tol_factor)
print('sample:')
cov = treecorr.estimate_multi_cov([dddc,rrrc], 'sample', cc_zeta)
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_nnns))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_nnns), atol=1.1*tol_factor)
print('marked:')
cov = treecorr.estimate_multi_cov([dddc,rrrc], 'marked_bootstrap', cc_zeta)
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_nnns))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_nnns), atol=1.5*tol_factor)
print('bootstrap:')
cov = treecorr.estimate_multi_cov([dddc,rrrc], 'bootstrap', cc_zeta)
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_nnns))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_nnns), atol=0.6*tol_factor)
@timer
def test_brute_jk():
# With bin_slop = 0, the jackknife calculation from patches should match a
# brute force calculation where we literally remove one patch at a time to make
# the vectors.
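# For reference (comment added): with leave-one-out vectors zeta_i and their mean zeta_bar,
# the jackknife covariance being checked is
#     C_jk = (npatch - 1) / npatch * sum_i (zeta_i - zeta_bar) (zeta_i - zeta_bar)^T
# which is exactly np.cov(zeta_list.T, bias=True) * (npatch - 1), the expression used below.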
if __name__ == '__main__':
nhalo = 100
ngal = 500
npatch = 16
rand_factor = 5
else:
nhalo = 100
ngal = 30
npatch = 16
rand_factor = 2
rng = np.random.RandomState(8675309)
x, y, g1, g2, k = generate_shear_field(ngal, nhalo, rng)
rx = rng.uniform(0,1000, rand_factor*ngal)
ry = rng.uniform(0,1000, rand_factor*ngal)
rand_cat_nopatch = treecorr.Catalog(x=rx, y=ry)
rand_cat = treecorr.Catalog(x=rx, y=ry, npatch=npatch, rng=rng)
patch_centers = rand_cat.patch_centers
cat_nopatch = treecorr.Catalog(x=x, y=y, g1=g1, g2=g2, k=k)
cat = treecorr.Catalog(x=x, y=y, g1=g1, g2=g2, k=k, patch_centers=patch_centers)
print('cat patches = ',np.unique(cat.patch))
print('len = ',cat.nobj, cat.ntot)
assert cat.nobj == ngal
print('Patch\tNtot')
for p in cat.patches:
print(p.patch,'\t',p.ntot,'\t',patch_centers[p.patch])
# Start with KKK, since relatively simple.
kkk1 = treecorr.KKKCorrelation(nbins=3, min_sep=100., max_sep=300., brute=True,
min_u=0., max_u=1.0, nubins=1,
min_v=0., max_v=1.0, nvbins=1)
kkk1.process(cat_nopatch)
kkk = treecorr.KKKCorrelation(nbins=3, min_sep=100., max_sep=300., brute=True,
min_u=0., max_u=1.0, nubins=1,
min_v=0., max_v=1.0, nvbins=1,
var_method='jackknife')
kkk.process(cat)
np.testing.assert_allclose(kkk.zeta, kkk1.zeta)
kkk_zeta_list = []
for i in range(npatch):
cat1 = treecorr.Catalog(x=cat.x[cat.patch != i],
y=cat.y[cat.patch != i],
k=cat.k[cat.patch != i],
g1=cat.g1[cat.patch != i],
g2=cat.g2[cat.patch != i])
kkk1 = treecorr.KKKCorrelation(nbins=3, min_sep=100., max_sep=300., brute=True,
min_u=0., max_u=1.0, nubins=1,
min_v=0., max_v=1.0, nvbins=1)
kkk1.process(cat1)
print('zeta = ',kkk1.zeta.ravel())
kkk_zeta_list.append(kkk1.zeta.ravel())
kkk_zeta_list = np.array(kkk_zeta_list)
cov = np.cov(kkk_zeta_list.T, bias=True) * (len(kkk_zeta_list)-1)
varzeta = np.diagonal(cov)
print('KKK: treecorr jackknife varzeta = ',kkk.varzeta.ravel())
print('KKK: direct jackknife varzeta = ',varzeta)
np.testing.assert_allclose(kkk.varzeta.ravel(), varzeta)
# Now GGG
ggg1 = treecorr.GGGCorrelation(nbins=3, min_sep=100., max_sep=300., brute=True,
min_u=0., max_u=1.0, nubins=1,
min_v=0., max_v=1.0, nvbins=1)
ggg1.process(cat_nopatch)
ggg = treecorr.GGGCorrelation(nbins=3, min_sep=100., max_sep=300., brute=True,
min_u=0., max_u=1.0, nubins=1,
min_v=0., max_v=1.0, nvbins=1,
var_method='jackknife')
ggg.process(cat)
np.testing.assert_allclose(ggg.gam0, ggg1.gam0)
np.testing.assert_allclose(ggg.gam1, ggg1.gam1)
np.testing.assert_allclose(ggg.gam2, ggg1.gam2)
np.testing.assert_allclose(ggg.gam3, ggg1.gam3)
ggg_gam0_list = []
ggg_gam1_list = []
ggg_gam2_list = []
ggg_gam3_list = []
ggg_map3_list = []
for i in range(npatch):
cat1 = treecorr.Catalog(x=cat.x[cat.patch != i],
y=cat.y[cat.patch != i],
k=cat.k[cat.patch != i],
g1=cat.g1[cat.patch != i],
g2=cat.g2[cat.patch != i])
ggg1 = treecorr.GGGCorrelation(nbins=3, min_sep=100., max_sep=300., brute=True,
min_u=0., max_u=1.0, nubins=1,
min_v=0., max_v=1.0, nvbins=1)
ggg1.process(cat1)
ggg_gam0_list.append(ggg1.gam0.ravel())
ggg_gam1_list.append(ggg1.gam1.ravel())
ggg_gam2_list.append(ggg1.gam2.ravel())
ggg_gam3_list.append(ggg1.gam3.ravel())
ggg_map3_list.append(ggg1.calculateMap3()[0])
ggg_gam0_list = np.array(ggg_gam0_list)
vargam0 = np.diagonal(np.cov(ggg_gam0_list.T, bias=True)) * (len(ggg_gam0_list)-1)
print('GGG: treecorr jackknife vargam0 = ',ggg.vargam0.ravel())
print('GGG: direct jackknife vargam0 = ',vargam0)
np.testing.assert_allclose(ggg.vargam0.ravel(), vargam0)
ggg_gam1_list = np.array(ggg_gam1_list)
vargam1 = np.diagonal(np.cov(ggg_gam1_list.T, bias=True)) * (len(ggg_gam1_list)-1)
print('GGG: treecorr jackknife vargam1 = ',ggg.vargam1.ravel())
print('GGG: direct jackknife vargam1 = ',vargam1)
np.testing.assert_allclose(ggg.vargam1.ravel(), vargam1)
ggg_gam2_list = np.array(ggg_gam2_list)
vargam2 = np.diagonal(np.cov(ggg_gam2_list.T, bias=True)) * (len(ggg_gam2_list)-1)
print('GGG: treecorr jackknife vargam2 = ',ggg.vargam2.ravel())
print('GGG: direct jackknife vargam2 = ',vargam2)
np.testing.assert_allclose(ggg.vargam2.ravel(), vargam2)
ggg_gam3_list = np.array(ggg_gam3_list)
vargam3 = np.diagonal(np.cov(ggg_gam3_list.T, bias=True)) * (len(ggg_gam3_list)-1)
print('GGG: treecorr jackknife vargam3 = ',ggg.vargam3.ravel())
print('GGG: direct jackknife vargam3 = ',vargam3)
np.testing.assert_allclose(ggg.vargam3.ravel(), vargam3)
ggg_map3_list = np.array(ggg_map3_list)
varmap3 = np.diagonal(np.cov(ggg_map3_list.T, bias=True)) * (len(ggg_map3_list)-1)
covmap3 = treecorr.estimate_multi_cov([ggg], 'jackknife',
lambda corrs: corrs[0].calculateMap3()[0])
print('GGG: treecorr jackknife varmap3 = ',np.diagonal(covmap3))
print('GGG: direct jackknife varmap3 = ',varmap3)
np.testing.assert_allclose(np.diagonal(covmap3), varmap3)
# Finally NNN, where we need to use randoms. Both simple and compensated.
ddd = treecorr.NNNCorrelation(nbins=3, min_sep=100., max_sep=300., bin_slop=0,
min_u=0., max_u=1.0, nubins=1,
min_v=0., max_v=1.0, nvbins=1,
var_method='jackknife')
drr = ddd.copy()
rdd = ddd.copy()
rrr = ddd.copy()
ddd.process(cat)
drr.process(cat, rand_cat)
rdd.process(rand_cat, cat)
rrr.process(rand_cat)
zeta1_list = []
zeta2_list = []
for i in range(npatch):
cat1 = treecorr.Catalog(x=cat.x[cat.patch != i],
y=cat.y[cat.patch != i],
k=cat.k[cat.patch != i],
g1=cat.g1[cat.patch != i],
g2=cat.g2[cat.patch != i])
rand_cat1 = treecorr.Catalog(x=rand_cat.x[rand_cat.patch != i],
y=rand_cat.y[rand_cat.patch != i])
ddd1 = treecorr.NNNCorrelation(nbins=3, min_sep=100., max_sep=300., bin_slop=0,
min_u=0., max_u=1.0, nubins=1,
min_v=0., max_v=1.0, nvbins=1)
drr1 = ddd1.copy()
rdd1 = ddd1.copy()
rrr1 = ddd1.copy()
ddd1.process(cat1)
drr1.process(cat1, rand_cat1)
rdd1.process(rand_cat1, cat1)
rrr1.process(rand_cat1)
zeta1_list.append(ddd1.calculateZeta(rrr1)[0].ravel())
zeta2_list.append(ddd1.calculateZeta(rrr1, drr1, rdd1)[0].ravel())
print('simple')
zeta1_list = np.array(zeta1_list)
zeta2, varzeta2 = ddd.calculateZeta(rrr)
varzeta1 = np.diagonal(np.cov(zeta1_list.T, bias=True)) * (len(zeta1_list)-1)
print('NNN: treecorr jackknife varzeta = ',ddd.varzeta.ravel())
print('NNN: direct jackknife varzeta = ',varzeta1)
np.testing.assert_allclose(ddd.varzeta.ravel(), varzeta1)
print('compensated')
print(zeta2_list)
zeta2_list = np.array(zeta2_list)
zeta2, varzeta2 = ddd.calculateZeta(rrr, drr=drr, rdd=rdd)
varzeta2 = np.diagonal(np.cov(zeta2_list.T, bias=True)) * (len(zeta2_list)-1)
print('NNN: treecorr jackknife varzeta = ',ddd.varzeta.ravel())
print('NNN: direct jackknife varzeta = ',varzeta2)
np.testing.assert_allclose(ddd.varzeta.ravel(), varzeta2)
# Can't do patch calculation with different numbers of patches in rrr, drr, rdd.
rand_cat3 = treecorr.Catalog(x=rx, y=ry, npatch=3)
cat3 = treecorr.Catalog(x=x, y=y, patch_centers=rand_cat3.patch_centers)
rrr3 = rrr.copy()
drr3 = drr.copy()
rdd3 = rdd.copy()
rrr3.process(rand_cat3)
drr3.process(cat3, rand_cat3)
rdd3.process(rand_cat3, cat3)
with assert_raises(RuntimeError):
ddd.calculateZeta(rrr3)
with assert_raises(RuntimeError):
ddd.calculateZeta(rrr3, drr, rdd)
with assert_raises(RuntimeError):
ddd.calculateZeta(rrr, drr3, rdd3)
with assert_raises(RuntimeError):
ddd.calculateZeta(rrr, drr, rdd3)
with assert_raises(RuntimeError):
ddd.calculateZeta(rrr, drr3, rdd)
@timer
def test_finalize_false():
nsource = 80
nhalo = 100
npatch = 16
# Make three independent data sets
rng = np.random.RandomState(8675309)
x_1, y_1, g1_1, g2_1, k_1 = generate_shear_field(nsource, nhalo, rng)
x_2, y_2, g1_2, g2_2, k_2 = generate_shear_field(nsource, nhalo, rng)
x_3, y_3, g1_3, g2_3, k_3 = generate_shear_field(nsource, nhalo, rng)
# Make a single catalog with all three together
cat = treecorr.Catalog(x=np.concatenate([x_1, x_2, x_3]),
y=np.concatenate([y_1, y_2, y_3]),
g1=np.concatenate([g1_1, g1_2, g1_3]),
g2=np.concatenate([g2_1, g2_2, g2_3]),
k=np.concatenate([k_1, k_2, k_3]),
npatch=npatch)
# Now the three separately, using the same patch centers
cat1 = treecorr.Catalog(x=x_1, y=y_1, g1=g1_1, g2=g2_1, k=k_1, patch_centers=cat.patch_centers)
cat2 = treecorr.Catalog(x=x_2, y=y_2, g1=g1_2, g2=g2_2, k=k_2, patch_centers=cat.patch_centers)
cat3 = treecorr.Catalog(x=x_3, y=y_3, g1=g1_3, g2=g2_3, k=k_3, patch_centers=cat.patch_centers)
np.testing.assert_array_equal(cat1.patch, cat.patch[0:nsource])
np.testing.assert_array_equal(cat2.patch, cat.patch[nsource:2*nsource])
np.testing.assert_array_equal(cat3.patch, cat.patch[2*nsource:3*nsource])
# KKK auto
kkk1 = treecorr.KKKCorrelation(nbins=3, min_sep=30., max_sep=100., brute=True,
min_u=0.8, max_u=1.0, nubins=1,
min_v=0., max_v=0.2, nvbins=1)
kkk1.process(cat)
kkk2 = treecorr.KKKCorrelation(nbins=3, min_sep=30., max_sep=100., brute=True,
min_u=0.8, max_u=1.0, nubins=1,
min_v=0., max_v=0.2, nvbins=1)
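# (Comment added) The auto-correlation of the combined catalog splits by which sub-catalog
# each of the three points comes from: all three from one catalog (3 auto terms), two from
# one and one from another (6 ordered cross terms), or one from each (1 term); hence the
# ten process() calls below, accumulated with the initialize/finalize flags.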
kkk2.process(cat1, initialize=True, finalize=False)
kkk2.process(cat2, initialize=False, finalize=False)
kkk2.process(cat3, initialize=False, finalize=False)
kkk2.process(cat1, cat2, initialize=False, finalize=False)
kkk2.process(cat1, cat3, initialize=False, finalize=False)
kkk2.process(cat2, cat1, initialize=False, finalize=False)
kkk2.process(cat2, cat3, initialize=False, finalize=False)
kkk2.process(cat3, cat1, initialize=False, finalize=False)
kkk2.process(cat3, cat2, initialize=False, finalize=False)
kkk2.process(cat1, cat2, cat3, initialize=False, finalize=True)
np.testing.assert_allclose(kkk1.ntri, kkk2.ntri)
np.testing.assert_allclose(kkk1.weight, kkk2.weight)
np.testing.assert_allclose(kkk1.meand1, kkk2.meand1)
np.testing.assert_allclose(kkk1.meand2, kkk2.meand2)
np.testing.assert_allclose(kkk1.meand3, kkk2.meand3)
np.testing.assert_allclose(kkk1.zeta, kkk2.zeta)
# KKK cross12
cat23 = treecorr.Catalog(x=np.concatenate([x_2, x_3]),
y=np.concatenate([y_2, y_3]),
g1=np.concatenate([g1_2, g1_3]),
g2=np.concatenate([g2_2, g2_3]),
k=np.concatenate([k_2, k_3]),
patch_centers=cat.patch_centers)
np.testing.assert_array_equal(cat23.patch, cat.patch[nsource:3*nsource])
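# (Comment added) For the 1-2 cross-correlation against the combined cat23, the two points
# drawn from cat23 can both come from cat2, both from cat3, or one from each; hence the
# three process() calls below.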
kkk1.process(cat1, cat23)
kkk2.process(cat1, cat2, initialize=True, finalize=False)
kkk2.process(cat1, cat3, initialize=False, finalize=False)
kkk2.process(cat1, cat2, cat3, initialize=False, finalize=True)
np.testing.assert_allclose(kkk1.ntri, kkk2.ntri)
np.testing.assert_allclose(kkk1.weight, kkk2.weight)
np.testing.assert_allclose(kkk1.meand1, kkk2.meand1)
np.testing.assert_allclose(kkk1.meand2, kkk2.meand2)
np.testing.assert_allclose(kkk1.meand3, kkk2.meand3)
np.testing.assert_allclose(kkk1.zeta, kkk2.zeta)
# KKKCross cross12
kkkc1 = treecorr.KKKCrossCorrelation(nbins=3, min_sep=30., max_sep=100., brute=True,
min_u=0.8, max_u=1.0, nubins=1,
min_v=0., max_v=0.2, nvbins=1)
kkkc1.process(cat1, cat23)
kkkc2 = treecorr.KKKCrossCorrelation(nbins=3, min_sep=30., max_sep=100., brute=True,
min_u=0.8, max_u=1.0, nubins=1,
min_v=0., max_v=0.2, nvbins=1)
kkkc2.process(cat1, cat2, initialize=True, finalize=False)
kkkc2.process(cat1, cat3, initialize=False, finalize=False)
kkkc2.process(cat1, cat2, cat3, initialize=False, finalize=True)
for perm in ['k1k2k3', 'k1k3k2', 'k2k1k3', 'k2k3k1', 'k3k1k2', 'k3k2k1']:
kkk1 = getattr(kkkc1, perm)
kkk2 = getattr(kkkc2, perm)
np.testing.assert_allclose(kkk1.ntri, kkk2.ntri)
np.testing.assert_allclose(kkk1.weight, kkk2.weight)
np.testing.assert_allclose(kkk1.meand1, kkk2.meand1)
np.testing.assert_allclose(kkk1.meand2, kkk2.meand2)
np.testing.assert_allclose(kkk1.meand3, kkk2.meand3)
import numpy as np
import pytest
from numpy import ndarray
from numpy.testing import assert_array_equal
from pytest import approx
from meshkernel import (
DeleteMeshOption,
GeometryList,
InputError,
Mesh2d,
MeshKernel,
MeshKernelError,
MeshRefinementParameters,
RefinementType,
)
cases_is_geometric_constructor = [(True), (False)]
@pytest.mark.parametrize("is_geometric", cases_is_geometric_constructor)
def test_constructor(is_geometric: bool):
"""Test if the constructor works"""
MeshKernel(is_geometric)
def test_different_instances_have_different_ids():
"""Test if the meshkernelid of two instances differs"""
mk_1 = MeshKernel()
mk_2 = MeshKernel()
assert mk_1._meshkernelid != mk_2._meshkernelid
def test_mesh2d_set_and_mesh2d_get():
"""Test to set a simple mesh and then get it again with new parameters
3---2
| |
0---1
"""
mk = MeshKernel()
edge_nodes = np.array([0, 1, 1, 2, 2, 3, 3, 0], dtype=np.int32)
node_x = np.array([0.0, 1.0, 1.0, 0.0], dtype=np.double)
node_y = np.array([0.0, 0.0, 1.0, 1.0], dtype=np.double)
input_mesh2d = Mesh2d(node_x, node_y, edge_nodes)
mk.mesh2d_set(input_mesh2d)
output_mesh2d = mk.mesh2d_get()
# Check that the returned mesh matches the mesh that was set
assert_array_equal(output_mesh2d.edge_nodes, input_mesh2d.edge_nodes)
from __future__ import division
import cv2
import numpy as np
import math
import json
import os
from collections import OrderedDict
import pdb
import util
def _cvt_point(p):
if np.ndim(p) == 1:
p = [p]
if np.ndim(p) == 2:
return np.asarray([[[pt[0], pt[1]] for pt in p]], dtype = np.float32)
assert np.ndim(p) == 3
return p
def fov(fx, w):
tan = w / fx * .5
return np.arctan(tan) * 2 / np.pi * 180.0
def min_mono_visible_distance(vertical_fov, install_height):
return install_height * np.tan(vertical_fov / 180 * np.pi)
def min_bino_visible_distance(fx, w, B):
return fx / w * B
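# Worked example (added, hypothetical numbers): for a sensor with w=1280 px and fx=1000 px,
# fov(1000, 1280) = 2*arctan(0.64) ~ 65.2 degrees, and with baseline B=0.1 m this file's
# formula gives min_bino_visible_distance(1000, 1280, 0.1) = 1000/1280*0.1 ~ 0.078 m.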
class JsonObject(object):
_fields = []
def __init__(self, **data):
if type(self._fields) == str:
self._fields = self._fields.split(",")
for f in self._fields:
self.__setattr__(f, None)
if data is not None:
JsonObject.load(self, data)
def load(self, data):
for f in self._fields:
v = None
if f in data:
v = data[f]
self.__setattr__(f, v)
def as_dict(self, excluded = None, **kwargs):
data = OrderedDict()
for f in self._fields:
if not excluded or f not in excluded:
data[f] = self.__getattribute__(f)
return data
def dump(self, path, **kwargs):
s = self.dumps(**kwargs)
parent_path = os.path.dirname(path)
if not os.path.exists(parent_path):
os.makedirs(parent_path)
with open(path, "w") as f:
f.write(s)
def dumps(self, excluded = None, *args, **kwargs):
def default(o):
if isinstance(o, JsonObject):
return o.as_dict(excluded = excluded, **kwargs)
if isinstance(o, np.ndarray):
return o.tolist()
# `self` here is the enclosing JsonObject, not a JSONEncoder, so calling
# json.JSONEncoder.default(self, o) would fail; use a fresh encoder instead.
return json.JSONEncoder().default(o)
if "indent" not in kwargs:
kwargs["indent"] = True
return json.dumps(self, default = default,**kwargs)
def __repr__(self, *args, **kwargs):
return self.dumps(*args, **kwargs)
def __str__(self, *args, **kwargs):
return self.dumps(*args, **kwargs)
class CameraModel(JsonObject):
_fields = "fx,fy,cx,cy,k1,k2,p1,p2,k3,size,P,R,alpha"
def __init__(self, **kwargs):
if "k3" not in kwargs:
kwargs["k3"] = 0
JsonObject.__init__(self, **kwargs)
self.intrinsics = np.eye(3, dtype = np.float64)
self.distortion = np.zeros((5, 1), np.float64) # rational polynomial
self.intrinsics[0, 0] = self.fx
self.intrinsics[1, 1] = self.fy
self.intrinsics[0, 2] = self.cx
self.intrinsics[1, 2] = self.cy
self.distortion[0][0] = self.k1
self.distortion[1][0] = self.k2
self.distortion[2][0] = self.p1
self.distortion[3][0] = self.p2
self.size = tuple(self.size)
self.w, self.h = self.size
if self.P is not None:
self.P = np.asarray(self.P)
else:
self.P = np.zeros((3, 4))
self.R = np.asarray(self.R)
self.set_alpha(self.alpha)
def set_alpha(self, a):
"""
Set the alpha value for the calibrated camera solution. The alpha
value is a zoom, and ranges from 0 (zoomed in, all pixels in
calibrated image are valid) to 1 (zoomed out, all pixels in
original image are in calibrated image).
"""
self.alpha = a
if a is not None:
ncm, _ = cv2.getOptimalNewCameraMatrix(self.intrinsics, self.distortion, self.size, a)
else:
ncm = self.intrinsics
for j in range(3):
for i in range(3):
self.P[j,i] = ncm[j, i]
self.fx = self.P[0, 0]
self.fy = self.P[1, 1]
self.cx = self.P[0, 2]
self.cy = self.P[1, 2]
self.mapx, self.mapy = cv2.initUndistortRectifyMap(self.intrinsics, self.distortion,
self.R, ncm, self.size, cv2.CV_32FC1)
def camera2normizedimage(self, x, y, z, homo = False):
coord = [[x / z], [y / z]]
if homo:
coord.append([1])
return np.asanyarray(coord)
import cv2
import numpy as np
import random
from PIL import Image
from shapely.geometry import LineString as shape_string
from shapely.geometry import Polygon as shape_poly
def angle_between(p1, p2):
ang1 = np.arctan2(*p1[::-1])
ang2 = np.arctan2(*p2[::-1])
return (ang2 - ang1) % (2 * np.pi)
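# e.g. angle_between((1, 0), (0, 1)) == pi/2: the counter-clockwise angle from p1 to p2,
# always returned in [0, 2*pi) (comment added for clarity).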
def pad_to(im, size=3000):
row, col = im.shape[:2]
pad_r = (size - row) // 2
pad_c = (size - col) // 2
border = cv2.copyMakeBorder(
im, top=pad_r, bottom=pad_r, left=pad_c, right=pad_c, borderType=cv2.BORDER_CONSTANT, value=[0]
)
return border
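# Note (added): pad_r and pad_c use integer division, so if (size - row) or (size - col)
# is odd, the padded output comes out one pixel short of `size` along that axis.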
def semmap_to_lightmap(sem):
def gkern(l=10, sig=5):
"""\
Creates a Gaussian kernel with side length l and standard deviation sig.
"""
ax = np.linspace(-(l - 1) / 2.0, (l - 1) / 2.0, l)
xx, yy = np.meshgrid(ax, ax)
kernel = np.exp(-0.5 * (np.square(xx) + np.square(yy)) / np.square(sig))
return kernel / np.sum(kernel)
kernel = gkern()
sem_map = np.array(sem)
kitchen_bathroom = np.logical_or(np.logical_or(sem_map == 10, sem_map == 1), sem_map == 19)
kitchen_bathroom_filtered = cv2.dilate(kitchen_bathroom.astype(np.float32), kernel.astype(np.float32))
kitchen_bathroom_filtered = cv2.filter2D(
kitchen_bathroom_filtered.astype(np.float32), -1, kernel.astype(np.float32)
)
return Image.fromarray((kitchen_bathroom_filtered * 255).astype(np.uint8))
class BBox(object):
def __init__(self, raw_pts=None, zmin=None, zmax=None, from_dict=None):
if raw_pts is None:
self.load_dict(from_dict)
else:
self.raw_pts = raw_pts
self.z = (zmin, zmax)
self.init_box()
def init_box(self):
self.center = self.raw_pts.mean(axis=0)
self.calc_nrm()
def get_scale(self):
return np.linalg.norm(self.edge_x), np.linalg.norm(self.edge_y), self.z[1] - self.z[0]
def calc_nrm(self):
if self.raw_pts[0, 0] > self.raw_pts[1, 0]:
id1, id2 = 1, 0
else:
id1, id2 = 0, 1
direction = self.raw_pts[id1, :] - self.raw_pts[id2, :]
dist = np.linalg.norm(direction)
des_len = 0.4
nrm = np.array([-direction[1] * des_len / dist, direction[0] * des_len / dist])
nrm_start = (self.raw_pts[id1] + self.raw_pts[id2]) / 2.0
cand1 = nrm_start + nrm
cand2 = nrm_start - nrm
if np.linalg.norm(cand1 - self.center) > np.linalg.norm(cand2 - self.center):
flip_factor = 1
else:
flip_factor = -1
nrm = nrm * flip_factor
self.nrm = nrm / np.linalg.norm(nrm)
# self.flip_factor = flip_factor
# return nrm
self.edge_y = direction * flip_factor
self.edge_x = np.linalg.norm(self.raw_pts[0, :] - self.raw_pts[3, :]) * self.nrm
def get_coords(self):
"""
Return the vertices of the bounding box, in order of BL,BR,TR,TL
"""
x = self.edge_x / 2.0
y = self.edge_y / 2.0
return np.array([self.center - x - y, self.center + x - y, self.center + x + y, self.center - x + y])
def get_rotation_angle(self, CAD_dir=(0, 1)):
# dir_y, dir_x = self.nrm / np.linalg.norm(self.nrm)
dir_y, dir_x = self.nrm[:]
# theta = np.arcsin(np.cross([1,0], [dir_x, dir_y]))
return angle_between(CAD_dir, (dir_x, dir_y))
def get_center(self):
return (*self.center, (self.z[0] + self.z[1]) / 2.0)
def get_nrm(self):
return self.nrm
def as_dict(self):
return {
"normal": tuple(self.nrm),
"center": tuple(self.center),
"edge_x": tuple(self.edge_x),
"edge_y": tuple(self.edge_y),
"raw_pts": self.raw_pts.tolist(),
"theta": self.get_rotation_angle(),
"z": tuple(self.z),
}
def load_dict(self, d):
self.nrm = np.array(d["normal"])
self.center = np.array(d["center"])
self.edge_x = np.array(d["edge_x"])
self.edge_y = np.array(d["edge_y"])
self.z = d["z"]
self.raw_pts = np.array(d["raw_pts"])
def polygon_to_bbox(Y, X, Z, rotation=None, flip_image=None, scale_factor=100.0):
pts = np.vstack((Y, X)).transpose()
center = pts.mean(axis=0)
dists = np.array([np.linalg.norm(pts[i] - pts[i - 1]) for i in range(4)])
max_idx = np.argmax(dists)
p0, p1 = pts[max_idx], pts[max_idx - 1]
edge_y = p1 - p0
edge_y_dir = edge_y / np.linalg.norm(edge_y)
edge_y_projected = np.array([np.dot(pts[i] - p1, edge_y_dir) for i in range(4)])
edge_y_raw = edge_y_dir * np.ptp(edge_y_projected)
mid_y = (p0 + p1) / 2.0
edge_x_crook = center - mid_y
edge_x_raw = 2 * (edge_x_crook - (edge_y_dir * np.dot(edge_x_crook, edge_y_dir)))
edge_x_dir = edge_x_raw / np.linalg.norm(edge_x_raw)
# so we have:
# edge_x, edge_y, center here
# Z is given as input
# we need to figure out normal direction (aka theta)
# and reorient X/Y accordingly
if rotation is None:
# this means that the object is wall/door/window
if np.linalg.norm(edge_x_raw) > np.linalg.norm(edge_y_raw):
edge_x = edge_y_raw
edge_y = edge_x_raw
nrm = edge_y_dir
else:
edge_y = edge_y_raw
edge_x = edge_x_raw
nrm = edge_x_dir
else:
# this means that this is a fixed furniture
forward_direction = np.array([1, 0])
rotated = np.matmul(
np.asarray([[np.cos(rotation), -np.sin(rotation)], [np.sin(rotation), np.cos(rotation)]]), forward_direction
)
fit_x = np.dot(edge_x_dir, rotated)
fit_y = np.dot(edge_y_dir, rotated)
if abs(fit_y) > abs(fit_x):
edge_x = edge_y_raw
edge_y = edge_x_raw
nrm = edge_y_dir * np.sign(fit_y)
else:
edge_y = edge_y_raw
edge_x = edge_x_raw
nrm = edge_x_dir * np.sign(fit_x)
if flip_image is not None:
if should_flip(center, nrm, flip_image):
nrm = -nrm
bbox_dict = {
"center": center / scale_factor,
"z": Z,
"edge_x": edge_x / scale_factor,
"edge_y": edge_y / scale_factor,
"normal": nrm,
"raw_pts": pts / scale_factor,
}
return BBox(None, None, None, bbox_dict)
def should_flip(center, nrm, flip_image):
flip_image_np = np.asarray(flip_image).astype(int)
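# Strategy (comment added): march up to 1000 px from the center along +nrm and along -nrm
# until an occupied pixel of flip_image is hit (leaving the image counts as distance 2000).
# Returning flip_dist > normal_dist makes the caller flip nrm so that the final normal
# points toward the side with more free space.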
# plt.imshow(flip_image_np)
# normal direction
normal_dist = 0
width, height = flip_image_np.shape
for i in range(1000):
y, x = np.round(center + i * nrm).astype(int)
if x >= width or x < 0 or y >= height or y < 0:
normal_dist = 2000
break
if flip_image_np[x, y]:
normal_dist = i
break
# flipped direction
flip_dist = 0
for i in range(1000):
y, x = np.round(center - i * nrm).astype(int)
if x >= width or x < 0 or y >= height or y < 0:
flip_dist = 2000
break
if flip_image_np[x, y]:
flip_dist = i
break
y, x = np.round(center).astype(int)
# print(nrm, center, normal_dist, flip_dist)
return flip_dist > normal_dist
# def squash_to_size(xmin,xmax,ymin,ymax,scale):
def squash_to_size(bbox, scale):
size = random.choice(scale)
print("squashing object...")
x, y, _ = bbox.get_scale()
# print(bbox.get_scale())
if x < y:
bbox.edge_x = bbox.edge_x / np.linalg.norm(bbox.edge_x) * size
else:
bbox.edge_y = bbox.edge_y / np.linalg.norm(bbox.edge_y) * size
def get_unique(doc, val):
it = doc.getElementsByTagName(val)
if len(it) > 1:
raise ValueError("value not unique...")
return it[0].firstChild.nodeValue
def snap_out_of_wall(obj, wall, obj_poly, wall_poly):
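# (Comment added) If the object merely touches the wall along a line, try nudging it by a
# tiny margin in each of the four axis directions; if there is a genuine polygon overlap,
# nudge by the overlap's extent instead. The first candidate move that clears the wall wins.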
intersection = wall_poly.intersection(obj_poly)
if type(intersection) == shape_string:
overlap_margin = 0.001
for move in [(overlap_margin, 0), (-overlap_margin, 0), (0, overlap_margin), (0, -overlap_margin)]:
temp_obj = shape_poly(obj.get_coords() + move)
if not temp_obj.intersects(wall_poly):
obj.center += move
return
elif type(intersection) == shape_poly:
xy = np.array(intersection.exterior.coords.xy)
ptp = xy.ptp(axis=1) + 0.001
for move in [(ptp[0], 0), (-ptp[0], 0), (0, ptp[1]), (0, -ptp[1])]:
temp_obj = shape_poly(obj.get_coords() + move)
if not temp_obj.intersects(wall_poly):
obj.center += move
return
def snap_on_wall(obj, wall, obj_poly, wall_poly):
if wall_poly.intersects(obj_poly):
intersection = wall_poly.intersection(obj_poly)
if type(intersection) == shape_string:
return
elif type(intersection) == shape_poly:
xy = np.array(intersection.exterior.coords.xy)
ptp = xy.ptp(axis=1)
for move in [(ptp[0], 0), (-ptp[0], 0), (0, ptp[1]), (0, -ptp[1])]:
temp_obj = shape_poly(obj.get_coords() + move)
if type(temp_obj.intersection(wall_poly)) == shape_string:
obj.center += move
return
else:
distance = wall_poly.distance(obj_poly)
overlaps = []
dirs = [(distance, 0), (-distance, 0), (0, distance), (0, -distance)]
for move in dirs:
temp_obj = shape_poly(obj.get_coords() + move)
if not temp_obj.intersects(wall_poly):
overlaps.append(np.finfo(np.float64).max)
else:
overlaps.append(obj_poly.intersection(wall_poly).area)
snap_dir = np.argmin(np.array(overlaps))
obj.center += dirs[snap_dir]
return
print("No wall is found to fit the criteria...")
def gen_cube_obj(bbox, file_path, is_color=False, should_save=True):
vertices = []
a, b, c, d = bbox.get_coords()
for x, y in [a, b, d, c]:
vertices.append((x, y, bbox.z[1]))
for x, y in [a, b, d, c]:
vertices.append((x, y, bbox.z[0]))
c = np.random.rand(3)
from __future__ import print_function, absolute_import, division
import contextlib
import sys
import numpy as np
import random
import threading
import gc
from numba import unittest_support as unittest
from numba.errors import TypingError
from numba import config
from numba import njit
from numba import types
from numba import utils
from numba.numpy_support import version as numpy_version
from .support import MemoryLeakMixin, TestCase, tag
nrtjit = njit(_nrt=True, nogil=True)
def np_concatenate1(a, b, c):
return np.concatenate((a, b, c))
def np_concatenate2(a, b, c, axis):
return np.concatenate((a, b, c), axis=axis)
def np_stack1(a, b, c):
return np.stack((a, b, c))
def np_stack2(a, b, c, axis):
return np.stack((a, b, c), axis=axis)
def np_hstack(a, b, c):
return np.hstack((a, b, c))
def np_vstack(a, b, c):
return np.vstack((a, b, c))
def np_dstack(a, b, c):
return np.dstack((a, b, c))
def np_column_stack(a, b, c):
return np.column_stack((a, b, c))
class BaseTest(TestCase):
def check_outputs(self, pyfunc, argslist, exact=True):
cfunc = nrtjit(pyfunc)
for args in argslist:
expected = pyfunc(*args)
ret = cfunc(*args)
self.assertEqual(ret.size, expected.size)
self.assertEqual(ret.dtype, expected.dtype)
self.assertStridesEqual(ret, expected)
if exact:
np.testing.assert_equal(expected, ret)
else:
np.testing.assert_allclose(expected, ret)
class NrtRefCtTest(MemoryLeakMixin):
def assert_array_nrt_refct(self, arr, expect):
self.assertEqual(arr.base.refcount, expect)
class TestDynArray(NrtRefCtTest, TestCase):
def test_empty_0d(self):
@nrtjit
def foo():
arr = np.empty(())
arr[()] = 42
return arr
arr = foo()
self.assert_array_nrt_refct(arr, 1)
np.testing.assert_equal(42, arr)
self.assertEqual(arr.size, 1)
self.assertEqual(arr.shape, ())
self.assertEqual(arr.dtype, np.dtype(np.float64))
self.assertEqual(arr.strides, ())
arr.fill(123) # test writability
np.testing.assert_equal(123, arr)
del arr
def test_empty_1d(self):
@nrtjit
def foo(n):
arr = np.empty(n)
for i in range(n):
arr[i] = i
return arr
n = 3
arr = foo(n)
self.assert_array_nrt_refct(arr, 1)
np.testing.assert_equal(np.arange(n), arr)
self.assertEqual(arr.size, n)
self.assertEqual(arr.shape, (n,))
self.assertEqual(arr.dtype, np.dtype(np.float64))
self.assertEqual(arr.strides, (np.dtype(np.float64).itemsize,))
arr.fill(123) # test writability
np.testing.assert_equal(123, arr)
del arr
def test_empty_2d(self):
def pyfunc(m, n):
arr = np.empty((m, n), np.int32)
for i in range(m):
for j in range(n):
arr[i, j] = i + j
return arr
cfunc = nrtjit(pyfunc)
m = 4
n = 3
expected_arr = pyfunc(m, n)
got_arr = cfunc(m, n)
self.assert_array_nrt_refct(got_arr, 1)
np.testing.assert_equal(expected_arr, got_arr)
self.assertEqual(expected_arr.size, got_arr.size)
self.assertEqual(expected_arr.shape, got_arr.shape)
self.assertEqual(expected_arr.strides, got_arr.strides)
del got_arr
@tag('important')
def test_empty_3d(self):
def pyfunc(m, n, p):
arr = np.empty((m, n, p), np.int32)
for i in range(m):
for j in range(n):
for k in range(p):
arr[i, j, k] = i + j + k
return arr
cfunc = nrtjit(pyfunc)
m = 4
n = 3
p = 2
expected_arr = pyfunc(m, n, p)
got_arr = cfunc(m, n, p)
self.assert_array_nrt_refct(got_arr, 1)
np.testing.assert_equal(expected_arr, got_arr)
self.assertEqual(expected_arr.size, got_arr.size)
self.assertEqual(expected_arr.shape, got_arr.shape)
self.assertEqual(expected_arr.strides, got_arr.strides)
del got_arr
@tag('important')
def test_empty_2d_sliced(self):
def pyfunc(m, n, p):
arr = np.empty((m, n), np.int32)
for i in range(m):
for j in range(n):
arr[i, j] = i + j
return arr[p]
cfunc = nrtjit(pyfunc)
m = 4
n = 3
p = 2
expected_arr = pyfunc(m, n, p)
got_arr = cfunc(m, n, p)
self.assert_array_nrt_refct(got_arr, 1)
np.testing.assert_equal(expected_arr, got_arr)
self.assertEqual(expected_arr.size, got_arr.size)
self.assertEqual(expected_arr.shape, got_arr.shape)
self.assertEqual(expected_arr.strides, got_arr.strides)
del got_arr
@tag('important')
def test_return_global_array(self):
y = np.ones(4, dtype=np.float32)
initrefct = sys.getrefcount(y)
def return_external_array():
return y
cfunc = nrtjit(return_external_array)
out = cfunc()
# out reference by cfunc
self.assertEqual(initrefct + 1, sys.getrefcount(y))
np.testing.assert_equal(y, out)
np.testing.assert_equal(y, np.ones(4, dtype=np.float32))
np.testing.assert_equal(out, np.ones(4, dtype=np.float32))
del out
gc.collect()
# out is only referenced by cfunc
self.assertEqual(initrefct + 1, sys.getrefcount(y))
del cfunc
gc.collect()
# y is no longer referenced by cfunc
self.assertEqual(initrefct, sys.getrefcount(y))
@tag('important')
def test_return_global_array_sliced(self):
y = np.ones(4, dtype=np.float32)
def return_external_array():
return y[2:]
cfunc = nrtjit(return_external_array)
out = cfunc()
self.assertIsNone(out.base)
yy = y[2:]
np.testing.assert_equal(yy, out)
np.testing.assert_equal(yy, np.ones(2, dtype=np.float32))
np.testing.assert_equal(out, np.ones(2, dtype=np.float32))
def test_array_pass_through(self):
def pyfunc(y):
return y
arr = np.ones(4, dtype=np.float32)
cfunc = nrtjit(pyfunc)
expected = cfunc(arr)
got = pyfunc(arr)
np.testing.assert_equal(expected, arr)
np.testing.assert_equal(expected, got)
self.assertIs(expected, arr)
self.assertIs(expected, got)
@tag('important')
def test_array_pass_through_sliced(self):
def pyfunc(y):
return y[y.size // 2:]
arr = np.ones(4, dtype=np.float32)
initrefct = sys.getrefcount(arr)
cfunc = nrtjit(pyfunc)
got = cfunc(arr)
self.assertEqual(initrefct + 1, sys.getrefcount(arr))
expected = pyfunc(arr)
self.assertEqual(initrefct + 2, sys.getrefcount(arr))
np.testing.assert_equal(expected, arr[arr.size // 2])
np.testing.assert_equal(expected, got)
del expected
self.assertEqual(initrefct + 1, sys.getrefcount(arr))
del got
self.assertEqual(initrefct, sys.getrefcount(arr))
def test_ufunc_with_allocated_output(self):
def pyfunc(a, b):
out = np.empty(a.shape)
np.add(a, b, out)
return out
cfunc = nrtjit(pyfunc)
# 1D case
arr_a = np.random.random(10)
arr_b = np.random.random(10)
np.testing.assert_equal(pyfunc(arr_a, arr_b),
cfunc(arr_a, arr_b))
self.assert_array_nrt_refct(cfunc(arr_a, arr_b), 1)
# 2D case
arr_a = np.random.random(10).reshape(2, 5)
arr_b = np.random.random(10).reshape(2, 5)
np.testing.assert_equal(pyfunc(arr_a, arr_b),
cfunc(arr_a, arr_b))
self.assert_array_nrt_refct(cfunc(arr_a, arr_b), 1)
# 3D case
arr_a = np.random.random(70).reshape(2, 5, 7)
arr_b = np.random.random(70).reshape(2, 5, 7)
np.testing.assert_equal(pyfunc(arr_a, arr_b),
cfunc(arr_a, arr_b))
self.assert_array_nrt_refct(cfunc(arr_a, arr_b), 1)
def test_allocation_mt(self):
"""
This test exercises array allocation in a multithreaded use case.
This stresses the freelist inside NRT.
"""
def pyfunc(inp):
out = np.empty(inp.size)
# Zero fill
for i in range(out.size):
out[i] = 0
for i in range(inp[0]):
# Allocate inside a loop
tmp = np.empty(inp.size)
# Write to tmp
for j in range(tmp.size):
tmp[j] = inp[j]
# out = tmp + i
for j in range(tmp.size):
out[j] += tmp[j] + i
return out
cfunc = nrtjit(pyfunc)
size = 10 # small array size so that the computation is short
arr = np.random.randint(1, 10, size)
frozen_arr = arr.copy()
np.testing.assert_equal(pyfunc(arr), cfunc(arr))
# Ensure we did not modify the input
np.testing.assert_equal(frozen_arr, arr)
workers = []
inputs = []
outputs = []
# Make wrapper to store the output
def wrapped(inp, out):
out[:] = cfunc(inp)
# Create a lot of worker threads to create contention
for i in range(100):
arr = np.random.randint(1, 10, size)
out = np.empty_like(arr)
thread = threading.Thread(target=wrapped,
args=(arr, out),
name="worker{0}".format(i))
workers.append(thread)
inputs.append(arr)
outputs.append(out)
# Launch worker threads
for thread in workers:
thread.start()
# Join worker threads
for thread in workers:
thread.join()
# Check result
for inp, out in zip(inputs, outputs):
np.testing.assert_equal(pyfunc(inp), out)
def test_refct_mt(self):
"""
This test exercises the refct in multithreaded code
"""
def pyfunc(n, inp):
out = np.empty(inp.size)
for i in range(out.size):
out[i] = inp[i] + 1
# Use swap to trigger many refct ops
for i in range(n):
out, inp = inp, out
return out
cfunc = nrtjit(pyfunc)
size = 10
input = np.arange(size, dtype=np.float64)
expected_refct = sys.getrefcount(input)
swapct = random.randrange(1000)
expected = pyfunc(swapct, input)
np.testing.assert_equal(expected, cfunc(swapct, input))
# The following checks can discover a reference count error
del expected
self.assertEqual(expected_refct, sys.getrefcount(input))
workers = []
outputs = []
swapcts = []
# Make wrapper to store the output
def wrapped(n, input, out):
out[:] = cfunc(n, input)
# Create worker threads
for i in range(100):
out = np.empty(size)
# All thread shares the same input
swapct = random.randrange(1000)
thread = threading.Thread(target=wrapped,
args=(swapct, input, out),
name="worker{0}".format(i))
workers.append(thread)
outputs.append(out)
swapcts.append(swapct)
# Launch worker threads
for thread in workers:
thread.start()
# Join worker threads
for thread in workers:
thread.join()
# Check result
for swapct, out in zip(swapcts, outputs):
np.testing.assert_equal(pyfunc(swapct, input), out)
del outputs, workers
# The following checks can discover a reference count error
self.assertEqual(expected_refct, sys.getrefcount(input))
def test_swap(self):
def pyfunc(x, y, t):
"""Swap array x and y for t number of times
"""
for i in range(t):
x, y = y, x
return x, y
cfunc = nrtjit(pyfunc)
x = np.random.random(100)
y = np.random.random(100)
t = 100
initrefct = sys.getrefcount(x), sys.getrefcount(y)
expect, got = pyfunc(x, y, t), cfunc(x, y, t)
self.assertIsNone(got[0].base)
self.assertIsNone(got[1].base)
np.testing.assert_equal(expect, got)
del expect, got
self.assertEqual(initrefct, (sys.getrefcount(x), sys.getrefcount(y)))
def test_return_tuple_of_array(self):
def pyfunc(x):
y = np.empty(x.size)
for i in range(y.size):
y[i] = x[i] + 1
return x, y
cfunc = nrtjit(pyfunc)
x = np.random.random(5)
initrefct = sys.getrefcount(x)
expected_x, expected_y = pyfunc(x)
got_x, got_y = cfunc(x)
self.assertIs(x, expected_x)
self.assertIs(x, got_x)
np.testing.assert_equal(expected_x, got_x)
np.testing.assert_equal(expected_y, got_y)
del expected_x, got_x
self.assertEqual(initrefct, sys.getrefcount(x))
self.assertEqual(sys.getrefcount(expected_y), sys.getrefcount(got_y))
def test_return_tuple_of_array_created(self):
def pyfunc(x):
y = np.empty(x.size)
for i in range(y.size):
y[i] = x[i] + 1
out = y, y
return out
cfunc = nrtjit(pyfunc)
x = np.random.random(5)
expected_x, expected_y = pyfunc(x)
got_x, got_y = cfunc(x)
np.testing.assert_equal(expected_x, got_x)
np.testing.assert_equal(expected_y, got_y)
# getrefcount owns 1, got_y owns 1
self.assertEqual(2, sys.getrefcount(got_y))
# getrefcount owns 1, got_y owns 1
self.assertEqual(2, sys.getrefcount(got_y))
def test_issue_with_return_leak(self):
"""
Dispatcher returns a new reference.
We need to work around that for now.
"""
@nrtjit
def inner(out):
return out
def pyfunc(x):
return inner(x)
cfunc = nrtjit(pyfunc)
arr = np.arange(10)
old_refct = sys.getrefcount(arr)
self.assertEqual(old_refct, sys.getrefcount(pyfunc(arr)))
self.assertEqual(old_refct, sys.getrefcount(cfunc(arr)))
self.assertEqual(old_refct, sys.getrefcount(arr))
class ConstructorBaseTest(NrtRefCtTest):
def check_0d(self, pyfunc):
cfunc = nrtjit(pyfunc)
expected = pyfunc()
ret = cfunc()
self.assert_array_nrt_refct(ret, 1)
self.assertEqual(ret.size, expected.size)
self.assertEqual(ret.shape, expected.shape)
self.assertEqual(ret.dtype, expected.dtype)
self.assertEqual(ret.strides, expected.strides)
self.check_result_value(ret, expected)
# test writability
expected = np.empty_like(ret) # np.full_like was not added until Numpy 1.8
expected.fill(123)
ret.fill(123)
np.testing.assert_equal(ret, expected)
def check_1d(self, pyfunc):
cfunc = nrtjit(pyfunc)
n = 3
expected = pyfunc(n)
ret = cfunc(n)
self.assert_array_nrt_refct(ret, 1)
self.assertEqual(ret.size, expected.size)
self.assertEqual(ret.shape, expected.shape)
self.assertEqual(ret.dtype, expected.dtype)
self.assertEqual(ret.strides, expected.strides)
self.check_result_value(ret, expected)
# test writability
expected = np.empty_like(ret) # np.full_like was not added until Numpy 1.8
expected.fill(123)
ret.fill(123)
np.testing.assert_equal(ret, expected)
# errors
with self.assertRaises(ValueError) as cm:
cfunc(-1)
self.assertEqual(str(cm.exception), "negative dimensions not allowed")
def check_2d(self, pyfunc):
cfunc = nrtjit(pyfunc)
m, n = 2, 3
expected = pyfunc(m, n)
ret = cfunc(m, n)
self.assert_array_nrt_refct(ret, 1)
self.assertEqual(ret.size, expected.size)
self.assertEqual(ret.shape, expected.shape)
self.assertEqual(ret.dtype, expected.dtype)
self.assertEqual(ret.strides, expected.strides)
self.check_result_value(ret, expected)
# test writability
expected = np.empty_like(ret) # np.full_like was not added until Numpy 1.8
expected.fill(123)
ret.fill(123)
np.testing.assert_equal(ret, expected)
# errors
with self.assertRaises(ValueError) as cm:
cfunc(2, -1)
self.assertEqual(str(cm.exception), "negative dimensions not allowed")
def check_alloc_size(self, pyfunc):
"""Checks that pyfunc will error, not segfaulting due to array size."""
cfunc = nrtjit(pyfunc)
with self.assertRaises(ValueError) as e:
cfunc()
self.assertIn(
"array is too big",
str(e.exception)
)
class TestNdZeros(ConstructorBaseTest, TestCase):
def setUp(self):
super(TestNdZeros, self).setUp()
self.pyfunc = np.zeros
def check_result_value(self, ret, expected):
np.testing.assert_equal(ret, expected)
def test_0d(self):
pyfunc = self.pyfunc
def func():
return pyfunc(())
self.check_0d(func)
def test_1d(self):
pyfunc = self.pyfunc
def func(n):
return pyfunc(n)
self.check_1d(func)
def test_1d_dtype(self):
pyfunc = self.pyfunc
def func(n):
return pyfunc(n, np.int32)
self.check_1d(func)
def test_1d_dtype_instance(self):
# dtype as numpy dtype, not as scalar class
pyfunc = self.pyfunc
_dtype = np.dtype('int32')
def func(n):
return pyfunc(n, _dtype)
self.check_1d(func)
def test_2d(self):
pyfunc = self.pyfunc
def func(m, n):
return pyfunc((m, n))
self.check_2d(func)
def test_2d_shape_dtypes(self):
# Test for issue #4575
pyfunc = self.pyfunc
def func1(m, n):
return pyfunc((np.int16(m), np.int32(n)))
self.check_2d(func1)
# Using a 64-bit value checks that 32 bit systems will downcast to intp
def func2(m, n):
return pyfunc((np.int64(m), np.int8(n)))
self.check_2d(func2)
# Make sure an error is thrown if we can't downcast safely
if config.IS_32BITS:
cfunc = nrtjit(lambda m, n: pyfunc((m, n)))
with self.assertRaises(ValueError):
cfunc(np.int64(1 << (32 - 1)), 1)
@tag('important')
def test_2d_dtype_kwarg(self):
pyfunc = self.pyfunc
def func(m, n):
return pyfunc((m, n), dtype=np.complex64)
self.check_2d(func)
def test_alloc_size(self):
pyfunc = self.pyfunc
width = types.intp.bitwidth
def gen_func(shape, dtype):
return lambda : pyfunc(shape, dtype)
# Under these values numba will segfault, but that's another issue
self.check_alloc_size(gen_func(1 << width - 2, np.intp))
self.check_alloc_size(gen_func((1 << width - 8, 64), np.intp))
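# (Note added) `1 << width - 2` parses as `1 << (width - 2)`: the shift operator binds more
# loosely than subtraction, so these shapes request on the order of 2**(width-2) elements.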
class TestNdOnes(TestNdZeros):
def setUp(self):
super(TestNdOnes, self).setUp()
self.pyfunc = np.ones
@unittest.skipIf(numpy_version < (1, 8), "test requires Numpy 1.8 or later")
class TestNdFull(ConstructorBaseTest, TestCase):
def check_result_value(self, ret, expected):
np.testing.assert_equal(ret, expected)
def test_0d(self):
def func():
return np.full((), 4.5)
self.check_0d(func)
def test_1d(self):
def func(n):
return np.full(n, 4.5)
self.check_1d(func)
def test_1d_dtype(self):
def func(n):
return np.full(n, 4.5, np.bool_)
self.check_1d(func)
def test_1d_dtype_instance(self):
dtype = np.dtype('bool')
def func(n):
return np.full(n, 4.5, dtype)
self.check_1d(func)
def test_2d(self):
def func(m, n):
return np.full((m, n), 4.5)
self.check_2d(func)
def test_2d_dtype_kwarg(self):
def func(m, n):
return np.full((m, n), 1 + 4.5j, dtype=np.complex64)
self.check_2d(func)
def test_2d_dtype_from_type(self):
# tests issue #2862
def func(m, n):
return np.full((m, n), np.int32(1))
self.check_2d(func)
# tests meta issues from #2862, that np < 1.12 always
# returns float64. Complex uses `.real`, imaginary part dropped
def func(m, n):
return np.full((m, n), np.complex128(1))
self.check_2d(func)
# and that if a dtype is specified, this influences the return type
def func(m, n):
return np.full((m, n), 1, dtype=np.int8)
self.check_2d(func)
def test_2d_shape_dtypes(self):
# Test for issue #4575
def func1(m, n):
return np.full((np.int16(m), np.int32(n)), 4.5)
self.check_2d(func1)
# Using a 64-bit value checks that 32 bit systems will downcast to intp
def func2(m, n):
return np.full((np.int64(m), np.int8(n)), 4.5)
self.check_2d(func2)
# Make sure an error is thrown if we can't downcast safely
if config.IS_32BITS:
cfunc = nrtjit(lambda m, n: np.full((m, n), 4.5))
with self.assertRaises(ValueError):
cfunc(np.int64(1 << (32 - 1)), 1)
def test_alloc_size(self):
width = types.intp.bitwidth
def gen_func(shape, value):
return lambda : np.full(shape, value)
# Under these values numba will segfault, but that's another issue
self.check_alloc_size(gen_func(1 << width - 2, 1))
self.check_alloc_size(gen_func((1 << width - 8, 64), 1))
class ConstructorLikeBaseTest(object):
def mutate_array(self, arr):
try:
arr.fill(42)
except (TypeError, ValueError):
# Try something else (e.g. Numpy 1.6 with structured dtypes)
fill_value = b'x' * arr.dtype.itemsize
arr.fill(fill_value)
def check_like(self, pyfunc, dtype):
def check_arr(arr):
expected = pyfunc(arr)
ret = cfunc(arr)
self.assertEqual(ret.size, expected.size)
self.assertEqual(ret.dtype, expected.dtype)
self.assertStridesEqual(ret, expected)
self.check_result_value(ret, expected)
# test writability
self.mutate_array(ret)
self.mutate_array(expected)
np.testing.assert_equal(ret, expected)
orig = np.linspace(0, 5, 6).astype(dtype)
cfunc = nrtjit(pyfunc)
for shape in (6, (2, 3), (1, 2, 3), (3, 1, 2), ()):
if shape == ():
arr = orig[-1:].reshape(())
else:
arr = orig.reshape(shape)
check_arr(arr)
# Non-contiguous array
if arr.ndim > 0:
check_arr(arr[::2])
# Check new array doesn't inherit readonly flag
arr.flags['WRITEABLE'] = False
# verify read-only
with self.assertRaises(ValueError):
arr[0] = 1
check_arr(arr)
# Scalar argument => should produce a 0-d array
check_arr(orig[0])
class TestNdEmptyLike(ConstructorLikeBaseTest, TestCase):
def setUp(self):
super(TestNdEmptyLike, self).setUp()
self.pyfunc = np.empty_like
def check_result_value(self, ret, expected):
pass
def test_like(self):
pyfunc = self.pyfunc
def func(arr):
return pyfunc(arr)
self.check_like(func, np.float64)
def test_like_structured(self):
dtype = np.dtype([('a', np.int16), ('b', np.float32)])
pyfunc = self.pyfunc
def func(arr):
return pyfunc(arr)
self.check_like(func, dtype)
def test_like_dtype(self):
pyfunc = self.pyfunc
def func(arr):
return pyfunc(arr, np.int32)
self.check_like(func, np.float64)
def test_like_dtype_instance(self):
dtype = np.dtype('int32')
pyfunc = self.pyfunc
def func(arr):
return pyfunc(arr, dtype)
self.check_like(func, np.float64)
def test_like_dtype_structured(self):
dtype = np.dtype([('a', np.int16), ('b', np.float32)])
pyfunc = self.pyfunc
def func(arr):
return pyfunc(arr, dtype)
self.check_like(func, np.float64)
def test_like_dtype_kwarg(self):
pyfunc = self.pyfunc
def func(arr):
return pyfunc(arr, dtype=np.int32)
self.check_like(func, np.float64)
class TestNdZerosLike(TestNdEmptyLike):
def setUp(self):
super(TestNdZerosLike, self).setUp()
self.pyfunc = np.zeros_like
def check_result_value(self, ret, expected):
np.testing.assert_equal(ret, expected)
def test_like_structured(self):
super(TestNdZerosLike, self).test_like_structured()
def test_like_dtype_structured(self):
super(TestNdZerosLike, self).test_like_dtype_structured()
class TestNdOnesLike(TestNdZerosLike):
def setUp(self):
super(TestNdOnesLike, self).setUp()
self.pyfunc = np.ones_like
self.expected_value = 1
# Not supported yet.
@unittest.expectedFailure
def test_like_structured(self):
super(TestNdOnesLike, self).test_like_structured()
@unittest.expectedFailure
def test_like_dtype_structured(self):
super(TestNdOnesLike, self).test_like_dtype_structured()
@unittest.skipIf(numpy_version < (1, 8), "test requires Numpy 1.8 or later")
class TestNdFullLike(ConstructorLikeBaseTest, TestCase):
def check_result_value(self, ret, expected):
np.testing.assert_equal(ret, expected)
import os
import numpy as np
import pandas as pd
from tensorflow.python.keras import layers
from tensorflow.python.keras.models import Model
from barrage import BarrageModel
from barrage.utils import io_utils
NUM_SAMPLES_TRAIN = 613
NUM_SAMPLES_VALIDATION = 216
NUM_SAMPLES_SCORE = 297
def gen_records(num_samples):
# Classification output and weight
y_cls = np.random.randint(0, 3, num_samples).astype(np.float32)
w_cls = y_cls * 0.1 + 1
# x = input 1
x1 = np.random.normal(0, 2.0, num_samples) + y_cls
x2 = np.random.normal(-1.0, 0.25, num_samples) + y_cls
x3 = np.random.normal(1.0, 0.1, num_samples) + y_cls
# z = input 2
z1 = np.random.normal(0.5, 0.25, num_samples) + y_cls
z2 = np.random.randint(-1, 2, num_samples).astype(np.float32)
# Regression output and temporal weights
y_reg_1 = -0.2 * x1 + 0.3 * x2 + 0.4 * x3 + np.random.normal(0, 0.01, num_samples)
""" Helper methods for loading and parsing KITTI data.
Author: <NAME>
Date: September 2017
"""
from __future__ import print_function
import tensorflow as tf
import numpy as np
import cv2
import os
class Object3d(object):
""" 3d object label """
def __init__(self, label_file_line=None):
# each object represents a line in the %06d.txt
# that is each object3D represent a instance object in the image
# Pedestrian 0.00 0 -0.20 712.40 143.00 810.73 307.92
# 1.89 0.48 1.20 1.84 1.47 8.41 0.01
data = label_file_line.split(' ')
data[1:] = [float(x) for x in data[1:]]
# extract label, truncation, occlusion
self.type = data[0] # 'Car', 'Pedestrian', ...
self.truncation = data[1] # truncated pixel ratio [0..1]
self.occlusion = int(data[2]) # 0=visible, 1=partly occluded, 2=fully occluded, 3=unknown
self.alpha = data[3] # object observation angle [-pi..pi]
# extract 2d bounding box in 0-based coordinates
self.xmin = data[4] # left
self.ymin = data[5] # top
self.xmax = data[6] # right
self.ymax = data[7] # bottom
self.box2d = np.array([self.xmin,self.ymin,self.xmax,self.ymax])
# extract 3d bounding box information
self.h = data[8] # box height
self.w = data[9] # box width
self.l = data[10] # box length (in meters)
self.t = (data[11],data[12],data[13]) # location (x,y,z) in camera coord.
# self.ry is the rotation angle around the y-axis
self.ry = data[14] # yaw angle (around Y-axis in camera coordinates) [-pi..pi]
if len(data) == 16:
self.score = data[15]
def print_object(self):
print('Type, truncation, occlusion, alpha: %s, %d, %d, %f' % \
(self.type, self.truncation, self.occlusion, self.alpha))
print('2d bbox (x0,y0,x1,y1): %f, %f, %f, %f' % \
(self.xmin, self.ymin, self.xmax, self.ymax))
print('3d bbox h,w,l: %f, %f, %f' % \
(self.h, self.w, self.l))
print('3d bbox location, ry: (%f, %f, %f), %f' % \
(self.t[0],self.t[1],self.t[2],self.ry))
class Calibration(object):
''' Calibration matrices and utils
These matrices are used to transform 3D points between the coordinate frames below.
3d XYZ in <label>.txt are in rect camera coord.
2d box xy are in image2 coord
Points in <lidar>.bin are in Velodyne coord.
y_image2 = P^2_rect * x_rect
y_image2 = P^2_rect * R0_rect * Tr_velo_to_cam * x_velo
x_ref = Tr_velo_to_cam * x_velo
x_rect = R0_rect * x_ref
P^2_rect = [f^2_u, 0, c^2_u, -f^2_u b^2_x;
0, f^2_v, c^2_v, -f^2_v b^2_y;
0, 0, 1, 0]
= K * [1|t]
image2 coord:
----> x-axis (u)
|
|
v y-axis (v)
velodyne coord:
front x, left y, up z
rect/ref camera coord:
right x, down y, front z
Ref (KITTI paper): http://www.cvlibs.net/publications/Geiger2013IJRR.pdf
TODO(rqi): do matrix multiplication only once for each projection.
'''
def __init__(self, calib_filepath, from_video=False, calib=None):
if calib:
calibs = calib
elif from_video:
calibs = self.read_calib_from_video(calib_filepath)
else:
calibs = self.read_calib_file(calib_filepath)
# Projection matrix from rect camera coord to image2 coord
# P2 means the left color image in the kitti dataset
# which is the images we used
self.P = calibs['P2']
self.P = np.reshape(self.P, [3, 4])
# Rigid transform from Velodyne coord to reference camera coord
# the velo_to_cam matrix
self.V2C = calibs['Tr_velo_to_cam']
self.V2C = np.reshape(self.V2C, [3,4])
# the cam_to_velo matrix [3, 4]
self.C2V = inverse_rigid_trans(self.V2C)
# Rotation from reference camera coord to rect camera coord
# i.e. this rotates points from the (non-rectified) reference camera frame into the
# rectified camera frame, after which the lidar points can be projected with P.
self.R0 = calibs['R0_rect']
self.R0 = np.reshape(self.R0, [3, 3])
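# Minimal sketch (added; not part of the original file) of the projection pipeline the class
# docstring describes, assuming pts_velo is an (N, 3) array of velodyne points:
#   pts_h      = np.hstack([pts_velo, np.ones((len(pts_velo), 1))])  # homogeneous (N, 4)
#   pts_rect   = (pts_h @ self.V2C.T) @ self.R0.T                    # velo -> ref -> rect
#   pts_rect_h = np.hstack([pts_rect, np.ones((len(pts_rect), 1))])
#   uvw        = pts_rect_h @ self.P.T                               # rect cam -> image2
#   uv         = uvw[:, :2] / uvw[:, 2:3]                            # perspective divide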