prompt (string, 19-879k chars) | completion (string, 3-53.8k chars) | api (string, 8-59 chars)
---|---|---
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Jun 26 10:17:51 2020
description : Sampling methods from sir space to data space
author : bveitch
version : 1.0
project : EpidPy (epidemic modelling in python)
Usage:
    Used for data fitting in a least-squares fit.
    Also acts on labels for data description and fitting.
    randSampler: adds random noise/sampling bias to synthetic data.
"""
import numpy as np
def labels_from_type(sir_type, nstage=0):
    if sir_type in ('sir', 'seir', 'sirs'):
        return list(sir_type)
    elif sir_type == 'si2r':
        return ['s', 'i1', 'i2', 'r']
    elif sir_type == 'sir_nstage':
        assert nstage > 0, 'nstage must be >0'
        labels = ['s']
        labels.extend('i' + str(istage) for istage in range(nstage))
        labels.append('r')
        return labels
    else:
        raise ValueError("Invalid modelling type")
class Sampler:
def __init__(self, nsize,intervals):
for ival in intervals:
assert(ival[1] > ival[0]),'interval must be increasing'
self.insize = nsize
self.intervals = intervals
self.outsize = len(intervals)
def F(self,data):
[nt,nsize]=data.shape
        assert(nsize == self.insize), "data size doesn't match insize"
data_samp=np.zeros((nt,self.outsize))
icount=0
for ival in self.intervals:
isum = np.zeros(nt)
for i in range(ival[0],ival[1]):
isum += data[:,i]
# isum=np.sum(data[:,ival[0]:ival[1]],1)
data_samp[:,icount]=isum
icount+=1
return data_samp
def Ft(self,data_samp):
[nt,isize]=data_samp.shape
        assert(isize == self.outsize), "data sampled size doesn't match outsize"
data=np.zeros((nt,self.insize))
icount=0
for ival in self.intervals:
isum = data_samp[:,icount]
# data[:,ival[0]:ival[1]] +=isum
for i in range(ival[0],ival[1]):
data[:,i]+=isum
icount+=1
return data
def Flabels(self,labels):
sampled_labels=[]
for ival in self.intervals:
if(ival[1] == ival[0]+1):
l0=labels[ival[0]]
sampled_labels.append(l0)
else:
l0=labels[ival[0] ]
l1=labels[ival[1]-1]
sampled_labels.append(l0 + '-' + l1)
return sampled_labels
def Flabels_from_type(self,sir_type,nstage=0):
labels=labels_from_type(sir_type,nstage)
return self.Flabels(labels)
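
# Illustrative usage (a sketch, not part of the original module): aggregate a staged
# infection model into a single observed "infected" channel. Interval bounds are
# half-open [start, stop), matching the range() loops in F and Ft above.
def _example_sampler_usage():
    labels = labels_from_type('sir_nstage', nstage=3)  # ['s', 'i0', 'i1', 'i2', 'r']
    sampler = Sampler(len(labels), [(1, 4)])           # sum the three infected stages
    synthetic = np.random.rand(100, len(labels))       # (nt, insize) model output
    observed = sampler.F(synthetic)                    # shape (100, 1)
    back_projected = sampler.Ft(observed)              # adjoint mapping back to (100, 5)
    return observed, back_projected, sampler.Flabels(labels)  # labels -> ['i0-i2']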
#def draw_samples(pT,n,Ntests):
# np.random.seed(0)
# s = np.random.normal(0, 1,n)
# mu = Ntests*pT
# sigma = np.sqrt(Ntests*pT*(1-pT))
# data_samp = (sigma*s+mu)/Ntests
# return data_samp
def randSampler(data,pTT,pTF,Ntests):
def draw_samples(pT,n,Ntests):
np.random.seed(0)
s = np.random.normal(0, 1,n)
mu = Ntests*pT
        sigma = np.sqrt(Ntests*pT*(1-pT))
        data_samp = (sigma*s+mu)/Ntests
        return data_samp
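    # draw_samples above approximates Binomial(Ntests, pT) / Ntests with a normal
    # distribution of mean pT and standard deviation sqrt(pT * (1 - pT) / Ntests),
    # i.e. the observed positive-test fraction for a true probability pT.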
"""With the goal of a 2p2c simulation, implement incompressible 2p1c.
Credit and first contact: <NAME>.
"""
import time
import porepy as pp
import numpy as np
import scipy.sparse as sps
import scipy.optimize as spo
from porepy.numerics.fv.generaltpfaad import (
GeneralTpfaAd,
UpwindAd,
FluxBasedUpwindAd,
HarmAvgAd,
)
from porepy.numerics.ad.functions import (
exp,
sin,
cos,
tanh,
heaviside,
RegularizedHeaviside,
)
from porepy.numerics.ad.equation_manager import Expression
from porepy.numerics.ad.operators import SecondOrderTensorAd
from porepy.numerics.ad.grid_operators import DirBC
from porepy.numerics.ad.forward_mode import initAdArrays
from porepy.numerics.solvers.andersonacceleration import AndersonAcceleration
# Material parameters
# Rock - arbitrary, exact solution will integrate these
porosity = 1e-1
permeability_value = 1e-12
# Brine
rho_b = 1000 # kg m**-3
mu_b = 300.3 * 1e-6 # Pa s
kappa_b = 3 # 1 - arbitrary
def lambda_b(sat_b, sat_c):
sat_b_scaled = sat_b / (sat_b + sat_c)
return sat_b_scaled ** kappa_b / mu_b
# CO2
rho_c = 750 # kg m**-3
mu_c = 42.5 * 1e-6 # Pa s
kappa_c = 3 # 1 - arbitrary
krc_max = 0.4 # 1 - arbitrary
def lambda_c(sat_b, sat_c):
sat_c_scaled = sat_c / (sat_b + sat_c)
return krc_max * sat_c_scaled ** kappa_c / mu_c
# Define fractional flow function as function of merely sat_c; and its derivative.
def total_lambda(sat_c):
return lambda_b(1 - sat_c, sat_c) + lambda_c(1 - sat_c, sat_c)
def frac_c(sat_c):
return lambda_c(1 - sat_c, sat_c) / total_lambda(sat_c)
def lambda_b_prime(sat_c):
return -kappa_b * (1 - sat_c) ** (kappa_b - 1) / mu_b
def lambda_c_prime(sat_c):
return krc_max * kappa_c * sat_c ** (kappa_c - 1) / mu_c
def frac_c_prime(sat_c):
return lambda_c_prime(sat_c) / total_lambda(sat_c) - lambda_c(
1 - sat_c, sat_c
) / total_lambda(sat_c) ** 2 * (lambda_b_prime(sat_c) + lambda_c_prime(sat_c))
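# Illustrative consistency check (not in the original script): the analytic derivative
# frac_c_prime should agree with a central finite difference of frac_c away from the
# saturation endpoints, where the power laws are smooth.
_sat_check = np.linspace(0.05, 0.95, 19)
_eps = 1e-6
_fd_check = (frac_c(_sat_check + _eps) - frac_c(_sat_check - _eps)) / (2 * _eps)
assert np.allclose(frac_c_prime(_sat_check), _fd_check, rtol=1e-4), \
    "analytic and finite-difference derivatives of the fractional flow disagree"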
# Setup grid, gridbucket and data
dx = 1000
dy = 200
dz = 40
N = 50
gb = pp.meshing.cart_grid([], nx=[N, 1, 1], physdims=[dx, dy, dz])
g = gb.grids_of_dimension(3)[0]
d = gb.node_props(g)
# Time step size
one_day = 24 * 60 * 60 # in seconds
final_time = 100 * one_day
current_time = 0
dt = 0.1 * one_day
num_time_steps = 10 # int(final_time / dt)
# Define keywords
pressure_variable = "pressure"
brine_saturation_variable = "brine"
co2_saturation_variable = "co2"
flow_kw = "flow"
# Flow parameters
perm = SecondOrderTensorAd(permeability_value * np.ones(g.num_cells))
# Injection over whole cross section; rest impermeable
all_faces = g.tags["domain_boundary_faces"].nonzero()[0]
injection_faces = all_faces[g.face_centers[0, all_faces] < 1e-6]
extraction_faces = all_faces[g.face_centers[0, all_faces] > dx - 1e-6]
bc_labels = np.array(["neu"] * all_faces.size)
bc_labels[injection_faces] = "dir"
bc_flow = pp.BoundaryCondition(g, all_faces, bc_labels)
bc_val_flow = np.zeros(g.num_faces)
one_year = 365 * one_day
injection_area = dy * dz
p_ref = 5e6 # Pa
q_ext = 0.1 # ca. 1.427e6 / one_year / porosity # m**3/s
bc_val_flow[injection_faces] = p_ref
bc_val_flow[extraction_faces] = q_ext # outflow = inflow
flow_param = {"second_order_tensor": perm, "bc": bc_flow, "bc_values": bc_val_flow}
pp.initialize_default_data(g, d, flow_kw, flow_param)
# AD Boundary condition
bc_flow = pp.ad.BoundaryCondition(flow_kw, [g])
dir_bc_flow = DirBC(bc_flow, [g])
# Primary variables / DOFs
d[pp.PRIMARY_VARIABLES] = {
pressure_variable: {"cells": 1},
brine_saturation_variable: {"cells": 1},
co2_saturation_variable: {"cells": 1},
}
# Initialize current states - constant reference pressure; 100% saturated with brine.
pressure_state = p_ref * np.ones(g.num_cells)
import numpy as np
from scipy import signal
import matplotlib.pyplot as plt
import argparse, sys, time, math, json
from scipy.fftpack import fft, fftshift, ifft
class Waterfall():
"""Waterfall Tool Main Class"""
def __init__(self, fs, fc, f_chan, BW):
self.fs = fs
self.BW = BW
self.fc = fc
self.f_chan = f_chan
def calFFT(self, sig):
norm_fft = (1/self.fs)*fftshift(fft(sig))
abs_fft = np.abs(norm_fft)
return abs_fft
def calFFTPower(self, afft, fs):
transform = 10 * np.log10(afft/127)
return transform
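    # Illustrative use of the two helpers above (a sketch, not part of the original):
    #   wf = Waterfall(fs=2048000, fc=137.5e6, f_chan=[137.62e6], BW=[40e3])
    #   power_db = wf.calFFTPower(wf.calFFT(iq_chunk), wf.fs)
    # where iq_chunk is a 1-D complex array holding one second of samples (length fs);
    # all parameter values here are placeholders, not values from the original script.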
def run(self,filename, save_flag=False):
"""Loads the data and performs fft in chunks.
Params:
            filename: str
                Name of the IQ file in .wav format.
            save_flag: bool
                Enable to save the FFT of the IQ data to .npy,
                which will be auto-loaded next time.
"""
self.overlap = 0.5
offset = 44
T = 1/fs
iterate = 0
adc_offset = -127.5
window = self.fs
self.filename = filename
self.save_flag = save_flag
data = np.memmap(filename, offset=offset)
self.total_duration = T*(len(data)/2)
self.num_chunks = int(len(data)/(window*2))
file_name = 'Spec_'+self.filename.split('.wav')[0]+'.npy'
# self.filename = self.filename.split('/')[-1].split('.wav')[0]
#ErrorHandling:
if len(self.BW) > 1:
if not len(self.BW) == len(self.f_chan):
                print('Error: Number of BW values must equal the number of f_chan values given')
sys.exit()
elif len(self.f_chan) > 1:
self.BW = self.BW * len(self.f_chan)
for j in range(len(self.f_chan)):
            if np.abs(self.f_chan[j] - self.fc)
# encoding: utf-8
#
# @Author: <NAME>, <NAME>
# @Date: Nov 15, 2021
# @Filename: ism.py
# @License: BSD 3-Clause
# @Copyright: <NAME>, <NAME>
import os.path
from astropy import units as u
from astropy import constants as c
import numpy as np
from astropy.io import fits, ascii
from astropy.table import Table
from scipy.special import sph_harm
from astropy.wcs import WCS
from astropy.wcs.utils import proj_plane_pixel_scales
from astropy.coordinates import SkyCoord
from astropy.modeling.models import Sersic2D
from dataclasses import dataclass
import sys
if (sys.version_info[0]+sys.version_info[1]/10.) < 3.8:
from backports.cached_property import cached_property
else:
from functools import cached_property
from scipy.ndimage.interpolation import map_coordinates
from scipy.interpolate import interp1d, interp2d
import lvmdatasimulator
from lvmdatasimulator import log
import progressbar
from joblib import Parallel, delayed
from astropy.convolution import convolve_fft, kernels
from lvmdatasimulator.utils import calc_circular_mask, convolve_array, set_default_dict_values, \
ism_extinction, check_overlap, assign_units
fluxunit = u.erg / (u.cm ** 2 * u.s * u.arcsec ** 2)
velunit = u.km / u.s
def brightness_inhomogeneities_sphere(harm_amplitudes, ll, phi_cur, theta_cur, rho, med, radius, thickness):
"""
    Auxiliary function producing the inhomogeneities in the brightness distribution for the Cloud or Bubble objects
    using spherical harmonics.
"""
brt = theta_cur * 0
for m in np.arange(-ll, ll + 1):
brt += (harm_amplitudes[m + ll * (ll + 1) - 1] * sph_harm(m, ll, phi_cur, theta_cur).real * med *
(1 - np.sqrt(abs(rho.value ** 2 / radius.value ** 2 - (1 - thickness / 2) ** 2))))
return brt
def sphere_brt_in_line(brt_3d, rad_3d, rad_model, flux_model):
"""
    Auxiliary function computing the brightness of the Cloud or Bubble at given radii and in a given line,
    according to the Cloudy models
"""
p = interp1d(rad_model, flux_model, fill_value='extrapolate', assume_sorted=True)
return p(rad_3d) * brt_3d
def interpolate_sphere_to_cartesian(spherical_array, x_grid=None, y_grid=None, z_grid=None,
rad_grid=None, theta_grid=None, phi_grid=None, pxscale=1. * u.pc):
"""
Auxiliary function to project the brightness or velocities from the spherical to cartesian coordinates
"""
x, y, z = np.meshgrid(x_grid, y_grid, z_grid, indexing='ij')
phi_c, theta_c, rad_c = xyz_to_sphere(x, y, z, pxscale=pxscale)
ir = interp1d(rad_grid, np.arange(len(rad_grid)), bounds_error=False)
ith = interp1d(theta_grid, np.arange(len(theta_grid)))
iphi = interp1d(phi_grid, np.arange(len(phi_grid)))
new_ir = ir(rad_c.ravel())
new_ith = ith(theta_c.ravel())
new_iphi = iphi(phi_c.ravel())
cart_data = map_coordinates(spherical_array, np.vstack([new_ir, new_ith, new_iphi]),
order=1, mode='constant', cval=0)
return cart_data.reshape([len(x_grid), len(y_grid), len(z_grid)]).T
def limit_angle(value, bottom_limit=0, top_limit=np.pi):
"""
    Auxiliary function to wrap angle values into the range [bottom_limit, top_limit] (default [0, pi])
"""
value[value < bottom_limit] += (top_limit - bottom_limit)
value[value > top_limit] -= (top_limit - bottom_limit)
return value
def xyz_to_sphere(x, y, z, pxscale=1. * u.pc):
"""
Auxiliary function to map the coordinates from cartesian to spherical system
"""
phi_c = np.arctan2(y, x)
rad_c = (np.sqrt(x ** 2 + y ** 2 + z ** 2))
rad_c[rad_c == 0 * u.pc] = 1e-3 * pxscale
theta_c = (np.arccos(z / rad_c))
phi_c = limit_angle(phi_c, 0 * u.radian, 2 * np.pi * u.radian)
theta_c = limit_angle(theta_c, 0 * u.radian, np.pi * u.radian)
return phi_c, theta_c, rad_c
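# In xyz_to_sphere above, a point on the +x axis (y = z = 0) maps to phi = 0,
# theta = pi / 2 and rad equal to its distance from the origin; radii of exactly zero
# are clipped to 1e-3 * pxscale so that arccos(z / rad) stays well defined.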
def find_model_id(file=lvmdatasimulator.CLOUDY_MODELS,
check_id=None, params=lvmdatasimulator.CLOUDY_SPEC_DEFAULTS['id']):
"""
    Checks the input parameters of the pre-computed Cloudy model and returns the corresponding index in the grid
"""
with fits.open(file) as hdu:
if check_id is None:
if params is None:
check_id = lvmdatasimulator.CLOUDY_SPEC_DEFAULTS['id']
log.warning(f'Default Cloudy model will be used (id = {check_id})')
else:
summary_table = Table(hdu['Summary'].data)
indexes = np.arange(len(summary_table)).astype(int)
rec_table = np.ones(shape=len(summary_table), dtype=bool)
def closest(rec, prop, val):
unique_col = np.unique(summary_table[prop][rec])
if isinstance(val, str):
res = unique_col[unique_col == val]
if len(res) == 0:
return ""
return res
else:
return unique_col[np.argsort(np.abs(unique_col - val))[0]]
for p in params:
if p not in summary_table.colnames or params[p] is None or \
((isinstance(params[p], float) or isinstance(params[p], int)) and ~np.isfinite(params[p])):
continue
rec_table = rec_table & (summary_table[p] == closest(indexes, p, params[p]))
indexes = np.flatnonzero(rec_table)
if len(indexes) == 0:
break
if len(indexes) == 0 or len(indexes) == len(summary_table):
check_id = lvmdatasimulator.CLOUDY_SPEC_DEFAULTS['id']
log.warning('Input parameters do not correspond to any pre-computed Cloudy model.'
'Default Cloudy model will be used (id = {0})'.format(check_id))
elif len(indexes) == 1:
check_id = summary_table['Model_ID'][indexes[0]]
for p in params:
if p not in summary_table.colnames or params[p] is None or \
((isinstance(params[p], float) or
isinstance(params[p], int)) and ~np.isfinite(params[p])):
continue
if params[p] != summary_table[p][indexes[0]]:
log.warning(f'Use the closest pre-computed Cloudy model with id = {check_id}')
break
else:
check_id = summary_table['Model_ID'][indexes[0]]
log.warning(f'Select one of the closest pre-computed Cloudy model with id = {check_id}')
#
# for cur_ext in range(len(hdu)):
# if cur_ext == 0:
# continue
# found = False
# for p in params:
# if p == 'id':
# continue
# precision = 1
# if p == 'Z':
# precision = 2
# if np.round(params[p], precision) != np.round(hdu[cur_ext].header[p], precision):
# break
# else:
# found = True
# if found:
# return cur_ext, check_id
# check_id = lvmdatasimulator.CLOUDY_SPEC_DEFAULTS['id']
# log.warning('Input parameters do not correspond to any pre-computed Cloudy model.'
# 'Default Cloudy model will be used (id = {0})'.format(check_id))
extension_index = None
while extension_index is None:
extension_index = [cur_ext for cur_ext in range(len(hdu)) if (
check_id == hdu[cur_ext].header.get('MODEL_ID'))]
if len(extension_index) == 0:
if check_id == lvmdatasimulator.CLOUDY_SPEC_DEFAULTS['id']:
log.warning('Model_ID = {0} is not found in the Cloudy models grid. '
'Use the first one in the grid instead'.format(check_id))
extension_index = 1
else:
log.warning('Model_ID = {0} is not found in the Cloudy models grid. '
'Use default ({1}) instead'.format(check_id,
lvmdatasimulator.CLOUDY_SPEC_DEFAULTS['id']))
check_id = lvmdatasimulator.CLOUDY_SPEC_DEFAULTS['id']
extension_index = None
else:
extension_index = extension_index[0]
return extension_index, check_id
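# Illustrative call (a sketch, not part of the original module): pick the pre-computed
# Cloudy model closest to a requested parameter set, falling back to the default id
# when nothing matches:
#   ext_index, model_id = find_model_id(
#       params={'Z': 0.5, 'qH': 49., 'nH': 10, 'Teff': 30000, 'Geometry': 'Sphere'})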
@dataclass
class Nebula:
"""
    Base class defining properties of every nebula type.
    By itself it describes a rectangular nebula (e.g. DIG)
    The constructed nebula has 4 dimensions, where the 4th describes its appearance in different emission lines
    (if spectrum_id is None, or if it is a dark nebula => only one line)
"""
xc: int = None # Center of the region in the field of view, pix
yc: int = None # Center of the region in the field of view, pix
x0: int = 0 # Coordinates of the bottom-left corner in the field of view, pix
y0: int = 0 # Coordinates of the bottom-left corner in the field of view, pix
pix_width: int = None # full width of cartesian grid, pix (should be odd)
pix_height: int = None # full height of cartesian grid, pix (should be odd)
width: u.pc = 0 * u.pc # width of the nebula in pc (not used if pix_width is set up)
height: u.pc = 0 * u.pc # height of the nebula in pc (not used if pix_height is set up)
pxscale: u.pc = 0.01 * u.pc # pixel size in pc
spectrum_id: int = None # ID of a template Cloudy emission spectrum for this nebula
n_brightest_lines: int = None # limit the number of the lines to the first N brightest
sys_velocity: velunit = 0 * velunit # Systemic velocity
turbulent_sigma: velunit = 10 * velunit # Velocity dispersion due to turbulence; included in calculations of LSF
max_brightness: fluxunit = 1e-15 * fluxunit
max_extinction: u.mag = 0 * u.mag
perturb_scale: int = 0 * u.pc # Spatial scale of correlated perturbations
perturb_amplitude: float = 0.1 # Maximal amplitude of perturbations
_npix_los: int = 1 # full size along line of sight in pixels
    nchunks: int = -1  # number of chunks to use for the convolution. If negative, select automatically
vel_gradient: (velunit / u.pc) = 0 # velocity gradient along the nebula
vel_pa: u.degree = 0 # Position angle of the kinematical axis (for the velocity gradient or rotation velocity)
def __post_init__(self):
self._assign_all_units()
self._assign_position_params()
self._ref_line_id = 0
self.linerat_constant = True # True if the ratio of line fluxes shouldn't change across the nebula
def _assign_all_units(self):
whole_list_properties = ['pxscale', 'sys_velocity', 'turbulent_sigma', 'max_brightness', 'max_extinction',
'perturb_scale', 'radius', 'PA', 'length', 'width', 'vel_gradient', 'r_eff',
'vel_rot', 'expansion_velocity', 'spectral_axis', 'vel_pa']
whole_list_units = [u.pc, velunit, velunit, fluxunit, u.mag, u.pc, u.pc, u.degree, u.pc, u.pc,
(velunit / u.pc), u.kpc, velunit, velunit, velunit, u.degree]
cur_list_properties = []
cur_list_units = []
for prp, unit in zip(whole_list_properties, whole_list_units):
if hasattr(self, prp):
cur_list_properties.append(prp)
cur_list_units.append(unit)
assign_units(self, cur_list_properties, cur_list_units)
def _assign_position_params(self, conversion_type='rect'):
if conversion_type == 'rect':
for v in ['height', 'width']:
if self.__getattribute__(f'pix_{v}') is None:
val = np.round((self.__getattribute__(v) / self.pxscale).value / 2.).astype(int) * 2 + 1
else:
val = np.round(self.__getattribute__(f'pix_{v}') / 2.).astype(int) * 2 + 1
setattr(self, f'pix_{v}', val)
elif conversion_type == 'ellipse':
self.pix_width = (np.round(np.abs(self.radius / self.pxscale * np.sin(self.PA)) +
np.abs(self.radius / self.pxscale *
self.ax_ratio * np.cos(self.PA))).astype(int) * 2 + 1).value
self.pix_height = (np.round(np.abs(self.radius / self.pxscale * np.cos(self.PA)) +
np.abs(self.radius / self.pxscale *
self.ax_ratio * np.sin(self.PA))).astype(int) * 2 + 1).value
elif conversion_type == 'galaxy':
self.pix_width = (np.round(np.abs(self.r_max * np.sin(self.PA)) +
np.abs(self.r_max * self.ax_ratio * np.cos(self.PA))).astype(int) * 2 + 1).value
self.pix_height = (np.round(np.abs(self.r_max * np.cos(self.PA)) +
np.abs(self.r_max * self.ax_ratio * np.sin(self.PA))).astype(int) * 2 + 1).value
elif conversion_type == 'cylinder':
self.pix_width = (np.ceil((self.length * np.abs(np.sin(self.PA)) +
self.width * np.abs(np.cos(self.PA))) / self.pxscale / 2.
).astype(int) * 2 + 1).value
self.pix_height = (np.ceil((self.length * np.abs(np.cos(self.PA)) +
self.width * np.abs(np.sin(self.PA))) / self.pxscale / 2.
).astype(int) * 2 + 1).value
if (self.xc is not None) and (self.yc is not None):
self.x0 = self.xc - np.round((self.pix_width - 1) / 2).astype(int)
self.y0 = self.yc - np.round((self.pix_height - 1) / 2).astype(int)
elif (self.x0 is not None) and (self.y0 is not None):
self.xc = self.x0 + np.round((self.pix_width - 1) / 2).astype(int)
self.yc = self.y0 + np.round((self.pix_height - 1) / 2).astype(int)
@cached_property
def _cartesian_x_grid(self):
return np.arange(self.pix_width) * self.pxscale
@cached_property
def _cartesian_y_grid(self):
return np.arange(self.pix_height) * self.pxscale
@cached_property
def _cartesian_z_grid(self):
return np.arange(self._npix_los) * self.pxscale
@cached_property
def _max_density(self):
return self.max_extinction * (1.8e21 / (u.cm ** 2 * u.mag))
@cached_property
def _brightness_3d_cartesian(self):
"""
Method to obtain the brightness (or density) distribution of the nebula in cartesian coordinates
"""
brt = np.ones(shape=(self.pix_height, self.pix_width, self._npix_los), dtype=float) / self._npix_los
if (self.perturb_scale > 0) and (self.perturb_amplitude > 0):
pertscale = (self.perturb_scale / self.pxscale).value
perturb = np.random.uniform(-1, 1, (self.pix_height, self.pix_width)
) * self.perturb_amplitude / self._npix_los
xx, yy = np.meshgrid(np.arange(self.pix_width), np.arange(self.pix_height))
f = np.exp(-2 * (xx ** 2 + yy ** 2) / pertscale)
perturb = 4 / np.sqrt(np.pi) / pertscale * np.fft.ifft2(np.fft.fft2(perturb) * np.fft.fft2(f)).real
brt += (perturb[:, :, None] - np.median(perturb))
return brt
@cached_property
def _brightness_4d_cartesian(self):
"""
Derive the brightness (or density) distribution of the nebula for each emission line in cartesian coordinates
"""
if self.spectrum_id is None or self.linerat_constant:
flux_ratios = np.array([1.])
else:
with fits.open(lvmdatasimulator.CLOUDY_MODELS) as hdu:
flux_ratios = hdu[self.spectrum_id].data[1:, 1]
index_ha = np.flatnonzero(hdu[self.spectrum_id].data[1:, 0] == 6562.81)
if self.n_brightest_lines is not None and \
(self.n_brightest_lines > 0) and (self.n_brightest_lines < len(flux_ratios)):
indexes_sorted = np.argsort(flux_ratios)[::-1]
flux_ratios = flux_ratios[indexes_sorted[: self.n_brightest_lines]]
index_ha = np.flatnonzero(hdu[self.spectrum_id].data[1:, 0][indexes_sorted] == 6562.81)
if len(index_ha) == 1:
self._ref_line_id = index_ha[0]
return self._brightness_3d_cartesian[None, :, :, :] * flux_ratios[:, None, None, None]
@cached_property
def brightness_skyplane(self):
"""
Project the 3D nebula onto sky plane (for emission or continuum sources)
"""
if self.max_brightness > 0:
norm_max = self.max_brightness
else:
norm_max = 1
map2d = np.nansum(self._brightness_3d_cartesian, 2)
return map2d / np.nanmax(map2d) * norm_max
@cached_property
def brightness_skyplane_lines(self):
"""
        Project the 3D emission nebula onto the sky plane (returns an image in each emission line)
"""
if self.max_brightness > 0:
map2d = np.nansum(self._brightness_4d_cartesian, 3)
return map2d / np.nanmax(map2d[self._ref_line_id, :, :]) * self.max_brightness
else:
return None
@cached_property
def extinction_skyplane(self):
"""
Project the 3D nebula onto sky plane (for dark clouds)
"""
if self.max_extinction > 0:
map2d = np.nansum(self._brightness_3d_cartesian, 2)
return map2d / np.nanmax(map2d) * self._max_density / (1.8e21 / (u.cm ** 2 * u.mag))
else:
return None
@cached_property
def vel_field(self):
return self._get_2d_velocity()
# if vel_field is None:
# return np.atleast_1d(self.sys_velocity)
# else:
# return vel_field + self.sys_velocity
def _get_2d_velocity(self):
if hasattr(self, 'vel_gradient') and (self.vel_gradient is not None) and (self.vel_gradient != 0):
xx, yy = np.meshgrid(np.arange(self.pix_width), np.arange(self.pix_height))
vel_field = (- (xx - (self.pix_width - 1) / 2) * np.sin(self.vel_pa) +
(yy - (self.pix_height - 1) / 2) * np.cos(self.vel_pa)) * self.pxscale * self.vel_gradient
return vel_field
else:
return None
# @cached_property
# def line_profile(self):
# lprf = np.zeros(shape=len(self.los_velocity), dtype=float)
# lprf[np.floor(len(lprf) / 2.).astype(int)] = 1.
# return lprf
@dataclass
class Rectangle(Nebula):
"""
Class defining a simple rectangular component.
    This is equivalent to Nebula, but with no perturbations or turbulence by default
"""
perturb_amplitude: float = 0.0 # Maximal amplitude of perturbations
turbulent_sigma: velunit = 0 * velunit # Velocity dispersion due to turbulence; included in calculations of LSF
def __post_init__(self):
self._assign_all_units()
self._assign_position_params()
self._ref_line_id = 0
self.linerat_constant = True # True if the ratio of line fluxes shouldn't change across the nebula
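# Illustrative usage (a sketch, not part of the original module): a 10 x 5 pc
# rectangular nebula sampled at 0.1 pc per pixel; the parameter values are arbitrary.
#   neb = Rectangle(xc=100, yc=100, width=10 * u.pc, height=5 * u.pc,
#                   pxscale=0.1 * u.pc, max_brightness=1e-16 * fluxunit)
#   image = neb.brightness_skyplane   # 2D map scaled to peak at max_brightness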
@dataclass
class Ellipse(Nebula):
"""
Class defining a simple elliptical component.
No perturbations and turbulence by default
"""
perturb_amplitude: float = 0.0 # Maximal amplitude of perturbations
turbulent_sigma: velunit = 0 * velunit # Velocity dispersion due to turbulence; included in calculations of LSF
radius: u.pc = 1.0 * u.pc # Radius along the major axis of the ellipse (or radius of the circle)
PA: u.degree = 90 * u.degree # position angle of the major axis
ax_ratio: float = 1. # ratio of minor/major axes
def __post_init__(self):
self._assign_all_units()
self._npix_los = 1
self._assign_position_params(conversion_type='ellipse')
self._ref_line_id = 0
self.linerat_constant = True # True if the ratio of line fluxes shouldn't change across the nebula
@cached_property
def _brightness_3d_cartesian(self):
"""
Method to obtain the brightness (or density) distribution of the nebula in cartesian coordinates
"""
xx, yy = np.meshgrid(np.arange(self.pix_width), np.arange(self.pix_height))
brt = np.ones(shape=(self.pix_height, self.pix_width), dtype=np.float32)
angle = (self.PA + 90 * u.degree).to(u.radian).value
xct = (xx - (self.pix_width - 1) / 2) * np.cos(angle) + \
(yy - (self.pix_height - 1) / 2) * np.sin(angle)
yct = (xx - (self.pix_width - 1) / 2) * np.sin(angle) - \
(yy - (self.pix_height - 1) / 2) * np.cos(angle)
rmaj = (self.radius.to(u.pc) / self.pxscale.to(u.pc)).value
rmin = (self.radius.to(u.pc) / self.pxscale.to(u.pc)).value * self.ax_ratio
rec = (xct ** 2 / rmaj ** 2) + (yct ** 2 / rmin ** 2) >= 1
brt[rec] = 0
brt = brt.reshape((self.pix_height, self.pix_width, 1))
return brt
@dataclass
class Circle(Ellipse):
"""
Class defining a simple circular component.
"""
def __post_init__(self):
self._assign_all_units()
self.ax_ratio = 1.
self._npix_los = 1
self._assign_position_params(conversion_type='ellipse')
self._ref_line_id = 0
self.linerat_constant = True # True if the ratio of line fluxes shouldn't change across the nebula
@dataclass
class Filament(Nebula):
"""
    Class of an isotropic, cylindrically shaped filament.
    Defined by its position, length, PA, radius, and maximal optical depth.
    If it is an emission-type filament, the maximal brightness is also required.
    A velocity gradient can also be set up
"""
PA: u.degree = 90 * u.degree # position angle of the filament
length: u.pc = 10 * u.pc # full length of the filament
width: u.pc = 0.1 * u.pc # full width (diameter) of the filament
def __post_init__(self):
self._assign_all_units()
self._assign_position_params(conversion_type='cylinder')
self._npix_los = 1
self._ref_line_id = 0
self.linerat_constant = True # True if the ratio of line fluxes shouldn't change across the nebula
@cached_property
def _brightness_3d_cartesian(self):
"""
Method to obtain the brightness (or density) distribution of the nebula in cartesian coordinates
"""
xx, yy = np.meshgrid(np.arange(self.pix_width), np.arange(self.pix_height))
brt = np.zeros_like(xx, dtype=np.float32)
xct = (xx - (self.pix_width - 1) / 2) * np.cos(self.PA + 90 * u.degree) + \
(yy - (self.pix_height - 1) / 2) * np.sin(self.PA + 90 * u.degree)
yct = (xx - (self.pix_width - 1) / 2) * np.sin(self.PA + 90 * u.degree) - \
(yy - (self.pix_height - 1) / 2) * np.cos(self.PA + 90 * u.degree)
rad = ((self.width / self.pxscale).value / 2.)
len_px = ((self.length / self.pxscale).value / 2.)
rec = (np.abs(yct) <= rad) & (np.abs(xct) <= len_px)
brt[rec] = np.sqrt(1. - (yct[rec] / rad) ** 2)
brt = brt.reshape((self.pix_height, self.pix_width, 1))
return brt
@dataclass
class _ObsoleteFilament(Nebula):
"""
    Class of an isotropic, cylindrically shaped filament.
    Defined by its position, length, PA, radius, and maximal optical depth;
    if it is an emission-type filament, also by its maximal brightness.
    NB: this class is obsolete, but might be reconsidered later if varying line ratios are implemented
"""
PA: u.degree = 90 * u.degree # position angle of the filament
length: u.pc = 10 * u.pc # full length of the filament
width: u.pc = 0.1 * u.pc # full width (diameter) of the filament
vel_gradient: (velunit / u.pc) = 0 # velocity gradient along the filament (to be added)
_theta_bins: int = 50
_rad_bins: int = 0
_h_bins: int = 2
_npix_los: int = 101
def __post_init__(self):
self._assign_all_units()
if self._rad_bins == 0:
self._rad_bins = np.ceil(self.width.to(u.pc).value / self.pxscale.to(u.pc).value * 5).astype(int)
if (self.xc is not None) and (self.yc is not None):
self.x0 = self.xc - np.round((len(self._cartesian_y_grid) - 1) / 2).astype(int)
self.y0 = self.yc - np.round((len(self._cartesian_z_grid) - 1) / 2).astype(int)
elif (self.x0 is not None) and (self.y0 is not None):
self.xc = self.x0 + np.round((len(self._cartesian_y_grid) - 1) / 2).astype(int)
self.yc = self.y0 + np.round((len(self._cartesian_z_grid) - 1) / 2).astype(int)
self._ref_line_id = 0
self.linerat_constant = True # True if the ratio of line fluxes shouldn't change across the nebula
@cached_property
def _theta_grid(self):
return np.linspace(0, 2 * np.pi, self._theta_bins)
@cached_property
def _h_grid(self):
return np.linspace(0, self.length, self._h_bins)
@cached_property
def _rad_grid(self):
return np.linspace(0, self.width / 2, self._rad_bins)
@cached_property
def _cartesian_y_grid(self):
npix = np.ceil(1.01 * (self.length * np.abs(np.sin(self.PA)) +
self.width * np.abs(np.cos(self.PA))) / self.pxscale).astype(int)
npix_l = npix / 2 - np.ceil(self.length / 2 * np.sin(-self.PA) / self.pxscale).astype(int)
return (np.linspace(0, npix, npix + 1) - npix_l) * self.pxscale
@cached_property
def _cartesian_z_grid(self):
npix = np.ceil(1.01 * (self.length * np.abs(np.cos(self.PA)) +
self.width * np.abs(np.sin(self.PA))) / self.pxscale).astype(int)
npix_l = npix / 2 - np.ceil(self.length / 2 * np.cos(-self.PA) / self.pxscale).astype(int)
return (np.linspace(0, npix, npix + 1) - npix_l) * self.pxscale
@cached_property
def _cartesian_x_grid(self):
return np.linspace(-1.01, 1.01, self._npix_los) * self.width / 2
@cached_property
def _brightness_3d_cylindrical(self):
"""
Method to calculate brightness (or opacity) of the cloud at given theta, phi and radii
theta: float -- azimuthal angle [0, 2 * np.pi]
rad: float -- radius [0, self.width / 2]
h: float -- height [0, self.length]
Returns:
3D cube of normalized brightness in theta-rad-h grid; total brightness = 1
"""
rho, theta, h = np.meshgrid(self._rad_grid, self._theta_grid, self._h_grid, indexing='ij')
brt = np.ones_like(theta)
brt[rho > (self.width / 2)] = 0
norm = np.sum(brt)
if norm > 0:
brt = brt / np.sum(brt)
return brt
@cached_property
def _brightness_3d_cartesian(self):
x, y, z = np.meshgrid(self._cartesian_x_grid, self._cartesian_y_grid,
self._cartesian_z_grid, indexing='ij')
h_c = -y * np.sin(self.PA) + z * np.cos(self.PA)
theta_c = np.arctan2(y * np.cos(self.PA) + z * np.sin(self.PA), x)
rad_c = np.sqrt(x ** 2 + (y * np.cos(self.PA) + z * np.sin(self.PA)) ** 2)
rad_c[rad_c == 0 * u.pc] = 1e-3 * self.pxscale
theta_c = limit_angle(theta_c, 0 * u.radian, 2 * np.pi * u.radian)
ir = interp1d(self._rad_grid, np.arange(self._rad_bins), bounds_error=False)
ith = interp1d(self._theta_grid, np.arange(self._theta_bins))
ih = interp1d(self._h_grid, np.arange(self._h_bins), bounds_error=False)
new_ir = ir(rad_c.ravel())
new_ith = ith(theta_c.ravel())
new_ih = ih(h_c.ravel())
cart_data = map_coordinates(self._brightness_3d_cylindrical,
np.vstack([new_ir, new_ith, new_ih]),
order=1, mode='constant', cval=0)
return cart_data.reshape([len(self._cartesian_x_grid),
len(self._cartesian_y_grid),
len(self._cartesian_z_grid)]).T
@dataclass
class Galaxy(Nebula):
"""
    Class defining the galaxy object (set up as a Sersic2D profile, assuming it has continuum and emission components)
"""
PA: u.degree = 90 * u.degree # position angle of the major axis
ax_ratio: float = 0.7 # ratio of minor/major axes
r_eff: u.kpc = 1 * u.kpc # Effective radius in kpc
rad_lim: float = 3. # Maximum radius for calculations (in R_eff)
n: float = 1. # Sersic index
vel_rot: velunit = 0 * velunit # Rotational velocity (not implemented yet)
def __post_init__(self):
self._assign_all_units()
self._npix_los = 1
self.r_max = self.r_eff.to(u.pc).value / self.pxscale.to(u.pc).value * self.rad_lim
self._assign_position_params(conversion_type='galaxy')
self._ref_line_id = 0
self.linerat_constant = True # True if the ratio of line fluxes shouldn't change across the nebula
@cached_property
def _brightness_3d_cartesian(self):
"""
Method to obtain the brightness (or density) distribution of the nebula in cartesian coordinates
"""
xx, yy = np.meshgrid(np.arange(self.pix_width), np.arange(self.pix_height))
angle = (self.PA + 90 * u.degree).to(u.radian).value
mod = Sersic2D(amplitude=1, r_eff=(self.r_eff.to(u.pc) / self.pxscale.to(u.pc)).value,
n=self.n, x_0=(self.pix_width - 1) / 2, y_0=(self.pix_height - 1) / 2,
ellip=1 - self.ax_ratio, theta=angle)
brt = mod(xx, yy)
xct = (xx - (self.pix_width - 1) / 2) * np.cos(angle) + \
(yy - (self.pix_height - 1) / 2) * np.sin(angle)
yct = (xx - (self.pix_width - 1) / 2) * np.sin(angle) - \
(yy - (self.pix_height - 1) / 2) * np.cos(angle)
rmaj = self.rad_lim * (self.r_eff.to(u.pc) / self.pxscale.to(u.pc)).value
rmin = self.rad_lim * (self.r_eff.to(u.pc) / self.pxscale.to(u.pc)).value * self.ax_ratio
mask = np.ones_like(brt, dtype=np.float32)
rec = (xct ** 2 / rmaj ** 2) + (yct ** 2 / rmin ** 2) >= 1
mask[rec] = 0
mask = convolve_fft(mask, kernels.Gaussian2DKernel(3.), fill_value=0, allow_huge=True)
brt = brt * mask
brt = brt.reshape(self.pix_height, self.pix_width, 1)
return brt
def _get_2d_velocity(self):
if hasattr(self, 'vel_rot') and (self.vel_rot is not None) and (self.vel_rot != 0):
xx, yy = np.meshgrid(np.arange(self.pix_width), np.arange(self.pix_height))
angle = (self.PA + 90 * u.degree).to(u.radian).value
xct = (xx - (self.pix_width - 1) / 2) * np.cos(angle) + \
(yy - (self.pix_height - 1) / 2) * np.sin(angle)
yct = (xx - (self.pix_width - 1) / 2) * np.sin(angle) - \
(yy - (self.pix_height - 1) / 2) * np.cos(angle)
rad = np.sqrt(xct ** 2 + yct ** 2)
vel_field = np.zeros_like(xx, dtype=np.float32) * velunit
rec = rad > 0
vel_field[rec] = self.vel_rot * np.sqrt(1 - self.ax_ratio ** 2) * xct[rec] / rad[rec]
return vel_field
else:
return None
@dataclass
class DIG(Nebula):
"""
Class defining the DIG component. For now it is defined just by its brightness (constant)
"""
max_brightness: fluxunit = 1e-17 * fluxunit
vel_gradient: (velunit / u.pc) = 0
@dataclass
class Cloud(Nebula):
"""Class of an isotropic spherical gas cloud without any ionization source.
Defined by its position, radius, density, maximal optical depth"""
radius: u.pc = 1.0 * u.pc
max_brightness: fluxunit = 0 * fluxunit
max_extinction: u.mag = 2.0 * u.mag
thickness: float = 1.0
perturb_degree: int = 0 # Degree of perturbations (max. degree of spherical harmonics for cloud)
linerat_constant: bool = False # True if the ratio of line fluxes shouldn't change across the nebula
_phi_bins: int = 90
_theta_bins: int = 90
_rad_bins: int = 0
_npix_los: int = 100
def __post_init__(self):
self._assign_all_units()
if self._rad_bins == 0:
self._rad_bins = np.ceil(self.radius.to(u.pc).value / self.pxscale.to(u.pc).value * 3).astype(int)
delta = np.round((len(self._cartesian_y_grid) - 1) / 2).astype(int)
if (self.xc is not None) and (self.yc is not None):
self.x0 = self.xc - delta
self.y0 = self.yc - delta
elif (self.x0 is not None) and (self.y0 is not None):
self.xc = self.x0 + delta
self.yc = self.y0 + delta
self._ref_line_id = 0
@cached_property
def _theta_grid(self):
return np.linspace(0, np.pi, self._theta_bins)
@cached_property
def _phi_grid(self):
return np.linspace(0, 2 * np.pi, self._phi_bins)
@cached_property
def _rad_grid(self):
return np.linspace(0, self.radius, self._rad_bins)
@cached_property
def _cartesian_z_grid(self):
npix = np.ceil(1.02 * self.radius / self.pxscale).astype(int)
return np.linspace(-npix, npix, 2 * npix + 1) * self.pxscale
@cached_property
def _cartesian_y_grid(self):
return self._cartesian_z_grid.copy()
@cached_property
def _cartesian_x_grid(self):
return np.linspace(-1.02, 1.02, self._npix_los) * self.radius
@cached_property
def _brightness_3d_spherical(self):
"""
Method to calculate brightness (or opacity) of the cloud at given theta, phi and radii
theta: float -- polar angle [0, np.pi]
phi: float -- azimuthal angle [0, 2 * np.pi]
rad: float -- radius [0, self.radius]
Returns:
3D cube of normalized brightness in theta-phi-rad grid; total brightness = 1
"""
rho, theta, phi = np.meshgrid(self._rad_grid, self._theta_grid, self._phi_grid, indexing='ij')
brt = np.ones_like(theta)
brt[rho < (self.radius * (1 - self.thickness))] = 0
brt[rho > self.radius] = 0
med = np.median(brt[brt > 0])
if self.perturb_degree > 0:
phi_cur = limit_angle(phi + np.random.uniform(0, 2 * np.pi, 1), 0, 2 * np.pi)
theta_cur = limit_angle(theta + np.random.uniform(0, np.pi, 1), 0, np.pi)
harm_amplitudes = self.perturb_amplitude * np.random.randn(self.perturb_degree * (self.perturb_degree + 2))
brt += np.nansum(Parallel(n_jobs=lvmdatasimulator.n_process)(delayed(brightness_inhomogeneities_sphere)
(harm_amplitudes, ll, phi_cur, theta_cur,
rho, med, self.radius, self.thickness)
for ll in np.arange(1,
self.perturb_degree + 1)),
axis=0)
brt[brt < 0] = 0
if med > 0:
brt = brt / np.nansum(brt)
return brt
@cached_property
def _brightness_4d_spherical(self):
"""
Method to calculate brightness of the cloud at given theta, phi and radii for each line
theta: float -- polar angle [0, np.pi]
phi: float -- azimuthal angle [0, 2 * np.pi]
rad: float -- radius [0, self.radius]
Returns:
4D cube of brightness in line-theta-phi-rad grid; normalized to the total brightness in Halpha
"""
s = self._brightness_3d_spherical.shape
if self.spectrum_id is None or self.linerat_constant:
return self._brightness_3d_spherical.reshape((1, s[0], s[1], s[2]))
rho, _, _ = np.meshgrid(self._rad_grid, self._theta_grid, self._phi_grid, indexing='ij')
with fits.open(lvmdatasimulator.CLOUDY_MODELS) as hdu:
radius = hdu[self.spectrum_id].data[0, 2:] * (self.thickness * self.radius) + \
self.radius * (1 - self.thickness)
fluxes = hdu[self.spectrum_id].data[1:, 2:]
radius = np.insert(radius, 0, self.radius * (1 - self.thickness))
fluxes = np.insert(fluxes, 0, fluxes[:, 0], axis=1)
index_ha = np.flatnonzero(hdu[self.spectrum_id].data[1:, 0] == 6562.81)
if self.n_brightest_lines is not None and \
(self.n_brightest_lines > 0) and (self.n_brightest_lines < len(fluxes)):
indexes_sorted = np.argsort(hdu[self.spectrum_id].data[1:, 1])[::-1]
fluxes = fluxes[indexes_sorted[:self.n_brightest_lines], :]
index_ha = np.flatnonzero(hdu[self.spectrum_id].data[1:, 0][indexes_sorted] == 6562.81)
if len(index_ha) == 1:
self._ref_line_id = index_ha[0]
brt = np.array(Parallel(n_jobs=lvmdatasimulator.n_process)(delayed(sphere_brt_in_line)
(self._brightness_3d_spherical, rho,
radius, flux)
for flux in fluxes)).reshape((fluxes.shape[0],
s[0], s[1], s[2]))
return brt / np.nansum(brt[self._ref_line_id])
@cached_property
def _brightness_3d_cartesian(self):
return interpolate_sphere_to_cartesian(self._brightness_3d_spherical, x_grid=self._cartesian_x_grid,
y_grid=self._cartesian_y_grid, z_grid=self._cartesian_z_grid,
rad_grid=self._rad_grid, theta_grid=self._theta_grid,
phi_grid=self._phi_grid, pxscale=self.pxscale)
@cached_property
def _brightness_4d_cartesian(self):
s = self._brightness_4d_spherical.shape
return np.array(Parallel(n_jobs=lvmdatasimulator.n_process)(delayed(interpolate_sphere_to_cartesian)
(cur_line_array,
self._cartesian_x_grid, self._cartesian_y_grid,
self._cartesian_z_grid, self._rad_grid,
self._theta_grid, self._phi_grid, self.pxscale)
for cur_line_array in self._brightness_4d_spherical)
).reshape((s[0], len(self._cartesian_z_grid), len(self._cartesian_y_grid),
len(self._cartesian_x_grid)))
@dataclass
class Bubble(Cloud):
"""Class of an isotropic thin expanding bubble."""
spectral_axis: velunit = np.arange(-20, 20, 10) * velunit
expansion_velocity: velunit = 20 * velunit
max_brightness: fluxunit = 1e-15 * fluxunit
max_extinction: u.mag = 0 * u.mag
thickness: float = 0.2
@cached_property
def _velocity_3d_spherical(self) -> velunit:
"""
Calculate line of sight velocity at given radius, phi, theta
V ~ 1/brightness (given that v~1/n_e^2 and brightness~ne^2)
"""
rho, theta, phi = np.meshgrid(self._rad_grid, self._theta_grid, self._phi_grid, indexing='ij')
vel_cube = np.zeros_like(self._brightness_3d_spherical)
rec = (rho <= self.radius) & (rho >= (self.radius * (1 - self.thickness)))
vel_cube[rec] = \
np.sin(theta[rec]) * \
np.cos(phi[rec]) * \
self.expansion_velocity / self._brightness_3d_spherical[rec] * \
np.median(self._brightness_3d_spherical[self._brightness_3d_spherical > 0])
return vel_cube
@cached_property
def _velocity_3d_cartesian(self) -> velunit:
return interpolate_sphere_to_cartesian(self._velocity_3d_spherical, x_grid=self._cartesian_x_grid,
y_grid=self._cartesian_y_grid, z_grid=self._cartesian_z_grid,
rad_grid=self._rad_grid, theta_grid=self._theta_grid,
phi_grid=self._phi_grid, pxscale=self.pxscale)
def _turbulent_lsf(self, velocity):
"""Line spread function as a function of coorinates, including the velocity center shift"""
# mu = self.velocity(theta, phi)
mu = self._velocity_3d_cartesian[:, :, :, None] * velunit + self.sys_velocity
sig = self.turbulent_sigma
return 1. / (np.sqrt(2. * np.pi) * sig) * np.exp(-np.power((velocity - mu) / sig, 2.) / 2)
def _d_spectrum_cartesian(self, velocity: velunit):
"""Returns local spectrum, per pc**3 of area"""
return (self._brightness_3d_cartesian[:, :, :, None] * (
fluxunit / u.pc ** 3) * self._turbulent_lsf(velocity)).to(fluxunit / velunit / u.pc ** 3)
@cached_property
def line_profile(self) -> (fluxunit / velunit):
"""
        Produces the distribution of the observed line profiles in each pixel of the sky plane
"""
vel_axis = self.spectral_axis.to(velunit, equivalencies=u.spectral())
_, _, _, vels = np.meshgrid(self._cartesian_z_grid,
self._cartesian_y_grid,
self._cartesian_x_grid,
vel_axis, indexing='ij')
spectrum = (
np.sum(self._d_spectrum_cartesian(vels), axis=2
).T * (self._cartesian_x_grid[1] - self._cartesian_x_grid[0]
) * (self._cartesian_y_grid[1] - self._cartesian_y_grid[0]
) * (self._cartesian_z_grid[1] - self._cartesian_z_grid[0])
)
return spectrum / np.sum(spectrum, axis=0)
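# Illustrative usage (a sketch, not part of the original module): an expanding shell
# evaluated on a user-supplied velocity axis; all parameter values are arbitrary.
#   bbl = Bubble(xc=200, yc=200, radius=5 * u.pc, pxscale=0.1 * u.pc,
#                expansion_velocity=30 * velunit,
#                spectral_axis=np.linspace(-80, 80, 17) * velunit)
#   profiles = bbl.line_profile   # line profiles per sky pixel, normalized along the velocity axis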
@dataclass
class CustomNebula(Nebula):
"""
    Class defining a custom nebula with a user-defined distribution of the brightness, continuum
    and line shapes in different lines.
"""
pass
@dataclass
class ISM:
"""
Class defining the ISM contribution to the field of view
"""
wcs: WCS
width: int = 401 # Width of field of view in pixels
    height: int = 401 # Height of field of view in pixels
spec_resolution: u.Angstrom = 0.06 * u.Angstrom # Spectral resolution of the simulation
npix_line: int = 1 # Minimal number of pixels for a resolution element at wl = 10000A for construction of vel.grid
distance: u.kpc = 50 * u.kpc # Distance to the object for further conversion between arcsec and pc
sys_velocity: velunit = 0 * velunit # Systemic velocity to center the vel.grid on
vel_amplitude: velunit = 150 * velunit # Maximal deviation from the systemic velocity to setup vel.grid
    turbulent_sigma: velunit = 20. * velunit # turbulent velocity dispersion used for every nebula unless otherwise specified
    R_V: float = 3.1 # R_V value defining the reddening curve (used unless another value is provided for a nebula)
    ext_law: str = 'F99' # Reddening law (used unless another value is provided for a nebula)
def __post_init__(self):
assign_units(self, ['vel_amplitude', 'turbulent_sigma', 'sys_velocity', 'distance', 'spec_resolution'],
[velunit, velunit, velunit, u.kpc, u.Angstrom])
self.content = fits.HDUList()
self.content.append(fits.PrimaryHDU(header=self.wcs.to_header(), data=np.zeros(shape=(2, 2), dtype=int)))
self.vel_grid = np.linspace(-self.vel_amplitude + self.sys_velocity,
self.vel_amplitude + self.sys_velocity,
np.ceil(self.vel_amplitude / self.vel_resolution).astype(int) * 2 + 1)
self.pxscale = proj_plane_pixel_scales(self.wcs)[0] * 3600 * self.distance.to(u.pc) / 206265.
self.content[0].header['width'] = (self.width, "Width of field of view, px")
self.content[0].header['height'] = (self.height, "Height of field of view, px")
self.content[0].header['PhysRes'] = (self.pxscale.value, "Physical resolution, pc/px")
self.content[0].header['Dist'] = (self.distance.value, "Distance, kpc")
        self.content[0].header['Vsys'] = (self.sys_velocity.value, "Systemic Velocity, km/s")
self.content[0].header['VelRes'] = (self.vel_resolution.value, "Velocity resolution, km/s/px")
self.content[0].header['TurbSig'] = (self.turbulent_sigma.value, "Default turbulent velocity dispersion, km/s")
self.content[0].header['Nobj'] = (0, "Number of generated nebulae")
@cached_property
def vel_resolution(self):
return (self.spec_resolution / self.npix_line / (10000 * u.Angstrom) * c.c).to(velunit)
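    # For the default spec_resolution = 0.06 A and npix_line = 1, vel_resolution
    # evaluates to 0.06 / 10000 * c, i.e. roughly 1.8 km/s per velocity bin.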
def _get_continuum(self, my_comp, wl_grid):
"""
Properly extracts continuum for current nebula taking into account its shape and surface brightness
:param my_comp:
:param wl_grid:
:return: continuum
"""
cont_type = self.content[my_comp + "_CONTINUUM"].header.get("CONTTYPE")
continuum = self.content[my_comp + "_CONTINUUM"].data
cont_norm = self.content[my_comp + "_CONTINUUM"].header.get("CONTFLUX")
cont_norm_wl = self.content[my_comp + "_CONTINUUM"].header.get("CONTWL")
if cont_type.lower() == 'model':
cont_wl_fullrange = continuum[0, :]
cont_fullrange = continuum[1, :]
p = interp1d(continuum[0, :], continuum[1, :], assume_sorted=True,
bounds_error=False, fill_value='extrapolate')
continuum = p(wl_grid)
elif cont_type.lower() == 'poly':
p = np.poly1d(continuum)
cont_wl_fullrange = np.linspace(3500, 10000, 501)
cont_fullrange = p(cont_wl_fullrange)
continuum = p(wl_grid)
elif cont_type.lower() == 'bb':
cont_wl_fullrange = np.linspace(3500, 10000, 501)
cont_fullrange = 1 / cont_wl_fullrange ** 5 / (
np.exp(6.63e-27 * 3e10 / cont_wl_fullrange / 1e-8 / continuum / 1.38e-16) - 1)
continuum = 1 / wl_grid ** 5 / (np.exp(6.63e-27 * 3e10 / wl_grid / 1e-8 / continuum / 1.38e-16) - 1)
t_filter = None
if type(cont_norm_wl) == str:
file_filter = os.path.join(lvmdatasimulator.ROOT_DIR, 'data', 'instrument', 'filters', cont_norm_wl+".dat")
if not os.path.isfile(file_filter):
log.warning("Cannot find filter {0}. "
"Default Wavelength = 5500A will be used for continuum normalization".format(cont_norm_wl))
cont_norm_wl = 5500.
t_filter = None
else:
t_filter = ascii.read(file_filter, names=['lambda', 'transmission'])
cont_norm_wl = np.sum(t_filter['lambda'] * t_filter['transmission']) / np.sum(t_filter['transmission'])
if t_filter is None:
cont_model_max = cont_fullrange[np.argmin(abs(cont_wl_fullrange - cont_norm_wl))]
else:
dl = np.roll(t_filter['lambda'], -1) - t_filter['lambda']
dl[-1] = dl[-2]
w_filter = np.sum(dl * t_filter['transmission'])/np.max(t_filter['transmission'])
p = interp1d(t_filter['lambda'], t_filter['transmission'], assume_sorted=True,
fill_value=0, bounds_error=False)
cont_model_max = np.sum(cont_fullrange * p(cont_wl_fullrange)) / w_filter
if ~np.isfinite(cont_norm) or cont_norm <= 0:
cont_norm = self.content[my_comp + "_CONTINUUM"].header.get("CONTMAG") * u.ABmag
cont_norm = cont_norm.to(u.STmag, u.spectral_density(cont_norm_wl * u.AA)).to(u.erg/u.s/u.cm**2/u.AA).value
return continuum / cont_model_max * cont_norm * (wl_grid[1] - wl_grid[0])
def _add_fits_extension(self, name, value, obj_to_add, zorder=0, cur_wavelength=0, add_fits_kw=None,
add_counter=False):
self.content.append(fits.ImageHDU(np.atleast_1d(value), name=name))
self.content[-1].header['Nebtype'] = (type(obj_to_add).__name__, "Type of the nebula")
is_dark = ((obj_to_add.max_brightness <= 0) and (obj_to_add.max_extinction > 0))
self.content[-1].header['Dark'] = (is_dark, " Emitting or absorbing nebula?")
self.content[-1].header['X0'] = (obj_to_add.x0, "Position in the field of view")
self.content[-1].header['Y0'] = (obj_to_add.y0, "Position in the field of view")
self.content[-1].header['Zorder'] = (zorder, "Z-order in the field of view")
self.content[-1].header['NCHUNKS'] = (obj_to_add.nchunks, "N of chunks for convolution")
if type(obj_to_add) in [Bubble, Cloud]:
self.content[-1].header['Radius'] = (obj_to_add.radius.to_value(u.pc), "Radius of the nebula, pc")
self.content[-1].header['PertOrd'] = (obj_to_add.perturb_degree, "Degree to produce random perturbations")
self.content[-1].header['PertAmp'] = (obj_to_add.perturb_amplitude, "Max amplitude of random perturb.")
if type(obj_to_add) in [DIG, Nebula, Rectangle, Ellipse, Circle]:
self.content[-1].header['PertScl'] = (obj_to_add.perturb_scale.to_value(u.pc),
"Scale of the random perturbations, pc")
self.content[-1].header['PertAmp'] = (obj_to_add.perturb_amplitude, "Max amplitude of random perturb.")
if type(obj_to_add) in [Filament]:
self.content[-1].header['Width'] = (obj_to_add.width.to_value(u.pc), 'Width of the filament, pc')
self.content[-1].header['Length'] = (obj_to_add.length.to_value(u.pc), 'Length of the filament, pc')
if type(obj_to_add) not in [Bubble, Cloud, Galaxy]:
if obj_to_add.vel_gradient is not None:
self.content[-1].header['Vgrad'] = (obj_to_add.vel_gradient.to_value(velunit / u.pc),
'Velocity gradient, km/s per pc')
if obj_to_add.vel_pa is not None:
self.content[-1].header['PAkin'] = (obj_to_add.vel_pa.to_value(u.degree),
'Kinematical PA, degree')
if type(obj_to_add) in [Nebula, Rectangle]:
self.content[-1].header['Width'] = (obj_to_add.width.to_value(u.pc), 'Width of the nebula, pc')
self.content[-1].header['Height'] = (obj_to_add.height.to_value(u.pc), 'Height of the nebula, pc')
if type(obj_to_add) in [Ellipse, Filament, Galaxy]:
self.content[-1].header['PA'] = (obj_to_add.PA.to_value(u.degree), 'Position angle, degree')
if type(obj_to_add) in [Galaxy]:
self.content[-1].header['Reff'] = (obj_to_add.r_eff.to_value(u.kpc), 'Effective radius of the galaxy, kpc')
self.content[-1].header['NSersic'] = (obj_to_add.n, 'Sersic index')
self.content[-1].header['Rlim'] = (obj_to_add.rad_lim, 'Limiting distance in Reff')
self.content[-1].header['Vrot'] = (obj_to_add.vel_rot.to_value(velunit),
'Rotational velocity, km/s')
self.content[-1].header['PAkin'] = (obj_to_add.vel_pa.to_value(u.degree),
'Kinematical PA, degree')
if type(obj_to_add) in [Ellipse, Circle]:
            self.content[-1].header['Radius'] = (obj_to_add.radius.to_value(u.pc), 'Radius (major axis), pc')
if type(obj_to_add) in [Ellipse, Galaxy]:
self.content[-1].header['AxRat'] = (obj_to_add.ax_ratio, "Axis ratio")
if (obj_to_add.max_brightness <= 0) and (obj_to_add.max_extinction > 0):
self.content[-1].header['MaxExt'] = (obj_to_add.max_extinction.value, "Max extinction, mag/pix")
elif obj_to_add.max_brightness > 0:
self.content[-1].header['MaxBrt'] = (obj_to_add.max_brightness.value, "Max brightness, erg/s/cm^2/arcsec^2")
if type(obj_to_add) == Bubble:
self.content[-1].header['Vexp'] = (obj_to_add.expansion_velocity.to_value(velunit),
'Expansion velocity, km/s')
self.content[-1].header['SysVel'] = (obj_to_add.sys_velocity.to_value(velunit), "Systemic velocity, km/s")
self.content[-1].header['TurbVel'] = (obj_to_add.turbulent_sigma.to_value(velunit),
"ISM Velocity dispersion, km/s")
self.content[-1].header['SpecID'] = (obj_to_add.spectrum_id, "Ref. spectrum ID in model grid")
self.content[-1].header['NLines'] = (obj_to_add.n_brightest_lines, "Maximal number of lines to use")
if cur_wavelength:
self.content[-1].header['Lambda'] = (cur_wavelength, "Current line wavelength")
if add_fits_kw is not None:
for kw in add_fits_kw:
self.content[-1].header[kw] = add_fits_kw[kw]
if add_counter:
self.content[0].header['Nobj'] = (self.content[0].header['Nobj'] + 1, "Total number of nebulae")
def add_nebula(self, obj_to_add, obj_id=0, zorder=0, add_fits_kw=None, continuum=None):
"""
Method to add the particular nebula to the ISM object and to the output multi-extensions fits file
"""
if type(obj_to_add) not in [Nebula, Bubble, Filament, DIG, Cloud, Galaxy, Ellipse, Circle,
Rectangle, CustomNebula]:
log.warning('Skip nebula of wrong type ({0})'.format(type(obj_to_add)))
return
if (obj_to_add.max_brightness <= 0) and (obj_to_add.max_extinction <= 0) and (continuum is None):
log.warning('Skip nebula with zero extinction and brightness')
return
if obj_to_add.max_brightness > 0:
brt = obj_to_add.brightness_skyplane.value
if obj_to_add.spectrum_id is not None and not obj_to_add.linerat_constant:
brt_4d = obj_to_add.brightness_skyplane_lines.value
else:
brt_4d = None
elif obj_to_add.max_extinction > 0:
brt = obj_to_add.extinction_skyplane.value
brt_4d = None
elif continuum is not None:
brt = obj_to_add.brightness_skyplane
brt_4d = None
self._add_fits_extension(name="Comp_{0}_Brightness".format(obj_id), value=brt,
obj_to_add=obj_to_add, zorder=zorder, add_fits_kw=add_fits_kw, add_counter=True)
if obj_to_add.max_brightness > 0:
if brt_4d is not None:
with fits.open(lvmdatasimulator.CLOUDY_MODELS) as hdu:
wl_list = hdu[obj_to_add.spectrum_id].data[1:, 0]
if obj_to_add.n_brightest_lines is not None and \
(obj_to_add.n_brightest_lines > 0) and (obj_to_add.n_brightest_lines < len(wl_list)):
wl_list = wl_list[np.argsort(hdu[obj_to_add.spectrum_id].data[1:, 1]
)[::-1][: obj_to_add.n_brightest_lines]]
for line_ind in range(brt_4d.shape[0]):
self._add_fits_extension(name="Comp_{0}_Flux_{1}".format(obj_id, wl_list[line_ind]),
value=brt_4d[line_ind],
obj_to_add=obj_to_add, zorder=zorder, add_fits_kw=add_fits_kw,
cur_wavelength=wl_list[line_ind])
elif obj_to_add.spectrum_id is not None and obj_to_add.linerat_constant:
with fits.open(lvmdatasimulator.CLOUDY_MODELS) as hdu:
data_save = hdu[obj_to_add.spectrum_id].data[1:, :2]
if obj_to_add.n_brightest_lines is not None and \
(obj_to_add.n_brightest_lines > 0) and \
(obj_to_add.n_brightest_lines < len(hdu[obj_to_add.spectrum_id].data[1:, 0])):
data_save = data_save[np.argsort(data_save[:, 1])[::-1][: obj_to_add.n_brightest_lines]]
self._add_fits_extension(name="Comp_{0}_FluxRatios".format(obj_id),
value=data_save.T,
obj_to_add=obj_to_add, zorder=zorder, add_fits_kw=add_fits_kw)
if type(obj_to_add) == Bubble:
self._add_fits_extension(name="Comp_{0}_LineProfile".format(obj_id), value=obj_to_add.line_profile.value,
obj_to_add=obj_to_add, zorder=zorder, add_fits_kw=add_fits_kw)
if obj_to_add.vel_field is not None:
self._add_fits_extension(name="Comp_{0}_Vel".format(obj_id), value=obj_to_add.vel_field.value,
obj_to_add=obj_to_add, zorder=zorder, add_fits_kw=add_fits_kw)
if continuum is not None:
self._add_fits_extension(name="Comp_{0}_Continuum".format(obj_id), value=continuum,
obj_to_add=obj_to_add, zorder=zorder, add_fits_kw=add_fits_kw)
return self.content
def save_ism(self, filename):
self.content.writeto(filename, overwrite=True)
log.info("Generated ISM saved to {0}".format(filename))
def generate(self, all_objects):
"""
Generate all the Nebulae from the input list
Args:
all_objects: list -- contains a dictionary describing the nebula to add:
example:
all_objects = [{type: "Bubble, Filament, DIG, ....",
sys_velocity: 0 * u.km/u.s,
expansion_velocity: 30 * u.km/u.s,
turbulent_sigma: 10 * u.km/u.s,
radius: 5 * u.pc,
max_brightness: 1e-16 * u.erg / u.cm**2 / u.s / u.arcsec ** 2,
RA: "08h12m13s",
DEC: "-20d14m13s",
'perturb_degree': 8, # max. order of spherical harmonics to generate inhomogeneities
'perturb_amplitude': 0.1, # relative max. amplitude of inhomogeneities,
'perturb_scale': 200 * u.pc, # spatial scale to generate inhomogeneities (DIG only)
'distance': 50 * u.kpc, # distance to the nebula (default is from ISM)
'cloudy_id': None, # id of pre-generated Cloudy model
'cloudy_params': {'Z': 0.5, 'qH': 49., 'nH': 10, 'Teff': 30000, 'Geometry': 'Sphere'}, #
parameters defining the pre-generated Cloudy model (used if spectrum_id is None)
'n_brightest_lines': 10, # only this number of the brightest lines will be evaluated
'linerat_constant': False # if True -> lines ratios don't vary across Cloud/Bubble
'continuum_type': 'BB' or 'Model' or 'Poly' # type of the continuum model
'continuum_data': model_id or [poly_coefficients] or Teff or dict with "wl" and "flux"
# value defining cont. shape
'continuum_flux': 1e-16 * u.erg / u.cm ** 2 / u.s / u.arcsec **2 / u.AA,
'continuum_mag': 22 * u.mag,
'continuum_wl': 5500, # could be also R, V, B,
'ext_law': 'F99', # Extinction law, one of those used by pyneb (used for dark nebulae)
'ext_rv': 3.1, # Value of R_V for extinction curve calculation (used for dark nebulae)
'vel_gradient: 12. * u.km / u.s / u.pc # Line-of-sight velocity gradient
'vel_pa: 30. * u.degree # PA of kinematical axis (for vel_gradient or vel_rot)
}]
"""
if type(all_objects) is dict:
all_objects = [all_objects]
if type(all_objects) not in [list, tuple]:
log.warning('Cannot generate nebulae as the input is not a list or tuple')
return None
all_objects = [cobj for cobj in all_objects if cobj.get('type') in ['Nebula', 'Bubble', 'Galaxy',
'Filament', 'DIG', 'Cloud',
'Rectangle', 'Circle', 'Ellipse',
'CustomNebula']]
n_objects = len(all_objects)
log.info("Start generating {} nebulae".format(n_objects))
bar = progressbar.ProgressBar(max_value=n_objects).start()
obj_id = self.content[0].header['Nobj']
obj_id_ini = self.content[0].header['Nobj']
for ind_obj, cur_obj in enumerate(all_objects):
bar.update(ind_obj)
# Setup default parameters for missing keywords
kin_pa_default = 0
if 'PA' in cur_obj:
kin_pa_default = cur_obj['PA']
for k, v, unit in zip(['max_brightness', 'max_extinction', 'thickness',
'expansion_velocity', 'sys_velocity',
'turbulent_sigma', 'perturb_degree',
'perturb_amplitude', 'perturb_scale', 'radius', 'distance',
'continuum_type', 'continuum_data', 'continuum_flux', 'continuum_mag',
'continuum_wl', 'ext_law', 'ext_rv', 'vel_gradient', 'vel_rot', 'vel_pa',
'n_brightest_lines', 'offset_RA', 'offset_DEC', 'RA', 'DEC'],
[0, 0, 1., 0, self.sys_velocity, self.turbulent_sigma, 0, 0.1, 0, 0, self.distance,
None, None, 0, None, 5500., self.ext_law, self.R_V, 0, 0, kin_pa_default, None,
None, None, None, None],
[fluxunit, u.mag, None, velunit, velunit, velunit, None, None,
u.pc, u.pc, u.kpc, None, None, fluxunit/u.AA, None, u.Angstrom,
None, None, velunit / u.pc, velunit, u.degree, None, u.arcsec, u.arcsec,
u.degree, u.degree]):
set_default_dict_values(cur_obj, k, v, unit=unit)
for k in ['max_brightness', 'max_extinction', 'radius', 'continuum_flux']:
if cur_obj[k] < 0:
cur_obj[k] = 0
if (cur_obj['max_brightness'] == 0) and (cur_obj['max_extinction'] == 0) and \
(((cur_obj['continuum_mag'] is None) and (cur_obj['continuum_flux'] == 0)) or
(cur_obj['continuum_data'] is None) or (cur_obj['continuum_type'] is None)):
log.warning("Wrong set of parameters for the nebula #{0}: skip this one".format(ind_obj))
continue
if lvmdatasimulator.CLOUDY_MODELS is None or (cur_obj['max_brightness'] <= 0):
cloudy_model_index = None
cloudy_model_id = None
else:
if cur_obj.get('cloudy_id') is None:
if cur_obj.get('cloudy_params') is None or (type(cur_obj.get('cloudy_params')) is not dict):
log.warning("Neither of 'cloudy_id' or 'cloudy_params' is set for the nebula #{0}: "
"use default 'cloudy_id={1}'".format(ind_obj,
lvmdatasimulator.CLOUDY_SPEC_DEFAULTS['id']))
cur_obj['cloudy_id'] = lvmdatasimulator.CLOUDY_SPEC_DEFAULTS['id']
else:
for p in lvmdatasimulator.CLOUDY_SPEC_DEFAULTS:
if p == 'id':
continue
if cur_obj['cloudy_params'].get(p) is None:
cur_obj['cloudy_params'][p] = lvmdatasimulator.CLOUDY_SPEC_DEFAULTS[p]
cloudy_model_index, cloudy_model_id = find_model_id(file=lvmdatasimulator.CLOUDY_MODELS,
check_id=cur_obj.get('cloudy_id'),
params=cur_obj.get('cloudy_params'))
if cur_obj.get('linerat_constant') is None:
if cur_obj['type'] in ['Bubble', 'Cloud']:
cur_obj['linerat_constant'] = False
else:
cur_obj['linerat_constant'] = True
if cur_obj['type'] == 'DIG':
if cur_obj.get('max_brightness') is None or cur_obj.get('max_brightness') <= 0:
log.warning("Wrong set of parameters for the nebula #{0}: skip this one".format(ind_obj))
continue
if not cur_obj.get('zorder'):
cur_obj['zorder'] = -1
if cur_obj['perturb_scale'] < 0:
cur_obj['perturb_scale'] = 0
generated_object = DIG(max_brightness=cur_obj.get('max_brightness'),
turbulent_sigma=cur_obj['turbulent_sigma'],
sys_velocity=cur_obj['sys_velocity'],
vel_gradient=cur_obj['vel_gradient'],
vel_pa=cur_obj['vel_pa'],
spectrum_id=cloudy_model_index,
n_brightest_lines=cur_obj['n_brightest_lines'],
pxscale=self.pxscale * (cur_obj['distance'].to(u.pc) / self.distance.to(u.pc)),
perturb_scale=cur_obj['perturb_scale'],
perturb_amplitude=cur_obj['perturb_amplitude'],
pix_width=self.width, pix_height=self.height,
)
else:
# ==== Check input parameters and do necessary conversions
if not cur_obj.get('zorder'):
cur_obj['zorder'] = 0
if not ((cur_obj.get('RA') is not None and cur_obj.get('DEC') is not None) or
(cur_obj.get('X') is not None and cur_obj.get('Y') is not None) or
(cur_obj.get('offset_X') is not None and cur_obj.get('offset_Y') is not None) or
(cur_obj.get('offset_RA') is not None and cur_obj.get('offset_DEC') is not None)):
log.warning("Wrong set of parameters for the nebula #{0}: skip this one".format(ind_obj))
continue
if cur_obj['type'] in ['Rectangle', 'Nebula'] and not (('width' in cur_obj) and ('height' in cur_obj)):
log.warning("Wrong set of parameters for the nebula #{0}: skip this one".format(ind_obj))
continue
if (cur_obj['type'] in ["Bubble", "Cloud", "Ellipse", 'Circle']) and (cur_obj['radius'] == 0):
log.warning("Wrong set of parameters for the nebula #{0}: skip this one".format(ind_obj))
continue
if cur_obj['type'] == 'Filament' and not (('length' in cur_obj) and ('PA' in cur_obj)):
log.warning("Wrong set of parameters for the nebula #{0}: skip this one".format(ind_obj))
continue
if cur_obj['type'] == 'Galaxy' and not (('r_eff' in cur_obj) and ('PA' in cur_obj) and
('ax_ratio' in cur_obj)):
log.warning("Wrong set of parameters for the nebula #{0}: skip this one".format(ind_obj))
continue
if cur_obj['type'] == 'Galaxy':
if 'n' not in cur_obj:
log.info("Set default Sersic index n=1 for the nebula #{0}".format(ind_obj))
cur_obj['n'] = 1
if 'rad_lim' not in cur_obj:
cur_obj['rad_lim'] = 3.
if not (cur_obj.get('X') is not None and cur_obj.get('Y') is not None):
if cur_obj.get('offset_X') is not None and cur_obj.get('offset_Y') is not None:
x = (self.width - 1) / 2. + cur_obj.get('offset_X')
y = (self.height - 1) / 2. + cur_obj.get('offset_Y')
elif cur_obj.get('RA') is not None and cur_obj.get('DEC') is not None:
radec = SkyCoord(ra=cur_obj.get('RA'), dec=cur_obj.get('DEC'))
x, y = self.wcs.world_to_pixel(radec)
elif cur_obj.get('offset_RA') is not None and cur_obj.get('offset_DEC') is not None:
x = (self.width - 1) / 2. - (cur_obj.get('offset_RA').to_value(u.degree) /
proj_plane_pixel_scales(self.wcs)[0])
y = (self.height - 1) / 2. + (cur_obj.get('offset_DEC').to_value(u.degree) /
proj_plane_pixel_scales(self.wcs)[0])
x = np.round(x).astype(int)
y = np.round(y).astype(int)
else:
x, y = [cur_obj.get('X'), cur_obj.get('Y')]
if (cur_obj['thickness'] <= 0) or (cur_obj['thickness'] > 1):
log.warning("Wrong value of thickness of the nebula #{0}: set it to 1.".format(ind_obj))
cur_obj['thickness'] = 1.
if cur_obj['type'] == "Bubble" and cur_obj.get('expansion_velocity') <= 0:
log.warning("Contracting bubbles are not supported (nebula #{0})."
" Use non-expanding cloud instead".format(ind_obj))
cur_obj['type'] = "Cloud"
if cur_obj['type'] in ["Bubble", "Cloud"]:
if cur_obj['perturb_degree'] < 0:
cur_obj['perturb_degree'] = 0
elif cur_obj['type'] == 'Filament' and ('width' not in cur_obj):
log.info("Set default width of the filament 0.1 pc for the nebula #{0}".format(ind_obj))
cur_obj['width'] = 0.1 * u.pc
# ==== Start calculations of different nebulae
if cur_obj['type'] == "Bubble":
generated_object = Bubble(xc=x, yc=y,
max_brightness=cur_obj.get('max_brightness'),
max_extinction=cur_obj.get('max_extinction'),
spectral_axis=self.vel_grid,
expansion_velocity=cur_obj.get('expansion_velocity'),
thickness=cur_obj['thickness'],
radius=cur_obj['radius'],
pxscale=self.pxscale * (cur_obj['distance'].to(u.pc) /
self.distance.to(u.pc)),
perturb_degree=cur_obj['perturb_degree'],
perturb_amplitude=cur_obj['perturb_amplitude'],
turbulent_sigma=cur_obj['turbulent_sigma'],
sys_velocity=cur_obj['sys_velocity'],
spectrum_id=cloudy_model_index,
n_brightest_lines=cur_obj['n_brightest_lines'],
linerat_constant=cur_obj['linerat_constant'],
)
elif cur_obj['type'] == "Cloud":
generated_object = Cloud(xc=x, yc=y,
max_brightness=cur_obj.get('max_brightness'),
max_extinction=cur_obj.get('max_extinction'),
thickness=cur_obj['thickness'],
radius=cur_obj['radius'],
pxscale=self.pxscale * (cur_obj['distance'].to(u.pc) /
self.distance.to(u.pc)),
perturb_degree=cur_obj['perturb_degree'],
perturb_amplitude=cur_obj['perturb_amplitude'],
spectrum_id=cloudy_model_index,
n_brightest_lines=cur_obj['n_brightest_lines'],
turbulent_sigma=cur_obj['turbulent_sigma'],
sys_velocity=cur_obj['sys_velocity'],
linerat_constant=cur_obj['linerat_constant'],
vel_gradient=cur_obj['vel_gradient'],
vel_pa=cur_obj['vel_pa'],
)
elif cur_obj['type'] == "Filament":
generated_object = Filament(xc=x, yc=y,
max_brightness=cur_obj.get('max_brightness'),
max_extinction=cur_obj.get('max_extinction'),
width=cur_obj['width'],
length=cur_obj['length'],
PA=cur_obj['PA'],
vel_gradient=cur_obj['vel_gradient'],
vel_pa=cur_obj['vel_pa'],
spectrum_id=cloudy_model_index,
n_brightest_lines=cur_obj['n_brightest_lines'],
turbulent_sigma=cur_obj['turbulent_sigma'],
sys_velocity=cur_obj['sys_velocity'],
pxscale=self.pxscale * (cur_obj['distance'].to(u.pc) /
self.distance.to(u.pc)),
)
elif cur_obj['type'] == "Galaxy":
generated_object = Galaxy(xc=x, yc=y,
max_brightness=cur_obj.get('max_brightness'),
max_extinction=cur_obj.get('max_extinction'),
r_eff=cur_obj['r_eff'],
rad_lim=cur_obj['rad_lim'],
ax_ratio=cur_obj['ax_ratio'],
PA=cur_obj['PA'],
n=cur_obj['n'],
vel_rot=cur_obj.get('vel_rot'),
vel_pa=cur_obj['vel_pa'],
spectrum_id=cloudy_model_index,
n_brightest_lines=cur_obj['n_brightest_lines'],
turbulent_sigma=cur_obj['turbulent_sigma'],
sys_velocity=cur_obj['sys_velocity'],
pxscale=self.pxscale * (cur_obj['distance'].to(u.pc) /
self.distance.to(u.pc)),
)
elif cur_obj['type'] == "Ellipse":
generated_object = Ellipse(xc=x, yc=y,
max_brightness=cur_obj.get('max_brightness'),
max_extinction=cur_obj.get('max_extinction'),
radius=cur_obj['radius'],
ax_ratio=cur_obj['ax_ratio'],
PA=cur_obj['PA'],
spectrum_id=cloudy_model_index,
n_brightest_lines=cur_obj['n_brightest_lines'],
turbulent_sigma=cur_obj['turbulent_sigma'],
sys_velocity=cur_obj['sys_velocity'],
vel_gradient=cur_obj['vel_gradient'],
vel_pa=cur_obj['vel_pa'],
pxscale=self.pxscale * (cur_obj['distance'].to(u.pc) /
self.distance.to(u.pc)),
perturb_scale=cur_obj['perturb_scale'],
perturb_amplitude=cur_obj['perturb_amplitude'],
)
elif cur_obj['type'] == "Circle":
generated_object = Circle(xc=x, yc=y,
max_brightness=cur_obj.get('max_brightness'),
max_extinction=cur_obj.get('max_extinction'),
radius=cur_obj['radius'],
spectrum_id=cloudy_model_index,
n_brightest_lines=cur_obj['n_brightest_lines'],
turbulent_sigma=cur_obj['turbulent_sigma'],
sys_velocity=cur_obj['sys_velocity'],
vel_gradient=cur_obj['vel_gradient'],
vel_pa=cur_obj['vel_pa'],
pxscale=self.pxscale * (cur_obj['distance'].to(u.pc) /
self.distance.to(u.pc)),
perturb_scale=cur_obj['perturb_scale'],
perturb_amplitude=cur_obj['perturb_amplitude'],
)
elif cur_obj['type'] == "Rectangle" or (cur_obj['type'] == "Nebula"):
generated_object = Rectangle(xc=x, yc=y,
width=cur_obj.get('width'), height=cur_obj.get('height'),
max_brightness=cur_obj.get('max_brightness'),
max_extinction=cur_obj.get('max_extinction'),
spectrum_id=cloudy_model_index,
n_brightest_lines=cur_obj['n_brightest_lines'],
turbulent_sigma=cur_obj['turbulent_sigma'],
sys_velocity=cur_obj['sys_velocity'],
vel_gradient=cur_obj['vel_gradient'],
vel_pa=cur_obj['vel_pa'],
pxscale=self.pxscale * (cur_obj['distance'].to(u.pc) /
self.distance.to(u.pc)),
perturb_scale=cur_obj['perturb_scale'],
perturb_amplitude=cur_obj['perturb_amplitude'],
)
elif cur_obj['type'] == "CustomNebulae":
# generated_object = CustomNebula(xc=x, yc=y,)
log.warning("Custom Nebulae will be added soon")
continue
else:
log.warning("Unrecognized type of the nebula #{0}: skip this one".format(ind_obj))
continue
if cloudy_model_index is not None:
if cur_obj['linerat_constant']:
lr = "Constant"
else:
lr = "Variable"
add_fits_kw = {"Model_ID": cloudy_model_id, "LineRat": lr}
else:
add_fits_kw = {}
add_fits_kw["Distance"] = (cur_obj['distance'].to_value(u.kpc), "Distance to the nebula, kpc")
continuum = None
if cur_obj['continuum_type'] is not None and cur_obj['continuum_data'] is not None \
and cur_obj['continuum_type'].lower() in ['bb', 'poly', 'model']:
if cur_obj['continuum_type'].lower() == 'model':
if isinstance(cur_obj['continuum_data'], dict) and ('wl' in cur_obj['continuum_data']) and \
('flux' in cur_obj['continuum_data']):
if len(cur_obj['continuum_data']['wl']) != len(cur_obj['continuum_data']['flux']):
log.error("Number of wavelength and flux points is inconsistent for continuum")
else:
wlscale = cur_obj['continuum_data']['wl']
continuum = np.vstack([wlscale, cur_obj['continuum_data']['flux']])
elif not isinstance(cur_obj['continuum_data'], dict) and lvmdatasimulator.CONTINUUM_MODELS is not None:
with fits.open(lvmdatasimulator.CONTINUUM_MODELS) as hdu:
if cur_obj['continuum_data'] >= hdu[0].data.shape[0]:
log.warning("Wrong continuum model ID for nebula #{0}".format(obj_id))
else:
wlscale = (np.arange(hdu[0].data.shape[1]) - hdu[0].header['CRPIX1'] + 1
) * hdu[0].header['CDELT1'] + hdu[0].header['CRVAL1']
continuum = np.vstack([wlscale, hdu[0].data[cur_obj['continuum_data']]])
elif cur_obj['continuum_type'].lower() in ['poly', 'bb']:
continuum = cur_obj['continuum_data']
if continuum is not None:
if add_fits_kw is None:
add_fits_kw = {}
add_fits_kw['CONTTYPE'] = (cur_obj['continuum_type'], "Type of the continuum")
if cur_obj['continuum_flux'] > 0:
contflux = cur_obj['continuum_flux'].to_value(u.erg / u.cm ** 2 / u.s / u.arcsec ** 2 / u.AA)
else:
contflux = 0
add_fits_kw['CONTFLUX'] = (contflux,
"Continuum brightness (in erg/s/cm^2/asec^2/AA) at ref. wl/Filter")
if cur_obj['continuum_mag'] is not None:
contmag = cur_obj['continuum_mag'].to_value(u.mag)
else:
contmag = None
add_fits_kw['CONTMAG'] = (contmag,
"Continuum brightness (in mag/asec^2) at ref. wl/Filter")
if isinstance(cur_obj['continuum_wl'], str):
cont_wl = cur_obj['continuum_wl']
else:
cont_wl = cur_obj['continuum_wl'].to_value(u.AA)
add_fits_kw['CONTWL'] = (cont_wl, 'Reference wavelength/filter for cont. flux/mag')
if cur_obj.get('max_extinction') > 0:
if add_fits_kw is None:
add_fits_kw = {}
add_fits_kw['EXT_LAW'] = (cur_obj['ext_law'], "Extinction law according to pyneb list")
add_fits_kw['EXT_RV'] = (cur_obj['ext_rv'], "R_V value for extinction calculations")
self.add_nebula(generated_object, obj_id=obj_id, zorder=cur_obj.get('zorder'), add_fits_kw=add_fits_kw,
continuum=continuum)
obj_id += 1
bar.finish()
if (obj_id - obj_id_ini) == 0:
return None
else:
return True
def load_nebulae(self, file):
"""
Load previously saved fits-file containing the information about all nebulae.
Note: Grid should be equal to that used when fits-file was generated!
"""
if not os.path.isfile(file):
log.warning("ISM doesn't contain any nebula")
return None
else:
with fits.open(file) as hdu:
wcs = WCS(hdu[0].header)
cdelt_file = [cdelt.to(u.degree).value for cdelt in wcs.proj_plane_pixel_scales()]
cdelt_ism = [cdelt.to(u.degree).value for cdelt in self.wcs.proj_plane_pixel_scales()]
check = ~np.isclose(cdelt_file, cdelt_ism)
check = np.append(check, ~np.isclose(wcs.wcs.crval, self.wcs.wcs.crval))
from utilities.edef import EventDefinition
from PyQt5.QtCore import QObject, pyqtSignal
from PyQt5.QtWidgets import QApplication
import pyca
from psp.Pv import Pv
import time
import numpy as np
import pickle
from operator import attrgetter
import utilities.matlog as matlog
from utilities.batch_get import batch_get
import math
import subprocess
import re
import os
import json
from datetime import datetime
MODEL_AVAILABLE = False
try:
import utilities.model as model
import utilities.fit
MODEL_AVAILABLE = True
except ImportError:
MODEL_AVAILABLE = False
"""A collection of classes to represent a linac orbit"""
class BaseBPM(object):
"""Abstract base class for a Beam Position Monitor.
Each BPM has an X, Y, and TMIT value, and a Z Position."""
def __init__(self, name, z_pos=None):
self.name = name
self.z = z_pos
self.is_energy_bpm = False
@property
def x(self):
raise NotImplementedError
@property
def y(self):
raise NotImplementedError
@property
def tmit(self):
raise NotImplementedError
def __getitem__(self, key):
if key.lower() == "x":
return self.x
if key.lower() == "y":
return self.y
if key.lower() == "tmit":
return self.tmit
if key.lower() == "z":
return self.z
def __str__(self):
return self.name
class StaticBPM(BaseBPM):
"""StaticBPM is a BPM, frozen in time. An Orbit with Static BPMs is how you make a reference orbit."""
def __init__(self, name, z_pos=None, x_val=None, y_val=None, tmit_val=None, x_rms=0.0, y_rms=0.0, tmit_rms=0.0, x_severity=None, y_severity=None, tmit_severity=None, x_status=None, y_status=None, tmit_status=None, is_energy_bpm=False):
super(StaticBPM, self).__init__(name, z_pos=z_pos)
self._x = x_val
self._y = y_val
self._tmit = tmit_val
self._x_rms = x_rms
self._y_rms = y_rms
self._tmit_rms = tmit_rms
self._x_sevr = x_severity
self._y_sevr = y_severity
self._tmit_sevr = tmit_severity
self._x_status = x_status
self._y_status = y_status
self._tmit_status = tmit_status
self.is_energy_bpm = is_energy_bpm
@BaseBPM.x.getter
def x(self):
return self._x
@BaseBPM.y.getter
def y(self):
return self._y
@BaseBPM.tmit.getter
def tmit(self):
return self._tmit
@property
def x_rms(self):
return self._x_rms
@property
def y_rms(self):
return self._y_rms
@property
def tmit_rms(self):
return self._tmit_rms
def severity(self, axis):
if axis == "x":
return self._x_sevr
if axis == "y":
return self._y_sevr
if axis == "tmit":
return self._tmit_sevr
raise Exception("Axis parameter not valid")
@property
def x_severity(self):
return self.severity('x')
@property
def y_severity(self):
return self.severity('y')
@property
def tmit_severity(self):
return self.severity('tmit')
def status(self, axis):
if axis == "x":
return self._x_status
if axis == "y":
return self._y_status
if axis == "tmit":
return self._tmit_status
raise Exception("Axis parameter not valid")
@property
def x_status(self):
return self.status('x')
@property
def y_status(self):
return self.status('y')
@property
def tmit_status(self):
return self.status('tmit')
def to_static(self):
return self
class BPM(BaseBPM):
"""BPM is a BPM value backed by EPICS PVs for X, Y, TMIT, and Z Position."""
def __init__(self, name, edef=None):
super(BPM, self).__init__(name)
self.edef = None
self.edef_suffix = ''
self.set_edef(edef)
self.z_pv_obj = None
self.x_pv_obj = None
self.y_pv_obj = None
self.tmit_pv_obj = None
@classmethod
def pv_count(cls):
return 4
def set_edef(self, edef):
if self.edef == edef:
return
self.edef_suffix = ''
self.edef = edef
if edef is not None:
if isinstance(edef, EventDefinition):
self.edef_suffix = str(edef.edef_num)
else:
self.edef_suffix = str(edef)
@BaseBPM.x.getter
def x(self):
return self.x_pv_obj.data["value"]
@BaseBPM.y.getter
def y(self):
return self.y_pv_obj.data["value"]
@BaseBPM.tmit.getter
def tmit(self):
return self.tmit_pv_obj.data["value"]
@property
def x_rms(self):
return 0.0
@property
def y_rms(self):
return 0.0
@property
def tmit_rms(self):
return 0.0
def pv_objects(self):
return [self.x_pv_obj, self.y_pv_obj, self.tmit_pv_obj]
def z_pv(self):
return self.name + ":Z"
def x_pv(self):
return self.name + ":X" + self.edef_suffix
def y_pv(self):
return self.name + ":Y" + self.edef_suffix
def tmit_pv(self):
return self.name + ":TMIT" + self.edef_suffix
def buffer_pv(self, axis, suffix='HST'):
return self.edef.buffer_pv("{name}:{axis}".format(name=self.name, axis=axis.upper()), suffix=suffix)
def x_buffer(self):
return self.get_buffer('X')
def y_buffer(self):
return self.get_buffer('Y')
def tmit_buffer(self):
return self.get_buffer('TMIT')
def x_rms_buffer(self):
return self.get_buffer('X', 'RMSHST')
def y_rms_buffer(self):
return self.get_buffer('Y', 'RMSHST')
def tmit_rms_buffer(self):
return self.get_buffer('TMIT', 'RMSHST')
def get_buffer(self, axis, suffix='HST'):
if self.edef is None:
raise Exception("BPM must have an EDEF to gather buffered data.")
return self.edef.get_buffer("{name}:{axis}".format(name=self.name, axis=axis.upper()), suffix=suffix)
def to_static(self, use_buffer=False):
if isinstance(self.edef, EventDefinition):
#Make extra sure we've got the latest data.
self.x_pv_obj.get()
self.y_pv_obj.get()
self.tmit_pv_obj.get()
x = self.x
y = self.y
tmit = self.tmit
x_rms = 0.0
y_rms = 0.0
tmit_rms = 0.0
if use_buffer and isinstance(self.edef, EventDefinition):
x = self.x_buffer()[-1]
y = self.y_buffer()[-1]
tmit = self.tmit_buffer()[-1]
x_rms = self.x_rms_buffer()[-1]
y_rms = self.y_rms_buffer()[-1]
tmit_rms = self.tmit_rms_buffer()[-1]
return StaticBPM(self.name, z_pos=self.z, x_val=x, y_val=y, tmit_val=tmit, x_rms=x_rms, y_rms=y_rms, tmit_rms=tmit_rms, x_status=self.status('x'), y_status=self.status('y'), tmit_status=self.status('tmit'), x_severity=self.severity('x'), y_severity=self.severity('y'), tmit_severity=self.severity('tmit'), is_energy_bpm=self.is_energy_bpm)
def status(self, axis):
if axis == 'x':
return self.x_pv_obj.data["status"]
if axis == 'y':
return self.y_pv_obj.data["status"]
if axis == "tmit":
return self.tmit_pv_obj.data["status"]
raise Exception("Axis parameter not valid.")
@property
def x_status(self):
return self.status('x')
@property
def y_status(self):
return self.status('y')
@property
def tmit_status(self):
return self.status('tmit')
def severity(self, axis):
if axis == 'x':
return self.x_pv_obj.data["severity"]
if axis == 'y':
return self.y_pv_obj.data["severity"]
if axis == "tmit":
return self.tmit_pv_obj.data["severity"]
raise Exception("Axis parameter not valid.")
@property
def x_severity(self):
return self.severity('x')
@property
def y_severity(self):
return self.severity('y')
@property
def tmit_severity(self):
return self.severity('tmit')
class DiffBPM(BaseBPM):
"""Represents the difference between two BPMs. Usually used
for making a difference orbit between a live orbit and a
static reference orbit."""
def __init__(self, bpm_a, bpm_b):
if bpm_a.name != bpm_b.name:
raise ValueError("{a} != {b}. BPMs used to created a DiffBPM must have the same name.".format(a=bpm_a.name, b=bpm_b.name))
else:
self.name = bpm_a.name
if bpm_a.z != bpm_b.z:
raise ValueError("For {bpm_name} Z_A = {za}, but Z_B = {zb}. BPMs used to create a DiffBPM must have the same z position.".format(bpm_name=self.name, za=bpm_a.z, zb=bpm_b.z))
else:
self.z = bpm_a.z
self.bpm_a = bpm_a
self.bpm_b = bpm_b
#self.bpm_b = bpm_b.to_static()
self.is_energy_bpm = bpm_a.is_energy_bpm or bpm_b.is_energy_bpm
@BaseBPM.x.getter
def x(self):
return self.bpm_a.x - self.bpm_b.x
@BaseBPM.y.getter
def y(self):
return self.bpm_a.y - self.bpm_b.y
@BaseBPM.tmit.getter
def tmit(self):
return self.bpm_a.tmit - self.bpm_b.tmit
@property
def x_rms(self):
try:
a_rms = self.bpm_a.x_rms
except AttributeError:
a_rms = 0.0
try:
b_rms = self.bpm_b.x_rms
except AttributeError:
b_rms = 0.0
return math.sqrt(a_rms**2.0 + b_rms**2.0)
@property
def y_rms(self):
try:
a_rms = self.bpm_a.y_rms
except AttributeError:
a_rms = 0.0
try:
b_rms = self.bpm_b.y_rms
except AttributeError:
b_rms = 0.0
return math.sqrt(a_rms**2.0 + b_rms**2.0)
@property
def tmit_rms(self):
try:
a_rms = self.bpm_a.tmit_rms
except AttributeError:
a_rms = 0.0
try:
b_rms = self.bpm_b.tmit_rms
except AttributeError:
b_rms = 0.0
return math.sqrt(a_rms**2.0 + b_rms**2.0)
def status(self, axis):
return max(self.bpm_a.status(axis), self.bpm_b.status(axis))
def severity(self, axis):
return max(self.bpm_a.severity(axis), self.bpm_b.severity(axis))
@property
def x_severity(self):
return self.severity('x')
@property
def y_severity(self):
return self.severity('y')
@property
def tmit_severity(self):
return self.severity('tmit')
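# Example (illustrative sketch): a difference-orbit element is built from a live BPM and a
# frozen reference reading of the same device, then read like any other BPM.
# ref_bpm = live_bpm.to_static()
# diff_bpm = DiffBPM(live_bpm, ref_bpm)
# dx, dy = diff_bpm.x, diff_bpm.y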
class DiffTMITBPM(DiffBPM):
"""A DiffTMITBPM works exactly like a DiffBPM, except that its TMIT value
is calculated as TMIT_A / TMIT_B instead of TMIT_A - TMIT_B."""
@DiffBPM.tmit.getter
def tmit(self):
return self.bpm_a.tmit / self.bpm_b.tmit
@DiffBPM.tmit_rms.getter
def tmit_rms(self):
try:
a_rms = self.bpm_a.tmit_rms
except AttributeError:
a_rms = 0.0
try:
b_rms = self.bpm_b.tmit_rms
except AttributeError:
b_rms = 0.0
return math.sqrt((a_rms/self.bpm_a.tmit)**2.0 + (b_rms/self.bpm_b.tmit)**2.0)*self.tmit
class BaseOrbit(QObject):
@classmethod
def from_dict(cls, d):
orbit = cls()
for (i, name) in enumerate(d['names']):
bpm = StaticBPM(str(name).strip(), z_pos=d['z'][i], x_val=d['x'][i], y_val=d['y'][i], tmit_val=d['tmit'][i], x_rms=d['x_rms'][i], y_rms=d['y_rms'][i], tmit_rms=d['tmit_rms'][i], x_severity=d['x_severity'][i], y_severity=d['y_severity'][i], tmit_severity=d['tmit_severity'][i])
orbit.append(bpm)
return orbit
@classmethod
def from_MATLAB_file(cls, filepath):
"""Files saved in the matlab format are awful to deal with: Plain-old lists turn into crazy nested nonsense."""
d = matlog.load(filepath)
orbit = cls()
for (i, name) in enumerate(d['data']['names'][0][0]):
bpm = StaticBPM(str(name).strip(), z_pos=d['data']['z'][0][0][0][i], x_val=d['data']['x'][0][0][0][i], y_val=d['data']['y'][0][0][0][i], tmit_val=d['data']['tmit'][0][0][0][i], x_rms=d['data']['x_rms'][0][0][0][i], y_rms=d['data']['y_rms'][0][0][0][i], tmit_rms=d['data']['tmit_rms'][0][0][0][i], x_severity=d['data']['x_severity'][0][0][0][i], y_severity=d['data']['y_severity'][0][0][0][i], tmit_severity=d['data']['tmit_severity'][0][0][0][i])
orbit.append(bpm)
orbit.name = os.path.basename(filepath)
return orbit
@classmethod
def from_json_file(cls, filepath):
with open(filepath) as json_file:
d = json.load(json_file)
orbit = cls.from_dict(d)
orbit.name = os.path.basename(filepath)
return orbit
def __init__(self, name=None, parent=None):
super(BaseOrbit, self).__init__(parent=parent)
self._bpms = []
self._bpm_name_dict = {}
self._zmin = None
self._zmax = None
self._rmat_cache = None
self._rmats_for_fit = None
self._zs_for_fit = None
self._saved_fit_start = None
self._saved_fit_end = None
self._saved_fit_point = None
self.name = name
self.fit_data = None
def __str__(self):
return str(self.name)
@property
def bpms(self):
return self._bpms
@bpms.setter
def bpms(self, new_bpms):
self._clear_all_caches()
self._bpms = new_bpms
self._bpm_name_dict = {}
for bpm in new_bpms:
self._bpm_name_dict[bpm.name] = bpm
def append(self, new_bpm):
if new_bpm.name in self._bpm_name_dict:
raise ValueError('Orbit already contains a BPM named "{}". BPM names in an orbit must be unique.'.format(new_bpm.name))
self._bpm_name_dict[new_bpm.name] = new_bpm
self._bpms.append(new_bpm)
def bpm_with_name(self, name):
return self._bpm_name_dict[name]
def _clear_z_cache(self):
self._zmin = None
self._zmax = None
def _clear_all_caches(self):
self._rmat_cache = None
self._clear_z_cache()
def _find_z_min_and_max(self):
for bpm in self.bpms:
if (self._zmin is None) or bpm.z < self._zmin:
self._zmin = bpm.z
if (self._zmax is None) or bpm.z > self._zmax:
self._zmax = bpm.z
def zmin(self):
if self._zmin is None:
self._find_z_min_and_max()
return self._zmin
def zmax(self):
if self._zmax is None:
self._find_z_min_and_max()
return self._zmax
def xmin(self):
xmin = None
for bpm in self.bpms:
if (xmin is None) or bpm.x < xmin:
xmin = bpm.x
return xmin
def xmax(self):
xmax = None
for bpm in self.bpms:
if (xmax is None) or bpm.x > xmax:
xmax = bpm.x
return xmax
def ymin(self):
ymin = None
for bpm in self.bpms:
if (ymin is None) or bpm.y < ymin:
ymin = bpm.y
return ymin
def ymax(self):
ymax = None
for bpm in self.bpms:
if (ymax is None) or bpm.y > ymax:
ymax = bpm.y
return ymax
def tmitmin(self):
tmitmin = None
for bpm in self.bpms:
if (tmitmin is None) or bpm.tmit < tmitmin:
tmitmin = bpm.tmit
return tmitmin
def tmitmax(self):
tmitmax = None
for bpm in self.bpms:
if (tmitmax is None) or bpm.tmit > tmitmax:
tmitmax = bpm.tmit
return tmitmax
def names(self):
return [bpm.name for bpm in self.bpms]
def vals(self, axis):
return [getattr(bpm,axis) for bpm in self.bpms]
def x_vals(self):
return self.vals('x')
def y_vals(self):
return self.vals('y')
def tmit_vals(self):
return self.vals('tmit')
def x_rms_vals(self):
return self.vals('x_rms')
def y_rms_vals(self):
return self.vals('y_rms')
def tmit_rms_vals(self):
return self.vals('tmit_rms')
def z_vals(self):
return self.vals('z')
def x_severity_vals(self):
return self.vals('x_severity')
def y_severity_vals(self):
return self.vals('y_severity')
def tmit_severity_vals(self):
return self.vals('tmit_severity')
def x_status_vals(self):
return self.vals('x_status')
def y_status_vals(self):
return self.vals('y_status')
def tmit_status_vals(self):
return self.vals('tmit_status')
def sort_bpms_by_z(self):
self.bpms.sort(key=attrgetter('z'))
def export_to_json(self, filename):
raise NotImplementedError
def __getitem__(self, item):
return self.bpms[item]
def __len__(self):
return len(self._bpms)
def __iter__(self):
return iter(self._bpms)
def to_static(self):
return self
def to_dict(self):
d = {'x': self.vals('x'), 'y': self.vals('y'), 'tmit': self.vals('tmit'), 'z': self.vals('z'), 'x_rms': self.vals('x_rms'), 'y_rms': self.vals('y_rms'), 'tmit_rms': self.vals('tmit_rms'), 'x_severity': self.vals('x_severity'), 'y_severity': self.vals('y_severity'), 'tmit_severity': self.vals('tmit_severity')}
d['names'] = self.names()
return d
def save_to_file(self, filepath='default'):
self.save_to_MATLAB_file(filepath)
self.save_to_json_file(filepath)
def save_to_MATLAB_file(self, filepath='default'):
if filepath == 'default':
filename = "orbit"
else:
(filepath, filename) = os.path.split(filepath)
matlog.save(filename, self.to_dict(), filepath, oned_as="row")
def save_to_json_file(self, filepath='default'):
if filepath=='default':
filename = "orbit-{}.json".format(datetime.now().strftime("%Y-%m-%d-%H%M%S"))
filepath = os.path.join("/home/physics/mgibbs/orbit_data/", filename)
with open(filepath, 'w') as f:
json.dump(self.to_dict(), f)
def sector_locations(self):
# This assumes BPM names follow the pattern BPMS:SECTOR:UNITNUMBER
ticks = []
current_sector = None
for (i, bpm) in enumerate(self.bpms):
bpm_sector = bpm.name.split(':',2)[1]
if current_sector != bpm_sector:
current_sector = bpm_sector
if i==0:
z = bpm.z
else:
z = (bpm.z + self.bpms[i-1].z)/2.0
ticks.append((z, current_sector))
return ticks
def rmats(self, from_device=None):
"""Get R-Matrices for each BPM from the model.
The matrices for this orbit are then cached, for performance reasons.
If the orbit's bpms property changes, this cache is cleared.
Returns
-------
numpy.ndarray
A Nx6x6 numpy array with R matrices for each BPM. N is equal to len(self.bpms)."""
if self._rmat_cache is None:
self._rmat_cache = model.get_rmat(self.names())
return self._rmat_cache
def fit(self, start, end, fit_point, fit_xpos=True, fit_xang=True, fit_ypos=True, fit_yang=True, fit_energy_difference=True, fit_xkick=True, fit_ykick=True, opt_dict=None):
"""Fit a trajectory to the orbit's BPM readings, given the R-matrices at each BPM.
Parameters:
----------
start : BaseBPM or int
The first BPM to include in the fit.
end : BaseBPM or int
The last BPM to include in the fit.
fit_point : BaseBPM or int
The BPM to use for the initial fit point.
fit_xpos : Optional[bool]
Whether or not to fit the x position.
fit_xang : Optional[bool]
Whether or not to fit the x angle.
fit_ypos : Optional[bool]
Whether or not to fit the y position.
fit_yang : Optional[bool]
Whether or not to fit the y angle.
fit_energy_difference : Optional[bool]
Whether or not to fit dE/E.
fit_xkick : Optional[bool]
Whether or not to fit the x kick.
fit_ykick : Optional[bool]
Whether or not to fit the y kick.
opt_dict : Optional[dict]
A dictionary which provides keys matching any of the above options.
Returns
-------
dict
A dictionary with the following keys:
'xpos': a numpy.ndarray of fitted x trajectories in mm.
'ypos': a numpy.ndarray of fitted y trajectories in mm.
'xpos0': the fitted x position at z0 in mm.
'xang0': the fitted x angle at z0 in mrad.
'ypos0': the fitted y position at z0 in mm.
'yang0': the fitted y angle at z0 in mrad.
'dE/E': the fitted dE/E in parts per 1000. Only returned if R16 or R36 > 10mm in the fit region.
'xkick': the fitted x kick in mrad.
'ykick': the fitted y kick in mrad.
'dp': A dictionary with all of the above keys, but this time with values representing the fitted error.
Raises
------
Exception
If the machine model is not available (usually this is because EPICSv4 could not be imported)
ValueError
If the number of R matrices in Rs does not equal the number of BPMs in the orbit.
"""
#This method is really crappy - its an almost line-for-line port of the MATLAB BPM GUI's fitting routine,
# which ends up really awkward in numpy. There are probably performance gains to be had here - unnecessary
# copies, unnecessary conversions between numpy arrays and numpy matrices, etc.
if MODEL_AVAILABLE == False:
raise Exception("Model is not available, cannot perform fitting.")
if isinstance(start, BaseBPM):
start_index = self.bpms.index(start)
else:
start_index = int(start)
if isinstance(end, BaseBPM):
end_index = self.bpms.index(end)
else:
end_index = int(end)
if isinstance(fit_point, BaseBPM):
z0 = fit_point.z
fit_point_index = self.bpms.index(fit_point)
else:
fit_point_index = int(fit_point)
z0 = self.bpms[fit_point_index].z
if start_index == self._saved_fit_start and end_index == self._saved_fit_end and fit_point_index == self._saved_fit_point:
Rs = self._rmats_for_fit
else:
Rs = model.get_rmat(self.bpms[fit_point_index].name, self.names())
self._rmats_for_fit = Rs
self._saved_fit_start = start_index
self._saved_fit_end = end_index
self._saved_fit_point = fit_point_index
Rs = Rs[start_index:end_index+1]
zs = np.array(self.z_vals())[start_index:end_index+1]
xs = np.array(self.x_vals())[start_index:end_index+1]
ys = np.array(self.y_vals())[start_index:end_index+1]
dxs = np.array(self.x_rms_vals())[start_index:end_index+1]
#dxs = np.ones(len(zs))/1.0e-3
dys = np.array(self.y_rms_vals())[start_index:end_index+1]
#dys = np.ones(len(zs))/1.0e-3
tmit_sevrs = np.array(self.tmit_severity_vals())[start_index:end_index+1]
#Filter out BPMS with bad TMIT severity (usually a good indicator of no beam)
tmit_good = np.where(tmit_sevrs == 0)
tmit_sevrs = tmit_sevrs[tmit_good]
num_bpms = len(tmit_sevrs)
if num_bpms == 0:
self.fit_data = None
raise NoValidBPMDataException("No BPMS with sufficient TMIT for fit.")
zs = zs[tmit_good]
xs = xs[tmit_good]
ys = ys[tmit_good]
dxs = dxs[tmit_good]
dys = dys[tmit_good]
Rs = Rs[tmit_good]
if Rs.shape != (num_bpms,6,6):
raise ValueError("Number of R matrices in Rs does not equal the number of BPMs in the orbit.")
xsf = np.zeros((1, num_bpms))
ysf = np.zeros((1, num_bpms))
#Grab just the R1s and R3s, except for R15 and R35.
R1s = Rs[:, 0, [0, 1, 2, 3, 5]]
R3s = Rs[:, 2, [0, 1, 2, 3, 5]]
if not (np.any(np.abs(R1s[:, 4]) > 0.010) or np.any(np.abs(R3s[:,4]) > 0.010)):
#Not enough dispersion to fit energy, disabling energy fit.
fit_energy_difference = False
I = np.where(np.array([fit_xpos, fit_xang, fit_ypos, fit_yang, fit_energy_difference, fit_xkick, fit_ykick]))[0]
R1s_kick = R1s.copy()
R3s_kick = R3s.copy()
R1s_kick[zs <= z0, :] = 0.0
R3s_kick[zs <= z0, :] = 0.0
R1s_kick = np.mat(R1s_kick)
R3s_kick = np.mat(R3s_kick)
R1s = np.append(R1s, R1s_kick[:,1], 1)
R1s = np.append(R1s, R1s_kick[:,3], 1)
R3s = np.append(R3s, R3s_kick[:,1], 1)
R3s = np.append(R3s, R3s_kick[:,3], 1)
Q = np.append(R1s[:, I], R3s[:, I], 0)
S = np.mat(np.append(xs, ys, 0))
import code # For development: code.interact(local = dict(globals(), **locals()))
import numpy as np
import xml.etree.ElementTree as et
from scipy.io import netcdf
import matplotlib.dates as mdates
from datetime import datetime, timedelta
import matplotlib.pyplot as plt
import math
# =============================================================================
#
# This module processes and compares model output to census benchmarks.
# Currently, this module assumes that FATES size structured outputs are
# available. Benchmarks are prepared in another script, one such script
# is the NGEET/tools_benchmarking_evaluation set.
#
# Here is the list of model output needed:
#
# Basal Area: BA_SCPF (SIZE x PFT)
# Diameter Increment: DDBH_SCPF (SIZE x PFT) / NPLANT_SCPF
# Mortality Rate: (M1_SCPF + M2_SCPF + M3_SCPF + M4_SCPF + M5_SCPF +
# M6_SCPF + M7_SCPF + M8_SCPF) / NPLANT_SCPF
# Recruitment Rate: RECRUITMENT (PFT)
#
#
#
#
# =============================================================================
# The CTFS processed files use an invalid flag of -9e+30
# We will consider anything very large negative invalid
invalid_flag = -9.9e10
# Anything that is a rate, needs to be normalized by the number of plants
# This is a restriction on rate-type variables, which therefore require NPLANT_SCPF
nplant_scpf_name = 'NPLANT_SCPF'
# BA_SCPF (SIZE x PFT)
# DDBH_SCPF (SIZE x PFT)
# (M1_SCPF + M2_SCPF + M3_SCPF + M4_SCPF + M5_SCPF + M6_SCPF + M7_SCPF + M8_SCPF) / NPLANT_SCPF
# RECRUITMENT (PFT)
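# Illustrative sketch (not used by the code below): normalizing a rate-type output,
# assuming `m_scpf` holds the summed M1_SCPF..M8_SCPF array and `nplant_scpf` holds
# NPLANT_SCPF on the same (time, size x pft) layout:
# mortality_rate = np.where(nplant_scpf > 0, m_scpf / nplant_scpf, 0.0)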
# This object is bound to each site
# It should contain a list of viable benchmarks
class benchmark_obj:
def __init__(self,census_filename):
self.bvarlist = []
self.census_filename = census_filename
# Lets check through the census file and see if any of these variables
# are in the file. We will later look through the model output
# and pop off list entries that are not there.
if(census_filename.strip() != ''):
print("Loading census file: {}".format(census_filename))
fp = netcdf.netcdf_file(census_filename, 'r', mmap=False)
cens_var_name = 'basal_area_by_size_census'
if (cens_var_name in fp.variables):
self.bvarlist.append(benchmark_vars( name = 'Basal Area', \
mod_symbols = 'BA_SCPF', \
obs_symbol = cens_var_name, \
mod_dimclass = 'scpf', \
obs_dimclass = 'size-class', \
unit = 'm2/ha', \
vartype = 'quantity'))
self.bvarlist[-1].load_census(fp)
else:
print('Census variable: '+cens_var_name+', was not found in the census file')
cens_var_name = 'growth_increment_by_size_census'
if (cens_var_name in fp.variables):
self.bvarlist.append(benchmark_vars( name = 'Growth Increment', \
mod_symbols = 'DDBH_SCPF', \
obs_symbol = cens_var_name, \
mod_dimclass = 'scpf', \
obs_dimclass = 'size-class', \
unit = 'cm/yr', \
vartype = 'rate'))
self.bvarlist[-1].load_census(fp)
else:
print('Census variable: '+cens_var_name+', was not found in the census file')
cens_var_name = 'mortality_rate_by_size_census'
if (cens_var_name in fp.variables):
self.bvarlist.append(benchmark_vars( name = 'Mortality Rate', \
mod_symbols = 'M1_SCPF,M2_SCPF,M3_SCPF,M4_SCPF,M5_SCPF,M6_SCPF,M7_SCPF,M8_SCPF', \
obs_symbol = cens_var_name, \
mod_dimclass = 'scpf', \
obs_dimclass = 'size-class', \
unit = '/yr', \
vartype = 'rate'))
self.bvarlist[-1].load_census(fp)
cens_var_name = 'new_recruits_by_census'
if (cens_var_name in fp.variables):
self.bvarlist.append(benchmark_vars( name = 'Recruitment Rate', \
mod_symbols = 'RECRUITMENT', \
obs_symbol = cens_var_name, \
mod_dimclass = 'pft', \
obs_dimclass = 'scalar', \
unit = 'indv ha-1 yr-1', \
vartype = 'quantity'))
self.bvarlist[-1].load_census(fp)
else:
print('Census variable: '+cens_var_name+', was not found in the census file')
fp.close()
# ===================================================================================
## Check the first history file in the list to see which benchmarking variables
# are available.
def init_history(self,hist_file0,n_htypes):
fp = netcdf.netcdf_file(hist_file0, 'r', mmap=False)
for bvar in self.bvarlist:
all_symbols_found = True
for mod_symbol in bvar.mod_symbols:
if( mod_symbol not in fp.variables ):
all_symbols_found = False
if( all_symbols_found ):
bvar.active = True
if( bvar.mod_dimclass == 'scpf'):
dims = fp.variables[bvar.mod_symbols[0]].dimensions
if(dims[1] != 'fates_levscpf'):
print('An SCPF benchmark variable: {} does not actually'.format(bvar.mod_symbols[0]))
print(' have the correct dimensions: {}... exiting'.format(dims))
exit(2)
fates_levscls = fp.variables['fates_levscls'].data
if (fates_levscls[0] == 0.0):
bvar.offset0 = True
if(fates_levscls.size-1 != bvar.scv_obs_ar.shape[0]):
print("Dimensions of model output size-classes don't match observations")
for isc,scvar in enumerate(fates_levscls[1:]):
if( np.abs(scvar- bvar.scv_x[isc])>1.0e-10 ):
print("Dimensions of model output size-classes don't match observations")
print('Observed classes: {}'.format(bvar.scv_x))
print('Modeled (0 is ignored): {}'.format(fates_levscls))
else:
bvar.offset0 = False
if(fates_levscls.size != bvar.scv_obs_ar.shape[0]):
print("Dimensions of model output size-classes don't match observations")
for isc,scvar in enumerate(fates_levscls[:]):
if( np.abs(scvar- bvar.scv_x[isc])>1.0e-10 ):
print("Dimensions of model output size-classes don't match observations")
print('Observed classes: {}'.format(bvar.scv_x))
print('Modeled (0 is ignored): {}'.format(fates_levscls))
d_sizes = bvar.scv_obs_ar.shape
bvar.modlist = []
for imod in range(n_htypes):
bvar.modlist.append(mod_scv_array(d_sizes[0]))
elif( bvar.mod_dimclass == 'pft' ):
dims = fp.variables[bvar.mod_symbols[0]].dimensions
if(dims[1] != 'fates_levpft'):
print('A PFT benchmark variable: {} does not actually'.format(bvar.mod_symbols[0]))
print(' have the correct dimensions: {}... exiting'.format(dims))
exit(2)
fates_levpft = fp.variables['fates_levpft'].data
scalar_size = 1
bvar.modlist = []
for imod in range(n_htypes):
bvar.modlist.append(mod_scv_array(scalar_size))
fp.close()
# ===================================================================================
def load_history(self,filename,h_index,site_index):
# Objective is to push new estimates of the benchmark variables
fp = netcdf.netcdf_file(filename, 'r', mmap=False)
#code.interact(local = dict(globals(), **locals()))
for bvar in self.bvarlist:
if(bvar.active):
d_sizes = fp.variables[bvar.mod_symbols[0]].shape
hist_arr = np.ma.zeros(fp.variables[bvar.mod_symbols[0]].shape)
"""
Functions used in Metabolite annotation
"""
import numpy as np
from scipy.interpolate import interpolate
from scipy.optimize import leastsq
def find(a,cond):
b=np.nonzero(cond)
return b
def cauchy(p2,p3):
siz=(p2-1)/2*(np.ones(2))
xxrange = np.arange(-siz[1],siz[1]+1)
yyrange = np.arange(-siz[1],siz[1]+1)
X,Y = np.meshgrid(xxrange,yyrange)
arg=((1/(p3[0]*3.14159))/((1/p3[0])**2+(X*X)))*((1/(p3[1]*3.14159))/((1/p3[1])**2+(Y*Y)))
eps=2.2204*10**(-16)
h=arg
h[h<(eps*np.amax(h))]=0
sumh=np.sum(h)
if sumh!=0:
h=h/sumh
h=h/np.amax(h)
return h
def expon(p2,p3):
siz=(p2-1)/2*(np.ones(2))
xxrange = np.arange(-siz[1],siz[1]+1)
yyrange = np.arange(-siz[1],siz[1]+1)
X,Y = np.meshgrid(xxrange,yyrange)
#arg=((1/p3[0])/((1/p3[0])**2+(X*X)))*((1/p3[1])/((1/p3[1])**2+(Y*Y)))
arg=(1/6.28)*(1/p3[0])*np.exp(-X*X/(2*p3[0]**2))*(1/p3[1])*np.exp(-Y*Y/(2*p3[1]**2))
eps=2.2204*10**(-16)
h=arg
h[h<(eps*np.amax(h))]=0
sumh=np.sum(h)
if sumh!=0:
h=h/sumh
h=h/np.amax(h)
return h
def exponcauchy(p2,p3):
siz=(p2-1)/2*(np.ones(2))
xxrange = np.arange(-siz[1],siz[1]+1)
yyrange = np.arange(-siz[1],siz[1]+1)
X,Y = np.meshgrid(xxrange,yyrange)
#arg=((1/p3[0])/((1/p3[0])**2+(X*X)))*((1/p3[1])/((1/p3[1])**2+(Y*Y)))
arg=(1/3.14159)*(1/p3[0])*np.exp(-X*X/(2*p3[0]**2))*((1/(p3[1]*3.14159))/((1/p3[1])**2+(Y*Y)))
eps=2.2204*10**(-16)
h=arg
h[h<(eps*np.amax(h))]=0
sumh=np.sum(h)
if sumh!=0:
h=h/sumh
h=h/np.amax(h)
return h
def cauchyexpon(p2,p3):
siz=(p2-1)/2*(np.ones(2))
xxrange = np.arange(-siz[1],siz[1]+1)
yyrange = np.arange(-siz[1],siz[1]+1)
X,Y = np.meshgrid(xxrange,yyrange)
#arg=((1/p3[0])/((1/p3[0])**2+(X*X)))*((1/p3[1])/((1/p3[1])**2+(Y*Y)))
arg=((1/(p3[0]*3.14159))/((1/p3[0])**2+(X*X)))*(1/3.14159)*(1/p3[1])*np.exp(-Y*Y/(2*p3[1]**2))
eps=2.2204*10**(-16)
h=arg
h[h<(eps*np.amax(h))]=0
sumh=np.sum(h)
if sumh!=0:
h=h/sumh
h=h/np.amax(h)
return h
def subpix2(z,ii,jj):
trange = np.arange(7)
ttrange = np.arange(7)
X,Y = np.meshgrid(trange,ttrange)
outgrid = interpolate.interp2d(X,Y,z,kind='quintic')
xx=yy=np.arange(61)/10.
l=outgrid(xx,yy)
l=l[30-9:30+10,30-9:30+10]
ind=find(l,l==np.amax(l))
#print l
#print ind[0][0],ind[1][0]
ni=ii+(ind[0][0]-9.)/10
nj=jj+(ind[1][0]-9.)/10
#print ii,jj
#print ni,nj
return[ni,nj]
def dephcl(z):
e = lambda v,z,: np.sum(np.abs(z-z[9,9]*cauchy(19,v)),1)
vi=[0.3,0.3]
v, success = leastsq(e, vi, args=(z), maxfev=1000)
if v[0]<0.001 or v[0]>2 or v[1]<0.001 or v[1]>2 :
v[0]=v[1]=0.3+np.random.normal(0, 0.05, 1)
return v
def dephcg(z):
e = lambda v,z,: np.sum(np.abs(z-z[9,9]*expon(19,v)),1)
vi=[1,1]
#z[z<0]=0
v, success = leastsq(e, vi, args=(z), maxfev=1000)
if v[0]<0.1 or v[0]>4 or v[1]<0.1 or v[1]>4 :
v[0]=v[1]=2+np.random.normal(0, 0.05, 1)
return v
def dephcaprio(z,a,b,c):
if c[0]=='g' and c[1]=='g':
e = lambda v,z,: np.sum(np.abs(z-z[9,9]*expon(19,v)),1)
vi=[a,b]
#z[z<0]=0
v, success = leastsq(e, vi, args=(z), maxfev=1000)
if np.abs(float(v[0]-a))>1 or v[0]<0.5 or v[0]>6:
v[0]=a+np.random.normal(0, 0.05, 1)
if np.abs(float(v[1]-b))>1 or v[0]<0.5 or v[0]>6:
v[1]=b+np.random.normal(0, 0.05, 1)
if c[0]=='l' and c[1]=='l':
e = lambda v,z,: np.sum(np.abs(z-z[9,9]*cauchy(19,v)),1)
a=float(1/float(a))
b=float(1/float(b))
vi=[a,b]
v, success = leastsq(e, vi, args=(z), maxfev=1000)
#print vi
if np.abs(float(v[0]-float(1/float(a))))>0.5 or v[0]<0.08 or v[0]>4:
v[0]=a+np.random.normal(0, 0.05, 1)
#print float(1/float(a))
v[0]=1/v[0]
if np.abs(float(v[1]-float(1/float(b))))>0.5 or v[1]<0.08 or v[1]>4:
v[1]=b+np.random.normal(0, 0.05, 1)
v[1]=1/v[1]
if c[0]=='g' and c[1]=='l':
e = lambda v,z,: np.sum(np.abs(z-z[9,9]*exponcauchy(19,v)),1)
b=float(1/float(b))
vi=[a,b]
v, success = leastsq(e, vi, args=(z), maxfev=1000)
#print 'ham',v
#print vi
if np.abs(float(v[0]-a))>1 or v[0]<0.5 or v[0]>6:
v[0]=a+np.random.normal(0, 0.05, 1)
#print float(1/float(a))
if np.abs(float(v[1]-float(b)))>0.5 or v[1]<0.08 or v[1]>4:
v[1]=b+np.random.normal(0, 0.05, 1)
v[1]=1/v[1]
if c[0]=='l' and c[1]=='g':
e = lambda v,z,: np.sum(np.abs(z-z[9,9]*cauchyexpon(19,v)),1)
a=float(1/float(a))
vi=[a,b]
v, success = leastsq(e, vi, args=(z), maxfev=1000)
#print vi
if np.abs(float(v[1]-b))>1 or v[0]<0.5 or v[0]>6:
v[1]=b+np.random.normal(0, 0.05, 1)
#print float(1/float(a))
if np.abs(float(v[0]-float(1/float(a))))>0.5 or v[1]<0.08 or v[1]>4:
v[0]=a+np.random.normal(0, 0.05, 1)
v[0]=1/v[0]
#print c
#print vi
#print v
return v,c
def exp_hand(z,newn):
c=0
for i in range(len(z)):
if z[i]==newn:
c=1
break
return c
def exp_yn(amp,ampref,test):
artest=np.array(test)
ol=np.nonzero(artest==1)
#print np.size(ol)
#print len(amp)/2
#print amp
if np.size(ol)>len(amp)/2:
ver=0
else:
o=np.nonzero(artest==0)
vamp=np.array(amp)
vampref=np.array(ampref)
ivamref=vampref[o]
ivam=vamp[o]
ii=ivamref[ivamref==np.amax(ivamref)]
jj=ivam[ivamref==np.amax(ivamref)]
ver=1
if np.size(ol)>1:
ol=ol[0]
#print np.size(ol)
#print vampref
#print vamp
#print ol
if np.size(ol)>1:
for kkk in range(np.size(ol)):
#print float(((vampref[ol[kkk]]/ii)/(vamp[ol[kkk]]/jj)))
if (((vampref[ol[kkk]]/ii)/(vamp[ol[kkk]]/jj)))>50 or (((vampref[ol[kkk]]/ii)/(vamp[ol[kkk]]/jj)))<0.0200:
#print 'lela'
ver=0
else:
for kkk in range(np.size(ol)):
#print float(((vampref[ol[kkk]][0]/ii)/(vamp[ol[kkk]][0]/jj))[0]) <0.001
if (((vampref[ol[kkk]][0]/ii)/(vamp[ol[kkk]][0]/jj))[0])>50 or (((vampref[ol[kkk]][0]/ii)/(vamp[ol[kkk]][0]/jj))[0])<0.0200:
#print 'lela'
ver=0
return ver
def exp_ync(amp,ampref,test):
artest=np.array(test)
ol=np.nonzero(artest==1)
# -*- coding: utf-8 -*-
"""
Module for determining the fault (short-circuit) location on high-voltage overhead power lines.
Based on the simplified single-ended measurement methods described in
the book by Arzhannikov <NAME>. Determining the short-circuit location
on high-voltage power transmission lines /
Edited by <NAME>. - Moscow: Energoatomizdat, 2003.
"""
import numpy as np
r2d = 180/np.pi
a = -0.5 + 0.5j*np.sqrt(3)
a2 = -0.5 - 0.5j*np.sqrt(3)
a0 = 1.0 + 0.0j
v1 = np.array([a0,a,a2])/3
v2 = np.array([a0,a2,a])/3
v0 = np.array([a0,a0,a0])/3
vA = np.array([a0,a0,a0])
vB = np.array([a2,a ,a0])
vC = np.array([a, a2,a0])
vAB = vA - vB
vBC = vB - vC
vCA = vC - vA
Mf2s = np.array([v1,v2,v0])
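# Illustrative note (sketch, not part of the original module): Mf2s maps phase quantities
# onto symmetrical (sequence) components, e.g. for complex phase voltages UA, UB, UC:
# U1, U2, U0 = Mf2s @ np.array([UA, UB, UC])  # positive, negative and zero sequence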
"""
This file has functions about bounding box processing.
"""
import numpy as np
def bbox_transform(ex_rois, gt_rois):
"""
compute bounding box regression targets from ex_rois to gt_rois
:param ex_rois: [N, 4]
:param gt_rois: [N, 4]
:return: [N, 4]
"""
ex_widths = ex_rois[:, 2] - ex_rois[:, 0] + 1.0
ex_heights = ex_rois[:, 3] - ex_rois[:, 1] + 1.0
ex_ctr_x = ex_rois[:, 0] + 0.5 * (ex_widths - 1.0)
ex_ctr_y = ex_rois[:, 1] + 0.5 * (ex_heights - 1.0)
gt_widths = gt_rois[:, 2] - gt_rois[:, 0] + 1.0
gt_heights = gt_rois[:, 3] - gt_rois[:, 1] + 1.0
gt_ctr_x = gt_rois[:, 0] + 0.5 * (gt_widths - 1.0)
gt_ctr_y = gt_rois[:, 1] + 0.5 * (gt_heights - 1.0)
targets_dx = (gt_ctr_x - ex_ctr_x) / (ex_widths + 1e-14)
targets_dy = (gt_ctr_y - ex_ctr_y) / (ex_heights + 1e-14)
targets_dw = np.log(gt_widths / ex_widths)
targets_dh = np.log(gt_heights / ex_heights)
targets = np.vstack(
(targets_dx, targets_dy, targets_dw, targets_dh)).transpose()
return targets
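# Example (sketch): regression targets for a single proposal / ground-truth pair.
# ex = np.array([[0., 0., 9., 9.]])
# gt = np.array([[1., 1., 12., 12.]])
# bbox_transform(ex, gt)  # -> shape (1, 4) array of [dx, dy, dw, dh]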
def bbox_pred(boxes, box_deltas, is_train=False):
"""
Transform the set of class-agnostic boxes into class-specific boxes
by applying the predicted offsets (box_deltas)
:param boxes: !important [N 4]
:param box_deltas: [N, 4 * num_classes]
:return: [N 4 * num_classes]
"""
if boxes.shape[0] == 0:
return np.zeros((0, box_deltas.shape[1]))
boxes = boxes.astype(np.float, copy=False)
widths = boxes[:, 2] - boxes[:, 0] + 1.0
heights = boxes[:, 3] - boxes[:, 1] + 1.0
ctr_x = boxes[:, 0] + 0.5 * (widths - 1.0)
ctr_y = boxes[:, 1] + 0.5 * (heights - 1.0)
dx = box_deltas[:, 0::4]
dy = box_deltas[:, 1::4]
dw = box_deltas[:, 2::4]
dh = box_deltas[:, 3::4]
if is_train:
dx = np.array(map(lambda x: np.sign(x)
import numpy as np
from lmfit import Parameters
import sys
import os
sys.path.append(os.path.abspath('.'))
sys.path.append(os.path.abspath('./Functions'))
from utils import find_minmax
from PeakFunctions import Gaussian, LogNormal
class Sphere:
def __init__(self, x=0.001, R=1.0, Rsig=0.0, dist='Gaussian', N=50, integ='Trapezoid', rhoc=1.0, rhosol=0.0, norm=1.0, bkg=0.0,mpar={}):
"""
Calculates the form factor of a solid sphere with size distribution
x : Array of q-values in the same reciprocal unit as R and Rsig
R : Mean radius of the solid spheres
Rsig : Width of the distribution of solid spheres
dist : Gaussian or LogNormal
N : No. of points on which the distribution will be calculated
integ : The type of integration ('Trapizoid' or 'MonteCarlo') Default: 'Trapezoid'
rhoc : Electron density of the particle
rhosol : Electron density of the solvent or surrounding environment
"""
if type(x)==list:
self.x=np.array(x)
else:
self.x=x
self.R=R
self.Rsig=Rsig
self.dist=dist
self.rhoc=rhoc
self.rhosol=rhosol
self.norm=norm
self.integ=integ
self.bkg=bkg
self.N=N
self.__mpar__=mpar
self.choices={'dist':['Gaussian','LogNormal'],'integ':['Trapezoid','MonteCarlo']}
self.init_params()
self.output_params = {'scaler_parameters': {}}
def init_params(self):
self.params=Parameters()
self.params.add('R',value=self.R,vary=0,min=-np.inf,max=np.inf,expr=None,brute_step=0.1)
self.params.add('Rsig',value=self.Rsig,vary=0,min=-np.inf,max=np.inf,expr=None,brute_step=0.1)
self.params.add('rhoc',value=self.rhoc,vary=0,min=-np.inf,max=np.inf,expr=None,brute_step=0.1)
self.params.add('rhosol',value=self.rhosol,vary=0,min=-np.inf,max=np.inf,expr=None,brute_step=0.1)
self.params.add('norm',value=self.norm,vary=0,min=-np.inf,max=np.inf,expr=None,brute_step=0.1)
self.params.add('bkg',value=self.bkg,vary=0,min=-np.inf,max=np.inf,expr=None,brute_step=0.1)
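# Example usage (illustrative sketch; q is in the same reciprocal unit as R and Rsig):
# q = np.linspace(0.001, 0.3, 500)
# sph = Sphere(x=q, R=25.0, Rsig=2.0, dist='Gaussian', integ='Trapezoid', norm=1.0, bkg=0.0)
# intensity = sph.y()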
def y(self):
rho=self.rhoc-self.rhosol
if self.Rsig<1e-3:
return self.norm*rho**2*16*np.pi**2*(np.sin(self.x*self.R)-self.x*self.R*np.cos(self.x*self.R))**2/self.x**6+self.bkg
else:
if self.integ=='Trapezoid':
if self.dist=='Gaussian':
gau=Gaussian.Gaussian(x=0.001,pos=self.R,wid=self.Rsig)
rmin, rmax = max(0.001, self.R-5*self.Rsig),self.R+5*self.Rsig
r=np.linspace(rmin,rmax,self.N)
gau.x=r
dist=gau.y()
sumdist=np.sum(dist)
self.output_params['Distribution']={'x':r,'y':dist/sumdist}
self.output_params['scaler_parameters']['Rmean']=self.R
self.output_params['scaler_parameters']['Rwidth']=2.35482*self.Rsig
print(dist)
if type(self.x)==np.ndarray:
ffactor=[]
for x in self.x:
f=np.sum(16.0*np.pi**2*(np.sin(x*r)-x*r*np.cos(x*r))**2*dist/x**6)
ffactor.append(f/sumdist)
return self.norm*rho**2*np.array(ffactor)+self.bkg
else:
return self.norm*rho**2*np.sum(16*np.pi**2*(np.sin(self.x*r)-self.x*r*np.cos(self.x*r))**2*dist/self.x**6)/sumdist+self.bkg
elif self.dist=='LogNormal':
lgn=LogNormal.LogNormal(x=0.001,pos=self.R,wid=self.Rsig)
rmin,rmax=max(0.001, np.exp(np.log(self.R) - 5*self.Rsig)), np.exp(np.log(self.R) + 5.0*self.Rsig)
r=np.logspace(np.log10(rmin),np.log10(rmax),self.N)
lgn.x=r
dist=lgn.y()
sumdist=np.sum(dist)
self.output_params['Distribution']={'x':r,'y':dist/sumdist}
self.output_params['scaler_parameters']['Rmean'] = np.exp(np.log(self.R)+self.Rsig**2/2)
self.output_params['scaler_parameters']['Rwidth'] = np.sqrt((np.exp(self.Rsig**2)-1)*np.exp(2*np.log(self.R)+self.Rsig**2))
if type(self.x)==np.ndarray:
ffactor=[]
for x in self.x:
f=np.sum(16*np.pi**2*(np.sin(x*r)-x*r*np.cos(x*r))**2*dist/x**6)
ffactor.append(f/sumdist)
return self.norm*rho**2*np.array(ffactor)+self.bkg
else:
return self.norm*rho**2*np.sum(16*np.pi**2*(np.sin(self.x*r)-self.x*r*np.cos(self.x*r))**2*dist/self.x**6)/sumdist+self.bkg
else:
np.random.seed(100)
if self.dist == 'Gaussian':
r=np.sort(np.random.normal(self.R,self.Rsig,10000))
gau = Gaussian.Gaussian(x=r, pos=self.R, wid=self.Rsig)
dist = gau.y()
sumdist = np.sum(dist)
self.output_params['Distribution'] = {'x': r, 'y': dist / sumdist}
self.output_params['scaler_parameters']['Rmean'] = self.R
self.output_params['scaler_parameters']['Rwidth'] = 2.35482 * self.Rsig
if type(self.x) == np.ndarray:
ffactor = []
for x in self.x:
f = np.sum(16.0*np.pi**2*(np.sin(x * r) - x * r * np.cos(x * r))**2*dist/x**6)
"""
Creates Figure 4 -- Model Interpretation
"""
from os.path import abspath, dirname
from string import ascii_uppercase
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from sklearn.preprocessing import scale
from sklearn.utils import resample
from .common import getSetup
from ..dataImport import form_tensor, import_cytokines
from ..predict import run_model, predict_regression
from tensorpack import perform_CMTF
N_BOOTSTRAP = 30
PATH_HERE = dirname(dirname(abspath(__file__)))
TARGETS = ['status', 'gender', 'race', 'age']
def bootstrap_weights():
"""
Predicts samples with unknown outcomes.
Parameters:
None
Returns:
weights (pandas.DataFrame): mean and StD of component weights w/r to
prediction targets
"""
tensor, matrix, patient_data = form_tensor()
patient_data = patient_data.reset_index(drop=True)
patient_data = patient_data.loc[patient_data['status'] != 'Unknown']
components = perform_CMTF(tensor, matrix)
components = components[1][0]
components = components[patient_data.index, :]
stats = ['Mean', 'StD']
index = pd.MultiIndex.from_product([TARGETS, stats])
weights = pd.DataFrame(
index=index,
columns=list(range(1, components.shape[1] + 1))
)
for target in TARGETS:
coef = []
for sample in range(N_BOOTSTRAP):
data, labels = resample(components, patient_data.loc[:, target])
if target == 'age':
_, _coef = predict_regression(data, labels, return_coef=True)
else:
_, _, _coef = run_model(data, labels, return_coef=True)
coef.append(_coef)
coef = scale(coef, axis=1)
weights.loc[(target, 'Mean'), :] = np.mean(coef, axis=0)
weights.loc[(target, 'StD'), :] = np.std(coef, axis=0, ddof=1)
return weights
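# Example (sketch): inspect the bootstrapped weights for one prediction target.
# weights = bootstrap_weights()
# weights.loc[('status', 'Mean'), :]  # mean scaled coefficient per CMTF component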
def tfac_setup():
"""
Import cytokine data and correlate tfac components to cytokines and
data sources.
Parameters:
None
Returns:
subjects (pandas.DataFrame): patient correlations to tfac components
cytos (pandas.DataFrame): cytokine correlations to tfac components
source (pandas.DataFrame): cytokine source correlations to tfac
components
pat_info (pandas.DataFrame): patient meta-data
"""
tensor, matrix, pat_info = form_tensor()
plasma, _ = import_cytokines()
cytokines = plasma.index
pat_info.loc[:, 'sorted'] = range(pat_info.shape[0])
pat_info = pat_info.sort_values(['cohort', 'type', 'status'])
sort_idx = pat_info.loc[:, 'sorted']
pat_info = pat_info.drop('sorted', axis=1)
pat_info = pat_info.T
factors = perform_CMTF(tensor, matrix)
col_names = [f"Cmp. {i}" for i in np.arange(1, factors.rank + 1)]
subjects = pd.DataFrame(
factors.factors[0][sort_idx, :],
columns=col_names,
index=[str(x) for x in pat_info.columns]
)
cytos = pd.DataFrame(
factors.factors[1],
columns=col_names,
index=cytokines
)
source = pd.DataFrame(
factors.factors[2],
columns=col_names,
index=["Serum", "Plasma"]
)
return subjects, cytos, source, pat_info
def plot_results(weights, subjects, cytos, source, pat_info):
"""
Plots component weights and interpretation.
Parameters:
weights (pandas.DataFrame): mean and StD of component weights w/r to
prediction targets
subjects (pandas.DataFrame): patient correlations to tfac components
cytos (pandas.DataFrame): cytokine correlations to tfac components
source (pandas.DataFrame): cytokine source correlations to tfac
components
pat_info (pandas.DataFrame): patient meta-data
"""
fig_size = (5, 5)
layout = {
# 'height_ratios': [1, 0.5],
'hspace': 0.3,
'ncols': 1,
'nrows': 2,
'wspace': 0
}
_, fig, gs = getSetup(
fig_size,
layout,
style=None
)
top_gs = gs[0].subgridspec(
ncols=3,
nrows=1,
width_ratios=[35, 4, 25]
)
bottom_gs = gs[1].subgridspec(
ncols=14,
nrows=1,
width_ratios=[25, 12, 1, 5, 1, 5, 1, 1, 1, 1, 1, 25, 1, 1],
wspace=0
)
for gs in [top_gs, bottom_gs]:
for col in range(gs.ncols):
fig.add_subplot(gs[col])
fig.delaxes(fig.axes[0])
fig.delaxes(fig.axes[0])
axs = fig.axes
for ax in axs:
ax.set_frame_on(False)
axs[0].set_frame_on(True)
spacers = [1, 4, 5, 6, 8, 10, 16]
for spacer in spacers:
axs[spacer].set_xticks([])
axs[spacer].set_yticks([])
# Determine scale
vmin = min(subjects.values.min(), cytos.values.min(), source.values.min())
vmax = max(subjects.values.max(), cytos.values.max(), source.values.max())
# Plot main graphs
sns.heatmap(
subjects,
cmap="PRGn",
center=0,
xticklabels=True,
yticklabels=False,
cbar_ax=axs[15],
vmin=vmin,
vmax=vmax,
ax=axs[14]
)
sns.heatmap(
cytos,
cmap="PRGn",
center=0,
yticklabels=True,
cbar=False,
vmin=vmin,
vmax=vmax,
ax=axs[2]
)
sns.heatmap(
source,
cmap="PRGn",
center=0,
yticklabels=True,
cbar=False,
vmin=vmin,
vmax=vmax,
ax=axs[3]
)
axs[2].set_yticklabels(cytos.index, fontsize=7)
axs[3].set_yticklabels(["Serum", "Plasma"], rotation=0)
axs[3].set_xticks(np.arange(0.5, source.shape[1]))
# reference EVO toolkit
from evo.tools import file_interface
from evo.tools.file_interface import csv_read_matrix, FileInterfaceException
from pysvso.lib.maths.rotation import Euler, Quaternion
import numpy as np
import logging
from pysvso.lib.log import LoggerAdaptor
_logger = logging.getLogger("validation.tum")
from pysvso.config import Settings
settings = Settings()
TUM_DATASET_NAME = settings.DATASET_NAME # "rgbd_dataset_freiburg1_xyz"
HDD = settings.HDD # "/home/yiakwy"
ROOT = settings.ROOT # "{hdd}/WorkSpace".format(hdd=HDD)
REPO = settings.REPO # "SEMANTIC_SLAM"
PROJECT_ROOT = settings.PROJECT_ROOT # "{root}/Github/{repo}".format(root=ROOT, repo=REPO)
# TUM_DATA_DIR = "{project_base}/data/tum/{dataset_name}".format(project_base=Project_base,
# dataset_name=TUM_DATASET_NAME)
TUM_DATA_DIR = settings.DATA_DIR
Project_base = PROJECT_ROOT
class Trajectory3D:
# Implements STL iterator
class Trajectory3DIterator(object):
def __init__(self, trajectory):
self._trajectory = trajectory
self.counter = self.__counter__()
def __iter__(self):
return self
def __counter__(self):
l = len(self._trajectory)
# one dimension index
ind = 0
while True:
yield ind
ind += 1
if ind >= l:
break
def __next__(self):
try:
ind = next(self.counter)
return self._trajectory[ind]
except StopIteration:
raise StopIteration()
def __str__(self):
return "Trajectory iterator"
def __init__(self, timestamp, rots, trans):
self.timestamps = timestamp
# Rotations
self.rots = rots
# Translations
self.trans = trans
pass
def __iter__(self):
data = np.c_[self.timestamps, self.trans, self.rots]
return Trajectory3D.Trajectory3DIterator(data)
# @todo TODO return aligned pose data
# This part implements the EVO synchronization algorithm to fetch the ground truth for a specified picture timestamp
def query_timestamp(self, timestamp, max_diff=0.01):
diffs = np.abs(self.timestamps - timestamp)
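# Hedged sketch of the remainder of the matching step (not the original code):
# pick the ground-truth sample with the closest timestamp and reject it if it is
# farther away than `max_diff`, mirroring EVO's stamp-association behaviour.
# idx = int(np.argmin(diffs))
# if diffs[idx] > max_diff:
#     return None
# return self.rots[idx], self.trans[idx]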
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Jun 13 14:51:29 2019
@author: nk7g14
For some reason this runs fine in the Linux terminal but not in the IPython one.
https://heasarc.nasa.gov/ftools/caldb/help/uvotsource.html
"""
import logging
import subprocess
import glob
from astropy.io import fits
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import re
import os
import auxil as aux
import swift
logging.basicConfig(level=logging.DEBUG, format=' %(asctime)s -- %(message)s')
home_path = os.getcwd()
img_path = 'sources/NGC1313/swift/uvot/img'
def uvotsource(image_file):
'''
Performs the 'uvotsource' command on a given .img file
'''
output_file = image_file[:-4] + '.fits'
subprocess.call(['uvotsource image={} srcreg=src.reg bkgreg=bkg.reg sigma=1.0 outfile=uvotsource/{}'.format(image_file, output_file)], shell=True)
def uvotproduct(image_file):
'''
#TODO translate the following into python
my @in = <file>;
$t = $in3[0];
$MET = (($t - 53769.415972)*24*3600) + 160653541.0;
system "uvotproduct timezero=$MET infile=$dir outfile=$outfile plotfile=$outfile2 srcreg=src.reg bkgreg=bkg.reg batpos=NONE xrtpos=NONE uvotpos=NONE groundpos=NONE reportfile=$rep clobber=yes";
'''
pass
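# Hedged sketch of the Perl snippet above, not the original implementation.
# `uvotproduct_sketch`, `time_file` and `out_prefix` are hypothetical names; the
# sketch assumes `time_file` holds the MJD of the first exposure on its first line
# and that src.reg/bkg.reg sit in the working directory, as for uvotsource().
# The epoch 53769.415972 and MET offset 160653541.0 are copied verbatim from the Perl.
def uvotproduct_sketch(image_file, time_file, out_prefix):
    with open(time_file) as f:
        t_mjd = float(f.readline().split()[0])
    met = ((t_mjd - 53769.415972) * 24 * 3600) + 160653541.0
    cmd = ('uvotproduct timezero={} infile={} outfile={}.fits plotfile={}.gif '
           'srcreg=src.reg bkgreg=bkg.reg batpos=NONE xrtpos=NONE uvotpos=NONE '
           'groundpos=NONE reportfile={}_report.txt clobber=yes').format(
               met, image_file, out_prefix, out_prefix, out_prefix)
    subprocess.call([cmd], shell=True)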
def GetFluxes(fits_file):
df = pd.DataFrame()
data = fits.open(fits_file)
df['MET'] = np.array(data[1].data['MET'], dtype=float)
df['TSTART'] = np.array(data[1].data['TSTART'], dtype=float)
df['TSTOP'] = np.array(data[1].data['TSTOP'], dtype=float)
df['RAW_TOT_CNTS'] = np.array(data[1].data['RAW_TOT_CNTS'], dtype=float)
df['RAW_TOT_CNTS_ERR'] = np.array(data[1].data['RAW_TOT_CNTS_ERR'], dtype=float)
import numpy as np
from scipy.interpolate import interp1d
class NormalizedPulseTemplate:
"""
Class for handling the template for the pulsed response of the pixels
of the camera to a single photo-electron in high and low gain.
"""
def __init__(self, amplitude_HG, amplitude_LG, time, amplitude_HG_err=None,
amplitude_LG_err=None):
"""
Save the pulse template and optional error
and create an interpolation.
Parameters
----------
amplitude_HG/LG: array
Amplitude of the signal produced in a pixel by a photo-electron
in high gain (HG) and low gain (LG) for successive time samples
time: array
Times of the samples
amplitude_HG/LG_err: array
Error on the pulse template amplitude
"""
self.time = np.array(time)
self.amplitude_HG = np.array(amplitude_HG)
self.amplitude_LG = np.array(amplitude_LG)
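# Hedged sketch of the interpolation step mentioned in the docstring (not the
# original continuation): it assumes scipy.interpolate.interp1d, imported above,
# with cubic interpolation and the template forced to zero outside the sampled
# time window; the attribute names are hypothetical.
# self._interp_HG = interp1d(self.time, self.amplitude_HG, kind='cubic',
#                            bounds_error=False, fill_value=0.)
# self._interp_LG = interp1d(self.time, self.amplitude_LG, kind='cubic',
#                            bounds_error=False, fill_value=0.)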
# -*- coding: utf-8 -*-
"""NOISE ROUTINES
This module contains methods for adding and removing noise from data.
:Author: <NAME> <<EMAIL>>
"""
from builtins import zip
import numpy as np
def add_noise(data, sigma=1.0, noise_type='gauss'):
r"""Add noise to data
This method adds Gaussian or Poisson noise to the input data
Parameters
----------
data : np.ndarray, list or tuple
Input data array
sigma : float or list, optional
Standard deviation of the noise to be added ('gauss' only)
noise_type : str {'gauss', 'poisson'}
Type of noise to be added (default is 'gauss')
Returns
-------
np.ndarray input data with added noise
Raises
------
ValueError
If `noise_type` is not 'gauss' or 'poisson'
ValueError
If number of `sigma` values does not match the first dimension of the
input data
Examples
--------
>>> import numpy as np
>>> from modopt.signal.noise import add_noise
>>> x = np.arange(9).reshape(3, 3).astype(float)
>>> x
array([[ 0., 1., 2.],
[ 3., 4., 5.],
[ 6., 7., 8.]])
>>> np.random.seed(1)
>>> add_noise(x, noise_type='poisson')
array([[ 0., 2., 2.],
[ 4., 5., 10.],
[ 11., 15., 18.]])
>>> import numpy as np
>>> from modopt.signal.noise import add_noise
>>> x = np.zeros(5)
>>> x
array([ 0., 0., 0., 0., 0.])
>>> np.random.seed(1)
>>> add_noise(x, sigma=2.0)
array([ 3.24869073, -1.22351283, -1.0563435 , -2.14593724, 1.73081526])
"""
data = np.array(data)
import os
import numpy as np
import pandas as pd
from tqdm import tqdm
def load_hydroSystem(file_path):
'''
Processes and imports the data of the Condition Monitoring of Hydraulic Systems dataset.
I/O:
file_path: a string containing the directory where the sensor data sets are located;
return: a Numpy array of shape ((number of instances, timestamp, features), label)
'''
# List the files containing the sensor data
load_names = os.listdir(file_path)
load_names.remove('description.txt')
load_names.remove('documentation.txt')
# Column indices used to upsample the variables onto the grid of the highest sampling rate
cols_1 = np.arange(0, 6000, 100)
cols_10 = np.arange(0, 6000, 10)
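# Hedged sketch of how these index grids could be used (not the original code):
# a sensor logged at 1 Hz yields 60 readings per 60 s cycle, so its values are
# placed at `cols_1` on the 6000-column (100 Hz) grid and held constant until the
# next reading; `raw_1hz` and `n_cycles` are hypothetical names.
# up = np.full((n_cycles, 6000), np.nan)
# up[:, cols_1] = raw_1hz
# up = pd.DataFrame(up).ffill(axis=1).values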
# The purpose of these tests are to ensure that calling ufuncs with quantities
# returns quantities with the right units, or raises exceptions.
import numpy as np
from numpy.testing.utils import assert_allclose
from ... import units as u
from ...tests.helper import pytest, raises
class TestUfuncCoverage(object):
"""Test that we cover all ufunc's"""
def test_coverage(self):
all_np_ufuncs = set([ufunc for ufunc in np.core.umath.__dict__.values()
if type(ufunc) == np.ufunc])
from .. import quantity_helper as qh
all_q_ufuncs = (qh.UNSUPPORTED_UFUNCS |
set(qh.UFUNC_HELPERS.keys()))
assert all_np_ufuncs - all_q_ufuncs == set([])
assert all_q_ufuncs - all_np_ufuncs == set([])
class TestQuantityTrigonometricFuncs(object):
"""
Test trigonometric functions
"""
def test_sin_scalar(self):
q = np.sin(30. * u.degree)
assert q.unit == u.dimensionless_unscaled
assert_allclose(q.value, 0.5)
def test_sin_array(self):
q = np.sin(np.array([0., np.pi / 4., np.pi / 2.]) * u.radian)
assert q.unit == u.dimensionless_unscaled
assert_allclose(q.value,
np.array([0., 1. / np.sqrt(2.), 1.]), atol=1.e-15)
def test_arcsin_scalar(self):
q1 = 30. * u.degree
q2 = np.arcsin(np.sin(q1)).to(q1.unit)
assert_allclose(q1.value, q2.value)
def test_arcsin_array(self):
q1 = np.array([0., np.pi / 4., np.pi / 2.]) * u.radian
q2 = np.arcsin(np.sin(q1)).to(q1.unit)
assert_allclose(q1.value, q2.value)
def test_sin_invalid_units(self):
with pytest.raises(TypeError) as exc:
np.sin(3. * u.m)
assert exc.value.args[0] == ("Can only apply 'sin' function "
"to quantities with angle units")
def test_arcsin_invalid_units(self):
with pytest.raises(TypeError) as exc:
np.arcsin(3. * u.m)
assert exc.value.args[0] == ("Can only apply 'arcsin' function to "
"dimensionless quantities")
def test_cos_scalar(self):
q = np.cos(np.pi / 3. * u.radian)
assert q.unit == u.dimensionless_unscaled
assert_allclose(q.value, 0.5)
def test_cos_array(self):
q = np.cos(np.array([0., np.pi / 4., np.pi / 2.]) * u.radian)
assert q.unit == u.dimensionless_unscaled
assert_allclose(q.value,
np.array([1., 1. / np.sqrt(2.), 0.]), atol=1.e-15)
def test_arccos_scalar(self):
q1 = np.pi / 3. * u.radian
q2 = np.arccos(np.cos(q1)).to(q1.unit)
assert_allclose(q1.value, q2.value)
def test_arccos_array(self):
q1 = np.array([0., np.pi / 4., np.pi / 2.]) * u.radian
q2 = np.arccos(np.cos(q1)).to(q1.unit)
assert_allclose(q1.value, q2.value)
def test_cos_invalid_units(self):
with pytest.raises(TypeError) as exc:
np.cos(3. * u.s)
assert exc.value.args[0] == ("Can only apply 'cos' function "
"to quantities with angle units")
def test_arccos_invalid_units(self):
with pytest.raises(TypeError) as exc:
np.arccos(3. * u.s)
assert exc.value.args[0] == ("Can only apply 'arccos' function to "
"dimensionless quantities")
def test_tan_scalar(self):
q = np.tan(np.pi / 3. * u.radian)
assert q.unit == u.dimensionless_unscaled
assert_allclose(q.value, np.sqrt(3.))
def test_tan_array(self):
q = np.tan(np.array([0., 45., 135., 180.]) * u.degree)
assert q.unit == u.dimensionless_unscaled
assert_allclose(q.value,
np.array([0., 1., -1., 0.]), atol=1.e-15)
def test_arctan_scalar(self):
q = np.pi / 3. * u.radian
assert np.arctan(np.tan(q))
def test_arctan_array(self):
q = np.array([10., 30., 70., 80.]) * u.degree
assert_allclose(np.arctan(np.tan(q)).to(q.unit).value, q.value)
def test_tan_invalid_units(self):
with pytest.raises(TypeError) as exc:
np.tan(np.array([1, 2, 3]) * u.N)
assert exc.value.args[0] == ("Can only apply 'tan' function "
"to quantities with angle units")
def test_arctan_invalid_units(self):
with pytest.raises(TypeError) as exc:
np.arctan(np.array([1, 2, 3]) * u.N)
assert exc.value.args[0] == ("Can only apply 'arctan' function to "
"dimensionless quantities")
def test_arctan2_valid(self):
q1 = np.array([10., 30., 70., 80.]) * u.m
q2 = 2.0 * u.km
assert np.arctan2(q1, q2).unit == u.radian
assert_allclose(np.arctan2(q1, q2).value,
np.arctan2(q1.value, q2.to(q1.unit).value))
q3 = q1 / q2
q4 = 1.
at2 = np.arctan2(q3, q4)
assert_allclose(at2.value, np.arctan2(q3.to(1).value, q4))
def test_arctan2_invalid(self):
with pytest.raises(u.UnitsError) as exc:
np.arctan2(np.array([1, 2, 3]) * u.N, 1. * u.s)
assert "compatible dimensions" in exc.value.args[0]
with pytest.raises(u.UnitsError) as exc:
np.arctan2(np.array([1, 2, 3]) * u.N, 1.)
assert "dimensionless quantities when other arg" in exc.value.args[0]
def test_radians(self):
q1 = np.deg2rad(180. * u.degree)
assert_allclose(q1.value, np.pi)
assert q1.unit == u.radian
q2 = np.radians(180. * u.degree)
assert_allclose(q2.value, np.pi)
assert q2.unit == u.radian
# the following doesn't make much sense in terms of the name of the
# routine, but we check it gives the correct result.
q3 = np.deg2rad(3. * u.radian)
assert_allclose(q3.value, 3.)
assert q3.unit == u.radian
q4 = np.radians(3. * u.radian)
assert_allclose(q4.value, 3.)
assert q4.unit == u.radian
with pytest.raises(TypeError):
np.deg2rad(3. * u.m)
with pytest.raises(TypeError):
np.radians(3. * u.m)
def test_degrees(self):
# the following doesn't make much sense in terms of the name of the
# routine, but we check it gives the correct result.
q1 = np.rad2deg(60. * u.degree)
assert_allclose(q1.value, 60.)
assert q1.unit == u.degree
q2 = np.degrees(60. * u.degree)
assert_allclose(q2.value, 60.)
assert q2.unit == u.degree
q3 = np.rad2deg(np.pi * u.radian)
assert_allclose(q3.value, 180.)
assert q3.unit == u.degree
q4 = np.degrees(np.pi * u.radian)
assert_allclose(q4.value, 180.)
assert q4.unit == u.degree
with pytest.raises(TypeError):
np.rad2deg(3. * u.m)
with pytest.raises(TypeError):
np.degrees(3. * u.m)
class TestQuantityMathFuncs(object):
"""
Test other mathematical functions
"""
def test_multiply_scalar(self):
assert np.multiply(4. * u.m, 2. / u.s) == 8. * u.m / u.s
assert np.multiply(4. * u.m, 2.) == 8. * u.m
assert np.multiply(4., 2. / u.s) == 8. / u.s
def test_multiply_array(self):
assert np.all(np.multiply(np.arange(3.) * u.m, 2. / u.s) ==
np.arange(0, 6., 2.) * u.m / u.s)
@pytest.mark.parametrize('function', (np.divide, np.true_divide))
def test_divide_scalar(self, function):
assert function(4. * u.m, 2. * u.s) == function(4., 2.) * u.m / u.s
assert function(4. * u.m, 2.) == function(4., 2.) * u.m
assert function(4., 2. * u.s) == function(4., 2.) / u.s
@pytest.mark.parametrize('function', (np.divide, np.true_divide))
def test_divide_array(self, function):
assert np.all(function(np.arange(3.) * u.m, 2. * u.s) ==
function(np.arange(3.), 2.) * u.m / u.s)
def test_divmod_and_floor_divide(self):
inch = u.Unit(0.0254 * u.m)
dividend = np.array([1., 2., 3.]) * u.m
divisor = np.array([3., 4., 5.]) * inch
quotient = dividend // divisor
assert_allclose(quotient.value, [13., 19., 23.])
assert quotient.unit == u.dimensionless_unscaled
quotient2, remainder = divmod(dividend, divisor)
assert np.all(quotient2 == quotient)
assert_allclose(remainder.value, [0.0094, 0.0696, 0.079])
assert remainder.unit == dividend.unit
with pytest.raises(TypeError):
divmod(dividend, u.km)
with pytest.raises(TypeError):
dividend // u.km
def test_sqrt_scalar(self):
assert np.sqrt(4. * u.m) == 2. * u.m ** 0.5
def test_sqrt_array(self):
assert np.all(np.sqrt(np.array([1., 4., 9.]) * u.m)
== np.array([1., 2., 3.]) * u.m ** 0.5)
def test_square_scalar(self):
assert np.square(4. * u.m) == 16. * u.m ** 2
def test_square_array(self):
assert np.all(np.square(np.array([1., 2., 3.]) * u.m)
== np.array([1., 4., 9.]) * u.m ** 2)
def test_reciprocal_scalar(self):
assert np.reciprocal(4. * u.m) == 0.25 / u.m
def test_reciprocal_array(self):
assert np.all(np.reciprocal(np.array([1., 2., 4.]) * u.m)
== np.array([1., 0.5, 0.25]) / u.m)
# cbrt only introduced in numpy 1.10
@pytest.mark.skipif("not hasattr(np, 'cbrt')")
def test_cbrt_scalar(self):
assert np.cbrt(8. * u.m**3) == 2. * u.m
@pytest.mark.skipif("not hasattr(np, 'cbrt')")
def test_cbrt_array(self):
# Calculate cbrt on both sides since on Windows the cube root of 64
# does not exactly equal 4. See 4388.
values = np.array([1., 8., 64.])
assert np.all(np.cbrt(values * u.m**3) ==
np.cbrt(values) * u.m)
def test_power_scalar(self):
assert np.power(4. * u.m, 2.) == 16. * u.m ** 2
assert np.power(4., 200. * u.cm / u.m) == \
u.Quantity(16., u.dimensionless_unscaled)
# regression check on #1696
assert np.power(4. * u.m, 0.) == 1. * u.dimensionless_unscaled
def test_power_array(self):
assert np.all(np.power(np.array([1., 2., 3.]) * u.m, 3.)
== np.array([1., 8., 27.]) * u.m ** 3)
# regression check on #1696
assert np.all(np.power(np.arange(4.) * u.m, 0.) ==
1. * u.dimensionless_unscaled)
@raises(ValueError)
def test_power_array_array(self):
np.power(4. * u.m, [2., 4.])
@raises(ValueError)
def test_power_array_array2(self):
np.power([2., 4.] * u.m, [2., 4.])
def test_power_invalid(self):
with pytest.raises(TypeError) as exc:
np.power(3., 4. * u.m)
assert "raise something to a dimensionless" in exc.value.args[0]
def test_copysign_scalar(self):
assert np.copysign(3 * u.m, 1.) == 3. * u.m
assert np.copysign(3 * u.m, 1. * u.s) == 3. * u.m
assert np.copysign(3 * u.m, -1.) == -3. * u.m
assert np.copysign(3 * u.m, -1. * u.s) == -3. * u.m
def test_copysign_array(self):
assert np.all(np.copysign(np.array([1., 2., 3.]) * u.s, -1.) == -np.array([1., 2., 3.]) * u.s)
assert np.all(np.copysign(np.array([1., 2., 3.]) * u.s, -1. * u.m) == -np.array([1., 2., 3.]) * u.s)
assert np.all(np.copysign(np.array([1., 2., 3.]) * u.s, np.array([-2.,2.,-4.]) * u.m) == np.array([-1., 2., -3.]) * u.s)
q = np.copysign(np.array([1., 2., 3.]), -3 * u.m)
assert np.all(q == np.array([-1., -2., -3.]))
assert not isinstance(q, u.Quantity)
def test_ldexp_scalar(self):
assert np.ldexp(4. * u.m, 2) == 16. * u.m
def test_ldexp_array(self):
assert np.all(np.ldexp(np.array([1., 2., 3.]) * u.m, [3, 2, 1])
== np.array([8., 8., 6.]) * u.m)
def test_ldexp_invalid(self):
with pytest.raises(TypeError):
np.ldexp(3. * u.m, 4.)
with pytest.raises(TypeError):
np.ldexp(3., u.Quantity(4, u.m, dtype=int))
@pytest.mark.parametrize('function', (np.exp, np.expm1, np.exp2,
np.log, np.log2, np.log10, np.log1p))
def test_exp_scalar(self, function):
q = function(3. * u.m / (6. * u.m))
assert q.unit == u.dimensionless_unscaled
assert q.value == function(0.5)
@pytest.mark.parametrize('function', (np.exp, np.expm1, np.exp2,
np.log, np.log2, np.log10, np.log1p))
def test_exp_array(self, function):
q = function(np.array([2., 3., 6.]) * u.m / (6. * u.m))
assert q.unit == u.dimensionless_unscaled
assert np.all(q.value
== function(np.array([1. / 3., 1. / 2., 1.])))
# should also work on quantities that can be made dimensionless
q2 = function(np.array([2., 3., 6.]) * u.m / (6. * u.cm))
assert q2.unit == u.dimensionless_unscaled
assert_allclose(q2.value,
function(np.array([100. / 3., 100. / 2., 100.])))
@pytest.mark.parametrize('function', (np.exp, np.expm1, np.exp2,
np.log, np.log2, np.log10, np.log1p))
def test_exp_invalid_units(self, function):
# Can't use exp() with non-dimensionless quantities
with pytest.raises(TypeError) as exc:
function(3. * u.m / u.s)
assert exc.value.args[0] == ("Can only apply '{0}' function to "
"dimensionless quantities"
.format(function.__name__))
def test_modf_scalar(self):
q = np.modf(9. * u.m / (600. * u.cm))
assert q == (0.5 * u.dimensionless_unscaled,
1. * u.dimensionless_unscaled)
def test_modf_array(self):
v = np.arange(10.) * u.m / (500. * u.cm)
q = np.modf(v)
n = np.modf(v.to(1).value)
assert q[0].unit == u.dimensionless_unscaled
assert q[1].unit == u.dimensionless_unscaled
assert all(q[0].value == n[0])
assert all(q[1].value == n[1])
def test_frexp_scalar(self):
q = np.frexp(3. * u.m / (6. * u.m))
assert q == (np.array(0.5), np.array(0.0))
def test_frexp_array(self):
q = np.frexp(np.array([2., 3., 6.]) * u.m / (6. * u.m))
assert all((_q0, _q1) == np.frexp(_d) for _q0, _q1, _d
in zip(q[0], q[1], [1. / 3., 1. / 2., 1.]))
def test_frexp_invalid_units(self):
# Can't use frexp() with non-dimensionless quantities
with pytest.raises(TypeError) as exc:
np.frexp(3. * u.m / u.s)
assert exc.value.args[0] == ("Can only apply 'frexp' function to "
"unscaled dimensionless quantities")
# also does not work on quantities that can be made dimensionless
with pytest.raises(TypeError) as exc:
np.frexp(np.array([2., 3., 6.]) * u.m / (6. * u.cm))
assert exc.value.args[0] == ("Can only apply 'frexp' function to "
"unscaled dimensionless quantities")
@pytest.mark.parametrize('function', (np.logaddexp, np.logaddexp2))
def test_dimensionless_twoarg_array(self, function):
q = function(np.array([2., 3., 6.]) * u.m / (6. * u.cm), 1.)
assert q.unit == u.dimensionless_unscaled
assert_allclose(q.value,
function(np.array([100. / 3., 100. / 2., 100.]), 1.))
@pytest.mark.parametrize('function', (np.logaddexp, np.logaddexp2))
def test_dimensionless_twoarg_invalid_units(self, function):
with pytest.raises(TypeError) as exc:
function(1. * u.km / u.s, 3. * u.m / u.s)
assert exc.value.args[0] == ("Can only apply '{0}' function to "
"dimensionless quantities"
.format(function.__name__))
class TestInvariantUfuncs(object):
@pytest.mark.parametrize(('ufunc'), [np.absolute, np.fabs,
np.conj, np.conjugate,
np.negative, np.spacing, np.rint,
np.floor, np.ceil])
def test_invariant_scalar(self, ufunc):
q_i = 4.7 * u.m
q_o = ufunc(q_i)
assert isinstance(q_o, u.Quantity)
assert q_o.unit == q_i.unit
assert q_o.value == ufunc(q_i.value)
@pytest.mark.parametrize(('ufunc'), [np.absolute, np.conjugate,
np.negative, np.rint,
np.floor, np.ceil])
def test_invariant_array(self, ufunc):
q_i = np.array([-3.3, 2.1, 10.2]) * u.kg / u.s
q_o = ufunc(q_i)
assert isinstance(q_o, u.Quantity)
assert q_o.unit == q_i.unit
assert np.all(q_o.value == ufunc(q_i.value))
@pytest.mark.parametrize(('ufunc'), [np.add, np.subtract, np.hypot,
np.maximum, np.minimum, np.nextafter,
np.remainder, np.mod, np.fmod])
def test_invariant_twoarg_scalar(self, ufunc):
q_i1 = 4.7 * u.m
q_i2 = 9.4 * u.km
q_o = ufunc(q_i1, q_i2)
assert isinstance(q_o, u.Quantity)
assert q_o.unit == q_i1.unit
assert_allclose(q_o.value, ufunc(q_i1.value, q_i2.to(q_i1.unit).value))
@pytest.mark.parametrize(('ufunc'), [np.add, np.subtract, np.hypot,
np.maximum, np.minimum, np.nextafter,
np.remainder, np.mod, np.fmod])
def test_invariant_twoarg_array(self, ufunc):
q_i1 = np.array([-3.3, 2.1, 10.2]) * u.kg / u.s
q_i2 = np.array([10., -5., 1.e6]) * u.g / u.us
q_o = ufunc(q_i1, q_i2)
assert isinstance(q_o, u.Quantity)
assert q_o.unit == q_i1.unit
assert_allclose(q_o.value, ufunc(q_i1.value, q_i2.to(q_i1.unit).value))
@pytest.mark.parametrize(('ufunc'), [np.add, np.subtract, np.hypot,
np.maximum, np.minimum, np.nextafter,
np.remainder, np.mod, np.fmod])
def test_invariant_twoarg_one_arbitrary(self, ufunc):
q_i1 = np.array([-3.3, 2.1, 10.2]) * u.kg / u.s
arbitrary_unit_value = np.array([0.])
q_o = ufunc(q_i1, arbitrary_unit_value)
assert isinstance(q_o, u.Quantity)
assert q_o.unit == q_i1.unit
assert_allclose(q_o.value, ufunc(q_i1.value, arbitrary_unit_value))
@pytest.mark.parametrize(('ufunc'), [np.add, np.subtract, np.hypot,
np.maximum, np.minimum, np.nextafter,
np.remainder, np.mod, np.fmod])
def test_invariant_twoarg_invalid_units(self, ufunc):
q_i1 = 4.7 * u.m
q_i2 = 9.4 * u.s
with pytest.raises(u.UnitsError) as exc:
ufunc(q_i1, q_i2)
assert "compatible dimensions" in exc.value.args[0]
class TestComparisonUfuncs(object):
@pytest.mark.parametrize(('ufunc'), [np.greater, np.greater_equal,
np.less, np.less_equal,
np.not_equal, np.equal])
def test_comparison_valid_units(self, ufunc):
q_i1 = np.array([-3.3, 2.1, 10.2]) * u.kg / u.s
q_i2 = np.array([10., -5., 1.e6]) * u.g / u.Ms
q_o = ufunc(q_i1, q_i2)
assert not isinstance(q_o, u.Quantity)
assert q_o.dtype == np.bool
assert np.all(q_o == ufunc(q_i1.value, q_i2.to(q_i1.unit).value))
q_o2 = ufunc(q_i1 / q_i2, 2.)
assert not isinstance(q_o2, u.Quantity)
assert q_o2.dtype == np.bool
assert np.all(q_o2 == ufunc((q_i1 / q_i2).to(1).value, 2.))
# comparison with 0., inf, nan is OK even for dimensional quantities
for arbitrary_unit_value in (0., np.inf, np.nan):
ufunc(q_i1, arbitrary_unit_value)
ufunc(q_i1, arbitrary_unit_value*np.ones(len(q_i1)))
# and just for completeness
ufunc(q_i1, np.array([0., np.inf, np.nan]))
@pytest.mark.parametrize(('ufunc'), [np.greater, np.greater_equal,
np.less, np.less_equal,
np.not_equal, np.equal])
def test_comparison_invalid_units(self, ufunc):
q_i1 = 4.7 * u.m
q_i2 = 9.4 * u.s
with pytest.raises(u.UnitsError) as exc:
ufunc(q_i1, q_i2)
assert "compatible dimensions" in exc.value.args[0]
class TestInplaceUfuncs(object):
@pytest.mark.parametrize(('value'), [1., np.arange(10.)])
def test_one_argument_ufunc_inplace(self, value):
# without scaling
s = value * u.rad
check = s
np.sin(s, out=s)
assert check is s
assert check.unit == u.dimensionless_unscaled
# with scaling
s2 = (value * u.rad).to(u.deg)
check2 = s2
np.sin(s2, out=s2)
assert check2 is s2
assert check2.unit == u.dimensionless_unscaled
assert_allclose(s.value, s2.value)
@pytest.mark.parametrize(('value'), [1., np.arange(10.)])
def test_one_argument_ufunc_inplace_2(self, value):
"""Check inplace works with non-quantity input and quantity output"""
s = value * u.m
check = s
np.absolute(value, out=s)
assert check is s
assert np.all(check.value == np.absolute(value))
assert check.unit is u.dimensionless_unscaled
np.sqrt(value, out=s)
assert check is s
assert np.all(check.value == np.sqrt(value))
assert check.unit is u.dimensionless_unscaled
np.exp(value, out=s)
assert check is s
assert np.all(check.value == np.exp(value))
assert check.unit is u.dimensionless_unscaled
np.arcsin(value/10., out=s)
from typing import Tuple, Callable, Optional
import pickle
import pytest
from copy import copy
from pathlib import Path
from _helpers import (
bias_knn,
create_kernels,
density_normalization,
jax_not_installed_skip,
random_transition_matrix,
)
import scanpy as sc
import cellrank as cr
from scanpy import Neighbors
from anndata import AnnData
from cellrank._key import Key
from cellrank.tl._utils import _normalize
from cellrank.ul._utils import _get_neighs, _get_neighs_params
from cellrank.tl.kernels import (
VelocityKernel,
CytoTRACEKernel,
PseudotimeKernel,
PrecomputedKernel,
ConnectivityKernel,
)
from cellrank.tl.kernels._base_kernel import (
Kernel,
Constant,
KernelAdd,
KernelMul,
_dtype,
_is_bin_mult,
)
from cellrank.tl.kernels._cytotrace_kernel import CytoTRACEAggregation
import numpy as np
from scipy.sparse import eye as speye
from scipy.sparse import isspmatrix_csr
from pandas.core.dtypes.common import is_bool_dtype, is_integer_dtype
_rtol = 1e-6
class CustomFunc(cr.tl.kernels.SimilaritySchemeABC):
def __call__(
self, v: np.ndarray, D: np.ndarray, softmax_scale: float = 1.0
) -> Tuple[np.ndarray, np.ndarray]:
probs, logits = np.zeros((D.shape[0],), dtype=np.float64), np.zeros(
(D.shape[0],), dtype=np.float64
)
probs[0] = 1.0
return probs, logits
class CustomFuncHessian(CustomFunc):
def hessian(
self, v: np.ndarray, D: np.ndarray, _softmax_scale: float = 1.0
) -> np.ndarray:
# should be either (n, g, g) or (n, g), will be (g, g)
return np.zeros((D.shape[0], v.shape[0], v.shape[0]))
class CustomKernel(Kernel):
def compute_transition_matrix(
self, sparse: bool = False, dnorm: bool = False
) -> "KernelExpression":
if sparse:
tmat = speye(self.adata.n_obs, dtype=np.float32)
else:
tmat = np.eye(self.adata.n_obs, dtype=np.float32)
self._compute_transition_matrix(tmat, density_normalize=dnorm)
return self
def copy(self) -> "KernelExpression":
return copy(self)
class InvalidFuncProbs(cr.tl.kernels.SimilaritySchemeABC):
def __call__(
self, v: np.ndarray, D: np.ndarray, _softmax_scale: float = 1.0
) -> Tuple[np.ndarray, np.ndarray]:
return np.ones((D.shape[0],), dtype=np.float64), np.zeros(
(D.shape[0],), dtype=np.float64
)
class InvalidFuncHessianShape(CustomFunc):
def __call__(
self, v: np.ndarray, D: np.ndarray, _softmax_scale: float = 1.0
) -> Tuple[np.ndarray, np.ndarray]:
probs, logits = np.zeros((D.shape[0],), dtype=np.float64), np.zeros(
(D.shape[0],), dtype=np.float64
)
probs[-1] = 1.0
return probs, logits
def hessian(
self, v: np.ndarray, _D: np.ndarray, _softmax_scale: float = 1.0
) -> np.ndarray:
# should be either (n, g, g) or (n, g), will be (g, g)
return np.zeros((v.shape[0], v.shape[0]))
class TestInitializeKernel:
def test_none_transition_matrix(self, adata: AnnData):
vk = VelocityKernel(adata)
ck = ConnectivityKernel(adata)
pk = PseudotimeKernel(adata, time_key="latent_time")
assert vk._transition_matrix is None
assert ck._transition_matrix is None
assert pk._transition_matrix is None
def test_not_none_transition_matrix_compute(self, adata: AnnData):
vk = VelocityKernel(adata).compute_transition_matrix(softmax_scale=4)
ck = ConnectivityKernel(adata).compute_transition_matrix()
pk = PseudotimeKernel(adata, time_key="latent_time").compute_transition_matrix()
assert vk.transition_matrix is not None
assert ck.transition_matrix is not None
assert pk.transition_matrix is not None
def test_not_none_transition_matrix_accessor(self, adata: AnnData):
vk = VelocityKernel(adata)
ck = ConnectivityKernel(adata)
pk = PseudotimeKernel(adata, time_key="latent_time")
assert vk.transition_matrix is not None
assert ck.transition_matrix is not None
assert pk.transition_matrix is not None
def test_adding_hidden_constants(self, adata: AnnData):
k = VelocityKernel(adata) + ConnectivityKernel(adata)
assert _is_bin_mult(k[0])
assert isinstance(k[0], KernelMul)
assert isinstance(k[0][0], Constant)
assert isinstance(k[0][1], VelocityKernel)
assert k[0][0].transition_matrix == 1.0
assert _is_bin_mult(k[1])
assert isinstance(k[1], KernelMul)
assert isinstance(k[1][0], Constant)
assert isinstance(k[1][1], ConnectivityKernel)
assert k[1][0].transition_matrix == 1.0
def test_length(self, adata: AnnData):
k = VelocityKernel(adata) + ConnectivityKernel(adata)
assert len(k) == 2
def test_accessor_out_of_range(self, adata: AnnData):
k = VelocityKernel(adata) + ConnectivityKernel(adata)
with pytest.raises(IndexError):
_ = k[2]
def test_parent(self, adata: AnnData):
vk = VelocityKernel(adata)
ck = ConnectivityKernel(adata)
k = vk + ck
assert vk._parent._parent is k # invisible constants
assert ck._parent._parent is k
assert k._parent is None
def test_uninitialized_both(self, adata: AnnData):
k = VelocityKernel(adata) + ConnectivityKernel(adata)
with pytest.raises(RuntimeError):
k.compute_transition_matrix()
def test_uninitialized_one(self, adata: AnnData):
k = (
VelocityKernel(adata)
+ ConnectivityKernel(adata).compute_transition_matrix()
)
with pytest.raises(RuntimeError):
k.compute_transition_matrix()
def test_initialized(self, adata: AnnData):
k = (
VelocityKernel(adata).compute_transition_matrix(softmax_scale=4)
+ ConnectivityKernel(adata).compute_transition_matrix()
)
k.compute_transition_matrix()
assert k.transition_matrix is not None
def test_invalid_type(self, adata: AnnData):
with pytest.raises(TypeError):
_ = None * VelocityKernel(adata)
def test_negative_constant(self, adata: AnnData):
with pytest.raises(ValueError):
_ = -1 * VelocityKernel(adata)
def test_invalid_constant(self, adata: AnnData):
with pytest.raises(TypeError):
_ = Constant(adata, None)
def test_inversion(self, adata: AnnData):
c = ConnectivityKernel(adata, backward=False)
assert not c.backward
nc = ~c
assert nc.backward
def test_inversion_inplace(self, adata: AnnData):
c = ConnectivityKernel(adata, backward=False)
assert not c.backward
_ = ~c
assert c.backward
def test_inversion_propagation(self, adata: AnnData):
c = ConnectivityKernel(adata, backward=False)
v = VelocityKernel(adata, backward=False)
k = ~(c + v)
assert c.backward
assert v.backward
assert k.backward
def test_inversion_recalculation(self, adata: AnnData):
c = ConnectivityKernel(adata).compute_transition_matrix()
z = ~(c + c)
with pytest.raises(RuntimeError):
z.compute_transition_matrix()
def test_inversion_preservation_of_constants(self, adata: AnnData):
c = ConnectivityKernel(adata).compute_transition_matrix()
a = (3 * c + 1 * c).compute_transition_matrix()
b = ~a
c.compute_transition_matrix()
assert a[0][0].transition_matrix == 3 / 4
assert b[0][0].transition_matrix == 3 / 4
assert a[1][0].transition_matrix == 1 / 4
assert b[1][0].transition_matrix == 1 / 4
def test_addition_simple(self, adata: AnnData):
k = VelocityKernel(adata) + ConnectivityKernel(adata)
assert isinstance(k, KernelAdd)
def test_multiplication_simple(self, adata: AnnData):
k = 10 * VelocityKernel(adata)
c = _is_bin_mult(k)
assert isinstance(c, Constant)
assert c.transition_matrix == 10
def test_multiplication_simple_normalization(self, adata: AnnData):
k = 10 * VelocityKernel(adata).compute_transition_matrix(softmax_scale=4)
c = _is_bin_mult(k)
assert c.transition_matrix == 10
def test_constant(self, adata: AnnData):
k = 9 * VelocityKernel(adata) + 1 * ConnectivityKernel(adata)
c1, c2 = _is_bin_mult(k[0]), _is_bin_mult(k[1])
assert c1.transition_matrix == 9
assert c2.transition_matrix == 1
def test_constant_normalize_2(self, adata: AnnData):
k = (
9 * VelocityKernel(adata).compute_transition_matrix(softmax_scale=4)
+ 1 * ConnectivityKernel(adata).compute_transition_matrix()
)
k.compute_transition_matrix()
c1, c2 = _is_bin_mult(k[0]), _is_bin_mult(k[1])
assert c1.transition_matrix == 9 / 10
assert c2.transition_matrix == 1 / 10
def test_constant_normalize_3(self, adata: AnnData):
k = (
VelocityKernel(adata).compute_transition_matrix(softmax_scale=4)
+ ConnectivityKernel(adata).compute_transition_matrix()
+ ConnectivityKernel(adata).compute_transition_matrix()
)
k.compute_transition_matrix()
c1, c2, c3 = _is_bin_mult(k[0]), _is_bin_mult(k[1]), _is_bin_mult(k[2])
assert c1.transition_matrix == 1 / 3
assert c2.transition_matrix == 1 / 3
assert c3.transition_matrix == 1 / 3
def test_constant_wrong_parentheses(self, adata: AnnData):
k = VelocityKernel(adata).compute_transition_matrix(softmax_scale=4) + (
ConnectivityKernel(adata).compute_transition_matrix()
+ ConnectivityKernel(adata).compute_transition_matrix()
)
k.compute_transition_matrix()
c1, c2, c3 = _is_bin_mult(k[0]), _is_bin_mult(k[1]), _is_bin_mult(k[2])
assert c1.transition_matrix == 1 / 3
assert c2.transition_matrix == 1 / 3
assert c3.transition_matrix == 1 / 3
def test_constant_correct_parentheses(self, adata: AnnData):
k = 1 * VelocityKernel(adata).compute_transition_matrix(softmax_scale=4) + 1 * (
ConnectivityKernel(adata).compute_transition_matrix()
+ ConnectivityKernel(adata).compute_transition_matrix()
)
k.compute_transition_matrix()
c1, c2, c3 = (
_is_bin_mult(k[0]),
_is_bin_mult(k[1][1][0]),
_is_bin_mult(k[1][1][1]),
)
assert c1.transition_matrix == 1 / 2
assert c2.transition_matrix == 1 / 2
assert c3.transition_matrix == 1 / 2
def test_adaptive_kernel_constants(self, adata: AnnData):
ck1 = ConnectivityKernel(adata).compute_transition_matrix()
ck1._mat_scaler = np.random.normal(size=(adata.n_obs, adata.n_obs))
ck2 = ConnectivityKernel(adata).compute_transition_matrix()
ck2._mat_scaler = np.random.normal(size=(adata.n_obs, adata.n_obs))
k = (3 * ck1) ^ (1 * ck2)
k.compute_transition_matrix()
assert k[0][0]._value == 3 / 4
assert k[1][0]._value == 1 / 4
def test_adaptive_kernel_complex(self, adata: AnnData):
ck1 = ConnectivityKernel(adata).compute_transition_matrix()
ck1._mat_scaler = np.random.normal(size=(adata.n_obs, adata.n_obs))
ck2 = ConnectivityKernel(adata).compute_transition_matrix()
ck2._mat_scaler = np.random.normal(size=(adata.n_obs, adata.n_obs))
ck3 = ConnectivityKernel(adata).compute_transition_matrix()
ck3._mat_scaler = np.random.normal(size=(adata.n_obs, adata.n_obs))
k = 4 * ((3 * ck1) ^ (1 * ck2)) + 2 * ck3
k.compute_transition_matrix()
assert k[0][0].transition_matrix == 4 / 6
assert k[1][0].transition_matrix == 2 / 6
assert k[0][1][0][0]._value == 3 / 4
assert k[0][1][1][0]._value == 1 / 4
def test_repr(self, adata: AnnData):
rpr = repr(VelocityKernel(adata))
assert rpr == f"<{VelocityKernel.__name__}>"
def test_repr_inv(self, adata: AnnData):
rpr = repr(~VelocityKernel(adata))
assert rpr == f"~<{VelocityKernel.__name__}>"
def test_repr_inv_comb(self, adata: AnnData):
rpr = repr(~(VelocityKernel(adata) + ConnectivityKernel(adata)))
assert (
rpr
== f"~((1 * <{VelocityKernel.__name__}>) + (1 * <{ConnectivityKernel.__name__}>))"
)
def test_str_repr_equiv_no_transition_matrix(self, adata: AnnData):
vk = VelocityKernel(adata)
string = str(vk)
rpr = repr(vk)
assert string == rpr
assert string == f"<{VelocityKernel.__name__}>"
def test_str(self, adata: AnnData):
string = str(ConnectivityKernel(adata).compute_transition_matrix())
assert (
string == f"<{ConnectivityKernel.__name__}[dnorm=True, key=connectivities]>"
)
def test_str_inv(self, adata: AnnData):
string = str(
ConnectivityKernel(adata, backward=True).compute_transition_matrix()
)
assert (
string
== f"~<{ConnectivityKernel.__name__}[dnorm=True, key=connectivities]>"
)
def test_combination_correct_parameters(self, adata: AnnData):
from cellrank.tl.kernels import CosineScheme
k = VelocityKernel(adata).compute_transition_matrix(
softmax_scale=4,
seed=42,
scheme="cosine",
) + (
ConnectivityKernel(adata).compute_transition_matrix(density_normalize=False)
+ ConnectivityKernel(adata).compute_transition_matrix(
density_normalize=True
)
)
k.compute_transition_matrix()
assert isinstance(k.params, dict)
assert len(k.params) == 3
assert {"dnorm": True, "key": "connectivities"} in k.params.values()
assert {"dnorm": False, "key": "connectivities"} in k.params.values()
assert {
"softmax_scale": 4,
"mode": "deterministic",
"seed": 42,
"scheme": str(CosineScheme()),
} in k.params.values()
class TestKernel:
def test_precomputed_not_array(self):
with pytest.raises(TypeError):
_ = PrecomputedKernel([[1, 0], [0, 1]])
def test_precomputed_not_square(self):
with pytest.raises(ValueError):
_ = PrecomputedKernel(np.random.normal(size=(10, 9)))
def test_precomputed_not_a_transition_matrix(self):
mat = random_transition_matrix(100)
mat[0, 0] = 0xDEADBEEF
with pytest.raises(ValueError):
_ = PrecomputedKernel(mat)
def test_precomputed_from_kernel_no_transition(self, adata: AnnData):
vk = VelocityKernel(adata)
with pytest.raises(ValueError):
PrecomputedKernel(vk)
@pytest.mark.parametrize(
"clazz",
[
ConnectivityKernel,
VelocityKernel,
PseudotimeKernel,
CytoTRACEKernel,
PrecomputedKernel,
],
)
@pytest.mark.parametrize("key_added", [None, "foo"])
def test_kernel_reads_correct_connectivities(
self, adata: AnnData, key_added: Optional[str], clazz: type
):
del adata.uns["neighbors"]
del adata.obsp["connectivities"]
del adata.obsp["distances"]
sc.pp.neighbors(adata, key_added=key_added)
kwargs = {"adata": adata, "conn_key": key_added}
if clazz == PseudotimeKernel:
kwargs["time_key"] = "latent_time"
elif clazz == PrecomputedKernel:
adata.obsp["foo"] = np.eye(adata.n_obs)
kwargs["transition_matrix"] = "foo"
conn = (
adata.obsp["connectivities"]
if key_added is None
else adata.obsp[f"{key_added}_connectivities"]
)
k = clazz(**kwargs)
if isinstance(k, PrecomputedKernel):
assert k._conn is None
else:
np.testing.assert_array_equal(k._conn.A, conn.A)
def test_precomputed_from_kernel(self, adata: AnnData):
vk = VelocityKernel(adata).compute_transition_matrix(
mode="deterministic",
softmax_scale=4,
)
pk = PrecomputedKernel(vk)
pk.write_to_adata()
assert pk.adata is vk.adata
assert pk._origin == str(vk).strip("~<>")
assert pk.params is not vk.params
assert pk.params == vk.params
assert pk.transition_matrix is not vk.transition_matrix
np.testing.assert_array_equal(pk.transition_matrix.A, vk.transition_matrix.A)
def test_precomputed_no_adata(self):
pk = PrecomputedKernel(random_transition_matrix(50))
pk.write_to_adata()
assert isinstance(pk.adata, AnnData)
assert pk._origin == "'array'"
assert pk.adata.shape == (50, 1)
assert pk.adata.obs.shape == (50, 0)
assert pk.adata.var.shape == (1, 0)
assert "T_fwd_params" in pk.adata.uns.keys()
assert pk.adata.uns["T_fwd_params"] == {"params": pk.params}
np.testing.assert_array_equal(
pk.adata.obsp["T_fwd"].toarray(), pk.transition_matrix.toarray()
)
def test_precomputed_different_adata(self, adata: AnnData):
vk = VelocityKernel(adata).compute_transition_matrix(
mode="deterministic", softmax_scale=4
)
bdata = adata.copy()
pk = PrecomputedKernel(vk, adata=bdata)
assert pk.adata is adata
assert pk.adata is vk.adata
assert pk.adata is not bdata
def test_precomputed_adata_origin(self, adata: AnnData):
vk = VelocityKernel(adata).compute_transition_matrix(
mode="deterministic", softmax_scale=4
)
vk.write_to_adata("foo")
pk = PrecomputedKernel("foo", adata=adata)
assert pk._origin == "adata.obsp['foo']"
def test_precomputed_adata(self, adata: AnnData):
pk = PrecomputedKernel(random_transition_matrix(adata.n_obs), adata=adata)
assert pk.adata is adata
def test_precomputed_transition_matrix(self, adata: AnnData):
mat = random_transition_matrix(adata.n_obs)
pk = PrecomputedKernel(mat)
np.testing.assert_array_equal(mat, pk.transition_matrix.toarray())
def test_precomputed_sum(self, adata: AnnData):
mat = random_transition_matrix(adata.n_obs)
pk = PrecomputedKernel(mat)
vk = VelocityKernel(adata).compute_transition_matrix(softmax_scale=4)
expected = (0.5 * vk.transition_matrix) + (0.5 * pk.transition_matrix)
actual = (pk + vk).compute_transition_matrix()
np.testing.assert_array_almost_equal(
expected.toarray(), actual.transition_matrix.toarray()
)
@pytest.mark.parametrize("dnorm", [False, True])
@pytest.mark.parametrize("sparse", [False, True])
def test_custom_preserves_type(self, adata: AnnData, sparse: bool, dnorm: bool):
c = CustomKernel(adata).compute_transition_matrix(sparse=sparse, dnorm=dnorm)
if sparse:
assert isspmatrix_csr(c.transition_matrix)
else:
assert isinstance(c.transition_matrix, np.ndarray)
assert c.transition_matrix.dtype == _dtype
def test_write_adata(self, adata: AnnData):
vk = VelocityKernel(adata).compute_transition_matrix(softmax_scale=4)
vk.write_to_adata()
assert adata is vk.adata
assert "T_fwd_params" in adata.uns.keys()
np.testing.assert_array_equal(
adata.obsp["T_fwd"].toarray(), vk.transition_matrix.toarray()
)
def test_write_adata_key(self, adata: AnnData):
vk = VelocityKernel(adata).compute_transition_matrix(softmax_scale=4)
vk.write_to_adata(key="foo")
assert adata is vk.adata
assert "foo_params" in adata.uns.keys()
np.testing.assert_array_equal(
adata.obsp["foo"].toarray(), vk.transition_matrix.toarray()
)
@pytest.mark.parametrize("mode", ["deterministic", "stochastic"])
def test_vk_row_normalized(self, adata: AnnData, mode: str):
if mode == "stochastic":
pytest.importorskip("jax")
pytest.importorskip("jaxlib")
vk = VelocityKernel(adata)
vk.compute_transition_matrix(mode=mode, softmax_scale=4)
np.testing.assert_allclose(vk.transition_matrix.sum(1), 1, rtol=_rtol)
# only to 15 because in kernel, if a row sums to 0, abs. states are created
# this happens because `k_thresh = frac_to_keep = 0`
@pytest.mark.parametrize("k", range(1, 15))
def test_pseudotime_frac_to_keep(self, adata: AnnData, k: int):
conn = _get_neighs(adata, "connectivities")
n_neighbors = _get_neighs_params(adata)["n_neighbors"]
pseudotime = adata.obs["latent_time"]
k_thresh = max(0, min(int(np.floor(n_neighbors / k)) - 1, 30))
frac_to_keep = k_thresh / float(n_neighbors)
conn_biased = bias_knn(
conn.copy(), pseudotime, n_neighbors, k=k, frac_to_keep=frac_to_keep
)
T_1 = _normalize(conn_biased)
pk = PseudotimeKernel(adata, time_key="latent_time").compute_transition_matrix(
frac_to_keep=frac_to_keep,
threshold_scheme="hard",
)
T_2 = pk.transition_matrix
np.testing.assert_allclose(T_1.A, T_2.A, rtol=_rtol)
def test_pseudotime_parallelize(self, adata: AnnData):
pk1 = PseudotimeKernel(adata, time_key="latent_time").compute_transition_matrix(
n_jobs=None
)
pk2 = PseudotimeKernel(adata, time_key="latent_time").compute_transition_matrix(
n_jobs=2
)
np.testing.assert_allclose(
pk1.transition_matrix.A, pk2.transition_matrix.A, rtol=_rtol
)
def test_pseudotime_inverse(self, adata: AnnData):
pk = PseudotimeKernel(adata, time_key="latent_time")
pt = pk.pseudotime.copy()
pk_inv = ~pk
assert pk_inv is pk
assert pk_inv.backward
np.testing.assert_allclose(pt, 1 - pk_inv.pseudotime)
@pytest.mark.parametrize("mode", ["deterministic", "stochastic", "sampling"])
def test_manual_combination(self, adata: AnnData, mode: str):
if mode == "stochastic":
pytest.importorskip("jax")
pytest.importorskip("jaxlib")
vk = VelocityKernel(adata).compute_transition_matrix(mode=mode, softmax_scale=4)
ck = ConnectivityKernel(adata).compute_transition_matrix()
T_vk = vk.transition_matrix
T_ck = ck.transition_matrix
T_comb_manual = 0.8 * T_vk + 0.2 * T_ck
comb_kernel = 0.8 * vk + 0.2 * ck
T_comb_kernel = comb_kernel.transition_matrix
np.testing.assert_allclose(T_comb_kernel.A, T_comb_manual.A, rtol=_rtol)
def test_manual_combination_no_precomputed(self, adata: AnnData):
density_normalize = False
vk = VelocityKernel(adata).compute_transition_matrix(softmax_scale=4)
ck = ConnectivityKernel(adata).compute_transition_matrix(
density_normalize=density_normalize
)
T_vk = vk.transition_matrix
T_ck = ck.transition_matrix
T_comb_manual = 0.8 * T_vk + 0.2 * T_ck
vk = VelocityKernel(adata).compute_transition_matrix(softmax_scale=4)
ck = ConnectivityKernel(adata).compute_transition_matrix(
density_normalize=density_normalize
)
comb_kernel = 0.8 * vk + 0.2 * ck
comb_kernel.compute_transition_matrix()
T_comb_kernel = comb_kernel.transition_matrix
np.testing.assert_allclose(T_comb_manual.A, T_comb_kernel.A, rtol=_rtol)
def test_manual_combination_backward(self, adata: AnnData):
backward, density_normalize = True, False
vk = VelocityKernel(adata, backward=backward).compute_transition_matrix(
softmax_scale=4
)
ck = ConnectivityKernel(adata, backward=backward).compute_transition_matrix(
density_normalize=density_normalize
)
T_vk = vk.transition_matrix
T_ck = ck.transition_matrix
T_comb_manual = 0.8 * T_vk + 0.2 * T_ck
comb_kernel = 0.8 * vk + 0.2 * ck
T_comb_kernel = comb_kernel.transition_matrix
np.testing.assert_allclose(T_comb_manual.A, T_comb_kernel.A, rtol=_rtol)
def test_manual_combination_backward_dense_norm(self, adata: AnnData):
backward, density_normalize = True, True
vk = VelocityKernel(adata, backward=backward).compute_transition_matrix(
softmax_scale=4
)
ck = ConnectivityKernel(adata, backward=backward).compute_transition_matrix(
density_normalize=density_normalize
)
T_vk = vk.transition_matrix
T_ck = ck.transition_matrix
T_comb_manual = 0.8 * T_vk + 0.2 * T_ck
comb_kernel = 0.8 * vk + 0.2 * ck
T_comb_kernel = comb_kernel.transition_matrix
np.testing.assert_allclose(T_comb_manual.A, T_comb_kernel.A, rtol=_rtol)
def compare_with_scanpy_density_normalize(self, adata: AnnData):
# check whether cellrank's transition matrix matches scanpy's
density_normalize = True
ck = ConnectivityKernel(adata).compute_transition_matrix(
density_normalize=density_normalize
)
T_cr = ck.transition_matrix
neigh = Neighbors(adata)
neigh.compute_transitions(density_normalize=density_normalize)
T_sc = neigh.transitions
# check whether these are the same while leaving them sparse
assert T_sc.shape == T_cr.shape
assert len(T_sc.indices) == len(T_cr.indices)
assert np.allclose((T_cr - T_sc).data, 0)
def compare_with_scanpy(self, adata: AnnData):
# check whether cellrank's transition matrix matches scanpy's
density_normalize = False
ck = ConnectivityKernel(adata).compute_transition_matrix(
density_normalize=density_normalize
)
T_cr = ck.transition_matrix
neigh = Neighbors(adata)
neigh.compute_transitions(density_normalize=density_normalize)
T_sc = neigh.transitions
# check whether these are the same while leaving them sparse
assert T_sc.shape == T_cr.shape
assert len(T_sc.indices) == len(T_cr.indices)
assert np.allclose((T_cr - T_sc).data, 0)
def test_connectivities_key_kernel(self, adata: AnnData):
key = "foobar"
assert key not in adata.obsp
adata.obsp[key] = np.eye(adata.n_obs)
ck = ConnectivityKernel(adata, conn_key=key).compute_transition_matrix()
T_cr = ck.transition_matrix
assert key == ck.params["key"]
np.testing.assert_array_equal(T_cr, adata.obsp[key])
del adata.obsp[key]
class TestKernelAddition:
def test_simple_addition(self, adata: AnnData):
vk, ck = create_kernels(adata) # diagonal + upper diag
k = (vk + ck).compute_transition_matrix()
expected = np.eye(adata.n_obs) * 0.75 + np.eye(adata.n_obs, k=1) * 0.25
expected[-1, -1] = 1
np.testing.assert_allclose(k.transition_matrix.A, expected)
def test_addtion_with_constant(self, adata: AnnData):
vk, ck = create_kernels(adata) # diagonal + upper diag
k = (2 * vk + 3 * ck).compute_transition_matrix()
expected = (
np.eye(adata.n_obs) * (2 / 5)
+ np.eye(adata.n_obs) * (3 / 5) * 0.5
+ np.eye(adata.n_obs, k=1) * (3 / 5) * 0.5
)
expected[-1, -1] = 1
np.testing.assert_allclose(k.transition_matrix.A, expected)
def test_addition_3_kernels(self, adata: AnnData):
vk, ck = create_kernels(adata) # diagonal + upper diag
vk1 = VelocityKernel(adata)
vk1._transition_matrix = np.eye(adata.n_obs, k=-1) / 2 + np.eye(adata.n_obs) / 2
vk1._transition_matrix[0, 0] = 1
np.testing.assert_allclose(
np.sum(ck._transition_matrix, axis=1), 1
) # sanity check
k = (vk + ck + vk1).compute_transition_matrix()
expected = (
np.eye(adata.n_obs) * (1 / 3 + 1 / 6 + 1 / 6)
+ np.eye(adata.n_obs, k=1) * 1 / 6
+ np.eye(adata.n_obs, k=-1) * 1 / 6
)
expected[0, 0] = expected[-1, -1] = 2 / 3 + 1 / 3 * 0.5
expected[0, 1] = expected[-1, -2] = 1 - expected[0, 0]
np.testing.assert_allclose(k.transition_matrix.A, expected)
def test_addition_adaptive(self, adata: AnnData):
adata.obsp["velocity_variances"] = vv = np.random.random(
size=(adata.n_obs, adata.n_obs)
)
adata.obsp["connectivity_variances"] = cv = np.random.random(
size=(adata.n_obs, adata.n_obs)
)
vk, ck = create_kernels(
adata,
velocity_variances="velocity_variances",
connectivity_variances="connectivity_variances",
)
k = vk ^ ck
expected = _normalize(
0.5 * vv * vk.transition_matrix + 0.5 * cv * ck.transition_matrix
)
np.testing.assert_allclose(k.transition_matrix.A, expected)
def test_addition_adaptive_constants(self, adata: AnnData):
a, b = np.random.uniform(0, 10, 2)
s = a + b
adata.obsp["velocity_variances"] = vv = np.random.random(
size=(adata.n_obs, adata.n_obs)
)
adata.obsp["connectivity_variances"] = cv = np.random.random(
size=(adata.n_obs, adata.n_obs)
)
vk, ck = create_kernels(
adata,
velocity_variances="velocity_variances",
connectivity_variances="connectivity_variances",
)
k = a * vk ^ b * ck
expected = _normalize(
a / s * vv * vk.transition_matrix + b / s * cv * ck.transition_matrix
)
np.testing.assert_allclose(k.transition_matrix.A, expected)
def test_addition_adaptive_wrong_variances(self, adata: AnnData):
a, b = np.random.uniform(0, 10, 2)
s = a + b
adata.obsp["velocity_variances"] = np.random.random(
size=(adata.n_obs, adata.n_obs)
)
adata.obsp["connectivity_variances"] = np.random.random(
size=(adata.n_obs, adata.n_obs)
)
vk, ck = create_kernels(
adata,
velocity_variances="velocity_variances",
connectivity_variances="connectivity_variances",
)
k = a * vk ^ b * ck
expected = _normalize(
a / s * vk.transition_matrix + b / s * ck.transition_matrix
)
assert not np.allclose(k.transition_matrix.A, expected.A)
def test_addition_adaptive_4_kernels(self, adata: AnnData):
a, b, c, d = np.random.uniform(0, 10, 4)
s = a + b + c + d
adata.obsp["velocity_variances"] = vv = np.random.random(
size=(adata.n_obs, adata.n_obs)
)
adata.obsp["connectivity_variances"] = cv = np.random.random(
size=(adata.n_obs, adata.n_obs)
)
vk, ck = create_kernels(
adata,
velocity_variances="velocity_variances",
connectivity_variances="connectivity_variances",
)
vk1, ck1 = create_kernels(
adata,
velocity_variances="velocity_variances",
connectivity_variances="connectivity_variances",
)
k = a * vk ^ b * ck ^ c * vk1 ^ d * ck1
expected = _normalize(
a / s * vv * vk.transition_matrix
+ b / s * cv * ck.transition_matrix
+ c / s * vv * vk1.transition_matrix
+ d / s * cv * ck1.transition_matrix
)
np.testing.assert_allclose(k.transition_matrix.A, expected)
class TestKernelCopy:
def test_copy_simple(self, adata: AnnData):
vk1 = VelocityKernel(adata)
vk2 = vk1.copy()
assert vk1 is not vk2
def test_copy_no_adata_copy(self, adata: AnnData):
vk1 = VelocityKernel(adata)
vk2 = vk1.copy()
assert vk1.adata is adata
assert vk2.adata is adata
def test_copy_transition_matrix(self, adata: AnnData):
vk1 = VelocityKernel(adata).compute_transition_matrix(softmax_scale=4)
vk2 = vk1.copy()
np.testing.assert_array_equal(vk1.transition_matrix.A, vk2.transition_matrix.A)
def test_copy_params(self, adata: AnnData):
vk1 = VelocityKernel(adata).compute_transition_matrix(softmax_scale=4)
vk2 = vk1.copy()
assert vk1.params == vk2.params
def test_copy_cond_num(self, adata: AnnData):
for KernelClass in [
VelocityKernel,
ConnectivityKernel,
PseudotimeKernel,
PrecomputedKernel,
]:
if KernelClass is PrecomputedKernel:
k1 = KernelClass(
random_transition_matrix(adata.n_obs), compute_cond_num=True
)
elif KernelClass is VelocityKernel:
k1 = KernelClass(
adata, compute_cond_num=True
).compute_transition_matrix(softmax_scale=4)
else:
k1 = KernelClass(
adata, compute_cond_num=True
).compute_transition_matrix()
k2 = k1.copy()
assert k1.condition_number == k2.condition_number
def test_copy_velocity_kernel(self, adata: AnnData):
vk1 = VelocityKernel(adata).compute_transition_matrix(softmax_scale=4)
vk2 = vk1.copy()
np.testing.assert_array_equal(vk1.transition_matrix.A, vk2.transition_matrix.A)
np.testing.assert_array_equal(vk1.logits.A, vk2.logits.A)
assert vk1.params == vk2.params
assert vk1.backward == vk2.backward
def test_copy_connectivity_kernel(self, adata: AnnData):
ck1 = ConnectivityKernel(adata).compute_transition_matrix()
ck2 = ck1.copy()
np.testing.assert_array_equal(ck1.transition_matrix.A, ck2.transition_matrix.A)
assert ck1.params == ck2.params
assert ck1.backward == ck2.backward
def test_copy_palantir_kernel(self, adata: AnnData):
pk1 = PseudotimeKernel(adata).compute_transition_matrix()
pk2 = pk1.copy()
np.testing.assert_array_equal(pk1.transition_matrix.A, pk2.transition_matrix.A)
assert pk1.params == pk2.params
assert pk1.backward == pk2.backward
def test_copy_works(self, adata: AnnData):
ck1 = ConnectivityKernel(adata)
ck2 = ck1.copy()
ck1.compute_transition_matrix()
assert (
ck1._transition_matrix is not None
) # calling the property would trigger the calculation
assert ck2._transition_matrix is None
class TestGeneral:
def test_kernels(self, adata: AnnData):
vk = VelocityKernel(adata)
assert len(vk.kernels) == 1
assert vk.kernels[0] is vk
def test_kernels_multiple(self, adata: AnnData):
vk = VelocityKernel(adata)
ck = ConnectivityKernel(adata)
v = vk + ck
assert len(v.kernels) == 2
assert vk in v.kernels
assert ck in v.kernels
def test_kernels_multiple_constant(self, adata: AnnData):
vk = VelocityKernel(adata)
ck = ConnectivityKernel(adata)
v = 100 * vk + 42 * ck
assert len(v.kernels) == 2
assert vk in v.kernels
assert ck in v.kernels
def test_kernels_unique(self, adata: AnnData):
vk = VelocityKernel(adata)
v = vk + vk + vk + vk
assert len(v.kernels) == 1
assert v.kernels[0] is vk
def test_no_comp_cond_num(self, adata: AnnData):
vk = VelocityKernel(adata).compute_transition_matrix(softmax_scale=4)
assert vk.condition_number is None
def test_comp_cond_num(self, adata: AnnData):
vk = VelocityKernel(adata, compute_cond_num=True).compute_transition_matrix(
softmax_scale=4
)
assert isinstance(vk.condition_number, float)
def test_comp_cond_num_or_policy(self, adata: AnnData):
vk = VelocityKernel(adata, compute_cond_num=True).compute_transition_matrix(
softmax_scale=4
)
ck = ConnectivityKernel(
adata, compute_cond_num=False
).compute_transition_matrix()
v = (vk + ck).compute_transition_matrix()
assert isinstance(vk.condition_number, float)
assert ck.condition_number is None
assert isinstance(v.condition_number, float)
class TestTransitionProbabilities:
def test_pearson_correlations_fwd(self, adata: AnnData):
# test whether pearson correlations in cellrank match those from scvelo, forward case
backward = False
# compute pearson correlations using scvelo
velo_graph = adata.obsp["velocity_graph"] + adata.obsp["velocity_graph_neg"]
# compute pearson correlations using cellrank
vk = VelocityKernel(adata, backward=backward)
vk.compute_transition_matrix(mode="deterministic", softmax_scale=4)
pearson_correlations_cr = vk.logits
pc_r = velo_graph.copy()
pc_r.data = np.array(pearson_correlations_cr[(velo_graph != 0)]).squeeze()
assert np.max(np.abs((pc_r - velo_graph).data)) < _rtol
def test_pearson_correlations_bwd(self, adata: AnnData):
# test whether pearson correlations in cellrank match those from scvelo, backward case
backward = True
# compute pearson correlations using scvelo
velo_graph = (adata.obsp["velocity_graph"] + adata.obsp["velocity_graph_neg"]).T
# compute pearson correlations using cellrank
vk = VelocityKernel(adata, backward=backward)
vk.compute_transition_matrix(
mode="deterministic", backward_mode="transpose", softmax_scale=4
)
pearson_correlations_cr = vk.logits
pc_r = velo_graph.copy()
pc_r.data = np.array(pearson_correlations_cr[(velo_graph != 0)]).squeeze()
assert np.max(np.abs((pc_r - velo_graph.T).data)) < _rtol
def test_transition_probabilities_fwd(self, adata: AnnData):
# test whether transition probabilities in cellrank match those from scvelo, forward case
sigma_test = 3
# compute transition probabilities using cellrank
vk = VelocityKernel(adata)
vk.compute_transition_matrix(softmax_scale=sigma_test, mode="deterministic")
T_cr = vk.transition_matrix
pearson_correlation = vk.logits
T_exp = np.expm1(pearson_correlation * sigma_test)
T_exp.data += 1
T_exp = _normalize(T_exp)
np.testing.assert_allclose(T_exp.A, T_cr.A) # don't use data, can be reordered
def test_transition_probabilities_bwd(self, adata: AnnData):
# test whether transition probabilities in cellrank match those from scvelo, backward case
sigma_test = 3
# compute transition probabilities using cellrank
vk = VelocityKernel(adata, backward=True)
vk.compute_transition_matrix(softmax_scale=sigma_test, mode="deterministic")
T_cr = vk.transition_matrix
pearson_correlation = vk.logits
T_exp = np.expm1(pearson_correlation * sigma_test)
T_exp.data += 1
T_exp = _normalize(T_exp)
np.testing.assert_allclose(T_exp.A, T_cr.A) # don't use data, can be reordered
def test_estimate_softmax_scale(self, adata: AnnData):
vk = VelocityKernel(adata)
vk.compute_transition_matrix(
mode="deterministic", show_progress_bar=False, softmax_scale=None
)
assert isinstance(vk.params["softmax_scale"], float)
class TestMonteCarlo:
def test_mc_and_mc_fwd_1k(self, adata: AnnData):
vk_mc = VelocityKernel(adata, backward=False)
vk_mc.compute_transition_matrix(
mode="monte_carlo",
show_progress_bar=False,
n_samples=1000,
n_jobs=4,
softmax_scale=4,
)
vk_s = VelocityKernel(adata, backward=False)
vk_s.compute_transition_matrix(
mode="monte_carlo",
show_progress_bar=False,
n_samples=1000,
n_jobs=4,
softmax_scale=4,
)
val = np.mean(
np.abs(vk_mc.transition_matrix.data - vk_s.transition_matrix.data)
)
assert val < 1e-5, val
def test_monte_carlo_5k(self, adata: AnnData):
vk_mc = VelocityKernel(adata, backward=False)
vk_mc.compute_transition_matrix(
mode="monte_carlo",
show_progress_bar=False,
n_samples=5000,
n_jobs=4,
softmax_scale=4,
seed=42,
)
vk_s = VelocityKernel(adata, backward=False)
vk_s.compute_transition_matrix(
mode="monte_carlo",
show_progress_bar=False,
n_samples=5000,
n_jobs=4,
softmax_scale=4,
seed=43,
)
val = np.mean(
np.abs(vk_mc.transition_matrix.data - vk_s.transition_matrix.data)
)
assert val < 1e-5, val
@jax_not_installed_skip
def test_monte_carlo_and_stochastic(self, adata: AnnData):
vk_mc = VelocityKernel(adata, backward=False)
vk_mc.compute_transition_matrix(
mode="monte_carlo",
show_progress_bar=False,
n_samples=1000,
n_jobs=4,
softmax_scale=4,
)
vk_s = VelocityKernel(adata, backward=False)
vk_s.compute_transition_matrix(
mode="stochastic", show_progress_bar=False, n_jobs=4, softmax_scale=4
)
val = np.mean(
np.abs(vk_mc.transition_matrix.data - vk_s.transition_matrix.data)
)
assert val < 1e-3, val
class TestVelocityScheme:
def test_invalid_string_key(self, adata: AnnData):
vk = VelocityKernel(adata)
with pytest.raises(ValueError):
vk.compute_transition_matrix(scheme="foobar")
def test_not_callable(self, adata: AnnData):
vk = VelocityKernel(adata)
with pytest.raises(
TypeError, match="Expected `scheme` to be a function, found"
):
vk.compute_transition_matrix(scheme=1311)
def test_custom_function_not_sum_to_1(self, adata: AnnData):
vk = VelocityKernel(adata)
with pytest.raises(ValueError, match=r"Matrix is not row-stochastic."):
vk.compute_transition_matrix(scheme=InvalidFuncProbs())
def test_custom_function_invalid_hessian(self, adata: AnnData):
vk = VelocityKernel(adata)
with pytest.raises(ValueError, match=r"Expected full Hessian matrix"):
vk.compute_transition_matrix(
mode="stochastic", scheme=InvalidFuncHessianShape(), softmax_scale=4
)
@pytest.mark.parametrize("backward", [True, False])
def test_implementations_differ(self, adata: AnnData, backward: bool):
vk_dot = VelocityKernel(adata, backward=backward)
vk_dot.compute_transition_matrix(
mode="deterministic", softmax_scale=4, scheme="dot_product"
)
vk_cos = VelocityKernel(adata, backward=backward)
vk_cos.compute_transition_matrix(
mode="deterministic", softmax_scale=4, scheme="cosine"
)
vk_cor = VelocityKernel(adata, backward=backward)
vk_cor.compute_transition_matrix(
mode="deterministic", softmax_scale=4, scheme="correlation"
)
np.testing.assert_allclose(vk_dot.transition_matrix.sum(1), 1.0)
        np.testing.assert_allclose(vk_cos.transition_matrix.sum(1), 1.0)
np.testing.assert_allclose(vk_cor.transition_matrix.sum(1), 1.0)
assert not np.allclose(vk_dot.transition_matrix.A, vk_cos.transition_matrix.A)
assert not np.allclose(vk_cos.transition_matrix.A, vk_cor.transition_matrix.A)
assert not np.allclose(vk_cor.transition_matrix.A, vk_dot.transition_matrix.A)
@pytest.mark.parametrize(
"key,fn",
zip(
["dot_product", "cosine", "correlation"],
[
cr.tl.kernels.DotProductScheme(),
cr.tl.kernels.CosineScheme(),
cr.tl.kernels.CorrelationScheme(),
],
),
)
def test_function_and_string_key(self, adata: AnnData, key: str, fn: Callable):
vk_k = VelocityKernel(adata)
vk_fn = VelocityKernel(adata)
vk_k.compute_transition_matrix(
mode="deterministic", softmax_scale=4, scheme=key
)
vk_fn.compute_transition_matrix(
mode="deterministic", softmax_scale=4, scheme=fn
)
np.testing.assert_allclose(vk_k.transition_matrix.A, vk_fn.transition_matrix.A)
@pytest.mark.parametrize("backward", [True, False])
def test_custom_function(self, adata: AnnData, backward: bool):
vk = VelocityKernel(adata, backward=backward)
vk.compute_transition_matrix(
mode="deterministic", softmax_scale=4, scheme=CustomFuncHessian()
)
assert vk.params["scheme"] == str(CustomFuncHessian())
def test_custom_function_stochastic_no_hessian(self, adata: AnnData):
vk = VelocityKernel(adata)
vk.compute_transition_matrix(
mode="stochastic", scheme=CustomFunc(), softmax_scale=4, n_samples=10
)
assert vk.params["mode"] == "monte_carlo"
assert vk.params["scheme"] == str(CustomFunc())
class TestComputeProjection:
def test_no_transition_matrix(self, adata: AnnData):
with pytest.raises(RuntimeError, match=r"Compute transition matrix first as"):
cr.tl.kernels.ConnectivityKernel(adata).compute_projection()
def test_no_basis(self, adata: AnnData):
ck = cr.tl.kernels.ConnectivityKernel(adata).compute_transition_matrix()
with pytest.raises(KeyError, match=r"Unable to find a basis in"):
ck.compute_projection(basis="foo")
def test_basis_prefix(self, adata: AnnData):
ck = cr.tl.kernels.ConnectivityKernel(adata).compute_transition_matrix()
ck.compute_projection(basis="X_umap")
@pytest.mark.parametrize("write_first", [True, False])
def test_write_to_adata(self, adata: AnnData, write_first: bool):
ck = cr.tl.kernels.ConnectivityKernel(adata).compute_transition_matrix()
if write_first:
ck.write_to_adata()
ck.compute_projection(basis="umap")
else:
ck.compute_projection(basis="umap")
ck.write_to_adata()
assert adata.uns[Key.uns.kernel(ck.backward) + "_params"] == {
"params": ck.params,
"embeddings": ["umap"],
}
@pytest.mark.parametrize("key_added", [None, "foo"])
def test_key_added(self, adata: AnnData, key_added: Optional[str]):
ck = cr.tl.kernels.ConnectivityKernel(adata).compute_transition_matrix()
ck.compute_projection(basis="umap", copy=False, key_added=key_added)
key = Key.uns.kernel(ck.backward, key=key_added)
ukey = f"{key}_params"
key = f"{key}_umap"
assert adata.uns[ukey] == {"embeddings": ["umap"]}
np.testing.assert_array_equal(adata.obsm[key].shape, adata.obsm["X_umap"].shape)
@pytest.mark.parametrize("copy", [True, False])
def test_copy(self, adata: AnnData, copy: bool):
ck = cr.tl.kernels.ConnectivityKernel(adata).compute_transition_matrix()
res = ck.compute_projection(basis="umap", copy=copy)
if copy:
assert isinstance(res, np.ndarray)
np.testing.assert_array_equal(res.shape, adata.obsm["X_umap"].shape)
else:
assert res is None
key = Key.uns.kernel(ck.backward) + "_umap"
np.testing.assert_array_equal(
adata.obsm[key].shape, adata.obsm["X_umap"].shape
)
def test_nan_in_embedding(self, adata: AnnData):
adata.obsm["X_umap"][-1] = np.nan
ck = cr.tl.kernels.ConnectivityKernel(adata).compute_transition_matrix()
res = ck.compute_projection(basis="umap", copy=True)
        assert not np.all(np.isnan(res))
# Copyright (C) 2020 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions
# and limitations under the License.
import mmcv
import numpy as np
import pycocotools.mask as mask_util
from ..utils.misc import to_numpy
def mask2result(det_bboxes,
det_labels,
det_masks,
num_classes,
mask_thr_binary=0.5,
rle=True,
full_size=True,
img_size=None):
masks = to_numpy(det_masks)
bboxes = to_numpy(det_bboxes, np.int32)[:, :4]
labels = to_numpy(det_labels, np.int32)
cls_masks = [[] for _ in range(num_classes - 1)]
for bbox, label, mask in zip(bboxes, labels, masks):
w = max(bbox[2] - bbox[0] + 1, 1)
h = max(bbox[3] - bbox[1] + 1, 1)
mask = mmcv.imresize(mask, (w, h))
mask = (mask > mask_thr_binary).astype(np.uint8)
if full_size:
assert img_size is not None
            im_mask = np.zeros(img_size[:2], dtype=np.uint8)
# FUNCTIONS FOR MODEL EVALUATION.
import matplotlib.pyplot as plt
import numpy as np
from sklearn.utils import resample
from sklearn.preprocessing import MinMaxScaler
from sklearn.model_selection import train_test_split
from sklearn.model_selection import cross_val_score
from sklearn.metrics import mean_squared_error, accuracy_score
from sklearn.preprocessing import MinMaxScaler
################# MODEL EVALUATION WITH TRAINING/VALIDATION/TEST
# Function that computes the score of a given model on the training set, validation set and test set.
# The model and the dataset (X, y) are passed as input. The features in X can optionally be scaled with MinMaxScaler. It is also possible to specify
# the test-set size, the random state used to split into training and test, and the number of folds for cross-validation. Finally, it can be specified
# whether the task is regression or classification.
# X has shape (n_instances, n_features); y has shape (n_instances,).
def compute_train_val_test(model ,X ,y ,scale=False ,test_size=0.2 ,random_state=123, cv=5 ,regr=True):
scoring=""
if regr:
scoring="neg_mean_squared_error"
else:
scoring="accuracy"
    if(scale): # Scale the features in X
scaler = MinMaxScaler()
scaler.fit(X)
X = scaler.transform(X)
    # Split into training and test.
X_train_80, X_test, y_train_80, y_test = train_test_split(X, y, test_size=test_size, random_state=random_state)
# Cross validation
scores = cross_val_score(model, X_train_80, y_train_80, cv=cv, scoring=scoring)
    val_acc = scores.mean() # score on the validation set
if regr:
val_acc = -val_acc
    model.fit(X_train_80,y_train_80) # Fit using the whole training set.
    # Compute the score on the training set and on the test set.
train_acc=0
test_acc=0
if regr:
train_acc = mean_squared_error(y_true=y_train_80, y_pred=model.predict(X_train_80))
test_acc = mean_squared_error(y_true=y_test, y_pred=model.predict(X_test))
else:
train_acc = accuracy_score(y_true=y_train_80, y_pred=model.predict(X_train_80))
test_acc = accuracy_score(y_true=y_test, y_pred=model.predict(X_test))
    return train_acc, val_acc, test_acc # Return a triple: score on training, validation, test.
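# --- Added usage sketch (not part of the original file) ---------------------------------------
# Minimal example of how compute_train_val_test could be called; the dataset and the Ridge model
# below are illustrative placeholders, not taken from the original code.
def _demo_compute_train_val_test():
    from sklearn.datasets import load_diabetes
    from sklearn.linear_model import Ridge
    X, y = load_diabetes(return_X_y=True)
    train_mse, val_mse, test_mse = compute_train_val_test(
        Ridge(alpha=1.0), X, y, scale=True, test_size=0.2, cv=5, regr=True)
    print(f"train={train_mse:.3f}  val={val_mse:.3f}  test={test_mse:.3f}")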
# Function that, given a list of models, computes for each of them the score on training/validation/test. It returns a list where for each model the
# training/validation/test scores (a triple) are stored, and it also returns the index of the best model: the best model is the one with the best
# score on the validation set.
# The function receives the list of models to evaluate and the full dataset (X, y). It is also possible to pass:
# - scale, test_size, random_state, cv, regr --> as explained above
# - plotta --> tells the function whether to draw the plot of the model evaluation
# - plottaTrain --> specifies whether the plot should also show the score of the models on the training set
# - plottaTest --> specifies whether the plot should also show the score of the models on the test set
# - xvalues --> specifies the values to put on the x axis
# - xlabel --> specifies the label to put on the x axis
# - title --> specifies the title to give to the plot
def model_selection_TrainValTest(model_list, X, y, scale=False, test_size=0.2, random_state=123, cv=5, regr=True, plotta=False, plottaTrain=False,
plottaTest=False, xvalues=None, xlabel="Complessità", title="Valutazione modelli con Training/Validation/Test"):
    if(scale): # Scale the features in X, if requested.
scaler = MinMaxScaler()
scaler.fit(X)
X = scaler.transform(X)
    # List trainValTest_list: it will contain, for each model, the triple of scores on training/validation/test.
trainValTest_list = []
    # Compute the scores for each model.
for model in model_list:
trainValTest_list.append(list(compute_train_val_test(model ,X, y, False, test_size, random_state, cv, regr=regr)))
    trainValTest_list = np.array(trainValTest_list) # to numpy
    if(plotta): # Draw the plot
        if(xvalues is None): # Default values on the x axis
xvalues = range(len(model_list))
fig, ax = plt.subplots(figsize=(6,6))
        if plottaTrain: # Also draw the score on the training set.
            ax.plot(xvalues,trainValTest_list[:,0], 'o:', label='Train')
        ax.plot(xvalues,trainValTest_list[:,1], 'o:', label='Validation') # Validation score
        if plottaTest: # Also draw the score on the test set.
ax.plot(xvalues,trainValTest_list[:,2], 'o:', label='Test')
ax.set_xlabel(xlabel)
ax.set_title(title)
ax.grid()
ax.legend()
    # Return a pair: the list of train/val/test scores for each model, and the index of the model with the best score on the validation set.
    if regr: # regression
        return trainValTest_list, np.argmin(trainValTest_list,axis=0)[1]
    return trainValTest_list, np.argmax(trainValTest_list,axis=0)[1] # classification
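# --- Added usage sketch (not part of the original file) ---------------------------------------
# Selecting among Ridge models of increasing regularisation strength; the alpha values label the
# x axis of the plot. Dataset and models are illustrative placeholders.
def _demo_model_selection_TrainValTest():
    from sklearn.datasets import load_diabetes
    from sklearn.linear_model import Ridge
    X, y = load_diabetes(return_X_y=True)
    alphas = [0.01, 0.1, 1.0, 10.0, 100.0]
    models = [Ridge(alpha=a) for a in alphas]
    scores, best = model_selection_TrainValTest(
        models, X, y, scale=True, regr=True, plotta=True, xvalues=alphas, xlabel="alpha")
    print("Best alpha according to the validation score:", alphas[best])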
########## MODEL EVALUATION WITH BIAS/VARIANCE/ERROR
# Function that computes the bias, the variance and the error of a given model.
# The model and the dataset on which to perform the evaluation (X, y) are passed as input. It is also possible to specify whether the features in X
# must be scaled using MinMaxScaler, as well as the number of tests used to compute bias/variance/error and the size of each sample relative to the
# full dataset.
# X has shape (n_instances, n_features); y has shape (n_instances,).
def compute_bias_variance_error(model ,X ,y ,scale=False ,N_TESTS = 20 ,sample_size=0.67):
    # Scale X if requested
if(scale):
scaler = MinMaxScaler()
scaler.fit(X)
X = scaler.transform(X)
    # Array "vector_ypred": in the end it will be a matrix with as many rows as N_TESTS (each row corresponds to one sample) and as many columns as
    # there are points in X (each column is a point of the dataset).
    # Row i ---> the predictions made by the model fitted on sample i over all points of the dataset
    # Column j ---> the predictions made on point j by all the N_TESTS samples.
vector_ypred = []
    # Iterate over N_TESTS. At each iteration the model is fitted on the specific sample i.
for i in range(N_TESTS):
        # Take a sample of the dataset.
Xs, ys = resample(X,y, n_samples=int(sample_size*len(y)) )
        # Fit the model on sample i.
model.fit(Xs,ys)
        # Add the predictions made by the model on all points of the dataset.
vector_ypred.append(list(model.predict(X)))
    vector_ypred = np.array(vector_ypred) # to numpy
    # Array with as many elements as there are points in the dataset; each entry holds the bias of that point computed over the N_TESTS samples.
    vector_bias = (y - np.mean(vector_ypred, axis=0))**2
    # Array with as many elements as there are points in the dataset; each entry holds the variance of that point computed over the N_TESTS samples.
    vector_variance = np.var(vector_ypred, axis=0)
    # Array with as many elements as there are points in the dataset; each entry holds the error of that point computed over the N_TESTS samples.
    vector_error = np.sum((vector_ypred - y)**2, axis=0)
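    # --- Added note (not part of the original file) -------------------------------------------
    # The original function is cut off at this point. A plausible completion (an assumption, not
    # the author's code) would aggregate the per-point quantities and return them, e.g.:
    # return np.mean(vector_bias), np.mean(vector_variance), np.mean(vector_error) / N_TESTS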
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Mar 18 11:01:22 2020
@author: twallema
Copyright (c) 2020 by <NAME>, BIOMATH, Ghent University. All Rights Reserved.
"""
import numpy as np
import pandas as pd
from random import choices
import scipy
from scipy.integrate import odeint
import math
import models
import networkx
from scipy import interpolate as inter
from gekko import GEKKO
def sampleFromDistribution(filename,k):
df = pd.read_csv(filename)
x = df.iloc[:,0]
y = df.iloc[:,1]
return(np.asarray(choices(x, y, k = k)))
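# --- Added usage sketch (not part of the original file) ---------------------------------------
# sampleFromDistribution draws k weighted samples from an empirical distribution stored as a
# two-column CSV (values, weights). 'incubation_period.csv' is a placeholder file name.
# samples = sampleFromDistribution('incubation_period.csv', k=1000)
# print(samples.mean(), samples.std())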
def runSimulation(initN,beta,sigmavect,Nc,zeta,smvect,mvect,hvect,cvect,dsm,dm,dhospitalvect,dh,dcfvect,dcrvect,mc0,ICU,theta_S,theta_E,theta_SM,theta_M,theta_R,totalTests,psi_FP,psi_PP,dq,phi_S,phi_E,phi_SM,phi_R,initE,
initSM, initM, initH, initC,initHH,initCH,initR,initF,initSQ,initEQ,initSMQ,initMQ,initRQ,simtime,monteCarlo,method,modelType,checkpoints,**stoArgs):
tN = simtime + 1
if monteCarlo == True:
n_samples = dcfvect.size
S = np.zeros([tN,n_samples])
E = np.zeros([tN,n_samples])
SM = np.zeros([tN,n_samples])
M = np.zeros([tN,n_samples])
H = np.zeros([tN,n_samples])
C = np.zeros([tN,n_samples])
HH = np.zeros([tN,n_samples])
CH = np.zeros([tN,n_samples])
R = np.zeros([tN,n_samples])
F = np.zeros([tN,n_samples])
        SQ = np.zeros([tN,n_samples])
import numpy as np
import pandas as pd
from scipy import stats
import itertools
import time
import pdb
from operator import itemgetter
from sklearn.metrics.pairwise import cosine_similarity
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LinearRegression
import statsmodels.formula.api as sm
import statsmodels.formula.api as smf
import matplotlib.pyplot as plt
import seaborn as sns
def intervention_order(num_features, text_i2r=False, numeric=False):
if text_i2r or numeric:
intervention_inds_numeric = []
intervention_inds = []
for f_i in range(2 ** num_features):
bitstr = to_bitstring(f_i, num_features)
# Indices where where we have 0 in this bitstring correspond
# to features where an intervention has been performed (i.e., feature is
# assigned to input value).
intervention_inds.append(str([i for i, b in enumerate(bitstr) if b == '0']))
if text_i2r or numeric:
intervention_inds_numeric.append([i for i, b in enumerate(bitstr) if b == '0'])
if text_i2r:
return intervention_inds, intervention_inds_numeric
if numeric:
return intervention_inds_numeric
return intervention_inds
def to_bitstring(num, num_bits):
# Returns the bitstring corresponding to (base 10) number 'num'
bitstr = bin(num)[2:] # 2: to remove the initial '0b'
# Append '0's at the beginning to make it of length 'num_bits'
return ''.join(['0' for _ in range(num_bits - len(bitstr))]) + bitstr
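# --- Added illustration (not part of the original file) ---------------------------------------
# With num_features = 2 the 2**2 = 4 bitstrings are '00', '01', '10', '11'; zeros mark the
# features that are intervened on (set to the input value), so:
# >>> to_bitstring(1, 2)
# '01'
# >>> intervention_order(2)
# ['[0, 1]', '[0]', '[1]', '[]']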
def close_or_distant_neighbours(df, inp, col_name, like=True, perc=0.1):
similars = df[col_name].apply(lambda x: cosine_similarity(x.reshape(1, -1), inp[col_name].item().reshape(1, -1)))
if like:
chosen = similars.sort_values(ascending=False)[:int(len(similars) * perc)]
else:
chosen = similars.sort_values(ascending=True)[:int(len(similars) * perc)]
return chosen.index
def create_CF(inp, refs, clf, num_features, MAD_features_cost,
r2i=True, datatype='Tabular', raw_text=False, causal_SCM=None,
col_con=None, col_cat=None, predict=False):
np.random.seed(42)
CF = {'cfs': [], 'intervention_sets': [], 'cardinality': [],
'original': [], 'cost': [], 'model_pred': []}
intervention_order_ids = intervention_order(num_features)
cardinalities = [len(eval(x)) for x in intervention_order_ids]
# Keep track of col types for cost computation
if not col_con:
col_con = [i for i in range(num_features)
if (isinstance(inp.iloc[0, i], int) | isinstance(inp.iloc[0, i], float))]
col_cat = list(set(range(num_features)) - set(col_con))
# construction of interventions
for ind, ref in refs.iterrows():
cfs_ref = []
if r2i:
if not causal_SCM:
cfs_ref = list(itertools.product(*zip(np.array(inp)[0][:num_features],
np.array(ref)[:num_features])))
else:
for interv_ind in intervention_order_ids:
original_values = pd.DataFrame(ref).T
# This line is to prevent from applying intervention that are the same as origin. values
# Note that order of columns in diff_values_intervention are different than original
# b/c sets do not respect order, but all access in sample_from_SCM is invariant to column order
if interv_ind != '[]':
intervention_values = inp.iloc[:, eval(interv_ind)].to_dict('records')[0].items()
diff_values_intervention = \
pd.DataFrame.from_dict(dict(intervention_values -
(original_values.to_dict('records')[0].items() &
intervention_values)), orient='index').T
else:
diff_values_intervention = pd.DataFrame([])
cfs_ref.append(sample_from_SCM(diff_values_intervention, original_values, causal_SCM))
CF['original'] += [str(ref.values[:num_features])] * (2 ** num_features)
else:
if not causal_SCM:
cfs_ref = list(itertools.product(*zip(np.array(ref)[:num_features],
np.array(inp)[0][:num_features])))
else:
for interv_ind in intervention_order_ids:
original_values = inp
# This block is to prevent from applying intervention that are the same as origin. values
# Note that order of columns in diff_values_intervention are different than original
# b/c sets do not respect order, but all access in sample_from_SCM is invariant to column order
if interv_ind != '[]':
intervention_values = pd.DataFrame(ref[eval(interv_ind)]).T.to_dict('records')[0].items()
diff_values_intervention = \
pd.DataFrame.from_dict(dict(intervention_values -
(original_values.to_dict('records')[0].items() &
intervention_values)), orient='index').T
else:
diff_values_intervention = pd.DataFrame([])
cfs_ref.append(sample_from_SCM(diff_values_intervention, original_values, causal_SCM))
CF['original'] += [str(inp.values[0][:num_features])] * (2 ** num_features)
CF['cfs'] += cfs_ref
# for raw text, just interested in text rep of possible interventions
if not raw_text:
# otherwise, compute model preds, cost, etc.
# mark intervention targets
CF['intervention_sets'] += intervention_order_ids
CF['cardinality'] += cardinalities
# obtain model prediction for CFs
if predict:
if datatype == 'Text':
CF['model_pred'].extend(clf.predict(
np.array(cfs_ref).reshape(len(cfs_ref), -1)))
elif datatype == 'Dice':
hstacked = np.hstack(np.hstack(
np.array(cfs_ref, dtype=object))).reshape(len(cfs_ref), -1)
CF['model_pred'].extend((clf.predict(hstacked) >= 0.5) * 1.)
else:
CF['model_pred'].extend(clf.predict(cfs_ref))
# cost computation
if r2i:
if not causal_SCM:
costs = cost(cfs_ref, ref.values[:num_features],
MAD_features_cost, col_con, col_cat,
l1_MAD=True, datatype=datatype)
else:
# This block is to prevent assigning cost to downstream effects of interventions
intervention_inds = intervention_order(num_features, numeric=True)
intervention_inds_len = len(intervention_inds)
cost_mask = np.zeros((intervention_inds_len, num_features))
for i, intervention in enumerate(intervention_inds):
cost_mask[i, intervention] = 1.
ref_tiled = np.tile(ref.values[:num_features], (intervention_inds_len, 1))
cfs_ref_masked_w_ref = np.where(cost_mask == 0, ref_tiled, np.array(cfs_ref))
costs = cost(cfs_ref_masked_w_ref, ref.values[:num_features],
MAD_features_cost, col_con, col_cat,
l1_MAD=True, datatype=datatype)
else:
if not causal_SCM:
costs = cost(cfs_ref, inp.values[0][:num_features],
MAD_features_cost, col_con, col_cat,
l1_MAD=True, datatype=datatype)
else:
# This block is to prevent assigning cost to downstream effects of interventions
intervention_inds = intervention_order(num_features, numeric=True)
intervention_inds_len = len(intervention_inds)
cost_mask = np.zeros((intervention_inds_len, num_features))
for i, intervention in enumerate(intervention_inds):
cost_mask[i, intervention] = 1.
inp_tiled = np.tile(inp.values[0][:num_features], (intervention_inds_len, 1))
cfs_ref_masked_w_inp = np.where(cost_mask == 0, inp_tiled, np.array(cfs_ref))
costs = cost(cfs_ref_masked_w_inp, inp.values[0][:num_features],
MAD_features_cost, col_con, col_cat,
l1_MAD=True, datatype=datatype)
CF['cost'] += list(costs)
CF_df = pd.DataFrame(CF['cfs'], columns=inp.columns[:num_features])
if not raw_text:
CF_df['Original'] = CF['original']
CF_df['Intervention_index'] = CF['intervention_sets']
if predict:
CF_df['Model_pred'] = CF['model_pred']
CF_df['Cost'] = CF['cost']
CF_df['Cardinality'] = CF['cardinality']
return CF_df
# Causal model fitting and predicting
def fit_scm(dataset):
np.random.seed(42)
# Age and Sex are root nodes and don't need fitting
# Job
job_fn = RandomForestClassifier()
job_fn.fit(np.vstack((dataset['Age'].values,
dataset['Sex'].values)).reshape(-1, 2),
dataset['Job'].values)
# Savings
savings_fn = smf.ols(formula="Savings ~ Age + Sex + Job", data=dataset).fit()
savings_rmse = np.sqrt(np.mean(savings_fn.resid ** 2))
# Housing
housing_fn = RandomForestClassifier()
housing_fn.fit(np.vstack((dataset['Job'].values,
dataset['Savings'].values)).reshape(-1, 2),
dataset['Housing'].values)
# Checking
checking_fn = smf.ols(formula="Checking ~ Job + Savings", data=dataset).fit()
checking_rmse = np.sqrt(np.mean(checking_fn.resid ** 2))
# Credit
credit_fn = smf.ols(formula="Credit ~ Age + Job + Housing", data=dataset).fit()
credit_rmse = np.sqrt(np.mean(credit_fn.resid ** 2))
# Duration
duration_fn = smf.ols(formula="Duration ~ Credit + Savings", data=dataset).fit()
duration_rmse = np.sqrt(np.mean(duration_fn.resid ** 2))
# Purpose
purpose_fn = RandomForestClassifier()
purpose_fn.fit(np.vstack((dataset['Age'].values, dataset['Housing'].values,
dataset['Credit'].values, dataset['Duration'].values)).reshape(-1, 4),
dataset['Purpose'].values)
return {'job_fn': job_fn, 'savings_fn': savings_fn, 'savings_rmse': savings_rmse,
'housing_fn': housing_fn, 'checking_fn': checking_fn, 'checking_rmse': checking_rmse,
'credit_fn': credit_fn, 'credit_rmse': credit_rmse, 'duration_fn': duration_fn,
'duration_rmse': duration_rmse, 'purpose_fn': purpose_fn}
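# --- Added usage sketch (not part of the original file) ---------------------------------------
# 'credit_df' is a placeholder DataFrame with the columns used above (Age, Sex, Job, Savings,
# Housing, Checking, Credit, Duration, Purpose).
# scm = fit_scm(credit_df)
# cf = sample_from_SCM(intervention_values=pd.DataFrame({'Savings': [5000.0]}),
#                      original_values=credit_df.iloc[[0]],
#                      SCM_model=scm)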
def sample_from_SCM(intervention_values, original_values, SCM_model, n=1):
intervened = 0
# Age
if 'Age' in intervention_values.columns:
age_SCM = intervention_values['Age'].item()
intervened = 1
else:
age_SCM = original_values['Age'].item()
# Sex
if 'Sex' in intervention_values.columns:
sex_SCM = intervention_values['Sex'].item()
intervened = 1
else:
sex_SCM = original_values['Sex'].item()
# Job
if 'Job' in intervention_values.columns:
job_SCM = intervention_values['Job'].item()
intervened = 1
else:
if intervened == 0:
job_SCM = original_values['Job'].item()
else:
predict_proba_job = SCM_model['job_fn'].predict_proba(
np.vstack((age_SCM, sex_SCM)).reshape(-1, 2))
job_SCM = np.random.choice(len(predict_proba_job.squeeze(0)),
1, p=predict_proba_job.squeeze(0)).item()
# Savings
if 'Savings' in intervention_values.columns:
savings_SCM = intervention_values['Savings'].item()
intervened = 1
else:
if intervened == 0:
savings_SCM = original_values['Savings'].item()
else:
savings_SCM = (SCM_model['savings_fn'].predict(
exog=dict(Age=age_SCM, Sex=sex_SCM, Job=job_SCM)).item() +
np.random.normal(scale=SCM_model['savings_rmse'], size=n))[0]
if savings_SCM < 0:
savings_SCM = 0.
# Housing
if 'Housing' in intervention_values.columns:
housing_SCM = intervention_values['Housing'].item()
intervened = 1
else:
if intervened == 0:
housing_SCM = original_values['Housing'].item()
else:
predict_proba_housing = SCM_model['housing_fn'].predict_proba(
np.vstack((job_SCM, savings_SCM)).reshape(-1, 2))
housing_SCM = np.random.choice(len(predict_proba_housing.squeeze(0)),
1, p=predict_proba_housing.squeeze(0)).item()
# Checking
if 'Checking' in intervention_values.columns:
checking_SCM = intervention_values['Checking'].item()
intervened = 1
else:
if intervened == 0:
checking_SCM = original_values['Checking'].item()
else:
checking_SCM = (SCM_model['checking_fn'].predict(
exog=dict(Job=job_SCM, Savings=savings_SCM)).item() +
                np.random.normal(scale=SCM_model['checking_rmse'], size=n))[0]
import numpy as np
import numpy.random as npr
import scipy as sc
from scipy import stats
from scipy.special import logsumexp
from scipy.stats import multivariate_normal as mvn
from scipy.stats import invwishart
from sds.utils.stats import multivariate_normal_logpdf as lg_mvn
from sds.utils.general import linear_regression, one_hot
from sds.distributions.categorical import Categorical
from sds.distributions.gaussian import StackedGaussiansWithPrecision
from sds.distributions.gaussian import StackedGaussiansWithDiagonalPrecision
from sds.distributions.lingauss import StackedLinearGaussiansWithPrecision
from sds.distributions.gaussian import GaussianWithPrecision
from sds.distributions.gaussian import GaussianWithDiagonalPrecision
from sklearn.preprocessing import PolynomialFeatures
from functools import partial
from operator import mul
import copy
class InitCategoricalState:
def __init__(self, nb_states, **kwargs):
self.nb_states = nb_states
self.pi = 1. / self.nb_states * np.ones(self.nb_states)
@property
def params(self):
return self.pi
@params.setter
def params(self, value):
self.pi = value
def permute(self, perm):
self.pi = self.pi[perm]
def initialize(self):
pass
def likeliest(self):
return np.argmax(self.pi)
def sample(self):
return npr.choice(self.nb_states, p=self.pi)
def log_init(self):
return np.log(self.pi)
def mstep(self, p, **kwargs):
eps = kwargs.get('eps', 1e-8)
pi = sum([_p[0, :] for _p in p]) + eps
self.pi = pi / sum(pi)
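# --- Added usage sketch (not part of the original file) ---------------------------------------
# init_state = InitCategoricalState(nb_states=3)
# z0 = init_state.sample()        # one of {0, 1, 2}, drawn from the (initially uniform) pi
# log_pi = init_state.log_init()  # shape (3,): log of the initial state distribution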
class InitGaussianObservation:
def __init__(self, nb_states, obs_dim, act_dim, nb_lags=1, **kwargs):
assert nb_lags > 0
self.nb_states = nb_states
self.obs_dim = obs_dim
self.act_dim = act_dim
self.nb_lags = nb_lags
# self.mu = npr.randn(self.nb_states, self.obs_dim)
# self._sigma_chol = 5. * npr.randn(self.nb_states, self.obs_dim, self.obs_dim)
self.mu = np.zeros((self.nb_states, self.obs_dim))
self._sigma_chol = np.zeros((self.nb_states, self.obs_dim, self.obs_dim))
for k in range(self.nb_states):
_sigma = invwishart.rvs(self.obs_dim + 1, np.eye(self.obs_dim))
self._sigma_chol[k] = np.linalg.cholesky(_sigma * np.eye(self.obs_dim))
self.mu[k] = mvn.rvs(mean=None, cov=1e2 * _sigma, size=(1, ))
@property
def sigma(self):
return np.matmul(self._sigma_chol, np.swapaxes(self._sigma_chol, -1, -2))
@sigma.setter
def sigma(self, value):
self._sigma_chol = np.linalg.cholesky(value + 1e-8 * np.eye(self.obs_dim))
@property
def params(self):
return self.mu, self._sigma_chol
@params.setter
def params(self, value):
self.mu, self._sigma_chol = value
def permute(self, perm):
self.mu = self.mu[perm]
self._sigma_chol = self._sigma_chol[perm]
def initialize(self, x, **kwargs):
x0 = np.vstack([_x[:self.nb_lags] for _x in x])
self.mu = np.array([np.mean(x0, axis=0) for k in range(self.nb_states)])
self.sigma = np.array([np.cov(x0, rowvar=False) for k in range(self.nb_states)])
def mean(self, z):
return self.mu[z]
def sample(self, z):
x = mvn(mean=self.mean(z), cov=self.sigma[z]).rvs()
return np.atleast_1d(x)
def log_likelihood(self, x):
if isinstance(x, np.ndarray):
x0 = x[:self.nb_lags]
log_lik = np.zeros((x0.shape[0], self.nb_states))
for k in range(self.nb_states):
log_lik[:, k] = lg_mvn(x0, self.mean(k), self.sigma[k])
return log_lik
else:
return list(map(self.log_likelihood, x))
def mstep(self, p, x, **kwargs):
x0, p0 = [], []
for _x, _p in zip(x, p):
x0.append(_x[:self.nb_lags])
p0.append(_p[:self.nb_lags])
J = np.zeros((self.nb_states, self.obs_dim))
h = np.zeros((self.nb_states, self.obs_dim))
for _x, _p in zip(x0, p0):
J += np.sum(_p[:, :, None], axis=0)
h += np.sum(_p[:, :, None] * _x[:, None, :], axis=0)
self.mu = h / J
sqerr = np.zeros((self.nb_states, self.obs_dim, self.obs_dim))
norm = np.zeros((self.nb_states, ))
for _x, _p in zip(x0, p0):
resid = _x[:, None, :] - self.mu
sqerr += np.sum(_p[:, :, None, None] * resid[:, :, None, :]
* resid[:, :, :, None], axis=0)
norm += np.sum(_p, axis=0)
self.sigma = sqerr / norm[:, None, None]
def smooth(self, p, x):
if all(isinstance(i, np.ndarray) for i in [p, x]):
p0 = p[:self.nb_lags]
return p0.dot(self.mu)
else:
return list(map(self.smooth, p, x))
class InitGaussianControl:
def __init__(self, nb_states, obs_dim, act_dim,
nb_lags=1, degree=1, **kwargs):
assert nb_lags > 0
self.nb_states = nb_states
self.obs_dim = obs_dim
self.act_dim = act_dim
self.nb_lags = nb_lags
self.degree = degree
self.feat_dim = int(sc.special.comb(self.degree + self.obs_dim, self.degree)) - 1
self.basis = PolynomialFeatures(self.degree, include_bias=False)
# self.K = npr.randn(self.nb_states, self.act_dim, self.feat_dim)
# self.kff = npr.randn(self.nb_states, self.act_dim)
# self._sigma_chol = 5. * npr.randn(self.nb_states, self.act_dim, self.act_dim)
self.K = np.zeros((self.nb_states, self.act_dim, self.feat_dim))
self.kff = np.zeros((self.nb_states, self.act_dim))
self._sigma_chol = np.zeros((self.nb_states, self.act_dim, self.act_dim))
for k in range(self.nb_states):
_sigma = invwishart.rvs(self.act_dim + 1, np.eye(self.act_dim))
self._sigma_chol[k] = np.linalg.cholesky(_sigma * np.eye(self.act_dim))
self.K[k] = mvn.rvs(mean=None, cov=1e2 * _sigma, size=(self.feat_dim, )).T
self.kff[k] = mvn.rvs(mean=None, cov=1e2 * _sigma, size=(1, ))
@property
def sigma(self):
        return np.matmul(self._sigma_chol, np.swapaxes(self._sigma_chol, -1, -2))
import sys
import itertools
from typing import Tuple
import numpy as np
from tqdm import tqdm
from sklearn.ensemble import RandomForestRegressor
from sklearn.model_selection import cross_validate, train_test_split
from scipy.special import xlogy
def select_rf(z: np.ndarray, t: np.ndarray, random_state: int):
assert z.ndim == 2
assert z.shape[0] == t.shape[0]
batch_size, z_dim = z.shape
_, t_dim = t.shape
# t = (t >= t_threshold).astype(np.uint8)
hyperparameters = {
"max_depth": [2, 4, 8, 12],
"n_estimators": [10],
"n_jobs": [-1],
}
names = list(hyperparameters.keys())
indices = itertools.product(*hyperparameters.values())
best_score, best_kwargs = float("inf"), {}
for index in list(indices):
kwargs = dict(zip(names, index))
estimator = RandomForestRegressor(**kwargs, random_state=random_state)
scores = []
for i in tqdm(range(t_dim)):
score_i_cv = cross_validate(estimator, z, t[:,i],
scoring="neg_mean_squared_error", n_jobs=-1, cv=5)
score_i = -np.mean(score_i_cv["test_score"])
scores.append(score_i)
entire_score = np.mean(scores)
if best_score > entire_score:
best_score = entire_score
best_kwargs = kwargs
print(f"[classifier.py: select_rf()] {entire_score:.6f} in {kwargs}", file=sys.stderr)
print(f"[classifier.py: select_rf()] {best_score:.6f} in {best_kwargs}", file=sys.stderr)
return RandomForestRegressor(**best_kwargs)
def disentanglement(gini: np.ndarray) -> float:
z_dim, t_dim = gini.shape
p = np.abs(gini)
p = p / np.maximum(1e-12, np.sum(p, axis=1, keepdims=True))
d = 1 + np.sum(xlogy(p, p) / np.log(t_dim), axis=1)
rho = np.sum(gini, axis=1) / np.maximum(1e-12, np.sum(gini))
return float(np.sum(rho * d))
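# --- Added worked example (not part of the original file) -------------------------------------
# For gini = [[1, 0], [0, 1]] each latent is informative about exactly one factor: every row of
# p has zero entropy, so d = 1 for both rows; the weights rho are 0.5 each and the score is 1.0.
# For a uniform gini = [[.5, .5], [.5, .5]] every row has maximal entropy, d = 0 and the score
# is 0.0.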
def completeness(gini: np.ndarray) -> np.ndarray:
z_dim, t_dim = gini.shape
p = np.abs(gini)
p = p / np.maximum(1e-12, np.sum(p, axis=0, keepdims=True))
d = 1 + np.sum(xlogy(p, p) / np.log(z_dim), axis=0)
return d
def explicitness(pred_error: np.ndarray) -> np.ndarray:
return np.maximum(0, 1 - 6 * pred_error)
def dci_score(z: np.ndarray, t: np.ndarray, random_state: int = 42) -> Tuple[float, np.ndarray, np.ndarray]:
assert z.ndim == 2
assert z.shape[0] == t.shape[0]
batch_size, z_dim = z.shape
_, t_dim = t.shape
z_train, z_test, t_train, t_test = train_test_split(z, t, test_size=0.33, random_state=random_state)
estimator = select_rf(z_train, t_train, random_state)
gini = np.empty(shape=(z_dim, t_dim), dtype=np.float32)
pred_error = np.empty(shape=(t_dim, ), dtype=np.float32)
for i in tqdm(range(t_dim)):
estimator.fit(z_train, t_train[:,i])
square_error = (estimator.predict(z_test) - t_test[:,i]) ** 2
pred_error[i] = np.mean(square_error)
gini[:,i] = estimator.feature_importances_
return disentanglement(gini), completeness(gini), explicitness(pred_error)
# unit tests
if __name__ == "__main__":
print("independent case:")
t = np.random.uniform(size=(2048, 10))
z = np.random.normal(size=(2048, 10))
d, c, i = dci_score(z, t)
print(d, c.mean(), i.mean())
print("dependent case:")
t = np.random.uniform(size=(2048, 10))
z = t @ np.random.normal(size=(10, 10))
d, c, i = dci_score(z, t)
print(d, c.mean(), i.mean())
print("perfect case:")
t = np.random.uniform(size=(2048, 10))
z = t
d, c, i = dci_score(z, t)
print(d, c.mean(), i.mean())
print("lo-mod hi-com:")
t = np.random.uniform(size=(2048, 10))
z = np.random.normal(size=(2048, 10)) * 2 - 1
z[:,:1] = t @ np.random.normal(size=(10, 1))
d, c, i = dci_score(z, t)
print(d, c.mean(), i.mean())
print("hi-mod lo-com:")
    t = np.random.uniform(size=(2048, 3))
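    # --- Added note (not part of the original file) -------------------------------------------
    # The "hi-mod lo-com" case is cut off here. By analogy with the previous cases it would
    # build a z with more latents than factors and score it, e.g. (an assumption):
    # z = np.repeat(t, 4, axis=1)[:, :10]
    # d, c, i = dci_score(z, t)
    # print(d, c.mean(), i.mean())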
from numba import cuda
import numpy as np
from numba.cuda.testing import SerialMixin
import unittest
class TestCudaAutojit(SerialMixin, unittest.TestCase):
def test_device_array(self):
@cuda.autojit
def foo(x, y):
i = cuda.grid(1)
y[i] = x[i]
x = np.arange(10)
        y = np.empty_like(x)
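        # --- Added note (not part of the original file) ---------------------------------------
        # The test body is cut off here. A typical continuation would launch the kernel and
        # check that y received a copy of x, e.g. (an assumption):
        # foo[1, x.size](x, y)
        # np.testing.assert_array_equal(x, y)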
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
kuehbach at fhi - berlin . mpg . de
parser for file formats
processtype: post-processing/analyze
methodgroup: aptfim
methodtype: parser
methodvariant: rrng file format, ###MK::give more detail what this format is
2021/03/01
"""
import os, sys, glob, re
import numpy as np
from aptfimleapparser.utils.nomad4exp_keyvalue_struct import *
from aptfimleapparser.utils.nomad4exp_process_aptfim_utils_ranging import *
class rrng_range():
def __init__(self, i, line, ionnm_dict, *args, **kwargs):
#i = 1
#line = txt_stripped[fp]
#line = txt_stripped[85]
self.id = None
self.mqmin = None
self.mqmax = None
self.comp = []
self.vol = None
self.color = None
tmp = re.split(r'[\s=]+', line)
if tmp[0] == 'Range' + str(i) and len(tmp) >= 6:
self.id = 'Range' + str(i)
else:
raise ValueError('RRNG file is corrupted in line for ' + line + ' for range keyword and/or insufficient number of key-value pairs !')
###MK::color field is an optional entry
###MK::D. <NAME>son et al. 2013 book p253 report that
###MK::mqmin, mqmax, vol, ion composition is required, name and color fields are optional
###MK::'Range6 = 106.1250 213.4110 vol:0.00000 Name: Noise Color:0000FF'
###MK::'Range7 = 42.8160 43.3110 vol:0.04543 Al:1 O:1 Name: AlOLikely Color:00FFFF'
###MK::the Name: field is optional for giving ranges custom names.
###MK::custom ion names cannot be used to define ion types
if tmp[-1].lower().startswith('color:') and len(re.split(r':', tmp[-1])[1]) == 6: ##MK::make more robust to handle optionality of color and
self.color = '#' + re.split(r':', tmp[-1])[1]
#HEX_COLOR_REGEX = r'^([A-Fa-f0-9]{6}|[A-Fa-f0-9]{3})$' #replace r'^#( ...
#regexp = re.compile(HEX_COLOR_REGEX)
#if regexp.search(tmp[-1].split(r':')):
# return True
#return False
else:
raise ValueError('RRNG file is corrupted in line ' + line + ' for color keyword !')
if np.float64(tmp[1]) > 0.0 and np.float64(tmp[2]) > 0.0:
if (np.float64(tmp[2]) - np.float64(tmp[1])) > np.float64(MQ_EPSILON):
self.mqmin = tmp[1]
self.mqmax = tmp[2]
else:
raise ValueError('RRNG file is corrupted in line ' + line + ' for mqmin <= mqmax !')
else:
raise ValueError('RRNG file is corrupted in line ' + line + ' for mqmax and/or mqmin <= 0.0 !')
if tmp[3].lower().startswith('vol:'):
V = re.split(r':', tmp[3])[1]
if np.float64(V) > 0.0:
self.vol = V
else:
Warning('RRNG file is corrupted in line ' + line + ' for volume <= 0.0 !')
else:
raise ValueError('RRNG file is corrupted in line ' + line + ' for volume keyword !')
components = tmp[4:-1]
for c in components:
name_mult = re.split(r':+', c)
if name_mult[0] in ionnm_dict['metadata']['ionnames'].values() and np.uint8(name_mult[1]) > 0:
for j in np.arange(0,int(name_mult[1])):
self.comp.append( name_mult[0] )
class n4e_parser_aptfim_io_read_rrng():
def __init__(self, fn, *args, **kwargs):
"""
reads all content from a RRNG file
in: string fn, name of file to be read
out: class object instance representing content of rrng file
"""
#specify which information content, (meta)data-wise an RRNG file holds to parse for/inform nomad
self.a = {}
self.a['metadata'] = {}
self.a['metadata']['ionnames'] = {}
self.a['metadata']['ranges'] = {}
self.a['metadata']['species'] = {}
#self.a['metadata']['colors'] = {}
#read the ASCII textfile RRNG format and interpret it
#fnm = '/home/kuehbach/GITHUB/FAIRMAT-PARSER/fairmat_areab_parser/tutorials/aptfim/examples/usa_richland_pnnl/R31_06365-v02.rrng'
#fnm = '/home/kuehbach/GITHUB/FAIRMAT-PARSER/fairmat_areab_parser/tutorials/aptfim/examples/deu_duesseldorf_mpie/Se Ho Beispiel R5076_44076-v02.rrng'
#fnm = '/home/kuehbach/FHI_FHI_FHI/Paper/xxxx_ParaprobeAnalyticsAsAFairMatPlugin/research/aus_sydney_rielli_primig/R04_22071.RRNG'
fnm = fn
with open( fnm, mode='r', encoding='utf8' ) as rrngf:
txt = rrngf.read()
#replace eventual windows line breaks with unix line feeds, ##MK::required for the splitting in next line to work
txt = txt.replace('\r\n', '\n')
#replace eventual comma decimal points by dots
txt = txt.replace(',', '.')
#strip empty lines and lines with comments (see <NAME>. Larson et al. book 2013)
txt_stripped = [line for line in txt.split('\n') if line.strip() != '' and '#' not in line]
del txt
#the equivalent of a file respective line pointer, here an index which line we process
fp = 0
#parse ion names including real periodic table element names, as well as user specified ranges
###MK::needed because sometimes experimentalists hijack the RRNG file format to define custom names for post-processing certain regions of
###MK::a mass-to-charge-state spectrum
#a = {}
#a['metadata'] = {}
#a['metadata']['ionnames'] = {}
if txt_stripped[fp] == '[Ions]':
fp += 1
tmp = re.split(r'[\s=]+', txt_stripped[fp])
if tmp[0] == 'Number' and np.int64(tmp[1]) > 0:
Nions = int(tmp[1])
fp += 1
for i in np.arange(0,Nions):
tmp = re.split(r'[\s=]+', txt_stripped[fp+i])
if tmp[0] == 'Ion' + str(i+1) and type(tmp[1]) == str and len(tmp[1]) > 0:
self.a['metadata']['ionnames'][tmp[0]] = tmp[1]
else:
raise ValueError('RRNG file is corrupted because Ion*= line is incorrectly formatted !')
fp += Nions
else:
raise ValueError('RRNG file is corrupted because [Ions] Number=* line is incorrectly formatted !')
else:
raise ValueError('RRNG file is corrupted because [Ions] list header is not at expected position !')
#parse range specifications
#a['metadata']['ranges'] = {}
if txt_stripped[fp] == '[Ranges]':
fp += 1
tmp = re.split(r'[\s=]+', txt_stripped[fp])
if tmp[0] == 'Number' and np.int64(tmp[1]) > 0:
Nranges = int(tmp[1])
fp += 1
for i in np.arange(0,Nranges):
obj = None
obj = rrng_range(i+1, txt_stripped[fp+i], self.a)
if obj != None:
self.a['metadata']['ranges'][obj.id] = obj
else:
raise ValueError('RRNG file is corrupted because Range*= line is incorrectly formatted !')
fp += Nranges
else:
raise ValueError('RRNG file is corrupted because [Ranges] Number=* line is incorrectly formatted !')
else:
raise ValueError('RRNG file is corrupted because [Ranges] list header is not at expected position !')
#build NOMAD ion species
#b = aa.a
#b['metadata']['species'] = {}
#iontype_id = 1
#usertype_id = 1
element_symbols = [] #create lookup table for known elements
element_z = []
for el in pse.elements:
element_symbols.append(el.symbol)
element_z.append(el.number)
for obj in self.a['metadata']['ranges'].values():
#obj = b['metadata']['ranges']['Range65']
#create an identifier
hashvector = np.empty(0, dtype=np.uint16())
for c in obj.comp:
if c in element_symbols:
nprotons = element_z[element_symbols.index(c)]
nneutrons = 0 ###MK::rrng stores no isotope pieces of information
else:
raise Warning('Skipping user type for now !')
hashvector = np.append( hashvector, hash_isotope(nprotons, nneutrons) )
            hashvector = np.flip(np.sort(hashvector, kind='stable'))
"""
Module for defining function spaces in the Fourier family
"""
import sympy as sp
import numpy as np
from mpi4py_fft import fftw
from shenfun.spectralbase import SpectralBase, Transform, islicedict, slicedict
from shenfun.optimization.cython import convolve
__all__ = ['FourierBase', 'R2C', 'C2C']
#pylint: disable=method-hidden, no-member, line-too-long, arguments-differ
class FourierBase(SpectralBase):
r"""Fourier base class
A basis function :math:`\phi_k` is given as
.. math::
\phi_k(x) = \exp(ikx)
and an expansion is given as
.. math::
:label: u
u(x) = \sum_k \hat{u}_k \exp(ikx)
where
.. math::
k = -N/2, -N/2+1, ..., N/2-1
However, since :math:`\exp(ikx) = \exp(i(k \pm N)x)` this expansion can
also be written as an interpolator
.. math::
:label: u2
u(x) = \sum_k \frac{\hat{u}_k}{c_k} \exp(ikx)
where
.. math::
k = -N/2, -N/2+1, ..., N/2-1, N/2
and :math:`c_{N/2} = c_{-N/2} = 2`, whereas :math:`c_k = 1` for
:math:`k=-N/2+1, ..., N/2-1`. Furthermore,
:math:`\hat{u}_{N/2} = \hat{u}_{-N/2}`.
The interpolator form is used for computing odd derivatives. Otherwise,
it makes no difference and therefore :eq:`u` is used in transforms, since
this is the form expected by fftw.
The inner product is defined as
.. math::
(u, v) = \frac{1}{L} \int_{0}^{L} u \overline{v} dx
where :math:`\overline{v}` is the complex conjugate of :math:`v`, and
:math:`L` is the length of the (periodic) domain.
Parameters
----------
N : int
Number of quadrature points. Should be even for efficiency, but
this is not required.
padding_factor : float, optional
Factor for padding backward transforms. padding_factor=1.5
corresponds to a 3/2-rule for dealiasing.
domain : 2-tuple of floats, optional
The computational domain.
dtype : data-type, optional
dealias_direct : bool, optional
True for dealiasing using 2/3-rule. Must be used with
padding_factor = 1.
coordinates: 2- or 3-tuple (coordinate, position vector (, sympy assumptions)), optional
Map for curvilinear coordinatesystem.
The new coordinate variable in the new coordinate system is the first item.
Second item is a tuple for the Cartesian position vector as function of the
new variable in the first tuple. Example::
theta = sp.Symbols('x', real=True, positive=True)
rv = (sp.cos(theta), sp.sin(theta))
"""
def __init__(self, N, padding_factor=1, domain=(0, 2*np.pi), dtype=float,
dealias_direct=False, coordinates=None):
self._k = None
self._planned_axes = None # Collapsing of axes means that this base can be used to plan transforms over several collapsed axes. Store the axes planned for here.
SpectralBase.__init__(self, N, dtype=dtype, padding_factor=padding_factor, dealias_direct=dealias_direct, domain=domain, coordinates=coordinates)
@staticmethod
def family():
return 'fourier'
@staticmethod
def boundary_condition():
return 'Periodic'
def points_and_weights(self, N=None, map_true_domain=False, weighted=True, **kw):
if N is None:
N = self.shape(False)
points = np.arange(N, dtype=float)*2*np.pi/N
if map_true_domain is True:
points = self.map_true_domain(points)
if weighted:
return points, np.array([1/N])
return points, np.array([2*np.pi/N])
def sympy_basis(self, i=0, x=sp.symbols('x', real=True)):
k = self.wavenumbers(False, False, False)
return sp.exp(sp.I*k[i]*x)
def weight(self, x=sp.symbols('x', real=True)):
return 1/(2*sp.pi)
def evaluate_basis(self, x=None, i=0, output_array=None):
if x is None:
x = self.mesh(False, False)
x = np.atleast_1d(x)
if output_array is None:
output_array = np.zeros(x.shape, dtype=complex)
if self._k is None:
self._k = self.wavenumbers(bcast=False)
k = self._k[i]
output_array[:] = np.exp(1j*x*k)
return output_array
def evaluate_basis_derivative(self, x=None, i=0, k=0, output_array=None):
output_array = self.evaluate_basis(x, i, output_array)
l = self._k[i]
output_array *= ((1j*l)**k)
return output_array
def vandermonde(self, x):
k = self.wavenumbers(bcast=False)
x = np.atleast_1d(x)
return np.exp(1j*x[:, np.newaxis]*k[np.newaxis, :])
def evaluate_basis_derivative_all(self, x=None, k=0):
V = self.evaluate_basis_all(x=x)
if k > 0:
l = self.wavenumbers(bcast=False, scaled=False, eliminate_highest_freq=False)
V = V*((1j*l)**k)[np.newaxis, :]
return V
# Reimplemented for efficiency (smaller array in *= when truncated)
def forward(self, input_array=None, output_array=None, fast_transform=True):
if fast_transform is False:
return SpectralBase.forward(self, input_array, output_array, False)
if input_array is not None:
self.forward.input_array[...] = input_array
self.forward.xfftn()
self._truncation_forward(self.forward.tmp_array,
self.forward.output_array)
M = self.get_normalization()
self.forward._output_array *= M
self.apply_inverse_mass(self.forward.output_array)
if output_array is not None:
output_array[...] = self.forward.output_array
return output_array
return self.forward.output_array
def apply_inverse_mass(self, array):
coors = self.tensorproductspace.coors if self.tensorproductspace else self.coors
if not coors.is_cartesian: # mass matrix may not be diagonal, or there is scaling
return SpectralBase.apply_inverse_mass(self, array)
return array
def _evaluate_scalar_product(self, fast_transform=True):
if fast_transform is False:
SpectralBase._evaluate_scalar_product(self)
return
output = self.scalar_product.xfftn()
output *= self.get_normalization()
def reference_domain(self):
return (0., 2*np.pi)
def sympy_reference_domain(self):
return (0, 2*sp.pi)
@property
def is_orthogonal(self):
return True
def get_orthogonal(self):
return self
def get_unplanned(self):
return self.__class__(self.N,
domain=self.domain,
padding_factor=1,
dealias_direct=False,
coordinates=self.coors.coordinates)
def get_dealiased(self, padding_factor=1.5, dealias_direct=False):
return self.__class__(self.N,
domain=self.domain,
padding_factor=padding_factor,
dealias_direct=dealias_direct,
coordinates=self.coors.coordinates)
def get_refined(self, N):
return self.__class__(N,
domain=self.domain,
padding_factor=self.padding_factor,
dealias_direct=self.dealias_direct,
coordinates=self.coors.coordinates)
def mask_nyquist(self, u_hat, mask=None):
"""Return array `u_hat` with zero Nyquist coefficients
Parameters
----------
u_hat : array
Array to be masked
mask : array or None, optional
mask array, if not provided then get the mask by calling
:func:`get_mask_nyquist`
"""
if mask is None:
mask = self.get_mask_nyquist(bcast=False)
if mask is not None:
u_hat *= mask
return u_hat
def plan(self, shape, axis, dtype, options):
if shape in (0, (0,)):
return
if isinstance(axis, int):
axis = [axis]
s = list(np.take(shape, axis))
if isinstance(self.forward, Transform):
if self.forward.input_array.shape == shape and axis == self._planned_axes:
# Already planned
return
plan_fwd = self._xfftn_fwd
plan_bck = self._xfftn_bck
self.axis = axis[-1]
self._planned_axes = axis
opts = dict(
overwrite_input='FFTW_DESTROY_INPUT',
planner_effort='FFTW_MEASURE',
threads=1,
)
opts.update(options)
threads = opts['threads']
flags = (fftw.flag_dict[opts['planner_effort']],
fftw.flag_dict[opts['overwrite_input']])
U = fftw.aligned(shape, dtype=dtype)
xfftn_fwd = plan_fwd(U, s=s, axes=axis, threads=threads, flags=flags)
V = xfftn_fwd.output_array
if np.issubdtype(dtype, np.floating):
flags = (fftw.flag_dict[opts['planner_effort']],)
xfftn_bck = plan_bck(V, s=s, axes=axis, threads=threads, flags=flags, output_array=U)
V.fill(0)
U.fill(0)
self._M = xfftn_fwd.get_normalization()
if self.padding_factor > 1.+1e-8:
trunc_array = self._get_truncarray(shape, V.dtype)
self.scalar_product = Transform(self.scalar_product, xfftn_fwd, U, V, trunc_array)
self.forward = Transform(self.forward, xfftn_fwd, U, V, trunc_array)
self.backward = Transform(self.backward, xfftn_bck, trunc_array, V, U)
else:
self.scalar_product = Transform(self.scalar_product, xfftn_fwd, U, V, V)
self.forward = Transform(self.forward, xfftn_fwd, U, V, V)
self.backward = Transform(self.backward, xfftn_bck, V, V, U)
self.si = islicedict(axis=self.axis, dimensions=self.dimensions)
self.sl = slicedict(axis=self.axis, dimensions=self.dimensions)
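# --- Added usage sketch (not part of the original file) ---------------------------------------
# The concrete bases below (R2C for real-valued data, C2C for complex data) are what user code
# normally instantiates, typically through shenfun's FunctionSpace factory. A rough sketch,
# assuming shenfun's public API:
# >>> from shenfun import FunctionSpace, Function, Array
# >>> T = FunctionSpace(16, family='F', dtype='d')   # real-to-complex Fourier space
# >>> u = Array(T); u[:] = np.sin(T.mesh())
# >>> u_hat = T.forward(u, Function(T))              # forward FFT with 1/N normalization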
class R2C(FourierBase):
"""Fourier function space for real to complex transforms
Parameters
----------
N : int
Number of quadrature points. Should be even for efficiency, but
this is not required.
padding_factor : float, optional
Factor for padding backward transforms. padding_factor=1.5
corresponds to a 3/2-rule for dealiasing.
domain : 2-tuple of floats, optional
The computational domain.
dealias_direct : bool, optional
True for dealiasing using 2/3-rule. Must be used with
padding_factor = 1.
coordinates: 2- or 3-tuple (coordinate, position vector (, sympy assumptions)), optional
Map for curvilinear coordinatesystem.
The new coordinate variable in the new coordinate system is the first item.
Second item is a tuple for the Cartesian position vector as function of the
new variable in the first tuple. Example::
theta = sp.Symbols('x', real=True, positive=True)
rv = (sp.cos(theta), sp.sin(theta))
"""
def __init__(self, N, padding_factor=1., domain=(0., 2.*np.pi),
dealias_direct=False, coordinates=None):
FourierBase.__init__(self, N, padding_factor=padding_factor, dtype=float,
domain=domain, dealias_direct=dealias_direct,
coordinates=coordinates)
self.N = N
self._xfftn_fwd = fftw.rfftn
self._xfftn_bck = fftw.irfftn
self._sn = []
self._sm = []
self.plan((int(padding_factor*N),), (0,), float, {})
def wavenumbers(self, bcast=True, scaled=False, eliminate_highest_freq=False):
k = np.fft.rfftfreq(self.N, 1./self.N).astype(int)
if self.N % 2 == 0 and eliminate_highest_freq:
k[-1] = 0
if scaled:
k = k*self.domain_factor()
if bcast is True:
k = self.broadcast_to_ndims(k)
return k
def get_mask_nyquist(self, bcast=True):
"""Return None or an array with zeros for Nyquist coefficients and one otherwise
Parameters
----------
bcast : boolean, optional
If True then broadcast returned mask array to dimensions of the
:class:`TensorProductSpace` this base belongs to.
"""
if self.N % 2 == 0:
f = np.ones(self.N//2+1, dtype=int)
f[-1] = 0
else:
return None
if bcast is True:
f = self.broadcast_to_ndims(f)
return f
def _get_truncarray(self, shape, dtype):
shape = list(shape)
shape[self.axis] = int(shape[self.axis] / self.padding_factor)
shape[self.axis] = shape[self.axis]//2 + 1
return fftw.aligned(shape, dtype=dtype)
@staticmethod
def short_name():
return 'R2C'
def slice(self):
return slice(0, self.N//2+1)
def shape(self, forward_output=True):
if forward_output:
return self.N//2+1
return int(np.floor(self.padding_factor*self.N))
def _evaluate_expansion_all(self, input_array, output_array, x=None, fast_transform=True):
if fast_transform is False:
assert abs(self.padding_factor-1) < 1e-8
P = self.evaluate_basis_all(x=x)
if output_array.ndim == 1:
output_array[:] = np.dot(P, input_array).real
if self.N % 2 == 0:
output_array += np.conj(np.dot(P[:, 1:-1], input_array[1:-1])).real
else:
output_array += np.conj(np.dot(P[:, 1:], input_array[1:])).real
else:
                fc = np.moveaxis(input_array, self.axis, -2)
import numpy as np
import matplotlib
import os
import matplotlib
from matplotlib import pyplot as plt
import matplotlib.ticker as mtick
from matplotlib.colors import hex2color
import utils
from utils.io import SMLAResultsReader
from datasets import brazil
if __name__ == '__main__':
# Whether to save figures. If false, figures are displayed only.
save_figs = True
# Location to save the figures (will be created if nonexistent)
figpath = 'figures/science'
# Figure format
fmt = 'png'
if fmt == 'pdf':
matplotlib.rc('pdf', fonttype=42)
# Figure DPI for raster formats
dpi = 200
# Paths to results files. Figures will be skipped if data cannot be found.
# Note that the legend is based off of the EO figure, so it will not be
# generated if EO data is unavailable.
eodds_path = 'results/science_brazil_eodds_0/science_brazil_eodds.h5'
di_path = 'results/science_brazil_di_0/science_brazil_di.h5'
dp_path = 'results/science_brazil_dp_0/science_brazil_dp.h5'
pe_path = 'results/science_brazil_pe_0/science_brazil_pe.h5'
eo_path = 'results/science_brazil_eo_0/science_brazil_eo.h5'
# Epsilon constants used in experiments
di_e = -0.80
dp_e = 0.15
eodds_e = 0.35
pe_e = 0.2
eo_e = 0.2
# Value of delta used in experiments
delta = 0.05
# Constants for rendering figures
n_total = brazil.load(gpa_cutoff=3.0).training_splits()[0].shape[0]
# Mapping from model names that will appear on legends
pprint_map = {
'SC' : 'SC',
'QSC' : 'QSC',
'FairlearnSVC' : 'FL',
'LinSVC' : 'SVC$_{linear}$',
'SGD' : 'SGD',
'SGD(hinge)' : 'SGD$_{hinge}$',
'SGD(log)' : 'SGD$_{log}$',
'SGD(perceptron)' : 'SGD$_{perc}$',
'SVC' : 'SVC$_{rbf}$',
'FairConst' : 'FC'
}
base_smla_names = ['SC', 'QSC']
base_bsln_names = ['SGD', 'LinSVC', 'SVC']
# Create the figure directory if nonexistent
if save_figs and not(os.path.isdir(figpath)):
os.makedirs(figpath)
#############
# Helpers #
#############
def save(fig, path, *args, **kwargs):
if not(os.path.isdir(figpath)):
os.makedirs(figpath)
path = os.path.join(figpath, path)
print('Saving figure to \'%s\'' % path)
fig.savefig(path, *args, **kwargs)
def get_ls(name):
if name == 'SC':
return ':'
elif name == 'QSC':
return '--'
return '-'
def get_lw(name):
if name == 'SC':
return 2
elif name == 'QSC':
return 2
return 1
def get_samples(results, dpct, e, include_fairlearn=False, include_fairconst=False):
''' Helper for filtering results files. '''
_smla_names = base_smla_names
_bsln_names = base_bsln_names.copy()
if include_fairlearn:
_bsln_names.append('FairlearnSVC')
if include_fairconst:
_bsln_names.append('FairConst')
# Get the SMLA samples
smla_samples = []
smla_names = []
psmla_names = []
for nm in _smla_names:
smla_names.append(nm)
psmla_names.append(pprint_map[nm])
sample = results.extract(['accept','loss_test', 'co_0_mean', 'data_pct'], name=nm, data_pct=dpct, e=e)
smla_samples.append(sample)
# get the baseline samples (note different versions of SGD)
bsln_samples = []
bsln_names = []
pbsln_names = []
for nm in _bsln_names:
if nm == 'SGD':
for loss in ['log','perceptron','hinge']:
bsln_names.append(nm)
pbsln_names.append(pprint_map[nm+('(%s)'%loss)])
sample = results.extract(['accept','loss_test', 'co_0_mean', 'data_pct'], name=nm, data_pct=dpct, loss=loss)
bsln_samples.append(sample)
elif nm == 'FairlearnSVC':
fl_e_vals = np.unique(results._store['method_parameters/FairlearnSVC']['fl_e'])
for fl_e in fl_e_vals:
bsln_names.append(nm + ('(%.2f)'%fl_e))
pbsln_names.append(pprint_map[nm] + ('$_{%.2f}$'%fl_e))
sample = results.extract(['accept','loss_test', 'co_0_mean', 'data_pct'], name=nm, data_pct=dpct, fl_e=fl_e)
bsln_samples.append(sample)
elif nm == 'FairConst':
cov_vals = np.unique(results._store['method_parameters/FairConst']['cov'])
for cov in cov_vals:
bsln_names.append(nm + ('(%.2f)'%cov))
pbsln_names.append(pprint_map[nm] + ('$_{%.2f}$'%cov))
sample = results.extract(['accept','loss_test', 'co_0_mean', 'data_pct'], name=nm, data_pct=dpct, cov=cov)
bsln_samples.append(sample)
elif nm == 'SVC':
continue
else:
bsln_names.append(nm)
pbsln_names.append(pprint_map[nm])
sample = results.extract(['accept','loss_test', 'co_0_mean', 'data_pct'], name=nm, data_pct=dpct)
bsln_samples.append(sample)
is_smla = np.array([True]*len(smla_names) + [False]*len(bsln_names))
return is_smla, (smla_names, psmla_names, smla_samples), (bsln_names, pbsln_names, bsln_samples)
def get_brazil_stats(path, e, include_fairlearn=False, include_fairconst=False):
''' Helper for extracting results from brazil results files. '''
results = SMLAResultsReader(path)
results.open()
dpcts = np.array(sorted(results.extract(['data_pct']).data_pct.unique()))
nvals = np.array(sorted(np.floor(dpcts * n_total).astype(int)))
is_smla, (smla_names, psmla_names, smla_samples), (bsln_names, pbsln_names, bsln_samples) = get_samples(results, dpcts.max(), e, include_fairlearn=include_fairlearn, include_fairconst=include_fairconst)
all_samples = smla_samples + bsln_samples
mnames = np.array(smla_names + bsln_names)
pmnames = np.array(psmla_names + pbsln_names)
# Compute statistics and close the results file
arates, arates_se = [], [] # Acceptance rates and SEs
frates, frates_se = [], [] # Failure rates and SEs (rate that accepted solutions have g(theta) > 0 on the test set)
lrates, lrates_se = [], [] # Test set error and SEs
for _dpct in dpcts:
_, (_,_,_smla_samples), (_,_,_bsln_samples) = get_samples(results, _dpct, e, include_fairlearn=include_fairlearn, include_fairconst=include_fairconst)
_arates, _arates_se = [], []
_frates, _frates_se = [], []
_lrates, _lrates_se = [], []
for s in _smla_samples + _bsln_samples:
accepts = 1 * s.accept
_arates.append(np.mean(accepts))
_arates_se.append(np.std(accepts,ddof=1)/np.sqrt(len(accepts)))
failures = 1 * np.logical_and(s.co_0_mean>0, s.accept)
_frates.append(np.mean(failures))
_frates_se.append(np.std(failures,ddof=1)/np.sqrt(len(failures)))
if any(s.accept):
losses = s.loss_test[s.accept]
_lrates.append(np.mean(losses))
_lrates_se.append(np.std(losses,ddof=1)/np.sqrt(len(losses)))
else:
_lrates.append(np.nan)
_lrates_se.append(np.nan)
arates.append(_arates)
arates_se.append(_arates_se)
frates.append(_frates)
frates_se.append(_frates_se)
lrates.append(_lrates)
lrates_se.append(_lrates_se)
arates = np.array(arates)
arates_se = np.array(arates_se)
frates = np.array(frates)
frates_se = np.array(frates_se)
lrates = np.array(lrates)
lrates_se = | np.array(lrates_se) | numpy.array |
#!/usr/bin/env python
# coding: utf-8
# Copyright (c) Nanjing University, Vision Lab.
# Last update:
# 2019.10.27
# 2019.11.14
# 2020.11.26
import os
import time
import numpy as np
import tensorflow as tf
import matplotlib.pylab as plt
import pandas as pd
import subprocess
import glob
import configparser
import argparse
import importlib
# from numba import cuda
tf.enable_eager_execution()
from process import preprocess, postprocess
# import models.model_voxception as model
from transform import compress_factorized, decompress_factorized
from transform import compress_hyper, decompress_hyper
from dataprocess.inout_bitstream import write_binary_files_factorized, read_binary_files_factorized
from dataprocess.inout_bitstream import write_binary_files_hyper, read_binary_files_hyper
os.environ['CUDA_VISIBLE_DEVICES']="0"
# set gpu.
cfg = tf.ConfigProto()
cfg.gpu_options.per_process_gpu_memory_fraction = 1.0
cfg.gpu_options.allow_growth = True
cfg.log_device_placement=True
# config.device_count={'gpu':0}
sess = tf.Session(config=cfg)
from myutils.pc_error_wrapper import pc_error
from myutils.pc_error_wrapper import get_points_number
def test_factorized(input_file, model, ckpt_dir, scale, cube_size, min_num, postfix=''):
# Pre-process
cubes, cube_positions, points_numbers = preprocess(input_file, scale, cube_size, min_num)
### Encoding
strings, min_v, max_v, shape = compress_factorized(cubes, model, ckpt_dir)
# Write files
filename = os.path.split(input_file)[-1][:-4]
print(filename)
rootdir = './compressed'+ postfix +'/'
bytes_strings, bytes_pointnums, bytes_cubepos = write_binary_files_factorized(
filename, strings.numpy(), points_numbers, cube_positions,
min_v.numpy(), max_v.numpy(), shape.numpy(), rootdir)
# Read files
strings_d, points_numbers_d, cube_positions_d, min_v_d, max_v_d, shape_d = \
read_binary_files_factorized(filename, rootdir)
# Decoding
cubes_d = decompress_factorized(strings_d, min_v_d, max_v_d, shape_d, model, ckpt_dir)
# bpp
N = get_points_number(input_file)
bpp = round(8*(bytes_strings + bytes_pointnums + bytes_cubepos)/float(N), 4)
bpp_strings = round(8*bytes_strings/float(N), 4)
bpp_pointsnums = round(8*bytes_pointnums/float(N) ,4)
bpp_cubepos = round(8*bytes_cubepos/float(N), 4)
bpp_strings_hyper = 0
bpp_strings_head = 0
bpps = [bpp, bpp_strings, bpp_strings_hyper, bpp_strings_head, bpp_pointsnums, bpp_cubepos]
return cubes_d, cube_positions_d, points_numbers_d, N, bpps
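# A minimal, hedged sketch of how test_factorized might be invoked. The input file path
# and checkpoint directory are illustrative assumptions; the model module mirrors the
# commented-out import above (models.model_voxception) rather than anything defined here:
#
#   import models.model_voxception as model
#   cubes_d, cube_positions_d, points_numbers_d, N, bpps = test_factorized(
#       'testdata/longdress_vox10_1300.ply', model, './checkpoints/factorized/',
#       scale=1.0, cube_size=64, min_num=64, postfix='_factorized')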
def test_hyper(input_file, model, ckpt_dir, scale, cube_size, min_num, postfix=''):
# Pre-process
cubes, cube_positions, points_numbers = preprocess(input_file, scale, cube_size, min_num)
### Encoding
y_strings, y_min_vs, y_max_vs, y_shape, z_strings, z_min_v, z_max_v, z_shape, x_ds = compress_hyper(cubes, model, ckpt_dir, True)
# Write files
filename = os.path.split(input_file)[-1][:-4]
print(filename)
rootdir = './compressed'+ postfix +'/'
bytes_strings, bytes_strings_head, bytes_strings_hyper, bytes_pointnums, bytes_cubepos = write_binary_files_hyper(
filename, y_strings.numpy(), z_strings.numpy(), points_numbers, cube_positions,
y_min_vs.numpy(), y_max_vs.numpy(), y_shape.numpy(),
z_min_v.numpy(), z_max_v.numpy(), z_shape.numpy(), rootdir)
# Read files
y_strings_d, z_strings_d, points_numbers_d, cube_positions_d, y_min_vs_d, y_max_vs_d, y_shape_d, z_min_v_d, z_max_v_d, z_shape_d = \
read_binary_files_hyper(filename, rootdir)
# Decoding
cubes_d = decompress_hyper(y_strings_d, y_min_vs_d.astype('int32'), y_max_vs_d.astype('int32'),
y_shape_d, z_strings_d, z_min_v_d, z_max_v_d, z_shape_d, model, ckpt_dir)
# cheat!!!
##############
cubes_d = x_ds
##############
# bpp
N = get_points_number(input_file)
bpp = round(8*(bytes_strings + bytes_strings_head + bytes_strings_hyper +
bytes_pointnums + bytes_cubepos)/float(N), 4)
bpp_strings = round(8*bytes_strings/float(N), 4)
bpp_strings_hyper = round(8*bytes_strings_hyper/float(N), 4)
bpp_strings_head = round(8*bytes_strings_head/float(N), 4)
bpp_pointsnums = round(8*bytes_pointnums/float(N) ,4)
bpp_cubepos = round(8*bytes_cubepos/float(N), 4)
bpps = [bpp, bpp_strings, bpp_strings_hyper, bpp_strings_head, bpp_pointsnums, bpp_cubepos]
return cubes_d, cube_positions_d, points_numbers_d, N, bpps
def collect_results(results, results_d1, results_d2, bpps, N, scale, rho_d1, rho_d2):
# bpp
results["ori_points"] = N
results["scale"] = scale
# results["cube_size"] = cube_size
# results["res"] = res
results["bpp"] = bpps[0]
results["bpp_strings"] = bpps[1]
results["bpp_strings_hyper"] = bpps[2]
results["bpp_strings_head"] = bpps[3]
results["bpp_pointsnums"] = bpps[4]
results["bpp_cubepos"] = bpps[5]
results["rho_d1"] = rho_d1
results["optimal D1 PSNR"] = results_d1["mseF,PSNR (p2point)"]
results["rho_d2"] = rho_d2
results["optimal D2 PSNR"] = results_d2["mseF,PSNR (p2plane)"]
print(results)
return results
def plot_results(all_results, filename, root_dir):
fig, ax = plt.subplots(figsize=(7.3, 4.2))
plt.plot(np.array(all_results["bpp"][:]), np.array(all_results["mseF,PSNR (p2point)"][:]),
label="D1", marker='x', color='red')
plt.plot(np.array(all_results["bpp"][:]), np.array(all_results["mseF,PSNR (p2plane)"][:]),
label="D2", marker='x', color = 'blue')
plt.plot(np.array(all_results["bpp"][:]), | np.array(all_results["optimal D1 PSNR"][:]) | numpy.array |
from collections.abc import Mapping
from ctypes import c_int, c_int32, c_double, c_char_p, POINTER, \
create_string_buffer, c_size_t
from weakref import WeakValueDictionary
import numpy as np
from numpy.ctypeslib import as_array
from openmc.exceptions import AllocationError, InvalidIDError
from . import _dll
from .core import _FortranObjectWithID
from .error import _error_handler
from .material import Material
from .mesh import _get_mesh
__all__ = [
'Filter', 'AzimuthalFilter', 'CellFilter', 'CellbornFilter', 'CellfromFilter',
'CellInstanceFilter', 'CollisionFilter', 'DistribcellFilter', 'DelayedGroupFilter',
'EnergyFilter', 'EnergyoutFilter', 'EnergyFunctionFilter', 'LegendreFilter',
'MaterialFilter', 'MeshFilter', 'MeshSurfaceFilter', 'MuFilter', 'ParticleFilter',
'PolarFilter', 'SphericalHarmonicsFilter', 'SpatialLegendreFilter', 'SurfaceFilter',
'UniverseFilter', 'ZernikeFilter', 'ZernikeRadialFilter', 'filters'
]
# Tally functions
_dll.openmc_cell_filter_get_bins.argtypes = [
c_int32, POINTER(POINTER(c_int32)), POINTER(c_int32)]
_dll.openmc_cell_filter_get_bins.restype = c_int
_dll.openmc_cell_filter_get_bins.errcheck = _error_handler
_dll.openmc_energy_filter_get_bins.argtypes = [
c_int32, POINTER(POINTER(c_double)), POINTER(c_size_t)]
_dll.openmc_energy_filter_get_bins.restype = c_int
_dll.openmc_energy_filter_get_bins.errcheck = _error_handler
_dll.openmc_energy_filter_set_bins.argtypes = [c_int32, c_size_t, POINTER(c_double)]
_dll.openmc_energy_filter_set_bins.restype = c_int
_dll.openmc_energy_filter_set_bins.errcheck = _error_handler
_dll.openmc_energyfunc_filter_set_data.restype = c_int
_dll.openmc_energyfunc_filter_set_data.errcheck = _error_handler
_dll.openmc_energyfunc_filter_set_data.argtypes = [
c_int32, c_size_t, POINTER(c_double), POINTER(c_double)]
_dll.openmc_energyfunc_filter_get_energy.restype = c_int
_dll.openmc_energyfunc_filter_get_energy.errcheck = _error_handler
_dll.openmc_energyfunc_filter_get_energy.argtypes = [
c_int32, POINTER(c_size_t), POINTER(POINTER(c_double))]
_dll.openmc_energyfunc_filter_get_y.restype = c_int
_dll.openmc_energyfunc_filter_get_y.errcheck = _error_handler
_dll.openmc_energyfunc_filter_get_y.argtypes = [
c_int32, POINTER(c_size_t), POINTER(POINTER(c_double))]
_dll.openmc_filter_get_id.argtypes = [c_int32, POINTER(c_int32)]
_dll.openmc_filter_get_id.restype = c_int
_dll.openmc_filter_get_id.errcheck = _error_handler
_dll.openmc_filter_get_type.argtypes = [c_int32, c_char_p]
_dll.openmc_filter_get_type.restype = c_int
_dll.openmc_filter_get_type.errcheck = _error_handler
_dll.openmc_filter_set_id.argtypes = [c_int32, c_int32]
_dll.openmc_filter_set_id.restype = c_int
_dll.openmc_filter_set_id.errcheck = _error_handler
_dll.openmc_get_filter_index.argtypes = [c_int32, POINTER(c_int32)]
_dll.openmc_get_filter_index.restype = c_int
_dll.openmc_get_filter_index.errcheck = _error_handler
_dll.openmc_legendre_filter_get_order.argtypes = [c_int32, POINTER(c_int)]
_dll.openmc_legendre_filter_get_order.restype = c_int
_dll.openmc_legendre_filter_get_order.errcheck = _error_handler
_dll.openmc_legendre_filter_set_order.argtypes = [c_int32, c_int]
_dll.openmc_legendre_filter_set_order.restype = c_int
_dll.openmc_legendre_filter_set_order.errcheck = _error_handler
_dll.openmc_material_filter_get_bins.argtypes = [
c_int32, POINTER(POINTER(c_int32)), POINTER(c_size_t)]
_dll.openmc_material_filter_get_bins.restype = c_int
_dll.openmc_material_filter_get_bins.errcheck = _error_handler
_dll.openmc_material_filter_set_bins.argtypes = [c_int32, c_size_t, POINTER(c_int32)]
_dll.openmc_material_filter_set_bins.restype = c_int
_dll.openmc_material_filter_set_bins.errcheck = _error_handler
_dll.openmc_mesh_filter_get_mesh.argtypes = [c_int32, POINTER(c_int32)]
_dll.openmc_mesh_filter_get_mesh.restype = c_int
_dll.openmc_mesh_filter_get_mesh.errcheck = _error_handler
_dll.openmc_mesh_filter_set_mesh.argtypes = [c_int32, c_int32]
_dll.openmc_mesh_filter_set_mesh.restype = c_int
_dll.openmc_mesh_filter_set_mesh.errcheck = _error_handler
_dll.openmc_meshsurface_filter_get_mesh.argtypes = [c_int32, POINTER(c_int32)]
_dll.openmc_meshsurface_filter_get_mesh.restype = c_int
_dll.openmc_meshsurface_filter_get_mesh.errcheck = _error_handler
_dll.openmc_meshsurface_filter_set_mesh.argtypes = [c_int32, c_int32]
_dll.openmc_meshsurface_filter_set_mesh.restype = c_int
_dll.openmc_meshsurface_filter_set_mesh.errcheck = _error_handler
_dll.openmc_mesh_filter_get_translation.argtypes = [c_int32, POINTER(c_double*3)]
_dll.openmc_mesh_filter_get_translation.restype = c_int
_dll.openmc_mesh_filter_get_translation.errcheck = _error_handler
_dll.openmc_mesh_filter_set_translation.argtypes = [c_int32, POINTER(c_double*3)]
_dll.openmc_mesh_filter_set_translation.restype = c_int
_dll.openmc_mesh_filter_set_translation.errcheck = _error_handler
_dll.openmc_new_filter.argtypes = [c_char_p, POINTER(c_int32)]
_dll.openmc_new_filter.restype = c_int
_dll.openmc_new_filter.errcheck = _error_handler
_dll.openmc_spatial_legendre_filter_get_order.argtypes = [c_int32, POINTER(c_int)]
_dll.openmc_spatial_legendre_filter_get_order.restype = c_int
_dll.openmc_spatial_legendre_filter_get_order.errcheck = _error_handler
_dll.openmc_spatial_legendre_filter_set_order.argtypes = [c_int32, c_int]
_dll.openmc_spatial_legendre_filter_set_order.restype = c_int
_dll.openmc_spatial_legendre_filter_set_order.errcheck = _error_handler
_dll.openmc_sphharm_filter_get_order.argtypes = [c_int32, POINTER(c_int)]
_dll.openmc_sphharm_filter_get_order.restype = c_int
_dll.openmc_sphharm_filter_get_order.errcheck = _error_handler
_dll.openmc_sphharm_filter_set_order.argtypes = [c_int32, c_int]
_dll.openmc_sphharm_filter_set_order.restype = c_int
_dll.openmc_sphharm_filter_set_order.errcheck = _error_handler
_dll.openmc_zernike_filter_get_order.argtypes = [c_int32, POINTER(c_int)]
_dll.openmc_zernike_filter_get_order.restype = c_int
_dll.openmc_zernike_filter_get_order.errcheck = _error_handler
_dll.openmc_zernike_filter_set_order.argtypes = [c_int32, c_int]
_dll.openmc_zernike_filter_set_order.restype = c_int
_dll.openmc_zernike_filter_set_order.errcheck = _error_handler
_dll.tally_filters_size.restype = c_size_t
class Filter(_FortranObjectWithID):
__instances = WeakValueDictionary()
def __new__(cls, obj=None, uid=None, new=True, index=None):
mapping = filters
if index is None:
if new:
# Determine ID to assign
if uid is None:
uid = max(mapping, default=0) + 1
else:
if uid in mapping:
raise AllocationError('A filter with ID={} has already '
'been allocated.'.format(uid))
# Set the filter type -- note that the filter_type attribute
# only exists on subclasses!
index = c_int32()
_dll.openmc_new_filter(cls.filter_type.encode(), index)
index = index.value
else:
index = mapping[uid]._index
if index not in cls.__instances:
instance = super().__new__(cls)
instance._index = index
if uid is not None:
instance.id = uid
cls.__instances[index] = instance
return cls.__instances[index]
@property
def id(self):
filter_id = c_int32()
_dll.openmc_filter_get_id(self._index, filter_id)
return filter_id.value
@id.setter
def id(self, filter_id):
_dll.openmc_filter_set_id(self._index, filter_id)
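# Hedged usage sketch: these wrappers act on filters living inside an active openmc.lib
# session, so a model must already be loaded. The call sequence below is illustrative
# (the bins setter is part of the full class, not shown in this excerpt):
#
#   import openmc.lib
#   openmc.lib.init()                        # load the model XML files into memory
#   ef = EnergyFilter([0.0, 0.625, 2.0e7])   # three-group energy bins in eV
#   print(ef.id, ef.bins)
#   openmc.lib.finalize()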
class EnergyFilter(Filter):
filter_type = 'energy'
def __init__(self, bins=None, uid=None, new=True, index=None):
super().__init__(uid, new, index)
if bins is not None:
self.bins = bins
@property
def bins(self):
energies = POINTER(c_double)()
n = c_size_t()
_dll.openmc_energy_filter_get_bins(self._index, energies, n)
return | as_array(energies, (n.value,)) | numpy.ctypeslib.as_array |
import sys
import numpy as np
import scipy.sparse as sp
from ctypes import c_int, byref
from numpy.ctypeslib import ndpointer
import time
import qutip.settings as qset
# Load solver functions from mkl_lib
pardiso = qset.mkl_lib.pardiso
pardiso_delete = qset.mkl_lib.pardiso_handle_delete
if sys.maxsize > 2**32: # Running 64-bit
pardiso_64 = qset.mkl_lib.pardiso_64
pardiso_delete_64 = qset.mkl_lib.pardiso_handle_delete_64
def _pardiso_parameters(hermitian, has_perm,
max_iter_refine,
scaling_vectors,
weighted_matching):
iparm = np.zeros(64, dtype=np.int32)
iparm[0] = 1 # Do not use default values
iparm[1] = 3 # Use openmp nested dissection
if has_perm:
iparm[4] = 1
iparm[7] = max_iter_refine # Max number of iterative refinements
if hermitian:
iparm[9] = 8
else:
iparm[9] = 13
if not hermitian:
iparm[10] = int(scaling_vectors)
iparm[12] = int(weighted_matching) # Non-symmetric weighted matching
iparm[17] = -1
iparm[20] = 1
iparm[23] = 1 # Parallel factorization
iparm[26] = 0 # Check matrix structure
iparm[34] = 1 # Use zero-based indexing
return iparm
# Set error messages
pardiso_error_msgs = {
'-1': 'Input inconsistent',
'-2': 'Out of memory',
'-3': 'Reordering problem',
'-4':
'Zero pivot, numerical factorization or iterative refinement problem',
'-5': 'Unclassified internal error',
'-6': 'Reordering failed',
'-7': 'Diagonal matrix is singular',
'-8': '32-bit integer overflow',
'-9': 'Not enough memory for OOC',
'-10': 'Error opening OOC files',
'-11': 'Read/write error with OOC files',
'-12': 'Pardiso-64 called from 32-bit library',
}
def _default_solver_args():
return {
'hermitian': False,
'posdef': False,
'max_iter_refine': 10,
'scaling_vectors': True,
'weighted_matching': True,
'return_info': False,
}
class mkl_lu:
"""
Object pointing to LU factorization of a sparse matrix
generated by mkl_splu.
Methods
-------
solve(b, verbose=False)
Solve system of equations using given RHS vector 'b'.
Returns solution ndarray with same shape as input.
info()
Returns the statistics of the factorization and
solution in the lu.info attribute.
delete()
Deletes the allocated solver memory.
"""
def __init__(self, np_pt=None, dim=None, is_complex=None, data=None,
indptr=None, indices=None, iparm=None, np_iparm=None,
mtype=None, perm=None, np_perm=None, factor_time=None):
self._np_pt = np_pt
self._dim = dim
self._is_complex = is_complex
self._data = data
self._indptr = indptr
self._indices = indices
self._iparm = iparm
self._np_iparm = np_iparm
self._mtype = mtype
self._perm = perm
self._np_perm = np_perm
self._factor_time = factor_time
self._solve_time = None
def solve(self, b, verbose=None):
b_shp = b.shape
if b.ndim == 2 and b.shape[1] == 1:
b = b.ravel()
nrhs = 1
elif b.ndim == 2 and b.shape[1] != 1:
nrhs = b.shape[1]
b = b.ravel(order='F')
else:
b = b.ravel()
nrhs = 1
data_type = np.complex128 if self._is_complex else np.float64
if b.dtype != data_type:
b = b.astype(data_type, copy=False)
# Create solution array (x) and pointers to x and b
x = np.zeros(b.shape, dtype=data_type, order='C')
np_x = x.ctypes.data_as(ndpointer(data_type, ndim=1, flags='C'))
np_b = b.ctypes.data_as(ndpointer(data_type, ndim=1, flags='C'))
error = np.zeros(1, dtype=np.int32)
np_error = error.ctypes.data_as(ndpointer(np.int32, ndim=1, flags='C'))
# Call solver
_solve_start = time.time()
pardiso(
self._np_pt,
byref(c_int(1)),
byref(c_int(1)),
byref(c_int(self._mtype)),
byref(c_int(33)),
byref(c_int(self._dim)),
self._data,
self._indptr,
self._indices,
self._np_perm,
byref(c_int(nrhs)),
self._np_iparm,
byref(c_int(0)),
np_b,
np_x,
np_error,
)
self._solve_time = time.time() - _solve_start
if error[0] != 0:
raise Exception(pardiso_error_msgs[str(error[0])])
if verbose:
print('Solution Stage')
print('--------------')
print('Solution time: ',
round(self._solve_time, 4))
print('Solution memory (Mb): ',
round(self._iparm[16]/1024, 4))
print('Number of iterative refinements:',
self._iparm[6])
print('Total memory (Mb): ',
round(sum(self._iparm[15:17])/1024, 4))
print()
return np.reshape(x, b_shp, order=('C' if nrhs == 1 else 'F'))
def info(self):
info = {'FactorTime': self._factor_time,
'SolveTime': self._solve_time,
'Factormem': round(self._iparm[15]/1024, 4),
'Solvemem': round(self._iparm[16]/1024, 4),
'IterRefine': self._iparm[6]}
return info
def delete(self):
# Delete all data
error = np.zeros(1, dtype=np.int32)
np_error = error.ctypes.data_as(ndpointer(np.int32, ndim=1, flags='C'))
pardiso(
self._np_pt,
byref(c_int(1)),
byref(c_int(1)),
byref(c_int(self._mtype)),
byref(c_int(-1)),
byref(c_int(self._dim)),
self._data,
self._indptr,
self._indices,
self._np_perm,
byref(c_int(1)),
self._np_iparm,
byref(c_int(0)),
byref(c_int(0)),
byref(c_int(0)),
np_error,
)
if error[0] == -10:
raise Exception('Error freeing solver memory')
_MATRIX_TYPE_NAMES = {
4: 'Complex Hermitian positive-definite',
-4: 'Complex Hermitian indefinite',
2: 'Real symmetric positive-definite',
-2: 'Real symmetric indefinite',
11: 'Real non-symmetric',
13: 'Complex non-symmetric',
}
def _mkl_matrix_type(dtype, solver_args):
if not solver_args['hermitian']:
return 13 if dtype == np.complex128 else 11
out = 4 if dtype == np.complex128 else 2
return out if solver_args['posdef'] else -out
def mkl_splu(A, perm=None, verbose=False, **kwargs):
"""
Returns the LU factorization of the sparse matrix A.
Parameters
----------
A : csr_matrix
Sparse input matrix.
perm : ndarray (optional)
User defined matrix factorization permutation.
verbose : bool {False, True}
Report factorization details.
Returns
-------
lu : mkl_lu
Returns object containing LU factorization with a
solve method for solving with a given RHS vector.
"""
if not sp.isspmatrix_csr(A):
raise TypeError('Input matrix must be in sparse CSR format.')
if A.shape[0] != A.shape[1]:
raise Exception('Input matrix must be square')
dim = A.shape[0]
solver_args = _default_solver_args()
if set(kwargs) - set(solver_args):
raise ValueError(
"Unknown keyword arguments pass to mkl_splu: {!r}"
.format(set(kwargs) - set(solver_args))
)
solver_args.update(kwargs)
# If hermitian, then take upper-triangle of matrix only
if solver_args['hermitian']:
B = sp.triu(A, format='csr')
A = B # This gets around making a full copy of A in triu
is_complex = bool(A.dtype == np.complex128)
if not is_complex:
A = sp.csr_matrix(A, dtype=np.float64, copy=False)
data_type = A.dtype
# Create pointer to internal memory
pt = np.zeros(64, dtype=int)
np_pt = pt.ctypes.data_as(ndpointer(int, ndim=1, flags='C'))
# Create pointers to sparse matrix arrays
data = A.data.ctypes.data_as( | ndpointer(data_type, ndim=1, flags='C') | numpy.ctypeslib.ndpointer |
"""
part2.ipynb
Automatically generated by Colaboratory.
Original file is located at https://colab.research.google.com/drive/1Y7UoHD_lSTrIXDF3iy3j09a7YhH6lcwd
# Information
Authors: <NAME> (SSP210009) and <NAME> (PXS200095)
Dataset Owner/Donor Information:
Name: Prof. <NAME>
Institutions: Department of Civil Engineering, Tamkang University, Taiwan
Email: <EMAIL>
TEL: 886-2-26215656 ext. 3181
Date Donated: Aug. 18, 2018
Dataset Information:
The market historical data set of real estate valuation are collected from Sindian Dist., New Taipei City, Taiwan. The real estate valuation is a regression problem. The data set was randomly split into the training data set (2/3 samples) and the testing data set (1/3 samples).
Target Variable:
Y = House price of unit area (10000 New Taiwan Dollar/Ping, where Ping is a local unit, 1 Ping = 3.3 meter squared)
References:
1) https://scikit-learn.org/stable/modules/classes.html
2) https://numpy.org/doc/stable/reference/
3) https://archive.ics.uci.edu/ml/datasets/Real+estate+valuation+data+set
4) https://www.kaggle.com/dskagglemt/real-estate-valuation-using-linearsvr
5) https://medium.com/@powusu381/multiple-regression-in-python-using-scikit-learn-predicting-the-miles-per-gallon-mpg-of-cars-4c8e512234be
"""
# Commented out IPython magic to ensure Python compatibility.
# Library Imports
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as matplt
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
from sklearn.linear_model import SGDRegressor
from sklearn.metrics import mean_squared_error
from sklearn.metrics import r2_score
from sklearn.metrics import explained_variance_score
# %matplotlib inline
# Dataset Loading
dataframe = pd.DataFrame(pd.read_excel("https://github.com/Shreyans1602/Machine_Learning_Linear_Regression/raw/main/Dataset.xlsx", sheet_name = 'Dataset', index_col = 'No'))
# Loaded Successfully
print("Data Loaded Successfully")
print("Real Estate Valuation Data Set has {} data points with {} variables each.".format(*dataframe.shape))
# Pre-Processing Stage
print("Pre-Processing the Data:\n")
# Check for null values in the dataframe
print("Null entries found?:", ("No\n" if dataframe.isnull().sum().sum() == 0 else "Yes\n"))
# Check for duplicate values in the dataframe
print("Duplicate entries found?:", ("No\n" if dataframe.duplicated().sum() == 0 else "Yes\n"))
# Check if there is any categorical values
print("Check for categorical values:")
print(dataframe.dtypes)
# Rename attributes and describe the dataframe
dataframe.rename(
columns = {
"X1 transaction date": "Transaction_Date",
"X2 house age": "House_Age",
"X3 distance to the nearest MRT station": "MRT_Distance",
"X4 number of convenience stores": "Num_Stores_NearBy",
"X5 latitude": "Latitude",
"X6 longitude": "Longitude",
"Y house price of unit area": "House_Price",
},
inplace = True
)
print("\nRenaming the attributes for convenience. The dataframe is as follows:\n")
print(dataframe.head())
print("\nDescription of the dataframe is as follows:")
print(dataframe.describe())
# Printing correlation matrix
print("\nCorrelation matrix is as follows:")
print(dataframe.corr())
# Show the impact of different attributes on the House_Price variable
print("\nMost impactful attributes on House_Price variable are shows below in decending order:")
print(abs(dataframe.corr())['House_Price'].sort_values(ascending = False))
# Show various plots for visualization of the above information. Un-Comment lines 97 to 108 to see the plots.
# sns.set(rc = {'figure.figsize':(18,10)})
# hmap = sns.heatmap(dataframe.corr(), vmin = -1, vmax = 1)
# # Checking the correlation of all the attributes vs the House_Price variable
# sns.barplot(y = dataframe.corr().loc['House_Price'].index, x = dataframe.corr().loc['House_Price'].values)
# # Show plots for effect of each variable on House_Price
# columns = dataframe.columns
# for i in range(len(columns) - 1):
# matplt.figure(i)
# sns.scatterplot(x = columns[i], y = 'House_Price', data = dataframe)
# Based on the above heatmap, correlation scatter and bar graphs
# High Correlation Attributes w.r.t target are Distance, Num_Stores_NearBy, Latitude, Longitude, House_Age.
# Neligible Correlation Attributes w.r.t target is No and Transaction_Date.
# Dropping the insignificant attributes from the data set
dataframe = dataframe.drop(['Transaction_Date'], axis = 1)
print(dataframe.columns)
# Prepare X and Y matrix
X = np.array(dataframe.drop(['House_Price'], axis = 1))
Y = np.array(dataframe['House_Price'])
# Split train and test data
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size = 0.3, random_state = 99)
print(X_train.shape, X_test.shape, Y_train.shape, Y_test.shape)
# Scaling the data set
std_scaler = StandardScaler()
std_scaler.fit(X_train)
X_train_scaled = std_scaler.transform(X_train)
X_test_scaled = std_scaler.transform(X_test)
# Train model with different parameters and log the results
# Training Parameters
train_learning_rates = [0.1, 0.01, 0.001]
train_iterations = [100, 120, 140, 160, 180, 200, 300, 400, 500, 600, 700, 800, 900, 1000]
config_count = 1
# Training Results Arrays
mse_arr = []
rmse_arr = []
r2_arr = []
ev_arr = []
params_arr = []  # (learning_rate, iterations) actually used in each run
# Open Log File
log_file = open("logs_library_implementation.txt","w")
for i in train_learning_rates:
for j in train_iterations:
# Skip iterations less than or equal to 200 for learning rate = 0.001 to avoid failure in convergence warning
if i == 0.001:
if j <= 200:
continue
# Learning Rate as i and the max_iterations as j
lrsgd = SGDRegressor(eta0 = i, max_iter = j, random_state = 99)
lrsgd.fit(X_train_scaled, np.array(Y_train))
Y_pred = lrsgd.predict(X_test_scaled)
# Evaluate Performance
mse = mean_squared_error(Y_test, Y_pred)
rmse = np.sqrt(mse)
r2 = r2_score(Y_test, Y_pred)
ev = explained_variance_score(Y_test, Y_pred)
# Store Results
mse_arr.append(mse)
rmse_arr.append(rmse)
r2_arr.append(r2)
ev_arr.append(ev)
params_arr.append((i, j))
# Log Data
log_file.write("Run: " + str(config_count) + " || MSE: " + str(mse) + " || R^2 Score: " + str(r2) + " || Explained Variance Score: " + str(ev) + " || Learning Rate: " + str(i) + "|| Iterations: " + str(j) + "\n")
config_count += 1
best_param_idx = mse_arr.index(min(mse_arr))
print("\nEvaluation Parameters:\n")
print("Best Performance Results:")
print("MSE:", mse_arr[best_param_idx])
print("RMSE:", rmse_arr[best_param_idx])
print("R^2 Score:", r2_arr[best_param_idx])
print("Explained Variance Score: ", ev_arr[best_param_idx])
print("\nBest Parameters for Model Training:")
print("Learning Rate:", train_learning_rates[(best_param_idx % len(train_learning_rates))])
print("Iterations:", train_iterations[(best_param_idx % len(train_iterations))])
log_file.close()
# Train model with same parameters as manual implementation
# Model Training Stage
print("\nTraining model with same parameters as manual implementation:")
lrsgd = SGDRegressor(eta0 = 0.01, max_iter = 100, random_state = 99)
lrsgd.fit(X_train_scaled, np.array(Y_train))
Y_pred = lrsgd.predict(X_test_scaled)
# Evaluate Performance
mse = mean_squared_error(Y_test, Y_pred)
rmse = np.sqrt(mse)
r2 = r2_score(Y_test, Y_pred)
ev = explained_variance_score(Y_test, Y_pred)
print("\nEvaluation Parameters:\n")
print("Performance Results:")
print("MSE:", mse)
print("RMSE:", rmse)
print("R^2 Score:", r2)
print("Explained Variance Score: ", ev)
# Train model with default library parameters
# Model Training Stage
print("\nTraining model with default library parameters:")
lrsgd = SGDRegressor()
lrsgd.fit(X_train_scaled, np.array(Y_train))
Y_pred = lrsgd.predict(X_test_scaled)
# Evaluate Performance
mse = mean_squared_error(Y_test, Y_pred)
rmse = | np.sqrt(mse) | numpy.sqrt |
import numpy as np
import matplotlib.pyplot as plt; plt.ioff()
import copy
from .class_utils import *
from .utils import *
from astropy.cosmology import Planck15
import astropy.constants as co
c = co.c.value # speed of light, in m/s
G = co.G.value # gravitational constant in SI units
Msun = co.M_sun.value # solar mass, in kg
Mpc = 1e6*co.pc.value # 1 Mpc, in m
arcsec2rad = np.pi/(180.*3600.)
rad2arcsec =3600.*180./np.pi
deg2rad = np.pi/180.
rad2deg = 180./np.pi
__all__ = ['LensRayTrace','GenerateLensingGrid','thetaE','get_caustics','CausticsSIE']
def LensRayTrace(xim,yim,lens,Dd,Ds,Dds):
"""
Wrapper to pass off lensing calculations to any number of functions
defined below, accumulating lensing offsets from multiple lenses
and shear as we go.
"""
# Ensure lens is a list, for convenience
lens = list(np.array([lens]).flatten())
ximage = xim.copy()
yimage = yim.copy()
for i,ilens in enumerate(lens):
if ilens.__class__.__name__ == 'SIELens': ilens.deflect(xim,yim,Dd,Ds,Dds)
elif ilens.__class__.__name__ == 'ExternalShear': ilens.deflect(xim,yim,lens[0])
ximage += ilens.deflected_x; yimage += ilens.deflected_y
return ximage,yimage
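# Illustrative call (hedged: the lens object and distances are assumptions; any object
# exposing a .deflect() method, e.g. the SIELens imported from class_utils, works here):
#
#   lens = SIELens(...)                       # lens model with a .deflect() method
#   Dd, Ds, Dds = ...                         # angular diameter distances, e.g. from Planck15
#   xsrc, ysrc = LensRayTrace(xim, yim, lens, Dd, Ds, Dds)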
def GenerateLensingGrid(data=None,xmax=None,emissionbox=[-5,5,-5,5],fieldres=None,emitres=None):
"""
Routine to generate two grids for lensing. The first will be a lower-resolution
grid with resolution determined by fieldres and size determined
by xmax. The second is a much higher resolution grid which will be used for
the lensing itself, with resolution determined by emitres and size
determined from emissionbox - i.e., emissionbox should contain the coordinates
which conservatively encompass the real emission, so we only have to lens that part
of the field at high resolution.
Since we're going to be FFT'ing with these coordinates, the resolution isn't
directly set-able. For the low-res full-field map, it instead is set to the next-higher
power of 2 from what would be expected from having ~4 resolution elements across
the synthesized beam.
Inputs:
data:
A Visdata object, used to determine the resolutions of
the two grids (based on the image size or maximum uvdistance in the dataset)
xmax:
Field size for the low-resolution grid in arcsec, which will extend from
(-xmax,-xmax) to (+xmax,+xmax), e.g. (-30,-30) to (+30,+30)arcsec. Should be
at least a bit bigger than the primary beam. Not needed for images.
emissionbox:
A 1x4 list of [xmin,xmax,ymin,ymax] defining a box (in arcsec) which contains
the source emission. Coordinates should be given in arcsec relative to the
pointing/image center.
fieldres,emitres:
Resolutions of the coarse, full-field and fine (lensed) field, in arcsec.
If not given, suitable values will be calculated from the visibilities.
fieldres is unnecessary for images.
Returns:
If there are any Visdata objects in the datasets, returns:
xmapfield,ymapfield:
2xN matrices containing x and y coordinates for the full-field, lower-resolution
grid, in arcsec.
xmapemission,ymapemission:
2xN matrices containing x and y coordinates for the smaller, very high resolution
grid, in arcsec.
indices:
A [4x1] array containing the indices of xmapfield,ymapfield which overlap with
the high resolution grid.
"""
# Factors higher-resolution than (1/2*max(uvdist)) to make the field and emission grids
Nover_field = 4.
Nover_emission = 8.
# Allow multiple visdata objects to be passed, pick the highest resolution point of all
uvmax = 0.
try:
for vis in data:
uvmax = max(uvmax,vis.uvdist.max())
except TypeError:
uvmax = data.uvdist.max()
# Calculate resolutions of the grids
if fieldres is None: fieldres = (2*Nover_field*uvmax)**-1.
else: fieldres *= arcsec2rad
if emitres is None: emitres = (2*Nover_emission*uvmax)**-1.
else: emitres *= arcsec2rad
# Calculate the field grid size as a power of 2.
Nfield = 2**np.ceil(np.log2(2*np.abs(xmax)*arcsec2rad/fieldres))
# Calculate the grid coordinates for the larger field.
fieldcoords = np.linspace(- | np.abs(xmax) | numpy.abs |
import os
import torch
import numpy as np
from tqdm import tqdm
import json
from torch.utils.data import Dataset, DataLoader
from arcface.resnet import ResNet
from arcface.googlenet import GoogLeNet
from arcface.inception_v4 import InceptionV4
from arcface.inceptionresnet_v2 import InceptionResNetV2
from arcface.densenet import DenseNet
from arcface.resnet_cbam import ResNetCBAM
import torchvision.transforms as transforms
import cv2
import random
import jieba
from autoaugment import rand_augment_transform
from PIL import Image
'''
for image-text match
'''
class ITMatchTrain(Dataset):
def __init__(self, opt):
arcfaceDataset = ArcfaceDataset(root_dir=opt.data_path, mode="train", size=(opt.size, opt.size), imgORvdo='video')
batch_size = 256
training_params = {"batch_size": batch_size,
"shuffle": False,
"drop_last": False,
"num_workers": opt.workers}
arcfaceLoader = DataLoader(arcfaceDataset, **training_params)
self.vocab_size = arcfaceDataset.vocab_size
if opt.network == 'resnet':
model = ResNet(opt)
b_name = opt.network+'_'+opt.mode+'_{}'.format(opt.num_layers_r)
elif opt.network == 'googlenet':
model = GoogLeNet(opt)
b_name = opt.network
elif opt.network == 'inceptionv4':
model = InceptionV4(opt)
b_name = opt.network
elif opt.network == 'inceptionresnetv2':
model = InceptionResNetV2(opt)
b_name = opt.network
elif opt.network == 'densenet':
model = DenseNet(opt)
b_name = opt.network+'_{}'.format(opt.num_layers_d)
elif opt.network == 'resnet_cbam':
model = ResNetCBAM(opt)
b_name = opt.network+'_{}'.format(opt.num_layers_c)
else:
raise RuntimeError('Cannot Find the Model: {}'.format(opt.network))
model.load_state_dict(torch.load(os.path.join(opt.saved_path, b_name+'.pth')))
model.cuda()
model.eval()
self.model_name = b_name
self.features = torch.zeros((len(arcfaceDataset), opt.embedding_size))
self.texts = torch.zeros((len(arcfaceDataset), 64)).long()
self.instances = torch.zeros((len(arcfaceDataset))).long()
print('Calculating features...')
for i, d in enumerate(tqdm(arcfaceLoader)):
# img = d['img'].cuda()
text = d['text']
instance = d['instance']
# with torch.no_grad():
# feature = model(img).cpu()
# self.features[i*batch_size:(i+1)*batch_size] = feature
self.texts[i*batch_size:(i+1)*batch_size] = text
self.instances[i*batch_size:(i+1)*batch_size] = instance
def __len__(self):
return self.texts.size(0)
def __getitem__(self, index):
text = self.texts[index]
# feature = self.features[index]
feature = None
instance = self.instances[index]
# return {'feature': feature, 'text':text, 'instance':instance}
return {'text':text, 'instance':instance}
class ITMatchValidation(Dataset):
def __init__(self, size=(224, 224), root_dir='data/validation_instance/', maxLen=64, PAD=0, imgORvdo='video'):
self.root_dir = root_dir
self.size = size
text2num = Text2Num(maxLen=maxLen, root_dir='data', PAD=PAD)
self.vocab_size = text2num.vocab_size
assert imgORvdo in ['image', 'video']
tat = 'validation_'+imgORvdo+'s'
# tat = 'train_'+imgORvdo+'s'
with open(os.path.join('data', tat+'_text.json'), 'r') as f:
textDic = json.load(f)
for k in textDic.keys():
textDic[k] = text2num(textDic[k])
instances = os.listdir(root_dir)
self.items = []
print('Loading Data...')
for instance in tqdm(instances):
imgs = os.listdir(root_dir+instance)
l = []
for img in imgs:
if imgORvdo in img:
l.append(os.path.join(instance, img))
text_name = img.split(instance)[-1].split('_')[0]
l.append(textDic[text_name])
break
if len(l) < 2:
continue
self.items.append(l)
print('Done')
self.transform = transforms.Normalize(
mean=[0.55574415, 0.51230767, 0.51123354],
std=[0.21303795, 0.21604613, 0.21273348])
def __len__(self):
return len(self.items)
def __getitem__(self, index):
imgPath, text = self.items[index]
text = torch.Tensor(text).long()
# img = np.load(os.path.join(self.root_dir, imgPath))
img = cv2.imread(os.path.join(self.root_dir, imgPath))
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
img = img.astype(np.float32) / 255
hi, wi, ci = img.shape
rh = (hi-self.size[0])//2
rw = (wi-self.size[1])//2
img = img[rh:self.size[0]+rh, rw:self.size[1]+rw, :]
img = torch.from_numpy(img)
img = img.permute(2, 0, 1)
img = self.transform(img)
return {
'img': img,
'text': text
}
'''
for text
'''
class Text2Num:
def __init__(self, maxLen, root_dir='data', PAD=0):
with open(os.path.join(root_dir, 'vocab.json'), 'r') as f:
self.vocab = json.load(f)
self.PAD = PAD
self.maxLen = maxLen
self.vocab_size = len(self.vocab)
def __call__(self, text):
words = jieba.cut(text, cut_all=False, HMM=True)
# l = [len(self.vocab)]# CLS
l = []
for w in words:
if w.strip() in self.vocab:
l.append(self.vocab[w.strip()])
if len(l) > self.maxLen:
l = l[:self.maxLen]
elif len(l) < self.maxLen:
l += [self.PAD]*(self.maxLen-len(l))
assert len(l) == self.maxLen
return l
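# Sketch of Text2Num in isolation (assumes data/vocab.json exists, as loaded above;
# the sample string is illustrative only):
#
#   t2n = Text2Num(maxLen=64, root_dir='data', PAD=0)
#   ids = t2n('some product title text')   # -> list of 64 vocab indices, PAD-filled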
'''
for efficientdet
'''
class EfficientdetDataset(Dataset):
def __init__(self, root_dir='data', mode='train', imgORvdo='all', transform=None, maxLen=64, PAD=0):
assert mode in ['train', 'validation']
assert imgORvdo in ['image', 'video', 'all']
self.root_dir = root_dir
self.transform = transform
text2num = Text2Num(maxLen=maxLen, root_dir=root_dir, PAD=PAD)
self.vocab_size = text2num.vocab_size
label_file = 'label.json'
with open(os.path.join(root_dir, label_file), 'r') as f:
self.labelDic = json.load(f)
self.num_classes = len(self.labelDic['label2index'])
if imgORvdo == 'image':
tats = [mode + '_images']
elif imgORvdo == 'video':
tats = [mode + '_videos']
else:
tats = [mode + '_images', mode + '_videos']
self.textDic = {}
ds = []
for t in tats:
with open(os.path.join(root_dir, t+'_annotation.json'), 'r') as f:
ds.append(json.load(f))
with open(os.path.join(root_dir, t+'_text.json'), 'r') as f:
self.textDic[t] = json.load(f)
for k in self.textDic.keys():
for kk in self.textDic[k].keys():
self.textDic[k][kk] = text2num(self.textDic[k][kk])
ls = [d['annotations'] for d in ds]
self.images = []
print('Loading {} {} data...'.format(mode, imgORvdo))
for i, l in enumerate(ls):
for d in l:
if len(d['annotations']) == 0:
continue
t = []
t.append(os.path.join(tats[i], d['img_name']))
t.append(d['annotations'])
t.append(d['img_name'])
t.append(tats[i])
self.images.append(t)
# print(len(self.images))
# self.images = self.images[:1000]
print('Done')
def __len__(self):
return len(self.images)
def __getitem__(self, index):
imgPath, annotationsList, imgName, t = self.images[index]
text_name = imgName.split('_')[0]
text = self.textDic[t][text_name]
text = torch.Tensor(text).long()
img = cv2.imread(os.path.join(self.root_dir, imgPath))
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
img = img.astype(np.float32) / 255
annotations = np.zeros((len(annotationsList), 6))
for i, annotationDic in enumerate(annotationsList):
annotation = np.zeros((1, 6))
annotation[0, :4] = annotationDic['box']
annotation[0, 4] = annotationDic['label']
if annotationDic['instance_id'] > 0:
annotation[0, 5] = 1
else:
annotation[0, 5] = 0
annotations[i:i+1, :] = annotation
# annotations = np.append(annotations, annotation, axis=0)
sample = {'img': img, 'annot': annotations, 'text': text}
if self.transform:
sample = self.transform(sample)
return sample
def label2index(self, label):
return self.labelDic['label2index'][label]
def index2label(self, index):
return self.labelDic['index2label'][str(index)]
def getImagePath(self, index):
imgPath, annotationsList, imgName, t = self.images[index]
return imgPath
def getImageInfo(self, index):
imgPath, annotationsList, imgName, t = self.images[index]
imgID, frame = imgName[:-4].split('_')
return imgPath, imgID, frame
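# Hedged example of direct use (no transform; keys match the sample dict built above):
#
#   train_set = EfficientdetDataset(root_dir='data', mode='train', imgORvdo='all')
#   sample = train_set[0]   # dict with 'img', 'annot' (N x 6 boxes), 'text'
#   print(train_set.num_classes, train_set.vocab_size)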
class EfficientdetDatasetVideo(Dataset):
def __init__(self, root_dir='data', mode='train', imgORvdo='video', transform=None, maxLen=64, PAD=0):
assert mode in ['train', 'validation']
assert imgORvdo in ['video']
self.root_dir = root_dir
self.transform = transform
text2num = Text2Num(maxLen=maxLen, root_dir=root_dir, PAD=PAD)
self.vocab_size = text2num.vocab_size
label_file = 'label.json'
with open(os.path.join(root_dir, label_file), 'r') as f:
self.labelDic = json.load(f)
self.num_classes = len(self.labelDic['label2index'])
tats = [mode + '_videos']
self.textDic = {}
ds = []
for t in tats:
with open(os.path.join(root_dir, t+'_annotation.json'), 'r') as f:
ds.append(json.load(f))
with open(os.path.join(root_dir, t+'_text.json'), 'r') as f:
self.textDic[t] = json.load(f)
for k in self.textDic.keys():
for kk in self.textDic[k].keys():
self.textDic[k][kk] = text2num(self.textDic[k][kk])
ls = [d['annotations'] for d in ds]
self.images = []
self.videos = {}
print('Loading {} {} data...'.format(mode, imgORvdo))
for i, l in enumerate(ls):
for d in l:
if d['img_name'][:6] not in self.videos:
self.videos[d['img_name'][:6]] = []
# if len(d['annotations']) == 0:
# continue
t = []
t.append(os.path.join(tats[i], d['img_name']))
t.append(d['annotations'])
t.append(d['img_name'])
t.append(tats[i])
self.videos[d['img_name'][:6]].append(t)
# self.images.append(t)
self.videos = list(self.videos.values())
for l in self.videos:
assert len(l) == 10
# print(len(self.images))
self.videos = self.videos[:100]
print('Done')
def __len__(self):
return len(self.videos)
def __getitem__(self, index):
lst = self.videos[index]
datas = []
for imgPath, annotationsList, imgName, t in lst:
# imgPath, annotationsList, imgName, t = self.images[index]
text_name = imgName.split('_')[0]
text = self.textDic[t][text_name]
text = torch.Tensor(text).long()
img = cv2.imread(os.path.join(self.root_dir, imgPath))
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
img = img.astype(np.float32) / 255
annotations = np.zeros((len(annotationsList), 6))
for i, annotationDic in enumerate(annotationsList):
annotation = np.zeros((1, 6))
annotation[0, :4] = annotationDic['box']
annotation[0, 4] = annotationDic['label']
if annotationDic['instance_id'] > 0:
annotation[0, 5] = 1
else:
annotation[0, 5] = 0
annotations[i:i+1, :] = annotation
# annotations = np.append(annotations, annotation, axis=0)
sample = {'img': img, 'annot': annotations, 'text': text}
datas.append(sample)
if self.transform:
datas = self.transform(datas)
return datas
# def label2index(self, label):
# return self.labelDic['label2index'][label]
# def index2label(self, index):
# return self.labelDic['index2label'][str(index)]
# def getImagePath(self, index):
# imgPath, annotationsList, imgName, t = self.images[index]
# return imgPath
# def getImageInfo(self, index):
# imgPath, annotationsList, imgName, t = self.images[index]
# imgID, frame = imgName[:-4].split('_')
# return imgPath, imgID, frame
'''
for arcface
'''
class ArcfaceDataset(Dataset):
def __init__(self, root_dir='data', mode='train', size=(112, 112), flip_x=0.5, maxLen=64, PAD=0, imgORvdo='all'):
assert mode in ['train', 'all']
assert imgORvdo in ['all', 'image', 'video']
mean=[0.55574415, 0.51230767, 0.51123354]
aa_params = dict(
translate_const=int(size[0] * 0.40),
img_mean=tuple([min(255, round(255 * x)) for x in mean]),
)
self.randAug = rand_augment_transform('rand-m9-n3-mstd0.5', aa_params)
self.root_dir = root_dir
self.size = size
self.flip_x = flip_x
if mode == 'train':
modes = ['train']
instanceFile = 'instanceID.json'
elif mode == 'train_2':
modes = ['train', 'validation_2']
instanceFile = 'instanceID_2.json'
elif mode == 'all':
modes = ['train', 'validation_2', 'validation']
instanceFile = 'instanceID_all.json'
with open(os.path.join(root_dir, instanceFile), 'r') as f:
self.clsDic = json.load(f)
with open(os.path.join(root_dir, 'instance2label.json'), 'r') as f:
self.instance2label = json.load(f)
text2num = Text2Num(maxLen=maxLen, root_dir=root_dir, PAD=PAD)
self.vocab_size = text2num.vocab_size
self.images = []
self.textDics = {}
for mode in modes:
if imgORvdo == 'all':
tats = [mode + '_images', mode + '_videos']
elif imgORvdo == 'image':
tats = [mode + '_images']
elif imgORvdo == 'video':
tats = [mode + '_videos']
# img_tat = mode + '_images'
# vdo_tat = mode + '_videos'
savePath = mode + '_instance'
self.savePath = os.path.join(root_dir, savePath)
d = []
textDic = []
for tat in tats:
with open(os.path.join(root_dir, tat+'_annotation.json'), 'r') as f:
d.append(json.load(f))
with open(os.path.join(root_dir, tat+'_text.json'), 'r') as f:
textDic.append(json.load(f))
for i in range(len(textDic)):
for k in textDic[i].keys():
textDic[i][k] = text2num(textDic[i][k])
self.textDics[mode] = textDic
l = [dd['annotations'] for dd in d]
print('Loading data...')
for i, ll in enumerate(l):
for d in ll:
for dd in d['annotations']:
if dd['instance_id'] > 0 and str(dd['instance_id']) in self.clsDic.keys():
t = []
t.append(os.path.join(self.savePath, str(dd['instance_id']), tats[i]+str(dd['instance_id'])+d['img_name']))
t.append(dd['instance_id'])
t.append(d['img_name'].split('_')[0])
t.append(i)
t.append(mode)
self.images.append(t)
self.num_classes = len(self.clsDic)
self.num_labels = len(set(self.instance2label.values()))
# self.images = self.images[:2222]
print('Done')
self.transform = transforms.Normalize(
mean=[0.55574415, 0.51230767, 0.51123354],
std=[0.21303795, 0.21604613, 0.21273348])
def __len__(self):
return len(self.images)
def __getitem__(self, index):
imgName, instance_id, textName, iORv, mode = self.images[index]
img = np.load(imgName[:-4]+'.npy')
# img = cv2.imread(imgName[:-4]+'.jpg')
# img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
# img = img.astype(np.float32) / 255
# '''randAug'''
# img = Image.fromarray(np.uint8(img*255))
# img = self.randAug(img)
# img.save('aaa.jpg')
# img = np.array(img)
# img = img.astype(np.float32) / 255
# '''randAug'''
text = self.textDics[mode][iORv][textName]
text = torch.tensor(text).long()
iORv = torch.tensor(iORv).long()
h, w, c = img.shape
# print(h,w,c)
rh = random.randint(0, h-256)
rw = random.randint(0, w-256)
img = img[rh:256+rh, rw:256+rw, :]
img = cv2.resize(img, self.size)
# '''random erasing'''
# if np.random.rand() < 0.5:
# w = h = 256
# while w >= 256 or h >= 256:
# r = np.random.uniform(0.3, 1/0.3)
# s = 256*256*np.random.uniform(0.02, 0.4)
# w = int(np.sqrt(s*r))
# h = int(np.sqrt(s/r))
# s_w = random.randint(0, 256-w)
# s_h = random.randint(0, 256-h)
# img[s_h:s_h+h, s_w:s_w+w, :] = 0
# print(img.shape)
instance = torch.tensor(self.clsDic[str(instance_id)])
label = torch.tensor(self.instance2label[str(instance_id)])
if np.random.rand() < self.flip_x:
img = img[:, ::-1, :].copy()
img = torch.from_numpy(img)
img = img.permute(2, 0, 1)
img = self.transform(img)
return {'img':img, 'instance':instance, 'label':label, 'text': text, 'iORv': iORv}
# return {'instance':instance, 'label':label, 'text': text, 'iORv': iORv}
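# Typical consumption pattern (a hedged sketch; batch size and worker count are arbitrary,
# and DataLoader is the one imported at the top of this file):
#
#   ds = ArcfaceDataset(root_dir='data', mode='train')
#   loader = DataLoader(ds, batch_size=64, shuffle=True, num_workers=4, drop_last=True)
#   for batch in loader:
#       imgs, instances = batch['img'], batch['instance']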
class ArcfaceDatasetSeparate(Dataset):
def __init__(self, root_dir='data', mode='train', size=(112, 112), flip_x=0.5, maxLen=64, PAD=0, imgORvdo='all'):
assert mode in ['train']
assert imgORvdo in ['all']
self.root_dir = root_dir
self.size = size
self.flip_x = flip_x
if imgORvdo == 'all':
tats = [mode + '_images', mode + '_videos']
elif imgORvdo == 'image':
tats = [mode + '_images']
elif imgORvdo == 'video':
tats = [mode + '_videos']
savePath = mode + '_instance'
self.savePath = os.path.join(root_dir, savePath)
text2num = Text2Num(maxLen=maxLen, root_dir=root_dir, PAD=PAD)
self.vocab_size = text2num.vocab_size
d = []
self.textDic = []
for tat in tats:
with open(os.path.join(root_dir, tat+'_annotation.json'), 'r') as f:
d.append(json.load(f))
with open(os.path.join(root_dir, tat+'_text.json'), 'r') as f:
self.textDic.append(json.load(f))
for i in range(len(self.textDic)):
for k in self.textDic[i].keys():
self.textDic[i][k] = text2num(self.textDic[i][k])
l = [dd['annotations'] for dd in d]
self.images = []
with open(os.path.join(root_dir, 'instanceID.json'), 'r') as f:
self.clsDic = json.load(f)
with open(os.path.join(root_dir, 'instance2label.json'), 'r') as f:
self.instance2label = json.load(f)
names = ['image', 'video']
print('Loading data...')
for i, ll in enumerate(l):
for d in ll:
for dd in d['annotations']:
if dd['instance_id'] > 0 and str(dd['instance_id']) in self.clsDic.keys():
t = []
t.append(os.path.join(str(dd['instance_id']), tats[i]+str(dd['instance_id'])+d['img_name']))
t.append(dd['instance_id'])
t.append(d['img_name'].split('_')[0])
t.append(names[i])
self.images.append(t)
self.num_classes = len(self.clsDic)
self.num_labels = len(set(self.instance2label.values()))
self.dic = {}
for i in range(len(self.images)):
imgName, instance_id, textName, iORv = self.images[i]
if instance_id not in self.dic:
self.dic[instance_id] = {}
self.dic[instance_id]['image'] = []
self.dic[instance_id]['video'] = []
self.dic[instance_id][iORv].append(i)
for k in self.dic.keys():
if len(self.dic[k]['image']) == 0 or len(self.dic[k]['video']) == 0:
del self.dic[k]
self.dic = list(self.dic.items())
# self.images = self.images[:2222]
print('Done')
self.transform = transforms.Normalize(
mean=[0.55574415, 0.51230767, 0.51123354],
std=[0.21303795, 0.21604613, 0.21273348])
def __len__(self):
return len(self.dic)
def __getitem__(self, index):
imgIndex = random.choice(self.dic[index][1]['image'])
vdoIndex = random.choice(self.dic[index][1]['video'])
sample = []
instances = []
for index in [imgIndex, vdoIndex]:
imgName, instance_id, textName, iORv = self.images[index]
img = np.load(os.path.join(self.savePath, imgName)[:-4]+'.npy')
# text = self.textDic[iORv][textName]
# text = torch.tensor(text).long()
# iORv = torch.tensor(iORv).long()
h, w, c = img.shape
rh_1 = random.randint(0, h-224)
rh_2 = random.randint(224, h)
rw_1 = random.randint(0, w-224)
rw_2 = random.randint(224, w)
img = img[rh_1:rh_2, rw_1:rw_2, :]
img = cv2.resize(img, self.size)
instances.append(torch.tensor(self.clsDic[str(instance_id)]))
# label = torch.tensor(self.instance2label[str(instance_id)])
if np.random.rand() < self.flip_x:
img = img[:, ::-1, :].copy()
img = torch.from_numpy(img)
img = img.permute(2, 0, 1)
img = self.transform(img)
sample.append(img)
assert instances[0] == instances[1]
return {'img': sample[0], 'vdo':sample[1], 'instance':instances[0]}
class TripletDataset(Dataset):
def __init__(self, root_dir='data', mode='train', size=(112, 112), flip_x=0.5):
assert mode in ['train']
self.root_dir = root_dir
self.size = size
self.flip_x = flip_x
img_tat = mode + '_images'
vdo_tat = mode + '_videos'
savePath = mode + '_instance'
self.savePath = os.path.join(root_dir, savePath)
with open(os.path.join(root_dir, img_tat+'_annotation.json'), 'r') as f:
d_i = json.load(f)
with open(os.path.join(root_dir, vdo_tat+'_annotation.json'), 'r') as f:
d_v = json.load(f)
with open(os.path.join(root_dir, 'instanceID.json'), 'r') as f:
self.clsDic = json.load(f)
with open(os.path.join(root_dir, 'instance2label.json'), 'r') as f:
instance2label = json.load(f)
l_i = d_i['annotations']
l_v = d_v['annotations']
self.images = []
print('Loading data...')
for d in l_i:
for dd in d['annotations']:
if dd['instance_id'] > 0 and str(dd['instance_id']) in self.clsDic.keys():
t = []
t.append(os.path.join(str(dd['instance_id']), img_tat+str(dd['instance_id'])+d['img_name']))
t.append(self.clsDic[str(dd['instance_id'])])
t.append(instance2label[str(dd['instance_id'])])
self.images.append(t)
for d in l_v:
for dd in d['annotations']:
if dd['instance_id'] > 0 and str(dd['instance_id']) in self.clsDic.keys():
t = []
t.append(os.path.join(str(dd['instance_id']), vdo_tat+str(dd['instance_id'])+d['img_name']))
t.append(self.clsDic[str(dd['instance_id'])])
t.append(instance2label[str(dd['instance_id'])])
self.images.append(t)
self.num_classes = len(self.clsDic)
self.num_labels = len(set(instance2label.values()))
self.cls_ins_dic = {}
for i, l in enumerate(self.images):
imgName, instance_id, label = l
if label not in self.cls_ins_dic:
self.cls_ins_dic[label] = {}
if instance_id not in self.cls_ins_dic[label]:
self.cls_ins_dic[label][instance_id] = []
self.cls_ins_dic[label][instance_id].append(i)
for k in self.cls_ins_dic.keys():
if len(self.cls_ins_dic[k]) < 2:
raise RuntimeError('size of self.cls_ins_dic[k] must be larger than 1')
print('Done')
self.transform = transforms.Normalize(
mean=[0.55574415, 0.51230767, 0.51123354],
std=[0.21303795, 0.21604613, 0.21273348])
def __len__(self):
return len(self.images)
def __getitem__(self, index):
imgName_q, instance_id_q, label_q = self.images[index]
p_index = index
while p_index == index:
p_index = random.choice(self.cls_ins_dic[label_q][instance_id_q])
instance_id_n = instance_id_q
while instance_id_n == instance_id_q:
instance_id_n = random.choice(list(self.cls_ins_dic[label_q].keys()))
n_index = random.choice(self.cls_ins_dic[label_q][instance_id_n])
imgName_p, instance_id_p, label_p = self.images[p_index]
imgName_n, instance_id_n, label_n = self.images[n_index]
assert len(set([label_q, label_p, label_n])) == 1
assert len(set([instance_id_q, instance_id_p])) == 1
instance_id_q = torch.tensor(instance_id_q)
instance_id_p = torch.tensor(instance_id_p)
instance_id_n = torch.tensor(instance_id_n)
img_q = np.load(os.path.join(self.savePath, imgName_q)[:-4]+'.npy')
img_p = np.load(os.path.join(self.savePath, imgName_p)[:-4]+'.npy')
img_n = np.load(os.path.join(self.savePath, imgName_n)[:-4]+'.npy')
hq, wq, cq = img_q.shape
hp, wp, cp = img_p.shape
hn, wn, cn = img_n.shape
rh = random.randint(0, hq-self.size[0])
rw = random.randint(0, wq-self.size[1])
img_q = img_q[rh:self.size[0]+rh, rw:self.size[1]+rw, :]
rh = random.randint(0, hp-self.size[0])
rw = random.randint(0, wp-self.size[1])
img_p = img_p[rh:self.size[0]+rh, rw:self.size[1]+rw, :]
rh = random.randint(0, hn-self.size[0])
rw = random.randint(0, wn-self.size[1])
img_n = img_n[rh:self.size[0]+rh, rw:self.size[1]+rw, :]
if np.random.rand() < self.flip_x:
img_q = img_q[:, ::-1, :].copy()
if np.random.rand() < self.flip_x:
img_p = img_p[:, ::-1, :].copy()
if np.random.rand() < self.flip_x:
img_n = img_n[:, ::-1, :].copy()
img_q = torch.from_numpy(img_q).permute(2, 0, 1)
img_p = torch.from_numpy(img_p).permute(2, 0, 1)
img_n = torch.from_numpy(img_n).permute(2, 0, 1)
img_q = self.transform(img_q)
img_p = self.transform(img_p)
img_n = self.transform(img_n)
return {
'img_q':img_q,
'img_p':img_p,
'img_n':img_n,
'img_q_instance':instance_id_q,
'img_p_instance':instance_id_p,
'img_n_instance':instance_id_n,
}
class HardTripletDataset(Dataset):
def __init__(self, root_dir='data', mode='train', size=(112, 112), flip_x=0.5, n_samples=4):
assert mode in ['train', 'all', 'train_2']
mean=[0.55574415, 0.51230767, 0.51123354]
aa_params = dict(
translate_const=int(size[0] * 0.40),
img_mean=tuple([min(255, round(255 * x)) for x in mean]),
)
self.randAug = rand_augment_transform('rand-m9-n3-mstd0.5', aa_params)
self.root_dir = root_dir
self.size = size
self.flip_x = flip_x
self.n_samples = n_samples
if mode == 'train':
modes = ['train']
instanceFile = 'instanceID.json'
elif mode == 'train_2':
modes = ['train', 'validation_2']
instanceFile = 'instanceID_2.json'
elif mode == 'all':
modes = ['train', 'validation_2', 'validation']
instanceFile = 'instanceID_all.json'
with open(os.path.join(root_dir, instanceFile), 'r') as f:
self.clsDic = json.load(f)
self.samples = {}
for mode in modes:
img_tat = mode + '_images'
vdo_tat = mode + '_videos'
savePath = mode + '_instance'
savePath = os.path.join(root_dir, savePath)
with open(os.path.join(root_dir, img_tat+'_annotation.json'), 'r') as f:
d_i = json.load(f)
with open(os.path.join(root_dir, vdo_tat+'_annotation.json'), 'r') as f:
d_v = json.load(f)
l_i = d_i['annotations']
l_v = d_v['annotations']
print('Loading data...')
for d in l_i:
for dd in d['annotations']:
if dd['instance_id'] > 0 and str(dd['instance_id']) in self.clsDic.keys():
instance = self.clsDic[str(dd['instance_id'])]
if instance not in self.samples:
self.samples[instance] = []
self.samples[instance].append(
os.path.join(savePath, str(dd['instance_id']), img_tat+str(dd['instance_id'])+d['img_name']))
for d in l_v:
for dd in d['annotations']:
if dd['instance_id'] > 0 and str(dd['instance_id']) in self.clsDic.keys():
instance = self.clsDic[str(dd['instance_id'])]
if instance not in self.samples:
self.samples[instance] = []
self.samples[instance].append(
os.path.join(savePath, str(dd['instance_id']), vdo_tat+str(dd['instance_id'])+d['img_name']))
self.num_classes = len(self.clsDic)
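        # Repeat short image lists so that every instance provides at least n_samples crops.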
for k in self.samples.keys():
while len(self.samples[k]) < n_samples:
self.samples[k] *= 2
assert len(self.samples[k]) >= n_samples
self.instances = list(self.samples.keys())
print('Done')
self.transform = transforms.Normalize(
mean=[0.55574415, 0.51230767, 0.51123354],
std=[0.21303795, 0.21604613, 0.21273348])
def __len__(self):
return len(self.instances)
def __getitem__(self, index):
instance = self.instances[index]
imgPaths = random.sample(self.samples[instance], self.n_samples)
imgs = []
instances = []
for imgPath in imgPaths:
img = np.load(imgPath[:-4]+'.npy')
# '''randAug'''
# img = Image.fromarray(np.uint8(img*255))
# img = self.randAug(img)
# img.save('aaa.jpg')
# img = np.array(img)
# img = img.astype(np.float32) / 255
# '''randAug'''
assert self.size[0] == 256
if self.size[0] != 256:
r = 256 / self.size[0]
img = cv2.resize(img, (int(270/r), int(270/r)))
h, w, c = img.shape
rh = random.randint(0, h-self.size[0])
rw = random.randint(0, w-self.size[1])
img = img[rh:self.size[0]+rh, rw:self.size[1]+rw, :]
# if np.random.rand() < 0.5:
# w = h = 256
# while w >= 256 or h >= 256:
# r = np.random.uniform(0.3, 1/0.3)
# s = 256*256*np.random.uniform(0.02, 0.4)
# w = int(np.sqrt(s*r))
# h = int(np.sqrt(s/r))
# s_w = random.randint(0, 256-w)
# s_h = random.randint(0, 256-h)
# img[s_h:s_h+h, s_w:s_w+w, :] = 0
instance_t = torch.tensor(instance)
            if np.random.rand() < self.flip_x:
                img = img[:, ::-1, :].copy()
            img = torch.from_numpy(img).permute(2, 0, 1)
            img = self.transform(img)
            imgs.append(img)
            instances.append(instance_t)
#
# lines.py
#
# purpose: Reproduce LineCurvature2D.m and LineNormals2D.m
# author: <NAME>
# e-mail: <EMAIL>
# web: http://ocefpaf.tiddlyspot.com/
# created: 17-Jul-2012
# modified: Mon 02 Mar 2015 10:07:06 AM BRT
#
# obs:
#
import numpy as np
def LineNormals2D(Vertices, Lines):
r"""This function calculates the normals, of the line points using the
neighbouring points of each contour point, and forward an backward
differences on the end points.
N = LineNormals2D(V, L)
inputs,
V : List of points/vertices 2 x M
(optional)
Lines : A N x 2 list of line pieces, by indices of the vertices
(if not set assume Lines=[1 2; 2 3 ; ... ; M-1 M])
outputs,
N : The normals of the Vertices 2 x M
Examples
--------
>>> import numpy as np
>>> import matplotlib.pyplot as plt
>>> data = np.load('testdata.npz')
>>> Lines, Vertices = data['Lines'], data['Vertices']
>>> N = LineNormals2D(Vertices, Lines)
>>> fig, ax = plt.subplots(nrows=1, ncols=1)
>>> _ = ax.plot(np.c_[Vertices[:, 0], Vertices[:,0 ] + 10 * N[:, 0]].T,
... np.c_[Vertices[:, 1], Vertices[:, 1] + 10 * N[:, 1]].T)
Function based on LineNormals2D.m written by
D.Kroon University of Twente (August 2011)
"""
eps = np.spacing(1)
if isinstance(Lines, np.ndarray):
pass
elif not Lines:
Lines = np.c_[np.arange(1, Vertices.shape[0]),
np.arange(2, Vertices.shape[0] + 1)]
else:
print("Lines is passed but not recognized.")
# Calculate tangent vectors.
DT = Vertices[Lines[:, 0] - 1, :] - Vertices[Lines[:, 1] - 1, :]
# Make influence of tangent vector 1/Distance (Weighted Central
# Differences. Points which are closer give a more accurate estimate of
# the normal).
LL = np.sqrt(DT[:, 0] ** 2 + DT[:, 1] ** 2)
DT[:, 0] = DT[:, 0] / np.maximum(LL ** 2, eps)
DT[:, 1] = DT[:, 1] / np.maximum(LL ** 2, eps)
D1 = np.zeros_like(Vertices)
D2 = np.zeros_like(Vertices)
D1[Lines[:, 0] - 1, :] = DT
D2[Lines[:, 1] - 1, :] = DT
D = D1 + D2
# Normalize the normal.
LL = np.sqrt(D[:, 0] ** 2 + D[:, 1] ** 2)
N = np.zeros_like(D)
N[:, 0] = -D[:, 1] / LL
N[:, 1] = D[:, 0] / LL
return N
def LineCurvature2D(Vertices, Lines=None):
r"""This function calculates the curvature of a 2D line. It first fits
polygons to the points. Then calculates the analytical curvature from
the polygons.
k = LineCurvature2D(Vertices,Lines)
inputs,
Vertices : A M x 2 list of line points.
(optional)
Lines : A N x 2 list of line pieces, by indices of the vertices
(if not set assume Lines=[1 2; 2 3 ; ... ; M-1 M])
outputs,
k : M x 1 Curvature values
Examples
--------
>>> import numpy as np
>>> import matplotlib.pyplot as plt
>>> data = np.load('testdata.npz', squeeze_me=True)
>>> Lines, Vertices = data['Lines'], data['Vertices']
>>> k = LineCurvature2D(Vertices, Lines)
>>> N = LineNormals2D(Vertices, Lines)
>>> k = k * 100
>>> fig, ax = plt.subplots(nrows=1, ncols=1)
>>> _ = ax.plot(np.c_[Vertices[:, 0], Vertices[:, 0] + k * N[:, 0]].T,
... np.c_[Vertices[:, 1], Vertices[:, 1] + k * N[:, 1]].T, 'g')
>>> _ = ax.plot(np.c_[Vertices[Lines[:, 0] - 1, 0],
... Vertices[Lines[:, 1] - 1, 0]].T,
... np.c_[Vertices[Lines[:, 0] - 1, 1],
... Vertices[Lines[:, 1] - 1, 1]].T, 'b')
>>> _ = ax.plot(Vertices[:, 0], Vertices[:, 1], 'r.')
Function based on LineCurvature2D.m written by
<NAME> of Twente (August 2011)
"""
# If no line-indices, assume a x[0] connected with x[1], x[2] with x[3].
if isinstance(Lines, np.ndarray):
pass
elif not Lines:
Lines = np.c_[np.arange(1, Vertices.shape[0]),
np.arange(2, Vertices.shape[0] + 1)]
else:
print("Lines is passed but not recognized.")
# Get left and right neighbor of each points.
    Na = np.zeros(Vertices.shape[0], dtype=int)
Nb = np.zeros_like(Na)
# As int because we use it to index an array...
Na[Lines[:, 0] - 1] = Lines[:, 1]
Nb[Lines[:, 1] - 1] = Lines[:, 0]
# Check for end of line points, without a left or right neighbor.
checkNa = Na == 0
checkNb = Nb == 0
Naa, Nbb = Na, Nb
Naa[checkNa] = np.where(checkNa)[0]
Nbb[checkNb] = np.where(checkNb)[0]
# If no left neighbor use two right neighbors, and the same for right.
Na[checkNa] = Nbb[Nbb[checkNa]]
Nb[checkNb] = Naa[Naa[checkNb]]
# ... Also, I remove `1` to get python indexing correctly.
Na -= 1
Nb -= 1
# Correct for sampling differences.
Ta = -np.sqrt(np.sum((Vertices - Vertices[Na, :]) ** 2, axis=1))
Tb = np.sqrt(np.sum((Vertices - Vertices[Nb, :]) ** 2, axis=1))
# If no left neighbor use two right neighbors, and the same for right.
Ta[checkNa] = -Ta[checkNa]
Tb[checkNb] = -Tb[checkNb]
x = np.c_[Vertices[Na, 0], Vertices[:, 0], Vertices[Nb, 0]]
y = np.c_[Vertices[Na, 1], Vertices[:, 1], Vertices[Nb, 1]]
M = np.c_[np.ones_like(Tb),
-Ta,
Ta ** 2,
              np.ones_like(Tb),
import tensorflow as tf
import os
import joblib
import numpy as np
from mpi4py import MPI
from baselines.common.vec_env.vec_frame_stack import VecFrameStack
# from coinrun.config import Config
from coinrun.config_dqn import Config
from coinrun import setup_utils, wrappers
import platform
import gym
class Scalarize:
"""
Convert a VecEnv into an Env
There is a minor difference between this and a normal Env, which is that
the final observation (when done=True) does not exist, and instead you will
receive the second to last observation a second time due to how the VecEnv
    interface handles resets. In addition, you cannot step this
    environment after done is True, since that is not possible for VecEnvs.
"""
def __init__(self, venv) -> None:
assert venv.num_envs == 1
self._venv = venv
self._waiting_for_reset = True
self._previous_obs = None
self.observation_space = self._venv.observation_space
self.action_space = self._venv.action_space
self.metadata = self._venv.metadata
# self.spec = self._venv.spec
self.reward_range = self._venv.reward_range
def _process_obs(self, obs):
if isinstance(obs, dict):
# dict space
scalar_obs = {}
for k, v in obs.items():
scalar_obs[k] = v[0]
return scalar_obs
else:
return obs[0]
def reset(self):
self._waiting_for_reset = False
obs = self._venv.reset()
self._previous_obs = obs
return self._process_obs(obs)
def step(self, action):
assert not self._waiting_for_reset
final_action = action
if isinstance(self.action_space, gym.spaces.Discrete):
final_action = np.array([action], dtype=self._venv.action_space.dtype)
else:
final_action = np.expand_dims(action, axis=0)
obs, rews, dones, infos = self._venv.step(final_action)
if dones[0]:
self._waiting_for_reset = True
obs = self._previous_obs
else:
self._previous_obs = obs
return self._process_obs(obs), rews[0], dones[0], infos[0]
def render(self, mode="human"):
if mode == "human":
return self._venv.render(mode=mode)
else:
return self._venv.get_images(mode=mode)[0]
def close(self):
return self._venv.close()
def seed(self, seed=None):
return self._venv.seed(seed)
@property
def unwrapped(self):
# it might make more sense to return the venv.unwrapped here
# except that the interface is different for a venv so things are unlikely to work
return self
def __repr__(self):
return f"<Scalarize venv={self._venv}>"
def make_general_env(num_env, seed=0, use_sub_proc=True):
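    # Build a vectorized CoinRun environment, optionally frame-stacked and wrapped
    # with epsilon-greedy action noise.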
from coinrun import coinrunenv
env = coinrunenv.make(Config.GAME_TYPE, num_env)
if Config.FRAME_STACK > 1:
env = VecFrameStack(env, Config.FRAME_STACK)
epsilon = Config.EPSILON_GREEDY
if epsilon > 0:
env = wrappers.EpsilonGreedyWrapper(env, epsilon)
return env
def file_to_path(filename):
return setup_utils.file_to_path(filename)
def load_all_params(sess):
load_params_for_scope(sess, 'model')
def load_params_for_scope(sess, scope, load_key='default'):
load_data = Config.get_load_data(load_key)
if load_data is None:
return False
params_dict = load_data['params']
if scope in params_dict:
print('Loading saved file for scope', scope)
loaded_params = params_dict[scope]
loaded_params, params = get_savable_params(loaded_params, scope, keep_heads=True)
restore_params(sess, loaded_params, params)
return True
def get_savable_params(loaded_params, scope, keep_heads=False):
params = tf.trainable_variables(scope)
filtered_params = []
filtered_loaded = []
if len(loaded_params) != len(params):
print('param mismatch', len(loaded_params), len(params))
assert(False)
for p, loaded_p in zip(params, loaded_params):
keep = True
if any((scope + '/' + x) in p.name for x in ['v','pi']):
keep = keep_heads
if keep:
filtered_params.append(p)
filtered_loaded.append(loaded_p)
else:
print('drop', p)
return filtered_loaded, filtered_params
def restore_params(sess, loaded_params, params):
if len(loaded_params) != len(params):
print('param mismatch', len(loaded_params), len(params))
assert(False)
restores = []
for p, loaded_p in zip(params, loaded_params):
print('restoring', p)
restores.append(p.assign(loaded_p))
sess.run(restores)
def save_params_in_scopes(sess, scopes, filename, base_dict=None):
data_dict = {}
if base_dict is not None:
data_dict.update(base_dict)
save_path = file_to_path(filename)
data_dict['args'] = Config.get_args_dict()
param_dict = {}
for scope in scopes:
params = tf.trainable_variables(scope)
if len(params) > 0:
print('saving scope', scope, filename)
ps = sess.run(params)
param_dict[scope] = ps
data_dict['params'] = param_dict
joblib.dump(data_dict, save_path)
def setup_mpi_gpus():
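    # Pin each local MPI rank to one of the available GPUs via CUDA_VISIBLE_DEVICES.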
if 'RCALL_NUM_GPU' not in os.environ:
return
num_gpus = int(os.environ['RCALL_NUM_GPU'])
node_id = platform.node()
nodes = MPI.COMM_WORLD.allgather(node_id)
local_rank = len([n for n in nodes[:MPI.COMM_WORLD.Get_rank()] if n == node_id])
os.environ['CUDA_VISIBLE_DEVICES'] = str(local_rank % num_gpus)
def is_mpi_root():
return MPI.COMM_WORLD.Get_rank() == 0
def tf_initialize(sess):
sess.run(tf.initialize_all_variables())
sync_from_root(sess)
def sync_from_root(sess, vars=None):
if vars is None:
vars = tf.trainable_variables()
if Config.SYNC_FROM_ROOT:
rank = MPI.COMM_WORLD.Get_rank()
print('sync from root', rank)
for var in vars:
if rank == 0:
MPI.COMM_WORLD.bcast(sess.run(var))
else:
sess.run(tf.assign(var, MPI.COMM_WORLD.bcast(None)))
def mpi_average(values):
return mpi_average_comm(values, MPI.COMM_WORLD)
def mpi_average_comm(values, comm):
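    # Element-wise average of values across all ranks of the given MPI communicator.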
size = comm.size
x = np.array(values)
buf = np.zeros_like(x)
comm.Allreduce(x, buf, op=MPI.SUM)
buf = buf / size
return buf
def mpi_average_train_test(values):
return mpi_average_comm(values, Config.TRAIN_TEST_COMM)
def mpi_print(*args):
rank = MPI.COMM_WORLD.Get_rank()
if rank == 0:
print(*args)
def process_ep_buf(epinfobuf, tb_writer=None, suffix='', step=0):
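    # Mean episode reward over the episode-info buffer.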
rewards = [epinfo['r'] for epinfo in epinfobuf]
    rew_mean = np.nanmean(rewards)
# -*- coding: utf-8 -*-
"""
Created on Wed Mar 21 10:00:33 2018
@author: jdkern
"""
from __future__ import division
from sklearn import linear_model
from statsmodels.tsa.api import VAR
import scipy.stats as st
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
######################################################################
# LOAD
######################################################################
#import data
df_load = pd.read_excel('Synthetic_demand_pathflows/hist_demanddata.xlsx',sheet_name='hourly_load',header=0)
df_weather = pd.read_excel('Synthetic_demand_pathflows/hist_demanddata.xlsx',sheet_name='weather',header=0)
BPA_weights = pd.read_excel('Synthetic_demand_pathflows/hist_demanddata.xlsx',sheet_name='BPA_location_weights',header=0)
CAISO_weights = pd.read_excel('Synthetic_demand_pathflows/hist_demanddata.xlsx',sheet_name='CAISO_location_weights',header=0)
Name_list=pd.read_csv('Synthetic_demand_pathflows/Covariance_Calculation.csv')
Name_list=list(Name_list.loc['SALEM_T':])
Name_list=Name_list[1:]
df_wind=pd.read_csv('Synthetic_wind_power/wind_power_sim.csv',header=0)
sim_years = int(len(df_wind)/8760) + 3
sim_weather=pd.read_csv('Synthetic_weather/synthetic_weather_data.csv',header=0,index_col=0)
sim_weather = sim_weather.iloc[0:365*sim_years,:]
sim_weather = sim_weather.iloc[365:len(sim_weather)-730,:]
sim_weather = sim_weather.reset_index(drop=True)
#weekday designation
dow = df_weather.loc[:,'Weekday']
#generate simulated day of the week assuming starts from monday
count=0
sim_dow= np.zeros(len(sim_weather))
for i in range(0,len(sim_weather)):
count = count +1
if count <=5:
sim_dow[i]=1
elif count > 5:
sim_dow[i]=0
if count ==7:
count =0
#Generate a datelist
datelist=pd.date_range(pd.datetime(2017,1,1),periods=365).tolist()
sim_month=np.zeros(len(sim_weather))
sim_day=np.zeros(len(sim_weather))
sim_year=np.zeros(len(sim_weather))
count=0
for i in range(0,len(sim_weather)):
if count <=364:
sim_month[i]=datelist[count].month
sim_day[i]=datelist[count].day
sim_year[i]=datelist[count].year
else:
count=0
sim_month[i]=datelist[count].month
sim_day[i]=datelist[count].day
sim_year[i]=datelist[count].year
count=count+1
######################################################################
# BPAT
######################################################################
#Find the simulated data at the sites
col_BPA_T = ['SALEM_T','SEATTLE_T','PORTLAND_T','EUGENE_T','BOISE_T']
col_BPA_W = ['SALEM_W','SEATTLE_W','PORTLAND_W','EUGENE_W','BOISE_W']
BPA_sim_T=sim_weather[col_BPA_T].values
BPA_sim_W=sim_weather[col_BPA_W].values
sim_days = len(sim_weather)
weighted_SimT = np.zeros((sim_days,1))
###########################################
#find average temps
cities = ['Salem','Seattle','Portland','Eugene','Boise']
num_cities = len(cities)
num_days = len(df_weather)
AvgT = np.zeros((num_days,num_cities))
Wind = np.zeros((num_days,num_cities))
weighted_AvgT = np.zeros((num_days,1))
for i in cities:
n1 = i + '_MaxT'
n2 = i + '_MinT'
n3 = i + '_Wind'
j = int(cities.index(i))
AvgT[:,j] = 0.5*df_weather.loc[:,n1] + 0.5*df_weather.loc[:,n2]
weighted_AvgT[:,0] = weighted_AvgT[:,0] + AvgT[:,j]*BPA_weights.loc[0,i]
Wind[:,j] = df_weather.loc[:,n3]
weighted_SimT[:,0] = weighted_SimT[:,0] + BPA_sim_T[:,j]*BPA_weights.loc[0,i]
#Convert simulated temperature to F
weighted_SimT=(weighted_SimT * 9/5) +32
BPA_sim_T_F=(BPA_sim_T * 9/5) +32
#convert to degree days
HDD = np.zeros((num_days,num_cities))
CDD = np.zeros((num_days,num_cities))
HDD_sim = np.zeros((sim_days,num_cities))
CDD_sim = np.zeros((sim_days,num_cities))
for i in range(0,num_days):
for j in range(0,num_cities):
HDD[i,j] = np.max((0,65-AvgT[i,j]))
CDD[i,j] = np.max((0,AvgT[i,j] - 65))
for i in range(0,sim_days):
for j in range(0,num_cities):
HDD_sim[i,j] = np.max((0,65-BPA_sim_T_F[i,j]))
CDD_sim[i,j] = np.max((0,BPA_sim_T_F[i,j] - 65))
#separate wind speed by cooling/heating degree day
binary_CDD = CDD>0
binary_HDD = HDD>0
CDD_wind = np.multiply(Wind,binary_CDD)
HDD_wind = np.multiply(Wind,binary_HDD)
binary_CDD_sim = CDD_sim > 0
binary_HDD_sim = HDD_sim > 0
CDD_wind_sim = np.multiply(BPA_sim_W,binary_CDD_sim)
HDD_wind_sim = np.multiply(BPA_sim_W,binary_HDD_sim)
#convert load to array
BPA_load = df_load.loc[:,'BPA'].values
#remove NaNs
a = np.argwhere(np.isnan(BPA_load))
for i in a:
BPA_load[i] = BPA_load[i+24]
peaks = np.zeros((num_days,1))
#find peaks
for i in range(0,num_days):
peaks[i] = np.max(BPA_load[i*24:i*24+24])
#Separate data by weighted temperature
M = np.column_stack((weighted_AvgT,peaks,dow,HDD,CDD,HDD_wind,CDD_wind))
M_sim=np.column_stack((weighted_SimT,sim_dow,HDD_sim,CDD_sim,HDD_wind_sim,CDD_wind_sim))
X70p = M[(M[:,0] >= 70),2:]
y70p = M[(M[:,0] >= 70),1]
X65_70 = M[(M[:,0] >= 65) & (M[:,0] < 70),2:]
y65_70 = M[(M[:,0] >= 65) & (M[:,0] < 70),1]
X60_65 = M[(M[:,0] >= 60) & (M[:,0] < 65),2:]
y60_65 = M[(M[:,0] >= 60) & (M[:,0] < 65),1]
X55_60 = M[(M[:,0] >= 55) & (M[:,0] < 60),2:]
y55_60 = M[(M[:,0] >= 55) & (M[:,0] < 60),1]
X50_55 = M[(M[:,0] >= 50) & (M[:,0] < 55),2:]
y50_55 = M[(M[:,0] >= 50) & (M[:,0] < 55),1]
X40_50 = M[(M[:,0] >= 40) & (M[:,0] < 50),2:]
y40_50 = M[(M[:,0] >= 40) & (M[:,0] < 50),1]
X30_40 = M[(M[:,0] >= 30) & (M[:,0] < 40),2:]
y30_40 = M[(M[:,0] >= 30) & (M[:,0] < 40),1]
X25_30 = M[(M[:,0] >= 25) & (M[:,0] < 30),2:]
y25_30 = M[(M[:,0] >= 25) & (M[:,0] < 30),1]
X25m = M[(M[:,0] < 25),2:]
y25m = M[(M[:,0] < 25),1]
X70p_Sim = M_sim[(M_sim[:,0] >= 70),1:]
X65_70_Sim = M_sim[(M_sim[:,0] >= 65) & (M_sim[:,0] < 70),1:]
X60_65_Sim = M_sim[(M_sim[:,0] >= 60) & (M_sim[:,0] < 65),1:]
X55_60_Sim = M_sim[(M_sim[:,0] >= 55) & (M_sim[:,0] < 60),1:]
X50_55_Sim = M_sim[(M_sim[:,0] >= 50) & (M_sim[:,0] < 55),1:]
X40_50_Sim = M_sim[(M_sim[:,0] >= 40) & (M_sim[:,0] < 50),1:]
X30_40_Sim = M_sim[(M_sim[:,0] >= 30) & (M_sim[:,0] < 40),1:]
X25_30_Sim = M_sim[(M_sim[:,0] >= 25) & (M_sim[:,0] < 30),1:]
X25m_Sim = M_sim[(M_sim[:,0] < 25),1:]
#multivariate regression
#Create linear regression object
reg70p = linear_model.LinearRegression()
reg65_70 = linear_model.LinearRegression()
reg60_65 = linear_model.LinearRegression()
reg55_60 = linear_model.LinearRegression()
reg50_55 = linear_model.LinearRegression()
reg40_50 = linear_model.LinearRegression()
reg30_40 = linear_model.LinearRegression()
reg25_30 = linear_model.LinearRegression()
reg25m = linear_model.LinearRegression()
# Train the model using the training sets
if len(y70p) > 0:
reg70p.fit(X70p,y70p)
if len(y65_70) > 0:
reg65_70.fit(X65_70,y65_70)
if len(y60_65) > 0:
reg60_65.fit(X60_65,y60_65)
if len(y55_60) > 0:
reg55_60.fit(X55_60,y55_60)
if len(y50_55) > 0:
reg50_55.fit(X50_55,y50_55)
if len(y40_50) > 0:
reg40_50.fit(X40_50,y40_50)
if len(y30_40) > 0:
reg30_40.fit(X30_40,y30_40)
if len(y25_30) > 0:
reg25_30.fit(X25_30,y25_30)
if len(y25m) > 0:
reg25m.fit(X25m,y25m)
# Make predictions using the testing set
predicted = []
for i in range(0,num_days):
s = M[i,2:]
s = s.reshape((1,len(s)))
if M[i,0]>=70:
y_hat = reg70p.predict(s)
elif M[i,0] >= 65 and M[i,0] < 70:
y_hat = reg65_70.predict(s)
elif M[i,0] >= 60 and M[i,0] < 65:
y_hat = reg60_65.predict(s)
elif M[i,0] >= 55 and M[i,0] < 60:
y_hat = reg55_60.predict(s)
elif M[i,0] >= 50 and M[i,0] < 55:
y_hat = reg50_55.predict(s)
elif M[i,0] >= 40 and M[i,0] < 50:
y_hat = reg40_50.predict(s)
elif M[i,0] >= 30 and M[i,0] < 40:
y_hat = reg30_40.predict(s)
elif M[i,0] >= 25 and M[i,0] < 30:
y_hat = reg25_30.predict(s)
elif M[i,0] < 25:
y_hat = reg25m.predict(s)
predicted = np.append(predicted,y_hat)
BPA_p = predicted.reshape((len(predicted),1))
#Simulate using the regression above
simulated=[]
for i in range(0,sim_days):
s = M_sim[i,1:]
s = s.reshape((1,len(s)))
if M_sim[i,0]>=70:
y_hat = reg70p.predict(s)
elif M_sim[i,0] >= 65 and M_sim[i,0] < 70:
y_hat = reg65_70.predict(s)
elif M_sim[i,0] >= 60 and M_sim[i,0] < 65:
y_hat = reg60_65.predict(s)
elif M_sim[i,0] >= 55 and M_sim[i,0] < 60:
y_hat = reg55_60.predict(s)
elif M_sim[i,0] >= 50 and M_sim[i,0] < 55:
y_hat = reg50_55.predict(s)
elif M_sim[i,0] >= 40 and M_sim[i,0] < 50:
y_hat = reg40_50.predict(s)
elif M_sim[i,0] >= 30 and M_sim[i,0] < 40:
y_hat = reg30_40.predict(s)
elif M_sim[i,0] >= 25 and M_sim[i,0] < 30:
y_hat = reg25_30.predict(s)
elif M_sim[i,0] < 25:
y_hat = reg25m.predict(s)
simulated = np.append(simulated,y_hat)
BPA_sim = simulated.reshape((len(simulated),1))
a=st.pearsonr(peaks,BPA_p)
print(a[0]**2, a[1])
# Residuals
BPAresiduals = BPA_p - peaks
BPA_y = peaks
# RMSE
RMSE = (np.sum((BPAresiduals**2))/len(BPAresiduals))**.5
output = np.column_stack((BPA_p,peaks))
#########################################################################
# CAISO
#########################################################################
#Find the simulated data at the sites
col_CAISO_T = ['FRESNO_T','OAKLAND_T','LOS ANGELES_T','SAN DIEGO_T','SACRAMENTO_T','SAN JOSE_T','SAN FRANCISCO_T']
col_CAISO_W = ['FRESNO_W','OAKLAND_W','LOS ANGELES_W','SAN DIEGO_W','SACRAMENTO_W','SAN JOSE_W','SAN FRANCISCO_W']
CAISO_sim_T=sim_weather[col_CAISO_T].values
CAISO_sim_W=sim_weather[col_CAISO_W].values
sim_days = len(sim_weather)
weighted_SimT = np.zeros((sim_days,1))
#find average temps
cities = ['Fresno','Oakland','LA','SanDiego','Sacramento','SanJose','SanFran']
num_cities = len(cities)
num_days = len(df_weather)
AvgT = np.zeros((num_days,num_cities))
Wind = np.zeros((num_days,num_cities))
weighted_AvgT = np.zeros((num_days,1))
for i in cities:
n1 = i + '_MaxT'
n2 = i + '_MinT'
n3 = i + '_Wind'
j = int(cities.index(i))
AvgT[:,j] = 0.5*df_weather.loc[:,n1] + 0.5*df_weather.loc[:,n2]
Wind[:,j] = df_weather.loc[:,n3]
weighted_AvgT[:,0] = weighted_AvgT[:,0] + AvgT[:,j]*CAISO_weights.loc[1,i]
weighted_SimT[:,0] = weighted_SimT[:,0] + CAISO_sim_T[:,j]*CAISO_weights.loc[1,i]
#Convert simulated temperature to F
weighted_SimT=(weighted_SimT * 9/5) +32
CAISO_sim_T_F=(CAISO_sim_T * 9/5) +32
#convert to degree days
HDD = np.zeros((num_days,num_cities))
CDD = np.zeros((num_days,num_cities))
HDD_sim = np.zeros((sim_days,num_cities))
CDD_sim = np.zeros((sim_days,num_cities))
for i in range(0,num_days):
for j in range(0,num_cities):
HDD[i,j] = np.max((0,65-AvgT[i,j]))
CDD[i,j] = np.max((0,AvgT[i,j] - 65))
for i in range(0,sim_days):
for j in range(0,num_cities):
HDD_sim[i,j] = np.max((0,65-CAISO_sim_T_F[i,j]))
CDD_sim[i,j] = np.max((0,CAISO_sim_T_F[i,j] - 65))
#separate wind speed by cooling/heating degree day
binary_CDD = CDD>0
binary_HDD = HDD>0
binary_CDD_sim = CDD_sim > 0
binary_HDD_sim = HDD_sim > 0
CDD_wind = np.multiply(Wind,binary_CDD)
HDD_wind = np.multiply(Wind,binary_HDD)
CDD_wind_sim = np.multiply(CAISO_sim_W,binary_CDD_sim)
HDD_wind_sim = np.multiply(CAISO_sim_W,binary_HDD_sim)
###########################
# CAISO - SDGE
###########################
#convert load to array
SDGE_load = df_load.loc[:,'SDGE'].values
#remove NaNs
a = np.argwhere(np.isnan(SDGE_load))
for i in a:
SDGE_load[i] = SDGE_load[i+24]
peaks = np.zeros((num_days,1))
#find peaks
for i in range(0,num_days):
peaks[i] = np.max(SDGE_load[i*24:i*24+24])
#Separate data by weighted temperature
M = np.column_stack((weighted_AvgT,peaks,dow,HDD,CDD,HDD_wind,CDD_wind))
M_sim=np.column_stack((weighted_SimT,sim_dow,HDD_sim,CDD_sim,HDD_wind_sim,CDD_wind_sim))
X80p = M[(M[:,0] >= 80),2:]
y80p = M[(M[:,0] >= 80),1]
X75_80 = M[(M[:,0] >= 75) & (M[:,0] < 80),2:]
y75_80 = M[(M[:,0] >= 75) & (M[:,0] < 80),1]
X70_75 = M[(M[:,0] >= 70) & (M[:,0] < 75),2:]
y70_75 = M[(M[:,0] >= 70) & (M[:,0] < 75),1]
X65_70 = M[(M[:,0] >= 65) & (M[:,0] < 70),2:]
y65_70 = M[(M[:,0] >= 65) & (M[:,0] < 70),1]
X60_65 = M[(M[:,0] >= 60) & (M[:,0] < 65),2:]
y60_65 = M[(M[:,0] >= 60) & (M[:,0] < 65),1]
X55_60 = M[(M[:,0] >= 55) & (M[:,0] < 60),2:]
y55_60 = M[(M[:,0] >= 55) & (M[:,0] < 60),1]
X50_55 = M[(M[:,0] >= 50) & (M[:,0] < 55),2:]
y50_55 = M[(M[:,0] >= 50) & (M[:,0] < 55),1]
X50 = M[(M[:,0] < 50),2:]
y50 = M[(M[:,0] < 50),1]
X80p_Sim = M_sim[(M_sim[:,0] >= 80),1:]
X75_80_Sim = M_sim[(M_sim[:,0] >= 75) & (M_sim[:,0] < 80),1:]
X70_75_Sim = M_sim[(M_sim[:,0] >= 70) & (M_sim[:,0] < 75),1:]
X65_70_Sim = M_sim[(M_sim[:,0] >= 65) & (M_sim[:,0] < 70),1:]
X60_65_Sim = M_sim[(M_sim[:,0] >= 60) & (M_sim[:,0] < 65),1:]
X55_60_Sim = M_sim[(M_sim[:,0] >= 55) & (M_sim[:,0] < 60),1:]
X50_55_Sim = M_sim[(M_sim[:,0] >= 50) & (M_sim[:,0] < 55),1:]
X50_Sim = M_sim[(M_sim[:,0] < 50),1:]
#Create linear regression object
reg80p = linear_model.LinearRegression()
reg75_80 = linear_model.LinearRegression()
reg70_75 = linear_model.LinearRegression()
reg65_70 = linear_model.LinearRegression()
reg60_65 = linear_model.LinearRegression()
reg55_60 = linear_model.LinearRegression()
reg50_55 = linear_model.LinearRegression()
reg50m = linear_model.LinearRegression()
## Train the model using the training sets
if len(y80p) > 0:
reg80p.fit(X80p,y80p)
if len(y75_80) > 0:
reg75_80.fit(X75_80,y75_80)
if len(y70_75) > 0:
reg70_75.fit(X70_75,y70_75)
if len(y65_70) > 0:
reg65_70.fit(X65_70,y65_70)
if len(y60_65) > 0:
reg60_65.fit(X60_65,y60_65)
if len(y55_60) > 0:
reg55_60.fit(X55_60,y55_60)
if len(y50_55) > 0:
reg50_55.fit(X50_55,y50_55)
if len(y50) > 0:
reg50m.fit(X50,y50)
# Make predictions using the testing set
predicted = []
for i in range(0,num_days):
s = M[i,2:]
s = s.reshape((1,len(s)))
if M[i,0]>=80:
y_hat = reg80p.predict(s)
elif M[i,0] >= 75 and M[i,0] < 80:
y_hat = reg75_80.predict(s)
elif M[i,0] >= 70 and M[i,0] < 75:
y_hat = reg70_75.predict(s)
elif M[i,0] >= 65 and M[i,0] < 70:
y_hat = reg65_70.predict(s)
elif M[i,0] >= 60 and M[i,0] < 65:
y_hat = reg60_65.predict(s)
elif M[i,0] >= 55 and M[i,0] < 60:
y_hat = reg55_60.predict(s)
elif M[i,0] >= 50 and M[i,0] < 55:
y_hat = reg50_55.predict(s)
elif M[i,0] < 50:
y_hat = reg50m.predict(s)
predicted = np.append(predicted,y_hat)
SDGE_p = predicted.reshape((len(predicted),1))
simulated=[]
for i in range(0,sim_days):
s = M_sim[i,1:]
s = s.reshape((1,len(s)))
if M_sim[i,0]>=80:
y_hat = reg80p.predict(s)
elif M_sim[i,0] >= 75 and M_sim[i,0] < 80:
y_hat = reg75_80.predict(s)
elif M_sim[i,0] >= 70 and M_sim[i,0] < 75:
y_hat = reg70_75.predict(s)
elif M_sim[i,0] >= 65 and M_sim[i,0] < 70:
y_hat = reg65_70.predict(s)
elif M_sim[i,0] >= 60 and M_sim[i,0] < 65:
y_hat = reg60_65.predict(s)
elif M_sim[i,0] >= 55 and M_sim[i,0] < 60:
y_hat = reg55_60.predict(s)
elif M_sim[i,0] >= 50 and M_sim[i,0] < 55:
y_hat = reg50_55.predict(s)
elif M_sim[i,0] < 50:
y_hat = reg50m.predict(s)
#
simulated = np.append(simulated,y_hat)
SDGE_sim = simulated.reshape((len(simulated),1))
# Residuals
SDGEresiduals = SDGE_p - peaks
SDGE_y = peaks
#a=st.pearsonr(peaks,SDGE_p)
#print a[0]**2
# RMSE
RMSE = (np.sum((SDGEresiduals**2))/len(SDGEresiduals))**.5
###########################
# CAISO - SCE
###########################
#convert load to array
SCE_load = df_load.loc[:,'SCE'].values
#remove NaNs
a = np.argwhere(np.isnan(SCE_load))
for i in a:
SCE_load[i] = SCE_load[i+24]
peaks = np.zeros((num_days,1))
#find peaks
for i in range(0,num_days):
peaks[i] = np.max(SCE_load[i*24:i*24+24])
#Separate data by weighted temperature
M = np.column_stack((weighted_AvgT,peaks,dow,HDD,CDD,HDD_wind,CDD_wind))
M_sim=np.column_stack((weighted_SimT,sim_dow,HDD_sim,CDD_sim,HDD_wind_sim,CDD_wind_sim))
X80p = M[(M[:,0] >= 80),2:]
y80p = M[(M[:,0] >= 80),1]
X75_80 = M[(M[:,0] >= 75) & (M[:,0] < 80),2:]
y75_80 = M[(M[:,0] >= 75) & (M[:,0] < 80),1]
X70_75 = M[(M[:,0] >= 70) & (M[:,0] < 75),2:]
y70_75 = M[(M[:,0] >= 70) & (M[:,0] < 75),1]
X65_70 = M[(M[:,0] >= 65) & (M[:,0] < 70),2:]
y65_70 = M[(M[:,0] >= 65) & (M[:,0] < 70),1]
X60_65 = M[(M[:,0] >= 60) & (M[:,0] < 65),2:]
y60_65 = M[(M[:,0] >= 60) & (M[:,0] < 65),1]
X55_60 = M[(M[:,0] >= 55) & (M[:,0] < 60),2:]
y55_60 = M[(M[:,0] >= 55) & (M[:,0] < 60),1]
X50_55 = M[(M[:,0] >= 50) & (M[:,0] < 55),2:]
y50_55 = M[(M[:,0] >= 50) & (M[:,0] < 55),1]
X50 = M[(M[:,0] < 50),2:]
y50 = M[(M[:,0] < 50),1]
X80p_Sim = M_sim[(M_sim[:,0] >= 80),1:]
X75_80_Sim = M_sim[(M_sim[:,0] >= 75) & (M_sim[:,0] < 80),1:]
X70_75_Sim = M_sim[(M_sim[:,0] >= 70) & (M_sim[:,0] < 75),1:]
X65_70_Sim = M_sim[(M_sim[:,0] >= 65) & (M_sim[:,0] < 70),1:]
X60_65_Sim = M_sim[(M_sim[:,0] >= 60) & (M_sim[:,0] < 65),1:]
X55_60_Sim = M_sim[(M_sim[:,0] >= 55) & (M_sim[:,0] < 60),1:]
X50_55_Sim = M_sim[(M_sim[:,0] >= 50) & (M_sim[:,0] < 55),1:]
X50_Sim = M_sim[(M_sim[:,0] < 50),1:]
##multivariate regression
#
#Create linear regression object
reg80p = linear_model.LinearRegression()
reg75_80 = linear_model.LinearRegression()
reg70_75 = linear_model.LinearRegression()
reg65_70 = linear_model.LinearRegression()
reg60_65 = linear_model.LinearRegression()
reg55_60 = linear_model.LinearRegression()
reg50_55 = linear_model.LinearRegression()
reg50m = linear_model.LinearRegression()
## Train the model using the training sets
if len(y80p) > 0:
reg80p.fit(X80p,y80p)
if len(y75_80) > 0:
reg75_80.fit(X75_80,y75_80)
if len(y70_75) > 0:
reg70_75.fit(X70_75,y70_75)
if len(y65_70) > 0:
reg65_70.fit(X65_70,y65_70)
if len(y60_65) > 0:
reg60_65.fit(X60_65,y60_65)
if len(y55_60) > 0:
reg55_60.fit(X55_60,y55_60)
if len(y50_55) > 0:
reg50_55.fit(X50_55,y50_55)
if len(y50) > 0:
reg50m.fit(X50,y50)
# Make predictions using the testing set
predicted = []
for i in range(0,num_days):
s = M[i,2:]
s = s.reshape((1,len(s)))
if M[i,0]>=80:
y_hat = reg80p.predict(s)
elif M[i,0] >= 75 and M[i,0] < 80:
y_hat = reg75_80.predict(s)
elif M[i,0] >= 70 and M[i,0] < 75:
y_hat = reg70_75.predict(s)
elif M[i,0] >= 65 and M[i,0] < 70:
y_hat = reg65_70.predict(s)
elif M[i,0] >= 60 and M[i,0] < 65:
y_hat = reg60_65.predict(s)
elif M[i,0] >= 55 and M[i,0] < 60:
y_hat = reg55_60.predict(s)
elif M[i,0] >= 50 and M[i,0] < 55:
y_hat = reg50_55.predict(s)
elif M[i,0] < 50:
y_hat = reg50m.predict(s)
predicted = np.append(predicted,y_hat)
SCE_p = predicted.reshape((len(predicted),1))
simulated=[]
for i in range(0,sim_days):
s = M_sim[i,1:]
s = s.reshape((1,len(s)))
if M_sim[i,0]>=80:
y_hat = reg80p.predict(s)
elif M_sim[i,0] >= 75 and M_sim[i,0] < 80:
y_hat = reg75_80.predict(s)
elif M_sim[i,0] >= 70 and M_sim[i,0] < 75:
y_hat = reg70_75.predict(s)
elif M_sim[i,0] >= 65 and M_sim[i,0] < 70:
y_hat = reg65_70.predict(s)
elif M_sim[i,0] >= 60 and M_sim[i,0] < 65:
y_hat = reg60_65.predict(s)
elif M_sim[i,0] >= 55 and M_sim[i,0] < 60:
y_hat = reg55_60.predict(s)
elif M_sim[i,0] >= 50 and M_sim[i,0] < 55:
y_hat = reg50_55.predict(s)
elif M_sim[i,0] < 50:
y_hat = reg50m.predict(s)
simulated = np.append(simulated,y_hat)
SCE_sim = simulated.reshape((len(simulated),1))
#a=st.pearsonr(peaks,SCE_p)
#print a[0]**2
# Residuals
SCEresiduals = SCE_p - peaks
SCE_y = peaks
# RMSE
RMSE = (np.sum((SCEresiduals**2))/len(SCEresiduals))**.5
###########################
# CAISO - PG&E Valley
###########################
#convert load to array
PGEV_load = df_load.loc[:,'PGE_V'].values
#remove NaNs
a = np.argwhere(np.isnan(PGEV_load))
for i in a:
PGEV_load[i] = PGEV_load[i+24]
peaks = np.zeros((num_days,1))
#find peaks
for i in range(0,num_days):
peaks[i] = np.max(PGEV_load[i*24:i*24+24])
#Separate data by weighted temperature
M = np.column_stack((weighted_AvgT,peaks,dow,HDD,CDD,HDD_wind,CDD_wind))
M_sim=np.column_stack((weighted_SimT,sim_dow,HDD_sim,CDD_sim,HDD_wind_sim,CDD_wind_sim))
X80p = M[(M[:,0] >= 80),2:]
y80p = M[(M[:,0] >= 80),1]
X75_80 = M[(M[:,0] >= 75) & (M[:,0] < 80),2:]
y75_80 = M[(M[:,0] >= 75) & (M[:,0] < 80),1]
X70_75 = M[(M[:,0] >= 70) & (M[:,0] < 75),2:]
y70_75 = M[(M[:,0] >= 70) & (M[:,0] < 75),1]
X65_70 = M[(M[:,0] >= 65) & (M[:,0] < 70),2:]
y65_70 = M[(M[:,0] >= 65) & (M[:,0] < 70),1]
X60_65 = M[(M[:,0] >= 60) & (M[:,0] < 65),2:]
y60_65 = M[(M[:,0] >= 60) & (M[:,0] < 65),1]
X55_60 = M[(M[:,0] >= 55) & (M[:,0] < 60),2:]
y55_60 = M[(M[:,0] >= 55) & (M[:,0] < 60),1]
X50_55 = M[(M[:,0] >= 50) & (M[:,0] < 55),2:]
y50_55 = M[(M[:,0] >= 50) & (M[:,0] < 55),1]
X50 = M[(M[:,0] < 50),2:]
y50 = M[(M[:,0] < 50),1]
X80p_Sim = M_sim[(M_sim[:,0] >= 80),1:]
X75_80_Sim = M_sim[(M_sim[:,0] >= 75) & (M_sim[:,0] < 80),1:]
X70_75_Sim = M_sim[(M_sim[:,0] >= 70) & (M_sim[:,0] < 75),1:]
X65_70_Sim = M_sim[(M_sim[:,0] >= 65) & (M_sim[:,0] < 70),1:]
X60_65_Sim = M_sim[(M_sim[:,0] >= 60) & (M_sim[:,0] < 65),1:]
X55_60_Sim = M_sim[(M_sim[:,0] >= 55) & (M_sim[:,0] < 60),1:]
X50_55_Sim = M_sim[(M_sim[:,0] >= 50) & (M_sim[:,0] < 55),1:]
X50_Sim = M_sim[(M_sim[:,0] < 50),1:]
##multivariate regression
#
#Create linear regression object
reg80p = linear_model.LinearRegression()
reg75_80 = linear_model.LinearRegression()
reg70_75 = linear_model.LinearRegression()
reg65_70 = linear_model.LinearRegression()
reg60_65 = linear_model.LinearRegression()
reg55_60 = linear_model.LinearRegression()
reg50_55 = linear_model.LinearRegression()
reg50m = linear_model.LinearRegression()
## Train the model using the training sets
if len(y80p) > 0:
reg80p.fit(X80p,y80p)
if len(y75_80) > 0:
reg75_80.fit(X75_80,y75_80)
if len(y70_75) > 0:
reg70_75.fit(X70_75,y70_75)
if len(y65_70) > 0:
reg65_70.fit(X65_70,y65_70)
if len(y60_65) > 0:
reg60_65.fit(X60_65,y60_65)
if len(y55_60) > 0:
reg55_60.fit(X55_60,y55_60)
if len(y50_55) > 0:
reg50_55.fit(X50_55,y50_55)
if len(y50) > 0:
reg50m.fit(X50,y50)
# Make predictions using the testing set
predicted = []
for i in range(0,num_days):
s = M[i,2:]
s = s.reshape((1,len(s)))
if M[i,0]>=80:
y_hat = reg80p.predict(s)
elif M[i,0] >= 75 and M[i,0] < 80:
y_hat = reg75_80.predict(s)
elif M[i,0] >= 70 and M[i,0] < 75:
y_hat = reg70_75.predict(s)
elif M[i,0] >= 65 and M[i,0] < 70:
y_hat = reg65_70.predict(s)
elif M[i,0] >= 60 and M[i,0] < 65:
y_hat = reg60_65.predict(s)
elif M[i,0] >= 55 and M[i,0] < 60:
y_hat = reg55_60.predict(s)
elif M[i,0] >= 50 and M[i,0] < 55:
y_hat = reg50_55.predict(s)
elif M[i,0] < 50:
y_hat = reg50m.predict(s)
predicted = np.append(predicted,y_hat)
PGEV_p = predicted.reshape((len(predicted),1))
simulated=[]
for i in range(0,sim_days):
s = M_sim[i,1:]
s = s.reshape((1,len(s)))
if M_sim[i,0]>=80:
y_hat = reg80p.predict(s)
elif M_sim[i,0] >= 75 and M_sim[i,0] < 80:
y_hat = reg75_80.predict(s)
elif M_sim[i,0] >= 70 and M_sim[i,0] < 75:
y_hat = reg70_75.predict(s)
elif M_sim[i,0] >= 65 and M_sim[i,0] < 70:
y_hat = reg65_70.predict(s)
elif M_sim[i,0] >= 60 and M_sim[i,0] < 65:
y_hat = reg60_65.predict(s)
elif M_sim[i,0] >= 55 and M_sim[i,0] < 60:
y_hat = reg55_60.predict(s)
elif M_sim[i,0] >= 50 and M_sim[i,0] < 55:
y_hat = reg50_55.predict(s)
elif M_sim[i,0] < 50:
y_hat = reg50m.predict(s)
simulated = np.append(simulated,y_hat)
PGEV_sim = simulated.reshape((len(simulated),1))
a=st.pearsonr(peaks,PGEV_p)
print(a[0]**2, a[1])
# Residuals
PGEVresiduals = PGEV_p - peaks
PGEV_y = peaks
# RMSE
RMSE = (np.sum((PGEVresiduals**2))/len(PGEVresiduals))**.5
###########################
# CAISO - PG&E Bay
###########################
#convert load to array
PGEB_load = df_load.loc[:,'PGE_B'].values
#remove NaNs
a = np.argwhere(np.isnan(PGEB_load))
for i in a:
PGEB_load[i] = PGEB_load[i+24]
peaks = np.zeros((num_days,1))
#find peaks
for i in range(0,num_days):
peaks[i] = np.max(PGEB_load[i*24:i*24+24])
#Separate data by weighted temperature
M = np.column_stack((weighted_AvgT,peaks,dow,HDD,CDD,HDD_wind,CDD_wind))
M_sim=np.column_stack((weighted_SimT,sim_dow,HDD_sim,CDD_sim,HDD_wind_sim,CDD_wind_sim))
X80p = M[(M[:,0] >= 80),2:]
y80p = M[(M[:,0] >= 80),1]
X75_80 = M[(M[:,0] >= 75) & (M[:,0] < 80),2:]
y75_80 = M[(M[:,0] >= 75) & (M[:,0] < 80),1]
X70_75 = M[(M[:,0] >= 70) & (M[:,0] < 75),2:]
y70_75 = M[(M[:,0] >= 70) & (M[:,0] < 75),1]
X65_70 = M[(M[:,0] >= 65) & (M[:,0] < 70),2:]
y65_70 = M[(M[:,0] >= 65) & (M[:,0] < 70),1]
X60_65 = M[(M[:,0] >= 60) & (M[:,0] < 65),2:]
y60_65 = M[(M[:,0] >= 60) & (M[:,0] < 65),1]
X55_60 = M[(M[:,0] >= 55) & (M[:,0] < 60),2:]
y55_60 = M[(M[:,0] >= 55) & (M[:,0] < 60),1]
X50_55 = M[(M[:,0] >= 50) & (M[:,0] < 55),2:]
y50_55 = M[(M[:,0] >= 50) & (M[:,0] < 55),1]
X50 = M[(M[:,0] < 50),2:]
y50 = M[(M[:,0] < 50),1]
X80p_Sim = M_sim[(M_sim[:,0] >= 80),1:]
X75_80_Sim = M_sim[(M_sim[:,0] >= 75) & (M_sim[:,0] < 80),1:]
X70_75_Sim = M_sim[(M_sim[:,0] >= 70) & (M_sim[:,0] < 75),1:]
X65_70_Sim = M_sim[(M_sim[:,0] >= 65) & (M_sim[:,0] < 70),1:]
X60_65_Sim = M_sim[(M_sim[:,0] >= 60) & (M_sim[:,0] < 65),1:]
X55_60_Sim = M_sim[(M_sim[:,0] >= 55) & (M_sim[:,0] < 60),1:]
X50_55_Sim = M_sim[(M_sim[:,0] >= 50) & (M_sim[:,0] < 55),1:]
X50_Sim = M_sim[(M_sim[:,0] < 50),1:]
#Create linear regression object
reg80p = linear_model.LinearRegression()
reg75_80 = linear_model.LinearRegression()
reg70_75 = linear_model.LinearRegression()
reg65_70 = linear_model.LinearRegression()
reg60_65 = linear_model.LinearRegression()
reg55_60 = linear_model.LinearRegression()
reg50_55 = linear_model.LinearRegression()
reg50m = linear_model.LinearRegression()
## Train the model using the training sets
if len(y80p) > 0:
reg80p.fit(X80p,y80p)
if len(y75_80) > 0:
reg75_80.fit(X75_80,y75_80)
if len(y70_75) > 0:
reg70_75.fit(X70_75,y70_75)
if len(y65_70) > 0:
reg65_70.fit(X65_70,y65_70)
if len(y60_65) > 0:
reg60_65.fit(X60_65,y60_65)
if len(y55_60) > 0:
reg55_60.fit(X55_60,y55_60)
if len(y50_55) > 0:
reg50_55.fit(X50_55,y50_55)
if len(y50) > 0:
reg50m.fit(X50,y50)
# Make predictions using the testing set
predicted = []
for i in range(0,num_days):
s = M[i,2:]
s = s.reshape((1,len(s)))
if M[i,0]>=80:
y_hat = reg80p.predict(s)
elif M[i,0] >= 75 and M[i,0] < 80:
y_hat = reg75_80.predict(s)
elif M[i,0] >= 70 and M[i,0] < 75:
y_hat = reg70_75.predict(s)
elif M[i,0] >= 65 and M[i,0] < 70:
y_hat = reg65_70.predict(s)
elif M[i,0] >= 60 and M[i,0] < 65:
y_hat = reg60_65.predict(s)
elif M[i,0] >= 55 and M[i,0] < 60:
y_hat = reg55_60.predict(s)
elif M[i,0] >= 50 and M[i,0] < 55:
y_hat = reg50_55.predict(s)
elif M[i,0] < 50:
y_hat = reg50m.predict(s)
predicted = np.append(predicted,y_hat)
PGEB_p = predicted.reshape((len(predicted),1))
simulated=[]
for i in range(0,sim_days):
s = M_sim[i,1:]
s = s.reshape((1,len(s)))
if M_sim[i,0]>=80:
y_hat = reg80p.predict(s)
elif M_sim[i,0] >= 75 and M_sim[i,0] < 80:
y_hat = reg75_80.predict(s)
elif M_sim[i,0] >= 70 and M_sim[i,0] < 75:
y_hat = reg70_75.predict(s)
elif M_sim[i,0] >= 65 and M_sim[i,0] < 70:
y_hat = reg65_70.predict(s)
elif M_sim[i,0] >= 60 and M_sim[i,0] < 65:
y_hat = reg60_65.predict(s)
elif M_sim[i,0] >= 55 and M_sim[i,0] < 60:
y_hat = reg55_60.predict(s)
elif M_sim[i,0] >= 50 and M_sim[i,0] < 55:
y_hat = reg50_55.predict(s)
elif M_sim[i,0] < 50:
y_hat = reg50m.predict(s) #
simulated = np.append(simulated,y_hat)
PGEB_sim = simulated.reshape((len(simulated),1))
#a=st.pearsonr(peaks,PGEB_p)
#print a[0]**2
# Residuals
PGEBresiduals = PGEB_p - peaks
PGEB_y = peaks
# RMSE
RMSE = (np.sum((PGEBresiduals**2))/len(PGEBresiduals))**.5
#Collect residuals from load regression
R = np.column_stack((BPAresiduals,SDGEresiduals,SCEresiduals,PGEVresiduals,PGEBresiduals))
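#keep the first three years (1095 days) of daily load residuals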
ResidualsLoad = R[0:3*365,:]
###################################
# PATH 46
###################################
#import data
df_data1 = pd.read_excel('Synthetic_demand_pathflows/46_daily.xlsx',sheet_name='Sheet1',header=0)
#find average temps
cities = ['Tuscon','Phoenix','Vegas','Fresno','Oakland','LA','SanDiego','Sacramento','SanJose','SanFran']
num_cities = len(cities)
num_days = len(df_data1)
AvgT = np.zeros((num_days,num_cities))
Wind = np.zeros((num_days,num_cities))
for i in cities:
n1 = i + '_AvgT'
n2 = i + '_Wind'
j = int(cities.index(i))
AvgT[:,j] = df_data1.loc[:,n1]
Wind[:,j] = df_data1.loc[:,n2]
#convert to degree days
HDD = np.zeros((num_days,num_cities))
CDD = np.zeros((num_days,num_cities))
for i in range(0,num_days):
for j in range(0,num_cities):
HDD[i,j] = np.max((0,65-AvgT[i,j]))
CDD[i,j] = np.max((0,AvgT[i,j] - 65))
#separate wind speed by cooling/heating degree day
binary_CDD = CDD>0
binary_HDD = HDD>0
CDD_wind = np.multiply(Wind,binary_CDD)
HDD_wind = np.multiply(Wind,binary_HDD)
X1 = np.array(df_data1.loc[:,'Month':'Path66'])
X2 = np.column_stack((HDD,CDD,HDD_wind,CDD_wind))
cX = np.column_stack((X1,X2))
df_data = pd.DataFrame(cX)
df_data.rename(columns={0:'Month'}, inplace=True)
df_data.rename(columns={3:'Path46'}, inplace=True)
df_data.rename(columns={4:'Weekday'}, inplace=True)
jan = df_data.loc[df_data['Month'] == 1,:]
feb = df_data.loc[df_data['Month'] == 2,:]
mar = df_data.loc[df_data['Month'] == 3,:]
apr = df_data.loc[df_data['Month'] == 4,:]
may = df_data.loc[df_data['Month'] == 5,:]
jun = df_data.loc[df_data['Month'] == 6,:]
jul = df_data.loc[df_data['Month'] == 7,:]
aug = df_data.loc[df_data['Month'] == 8,:]
sep = df_data.loc[df_data['Month'] == 9,:]
oct = df_data.loc[df_data['Month'] == 10,:]
nov = df_data.loc[df_data['Month'] == 11,:]
dec = df_data.loc[df_data['Month'] == 12,:]
y = df_data.loc[:,'Path46']
#multivariate regression
jan_reg_46 = linear_model.LinearRegression()
feb_reg_46 = linear_model.LinearRegression()
mar_reg_46 = linear_model.LinearRegression()
apr_reg_46 = linear_model.LinearRegression()
may_reg_46 = linear_model.LinearRegression()
jun_reg_46 = linear_model.LinearRegression()
jul_reg_46 = linear_model.LinearRegression()
aug_reg_46 = linear_model.LinearRegression()
sep_reg_46 = linear_model.LinearRegression()
oct_reg_46 = linear_model.LinearRegression()
nov_reg_46 = linear_model.LinearRegression()
dec_reg_46 = linear_model.LinearRegression()
# Train the model using the training sets
jan_reg_46.fit(jan.loc[:,'Weekday':],jan.loc[:,'Path46'])
feb_reg_46.fit(feb.loc[:,'Weekday':],feb.loc[:,'Path46'])
mar_reg_46.fit(mar.loc[:,'Weekday':],mar.loc[:,'Path46'])
apr_reg_46.fit(apr.loc[:,'Weekday':],apr.loc[:,'Path46'])
may_reg_46.fit(may.loc[:,'Weekday':],may.loc[:,'Path46'])
jun_reg_46.fit(jun.loc[:,'Weekday':],jun.loc[:,'Path46'])
jul_reg_46.fit(jul.loc[:,'Weekday':],jul.loc[:,'Path46'])
aug_reg_46.fit(aug.loc[:,'Weekday':],aug.loc[:,'Path46'])
sep_reg_46.fit(sep.loc[:,'Weekday':],sep.loc[:,'Path46'])
oct_reg_46.fit(oct.loc[:,'Weekday':],oct.loc[:,'Path46'])
nov_reg_46.fit(nov.loc[:,'Weekday':],nov.loc[:,'Path46'])
dec_reg_46.fit(dec.loc[:,'Weekday':],dec.loc[:,'Path46'])
# Make predictions using the testing set
predicted = []
rc = np.shape(jan.loc[:,'Weekday':])
n = rc[1]
for i in range(0,len(y)):
m = df_data.loc[i,'Month']
if m==1:
s = jan.loc[i,'Weekday':]
s = np.reshape(s[:,None],(1,n))
p = jan_reg_46.predict(s)
predicted = np.append(predicted,p)
elif m==2:
s = feb.loc[i,'Weekday':]
s = np.reshape(s[:,None],(1,n))
p = feb_reg_46.predict(s)
predicted = np.append(predicted,p)
elif m==3:
s = mar.loc[i,'Weekday':]
s = np.reshape(s[:,None],(1,n))
p = mar_reg_46.predict(s)
predicted = np.append(predicted,p)
elif m==4:
s = apr.loc[i,'Weekday':]
s = np.reshape(s[:,None],(1,n))
p = apr_reg_46.predict(s)
predicted = np.append(predicted,p)
elif m==5:
s = may.loc[i,'Weekday':]
s = np.reshape(s[:,None],(1,n))
p = may_reg_46.predict(s)
predicted = np.append(predicted,p)
elif m==6:
s = jun.loc[i,'Weekday':]
s = np.reshape(s[:,None],(1,n))
p = jun_reg_46.predict(s)
predicted = np.append(predicted,p)
elif m==7:
s = jul.loc[i,'Weekday':]
s = np.reshape(s[:,None],(1,n))
p = jul_reg_46.predict(s)
predicted = np.append(predicted,p)
elif m==8:
s = aug.loc[i,'Weekday':]
s = np.reshape(s[:,None],(1,n))
p = aug_reg_46.predict(s)
predicted = np.append(predicted,p)
elif m==9:
s = sep.loc[i,'Weekday':]
s = np.reshape(s[:,None],(1,n))
p = sep_reg_46.predict(s)
predicted = np.append(predicted,p)
elif m==10:
s = oct.loc[i,'Weekday':]
s = np.reshape(s[:,None],(1,n))
p = oct_reg_46.predict(s)
predicted = np.append(predicted,p)
elif m==11:
s = nov.loc[i,'Weekday':]
s = np.reshape(s[:,None],(1,n))
p = nov_reg_46.predict(s)
predicted = np.append(predicted,p)
else:
s = dec.loc[i,'Weekday':]
s = np.reshape(s[:,None],(1,n))
p = dec_reg_46.predict(s)
predicted = np.append(predicted,p)
Path46_p = predicted
# Residuals
residuals = predicted - y.values
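# Keep the last three years (1095 days) of Path 46 residuals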
Residuals46 = np.reshape(residuals[730:],(1095,1))
Path46_y = y.values
# RMSE
RMSE = (np.sum((residuals**2))/len(residuals))**.5
##R2
#a=st.pearsonr(y,predicted)
#print a[0]**2
###############################
# NW PATHS
###############################
#import data
df_data1 = pd.read_excel('Synthetic_demand_pathflows/NW_Path_data.xlsx',sheet_name='Daily',header=0)
#find average temps
cities = ['Salem','Seattle','Portland','Eugene','Boise','Tuscon','Phoenix','Vegas','Fresno','Oakland','LA','SanDiego','Sacramento','SanJose','SanFran']
num_cities = len(cities)
num_days = len(df_data1)
AvgT = np.zeros((num_days,num_cities))
Wind = np.zeros((num_days,num_cities))
for i in cities:
n1 = i + '_AvgT'
n2 = i + '_Wind'
j = int(cities.index(i))
AvgT[:,j] = df_data1.loc[:,n1]
Wind[:,j] = df_data1.loc[:,n2]
#convert to degree days
HDD = np.zeros((num_days,num_cities))
CDD = np.zeros((num_days,num_cities))
for i in range(0,num_days):
for j in range(0,num_cities):
        HDD[i,j] = np.max((0,65-AvgT[i,j]))
        CDD[i,j] = np.max((0,AvgT[i,j] - 65))
from sklearn import svm
from menpo.shape import PointCloud
from menpo.shape import TriMesh
from menpo.image import MaskedImage
from menpo.visualize.base import Viewable
from scipy.spatial.distance import euclidean as dist
import numpy as np
class SVS(Viewable):
def __init__(self, points, tplt_edge=None, nu=0.5, kernel='rbf', gamma=0.03,
tolerance=0.5, max_f=5):
self.points = points
self._build(nu, kernel, gamma, tolerance, tplt_edge, max_f)
def _build(self, nu, kernel, gamma, tolerance, tplt_edge, max_f):
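        # Sample a regular grid around the landmark points and label grid samples by
        # their distance to the template edges: samples within accept_rate become
        # positives, samples at least `tolerance` away (and within max_f * tolerance)
        # become negatives.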
accept_rate = 0.5
margin = 10
sample_step = 1
min_p = np.min(self.points, axis=0).astype('int')
max_p = np.max(self.points, axis=0).astype('int')
self._range_x = range_x = np.arange(
min_p[0]-margin, max_p[0]+margin, sample_step
)
self._range_y = range_y = np.arange(
min_p[1]-margin, max_p[1]+margin, sample_step
)
        # Generate negative points
# Build Triangle Mesh
if tplt_edge is None:
tplt_tri = TriMesh(self.points).trilist
# Generate Edge List
tplt_edge = tplt_tri[:, [0, 1]]
tplt_edge = np.vstack((tplt_edge, tplt_tri[:, [0, 2]]))
tplt_edge = np.vstack((tplt_edge, tplt_tri[:, [1, 2]]))
tplt_edge = np.sort(tplt_edge)
# Get Unique Edge
b = np.ascontiguousarray(tplt_edge).view(
np.dtype(
(np.void, tplt_edge.dtype.itemsize * tplt_edge.shape[1])
)
)
_, idx = np.unique(b, return_index=True)
tplt_edge = tplt_edge[idx]
# Sample Points
training_points_negative = []
training_points_positive = []
for i in range_x:
for j in range_y:
valid = True
max_dist = 100*tolerance
for e in tplt_edge:
min_dist = minimum_distance(
self.points[e[0]],
self.points[e[1]],
np.array([i, j]),
accept_rate
)
if min_dist < max_dist:
max_dist = min_dist
if min_dist < tolerance:
valid = False
if min_dist < accept_rate:
training_points_positive.append([i, j])
break
if valid and max_dist < max_f*tolerance:
training_points_negative.append([i, j])
        training_points_negative = np.array(training_points_negative)
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Mar 21 11:03:10 2020
@author: sergio.lordano
"""
import numpy as np
from scipy import ndimage
def read_shadow_beam(beam, x_column_index=1, y_column_index=3, nbins_x=100, nbins_y=100, nolost = 1, ref = 23, zeroPadding=0, gaussian_filter=0):
"""
Parameters
----------
beam : ShadowBeam()
General Shadow beam object.
x_column_index : int
Shadow column number for x axis. The default is 1.
y_column_index : int
Shadow column number for y axis. The default is 3.
nbins_x : int
Number of bins for x axis. The default is 100.
nbins_y : int
Number of bins for y axis. The default is 100.
nolost : int
1 to use only good rays; 0 to use good and lost rays. The default is 1.
ref : TYPE, optional
Shadow column used as weights. The default is 23 (intensity).
zeroPadding : float
Range factor for inserting zeros in the beam matrix. The default is 0.
gaussian_filter : float
A float larger than 0 to apply gaussian filter. The default is 0.
Returns
-------
XY : float array
        returns a 2D numpy array where the first row is x coordinates, the first
        column is y coordinates, [0,0] is not used, and [1:,1:] is the 2D histogram.
"""
histo2D = beam.histo2(col_h = x_column_index, col_v = y_column_index, nbins_h = nbins_x, nbins_v = nbins_y, nolost = nolost, ref = ref)
x_axis = histo2D['bin_h_center']
y_axis = histo2D['bin_v_center']
xy = histo2D['histogram']
if(zeroPadding==0):
XY = np.zeros((nbins_y+1,nbins_x+1))
XY[1:,0] = y_axis
XY[0,1:] = x_axis
XY[1:,1:] = np.array(xy).transpose()
if(gaussian_filter != 0):
XY[1:,1:] = ndimage.gaussian_filter(np.array(xy).transpose(), gaussian_filter)
else:
x_step = x_axis[1]-x_axis[0]
y_step = y_axis[1]-y_axis[0]
fct = zeroPadding
XY = np.zeros((nbins_y+15, nbins_x+15))
XY[8:nbins_y+8,0] = y_axis
XY[0,8:nbins_x+8] = x_axis
XY[8:nbins_y+8,8:nbins_x+8] = np.array(xy).transpose()
XY[1,0] = np.min(y_axis) - (np.max(y_axis) - np.min(y_axis))*fct
XY[2:-1,0] = np.linspace(y_axis[0] - 6*y_step, y_axis[-1] + 6*y_step, nbins_y+12)
XY[-1,0] = np.max(y_axis) + (np.max(y_axis) - np.min(y_axis))*fct
XY[0,1] = np.min(x_axis) - (np.max(x_axis) - np.min(x_axis))*fct
XY[0,2:-1] = np.linspace(x_axis[0] - 6*x_step, x_axis[-1] + 6*x_step, nbins_x+12)
        XY[0,-1] = np.max(x_axis) + (np.max(x_axis) - np.min(x_axis))*fct
    return XY
import logging
import os
import shutil
import sys
import scipy.sparse as sparse
import numpy as np
import torch
def save_checkpoint(state, is_best, checkpoint_dir, logger=None):
"""Saves model and training parameters at '{checkpoint_dir}/last_checkpoint.pytorch'.
If is_best==True saves '{checkpoint_dir}/best_checkpoint.pytorch' as well.
Args:
state (dict): contains model's state_dict, optimizer's state_dict, epoch
and best evaluation metric value so far
is_best (bool): if True state contains the best model seen so far
checkpoint_dir (string): directory where the checkpoint are to be saved
"""
def log_info(message):
if logger is not None:
logger.info(message)
if not os.path.exists(checkpoint_dir):
log_info(
f"Checkpoint directory does not exists. Creating {checkpoint_dir}")
os.mkdir(checkpoint_dir)
last_file_path = os.path.join(checkpoint_dir, 'last_checkpoint.pytorch')
log_info(f"Saving last checkpoint to '{last_file_path}'")
torch.save(state, last_file_path)
if is_best:
best_file_path = os.path.join(checkpoint_dir, 'best_checkpoint.pytorch')
log_info(f"Saving best checkpoint to '{best_file_path}'")
shutil.copyfile(last_file_path, best_file_path)
def load_checkpoint(checkpoint_path, model, optimizer=None):
"""Loads model and training parameters from a given checkpoint_path
If optimizer is provided, loads optimizer's state_dict of as well.
Args:
checkpoint_path (string): path to the checkpoint to be loaded
model (torch.nn.Module): model into which the parameters are to be copied
optimizer (torch.optim.Optimizer) optional: optimizer instance into
which the parameters are to be copied
Returns:
state
"""
if not os.path.exists(checkpoint_path):
raise IOError(f"Checkpoint '{checkpoint_path}' does not exist")
state = torch.load(checkpoint_path)
model.load_state_dict(state['model_state_dict'])
if optimizer is not None:
optimizer.load_state_dict(state['optimizer_state_dict'])
return state
def get_logger(name, level=logging.INFO):
logger = logging.getLogger(name)
logger.setLevel(level)
# Logging to console
stream_handler = logging.StreamHandler(sys.stdout)
formatter = logging.Formatter(
'%(asctime)s [%(threadName)s] %(levelname)s %(name)s - %(message)s')
stream_handler.setFormatter(formatter)
logger.addHandler(stream_handler)
return logger
def get_number_of_learnable_parameters(model):
model_parameters = filter(lambda p: p.requires_grad, model.parameters())
return sum([np.prod(p.size()) for p in model_parameters])
class RunningAverage:
"""Computes and stores the average
"""
def __init__(self):
self.count = 0
self.sum = 0
self.avg = 0
def update(self, value, n=1):
self.count += n
self.sum += value * n
self.avg = self.sum / self.count
def find_maximum_patch_size(model, device):
"""Tries to find the biggest patch size that can be send to GPU for inference
without throwing CUDA out of memory"""
logger = get_logger('PatchFinder')
in_channels = model.in_channels
patch_shapes = [(64, 128, 128), (96, 128, 128),
(64, 160, 160), (96, 160, 160),
(64, 192, 192), (96, 192, 192)]
for shape in patch_shapes:
# generate random patch of a given size
patch = np.random.randn(*shape).astype('float32')
patch = torch \
.from_numpy(patch) \
.view((1, in_channels) + patch.shape) \
.to(device)
logger.info(f"Current patch size: {shape}")
model(patch)
def unpad(probs, index, shape, pad_width=8):
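    # Drop the pad_width halo from a predicted patch and shrink its destination
    # index to match, keeping the full extent along faces that touch the volume boundary.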
def _new_slices(slicing, max_size):
if slicing.start == 0:
p_start = 0
i_start = 0
else:
p_start = pad_width
i_start = slicing.start + pad_width
if slicing.stop == max_size:
p_stop = None
i_stop = max_size
else:
p_stop = -pad_width
i_stop = slicing.stop - pad_width
return slice(p_start, p_stop), slice(i_start, i_stop)
D, H, W = shape
i_c, i_z, i_y, i_x = index
p_c = slice(0, probs.shape[0])
p_z, i_z = _new_slices(i_z, D)
p_y, i_y = _new_slices(i_y, H)
p_x, i_x = _new_slices(i_x, W)
probs_index = (p_c, p_z, p_y, p_x)
index = (i_c, i_z, i_y, i_x)
return probs[probs_index], index
def create_feature_maps(init_channel_number, number_of_fmaps):
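    # Number of feature maps per level: doubled at each level, capped at 1024.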
# return [init_channel_number * 2 ** k for k in range(number_of_fmaps)]
return [min(init_channel_number * 2 ** k, 1024) for k in range(number_of_fmaps)]
# Code taken from https://github.com/cremi/cremi_python
def adapted_rand(seg, gt, all_stats=False):
"""Compute Adapted Rand error as defined by the SNEMI3D contest [1]
Formula is given as 1 - the maximal F-score of the Rand index
(excluding the zero component of the original labels). Adapted
from the SNEMI3D MATLAB script, hence the strange style.
Parameters
----------
seg : np.ndarray
the segmentation to score, where each value is the label at that point
gt : np.ndarray, same shape as seg
the groundtruth to score against, where each value is a label
all_stats : boolean, optional
whether to also return precision and recall as a 3-tuple with rand_error
Returns
-------
are : float
The adapted Rand error; equal to $1 - \frac{2pr}{p + r}$,
where $p$ and $r$ are the precision and recall described below.
prec : float, optional
The adapted Rand precision. (Only returned when `all_stats` is ``True``.)
rec : float, optional
The adapted Rand recall. (Only returned when `all_stats` is ``True``.)
References
----------
[1]: http://brainiac2.mit.edu/SNEMI3D/evaluation
"""
# just to prevent division by 0
epsilon = 1e-6
# segA is truth, segB is query
segA = np.ravel(gt)
segB = np.ravel(seg)
n = segA.size
n_labels_A = np.amax(segA) + 1
n_labels_B = np.amax(segB) + 1
ones_data = np.ones(n)
p_ij = sparse.csr_matrix((ones_data, (segA[:], segB[:])), shape=(n_labels_A, n_labels_B))
a = p_ij[1:n_labels_A, :]
b = p_ij[1:n_labels_A, 1:n_labels_B]
c = p_ij[1:n_labels_A, 0].todense()
d = b.multiply(b)
a_i = np.array(a.sum(1))
b_i = np.array(b.sum(0))
sumA = np.sum(a_i * a_i)
    sumB = np.sum(b_i * b_i) + epsilon
    sumAB = np.sum(d) + epsilon
    precision = sumAB / sumB
    recall = sumAB / sumA
    fScore = 2.0 * precision * recall / (precision + recall)
    are = 1.0 - fScore
    if all_stats:
        return (are, precision, recall)
    else:
        return are
#! /usr/bin/env python3
"""This script demonstrates Aboleth's imputation layers."""
import logging
import tensorflow as tf
import numpy as np
from sklearn.datasets import fetch_covtype
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score, log_loss, confusion_matrix
from sklearn.preprocessing import StandardScaler
import aboleth as ab
# Set up a python logger so we can see the output of MonitoredTrainingSession
logger = logging.getLogger()
logger.setLevel(logging.INFO)
RSEED = 666
ab.set_hyperseed(RSEED)
CONFIG = tf.ConfigProto(device_count={'GPU': 0}) # Use GPU ?
FRAC_TEST = 0.1 # Fraction of data to use for hold-out testing
FRAC_MISSING = 0.2 # Fraction of data that is missing
MISSING_VAL = -666 # Value to indicate missingness
NCLASSES = 7 # Number of target classes
# Imputation method CHANGE THESE
# METHOD = None
# METHOD = "LearnedNormalImpute"
# METHOD = "FixedNormalImpute"
# METHOD = "FixedScalarImpute"
METHOD = "LearnedScalarImpute"
# METHOD = "MeanImpute"
# Optimization
NEPOCHS = 5 # Number of times to see the data in training
BSIZE = 100 # Mini batch size
LSAMPLES = 3 # Number of samples for training
PSAMPLES = 50 # Number of predictions samples
def main():
"""Run the imputation demo."""
# Fetch data, one-hot targets and standardise data
data = fetch_covtype()
Xo = data.data[:, :10]
Xc = data.data[:, 10:]
Y = (data.target - 1)
Xo[:, :10] = StandardScaler().fit_transform(Xo[:, :10])
# Network construction
n_samples_ = tf.placeholder_with_default(LSAMPLES, [])
data_input = ab.InputLayer(name='Xo', n_samples=n_samples_) # Data input
# Run this with imputation
if METHOD is not None:
print("Imputation method {}.".format(METHOD))
# Fake some missing data
rnd = np.random.RandomState(RSEED)
mask = rnd.rand(*Xo.shape) < FRAC_MISSING
Xo[mask] = MISSING_VAL
# Use Aboleth to impute
mask_input = ab.MaskInputLayer(name='M') # Missing data mask input
xm = np.ma.array(Xo, mask=mask)
if METHOD == "LearnedNormalImpute":
mean = tf.Variable(np.ma.mean(xm, axis=0))
from ClusterDataGen.Feature_env import Features
import networkx as nx
import pandas as pd
import numpy as np
import itertools
import copy
# return reticulation nodes
def reticulations(G):
return [v for v in G.nodes() if G.in_degree(v) == 2]
# for non-binary networks, give the reticulation number per reticulation node
def reticulations_non_binary(G):
return [G.in_degree(i)-1 for i in G.nodes if G.in_degree(i) >= 2]
# return leaves from network
def leaves(net):
return [u for u in net.nodes() if net.out_degree(u) == 0]
# MAKE TREES FROM NETWORK
def net_to_tree(net, num_trees=None, distances=True, partial=False, net_lvs=None):
# we only consider binary networks here
tree_set = dict()
rets = reticulations(net)
ret_num = len(rets)
if net_lvs is not None:
tree_lvs = []
if ret_num == 0:
return False
if num_trees is None:
ret_dels_tmp = itertools.product(*[np.arange(2)]*ret_num)
ret_dels = None
for opt in ret_dels_tmp:
opt = np.array(opt).reshape([1, -1])
try:
ret_dels = np.vstack([ret_dels, opt])
except:
ret_dels = opt
else:
ret_dels_set = set()
its = 0
while len(ret_dels_set) < num_trees:
ret_dels_set.add(tuple(np.random.randint(0, 2, ret_num)))
import numpy as np
import scipy.stats.distributions as sc_dist
from itertools import compress
def aggarwal_limits(mu, alpha=0.68268949):
"""Get Poissonian limits for specified contour levels
Parameters
----------
mu : array_like
The expected number of events (Poisson mean) in each observable bin.
Shape: [n_bins]
alpha : float or list of float, optional
The list of alpha values, which define the contour levels which will
be computed.
Returns
-------
array_like
The lower limits (minus -0.5) for each of the observable bins and
chosen contour value alpha.
Shape: [n_bins, n_alpha]
array_like
The upper limits (plus +0.5) for each of the observable bins and
chosen contour value alpha.
Shape: [n_bins, n_alpha]
"""
if isinstance(alpha, float):
alpha = [alpha]
mu_large = np.zeros((len(mu), len(alpha)))
alpha_large = np.zeros_like(mu_large)
for i, a_i in enumerate(alpha):
alpha_large[:, i] = a_i
mu_large[:, i] = mu
mu_large_flat = mu_large.reshape(np.prod(mu_large.shape))
alpha_large_flat = alpha_large.reshape(mu_large_flat.shape)
lower, upper = sc_dist.poisson.interval(alpha_large_flat, mu_large_flat)
lower[lower != 0] -= 0.5
upper += 0.5
return lower.reshape(mu_large.shape), upper.reshape(mu_large.shape)
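# Usage sketch (numbers for illustration only): a single bin with Poisson mean 5 and
# the default alpha gives a central scipy interval of about (3, 7), which becomes
# roughly ([[2.5]], [[7.5]]) after the -0.5/+0.5 shifts applied above.
# lower, upper = aggarwal_limits(np.array([5.0]))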
def aggarwal_limits_pdf(pdfs, ks, alpha=0.68268949):
"""Get limits for specified contour levels
In contrast to `aggarwal_limits` this function computes the limits based
on the evaluated and normalized likelihood as opposed to the theoretical
limits from the Poisson disribution.
Parameters
----------
pdfs : list of list of float
The pdf values for each feature bin and for each value k.
The value k is the observed number of events in the Poisson Likelihood.
The number of evaluted k values is different for each observable bin,
and it is chosen such that a certain coverage is obtained.
Shape: [n_bins, n_k_values] (note that n_k_values is not constant!)
ks : list of list of int
The corresponding k value for each of the evaluated pdf values `pdfs`.
Shape: [n_bins, n_k_values] (note that n_k_values is not constant!)
alpha : float or list of float, optional
The list of alpha values, which define the contour levels which will
be computed.
Returns
-------
array_like
The lower limits (minus -0.5) for each of the observable bins and
chosen contour value alpha.
Shape: [n_bins, n_alpha]
array_like
The upper limits (plus +0.5) for each of the observable bins and
chosen contour value alpha.
Shape: [n_bins, n_alpha]
"""
if isinstance(alpha, float):
alpha = [alpha]
lower = np.zeros((len(pdfs), len(alpha)))
upper = np.zeros((len(pdfs), len(alpha)))
for i, pdf in enumerate(pdfs):
if len(ks[i]) == 0:
continue
cdf = np.cumsum(pdf)
if cdf[-1] < 0.999:
print('Cdf only goes up to {}'.format(cdf[-1]))
lower[i, :] = np.nan
upper[i, :] = np.nan
continue
for j, alpha_j in enumerate(alpha):
q1 = (1.-alpha_j) / 2.
q2 = (1.+alpha_j) / 2.
lower_idx = np.searchsorted(cdf, q1)
upper_idx = np.searchsorted(cdf, q2)
lower[i, j] = ks[i][lower_idx]
upper[i, j] = ks[i][upper_idx]
lower[lower != 0] -= 0.5
upper += 0.5
return lower, upper
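# Usage sketch: `pdfs` and `ks` are typically the per-bin outputs of
# evaluate_normalized_likelihood below; the limits are then read off the empirical
# CDF (cumsum + searchsorted) instead of scipy's analytic Poisson interval, e.g.
# lower, upper = aggarwal_limits_pdf(pdfs, ks, alpha=[0.68268949, 0.95])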
def evaluate_normalized_likelihood(llh_func, coverage,
first_guess, **llh_kwargs):
"""Compute normalized likelihood
This function evaluates the likelihood function `llh_func` iteratively over
possible values of k (observed number of events in Poissonian) until
the specified coverage is reached.
This can then be used to normalize the likelihood and to define the PDF
in observed values k and to compute the limits in k.
Parameters
----------
llh_func : callable
The likelihood function
coverage : float
The minimum coverage value to obtain. Max value is 1. The closer to
1, the more accurate, but also more time consuming.
first_guess : float
A first guess of the valid range of k values. Typically, this can
be set to the expected number of values in the observable bin.
**llh_kwargs
Keyword arguments that are passed on to the likelihood function.
Returns
-------
array_like
The (sorted) k values at which the likelihood was evaluated.
array_like
The likelihood values corresponding to each of the (sorted) k values.
These are normalized, i.e. their sum should approach 1, but be at
least as high as the specified `coverage`.
"""
mu = int(first_guess)
prob = np.exp(llh_func(mu, **llh_kwargs))
unsorted_pdf = [prob]
ks = [mu]
max_k = mu
min_k = mu
reached_bottom = False
while prob < coverage:
if not reached_bottom:
if min_k == 0:
reached_bottom = True
else:
min_k -= 1
ks.append(min_k)
new_val = np.exp(llh_func(min_k, **llh_kwargs))
unsorted_pdf.append(
new_val)
prob += new_val
max_k += 1
ks.append(max_k)
new_val = np.exp(llh_func(max_k, **llh_kwargs))
unsorted_pdf.append(new_val)
prob += new_val
ks = np.array(ks)
unsorted_pdf = np.array(unsorted_pdf)
sort_idx = np.argsort(ks)
sorted_ks = ks[sort_idx]
sorted_pdf = unsorted_pdf[sort_idx]
return sorted_ks, sorted_pdf
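# Example sketch with a plain Poisson log-pmf as the likelihood (mu is a keyword
# argument forwarded via **llh_kwargs); the scan expands around first_guess until
# at least 99.9% of the probability mass is covered:
# ks, pdf = evaluate_normalized_likelihood(
#     lambda k, mu: sc_dist.poisson.logpmf(k, mu), coverage=0.999,
#     first_guess=5, mu=5)
# afterwards ks is sorted ascending and pdf.sum() >= 0.999.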
def map_aggarwal_ratio(y_values, y_0=1., upper=True):
"""Map p-values to relative y-values wrt minimium p-value.
The provided p-values `y_values` are mapped to relative y-values.
These transformed y-values are relative to the minimum p-value (in log10).
Depending on whether or not `upper` is True, these relative values will
either be positive or negative. In other words, the p-values are mapped
to y-values in the range of [0, 1] for upper == True and [-1, 0] for
upper == False.
Parameters
----------
y_values : array_like
The p-values for each observable bin.
Shape: [n_bins]
y_0 : float, optional
The highest possible p-value. Anything above this is set to NaN, i.e.
it will not be plotted later.
upper : bool, optional
If True, the ratios are above the expectation values, i.e. the
transformed values will be in the range of [0, 1].
If False, the ratios are below the expectation values in each bin
and the transformed values will be in the range of [-1, 0].
Returns
-------
array_like
The transformed y-values for each of the p-values `y_values`.
Shape: [n_bins]
"""
flattened_y = np.copy(y_values.reshape(np.prod(y_values.shape)))
finite = np.isfinite(flattened_y)
finite_y = flattened_y[finite]
if len(finite_y) == 0:
return y_values, 0.
finite_y[finite_y > y_0] = np.NaN
finite_y = np.log10(finite_y)
y_min = np.min(finite_y)
y_min *= 1.1
finite_y /= y_min
transformed_values = np.copy(flattened_y)
transformed_values[finite] = finite_y
is_nan = np.isnan(flattened_y)
is_pos_inf = np.isposinf(flattened_y)
is_neg_inf = np.isneginf(flattened_y)
got_divided_by_zero = flattened_y == 1.
if upper:
transformed_values[is_nan] = np.nan
transformed_values[is_pos_inf] = np.inf
transformed_values[is_neg_inf] = -np.inf
else:
transformed_values[finite] *= -1.
transformed_values[is_nan] = np.nan
transformed_values[is_pos_inf] = np.inf
transformed_values[is_neg_inf] = -np.inf
transformed_values[got_divided_by_zero] = 0
transformed_values = transformed_values.reshape(y_values.shape)
return transformed_values, y_min
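# Numeric sketch: map_aggarwal_ratio(np.array([1e-4, 1e-2, 1.0])) works on the log10
# p-values [-4, -2, 0], divides by y_min = -4 * 1.1 = -4.4 and returns approximately
# ([0.91, 0.45, 0.0], -4.4); with upper=False the finite values are negated instead.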
def map_aggarwal_limits(y_values, y_0=1., upper=True):
"""Map p-values to relative y-values wrt minimium p-value.
The provided p-values `y_values` are mapped to relative y-values.
These transformed y-values are relative to the minimum p-value (in log10).
Depending on whether or not `upper` is True, these relative values will
either be positive or negative. In other words, the p-values are mapped
to y-values in the range of [0, 1] for upper == True and [-1, 0] for
upper == False.
This function is similar to `map_aggarwal_ratio`, but the handling
of positive and negative infinities is different. These are set to finite
values, such that appropriate limit contours may be drawn.
Parameters
----------
y_values : array_like
The p-values for each observable bin.
Shape: [n_bins]
y_0 : float, optional
The highest possible p-value. Anything above this is set to NaN, i.e.
it will not be plotted later.
upper : bool, optional
If True, the limits are upper limits, i.e. the
transformed values will be in the range of [0, 1].
If False, the limits are lower limits and the transformed values will
be in the range of [-1, 0].
Returns
-------
array_like
The transformed y-values for each of the p-values `y_values`.
Shape: [n_bins]
"""
flattened_y = np.copy(y_values.reshape(np.prod(y_values.shape)))
finite = np.isfinite(flattened_y)
finite_y = flattened_y[finite]
if len(finite_y) == 0:
return y_values, 0.
finite_y[finite_y > y_0] = np.NaN
finite_y = np.log10(finite_y)
y_min = np.min(finite_y)
y_min *= 1.1
finite_y /= y_min
transformed_values = np.copy(flattened_y)
# -*- coding: utf-8 -*-
"""
Created on Mon Jul 13 09:18:30 2020
@author: firo
"""
import xarray as xr
import os
import numpy as np
import scipy as sp
from scipy.interpolate import interp1d
def weighted_ecdf(data, weight = False):
"""
input: 1D arrays of data and corresponding weights
sets weight to 1 if no weights given (= "normal" ecdf, but better than the statsmodels version)
"""
if not np.any(weight):
weight = np.ones(len(data))
sorting = np.argsort(data)
x = data[sorting]
weight = weight[sorting]
y = np.cumsum(weight)/weight.sum()
# clean duplicates, statsmodels does not do this, but it is necessary for us
x_clean = np.unique(x)
y_clean = np.zeros(x_clean.shape)
for i in range(len(x_clean)):
y_clean[i] = y[x==x_clean[i]].max()
return x_clean, y_clean
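# Quick sketch: weighted_ecdf(np.array([3., 1., 2.])) returns
# (array([1., 2., 3.]), array([1/3, 2/3, 1.])); passing weights replaces the uniform
# 1/n steps with normalized cumulative weights.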
def generalized_gamma_cdf(x, xm, d, b, x0):
y = sp.special.gammainc(d/b, ((x-x0)/xm)**b)/sp.special.gamma(d/b)
return y
def generalized_gamma(x, xm, d, b, x0):
y= b/xm**d/sp.special.gamma(d/b)*(x-x0)**(d-1)*np.exp(-((x-x0)/xm)**b)
return y
sample = 'T3_025_3_III' #sample name, get e.g. by dyn_data.attrs['name']
path = r"W:\Robert_TOMCAT_3_netcdf4_archives\processed_1200_dry_seg_aniso_sep"
file = os.path.join(path, ''.join(['peak_diff_data_',sample,'.nc']))
diff_data = xr.load_dataset(file)
def waiting_time_from_ecdf(diff_data, n):
"""
Parameters
----------
diff_data : netcdf4
dataset containing waiting times as peak differences.
n : int
number of nodes.
Returns
-------
array of waiting times with length n.
"""
inter_diffs = diff_data['diffs_v2'][2:,:].data
inter_weights = np.ones(inter_diffs.shape)
intra_diffs = diff_data['diffs_v4'][2:,:].data
intra_weights = np.ones(intra_diffs.shape)
#coding:utf-8
#
# a class of gammatone (gammachirp) FIR filter
# filtering uses scipy overlap add convolution
import sys
import numpy as np
from matplotlib import pyplot as plt
from scipy import signal # scipy > 1.14.1
# Check version
# Python 3.6.4 on win32 (Windows 10)
# numpy 1.14.0
# matplotlib 2.1.1
# scipy 1.4.1
class Class_Gammatone(object):
def __init__(self, fc=1000, sampling_rate=48000, gammachirp=False):
# initialize
self.sr= sampling_rate
self.fc= fc # center frequency by unit is [Hz]
self.ERB_width, self.ERB_rate = self.ERB_N( self.fc )
print ('fc, ERB_width', self.fc, self.ERB_width)
self.N= 4 # 4th order
self.a= 1.0
self.b= 1.019
self.phi= 0.0
self.c= 1.0 # only use for gammachirp
self.gammachirp= gammachirp
# self.bz FIR coefficient
# self.nth FIR length
_,_= self.get_gt()
def ERB_N(self, fin):
# Equivalent Rectangular Bandwidth
return 24.7 * ( 4.37 * fin / 1000. + 1.0), 21.4 * np.log10( 4.37 * fin / 1000. + 1.0)
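# Sanity check (approximate values): ERB_N(1000.) gives an ERB width of
# 24.7 * (4.37 + 1) ~= 132.6 Hz and an ERB rate of 21.4 * log10(5.37) ~= 15.6.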
def gt(self, t0): # gammatone
return self.a * np.power( t0, self.N-1) * np.exp( -2.0 * np.pi * self.b * self.ERB_width * t0) * np.cos( 2.0 * np.pi * self.fc * t0 + self.phi)
def gt_chirp(self, t0): # gammachirp
return self.a * np.power( t0, self.N-1) * np.exp( -2.0 * np.pi * self.b * self.ERB_width * t0) * np.cos( 2.0 * np.pi * self.fc * t0 + self.c * np.log(t0) + self.phi)
def get_gt(self,nsec=0.1, neffective=1E-12):
# compute some duration
tlist= np.arange(0.0, (1000.0 / self.fc) * nsec, 1.0/self.sr)
if self.gammachirp :
tlist= tlist[1:]
print ('warning: t starts at 1/sampling rate, because log(0.0) is undefined')
self.gt_nsec= self.gt_chirp( tlist)
else:
self.gt_nsec= self.gt( tlist)
self.a= 1.0 / np.sum(np.abs(self.gt_nsec))
"""
Contains class Affine2D for performing affine transformation (general linear
transformation followed by translation) on points (vectors) in 2D.
# Author: <NAME> (Max Planck Institute for Biochemistry)
# $Id$
"""
from __future__ import unicode_literals
from __future__ import absolute_import
from __future__ import division
#from past.utils import old_div
from past.builtins import basestring
__version__ = "$Revision$"
import logging
import numpy
import scipy
import scipy.linalg as linalg
from .points import Points
from .affine import Affine
class Affine2D(Affine):
"""
Finds and performs affine transformation (general linear transformation
followed by translation) on points (vectors) in 2D.
The transformation that transforms points x to points y has the following
form:
y = gl x + d
where:
gl = q s p m
Main methods:
- find: finds a transformation between two sets of points
- transform: transforms a (set of) point(s)
- inverse: calculates inverse transformation
- compose: composition of two transformations
Important attributes and properties (see formulas above):
- d: translation vector
- gl: general linear transformation matrix
- q: rotation matrix
- phi: rotational angle (radians)
- phiDeg: rotational angle (degrees)
- s: scaling matrix (diagonal)
- scale: vector of scaling parameters (diagonal of s)
- p: parity matrix (diagonal)
- parity: parity (+1 or -1)
- m: shear matrix (upper-triangular)
- error: error of transformation for all points
- rmsError: root mean square error of the transformation
"""
def __init__(self, d=None, gl=None, phi=None, scale=None,
parity=1, shear=0, order='qpsm', xy_axes='point_dim'):
"""
Initialization. Following argument combinations are valid:
- no arguments: no transformation matrices are initialized
- d and gl: d and gl are set
- d, phi and scale (parity and shear optional): d and gl
(gl = q p s m) are set
If arg d is None it is set to [0, 0]. If it is a single number it is
set to the same value in both directions.
If the arg xy_axes is 'point_dim' / 'dim_point', points used in this
instance should be specified as n_point x 2 / 2 x n_point
matrices.
Arguments
- gl: gl matrix
- phi: angle
- scale: single number or 1d array
- parity: 1 or -1
- shear: (number) shear
- d: single number, 1d array, or None (means 0)
- order: decomposition order
- xy_axes: order of axes in matrices representing points, can be
'point_dim' (default) or 'dim_point'
"""
# set d
d = Affine.makeD(d, ndim=2)
if (gl is not None) and (d is not None):
super(self.__class__, self).__init__(
gl, d, order=order, xy_axes=xy_axes)
elif (phi is not None) and (scale is not None) and (d is not None):
if not isinstance(scale, (numpy.ndarray, list)):
scale = self.makeS(scale)
elif isinstance(scale, numpy.ndarray) and (len(scale.shape) == 1):
scale = self.makeS(scale)
elif isinstance(scale, list) and not isinstance(scale[0], list):
scale = self.makeS(scale)
qp = numpy.inner(self.makeQ(phi), self.makeP(parity))
sm = numpy.inner(scale, self.makeM(shear))
gl = numpy.inner(qp, sm)
super(self.__class__, self).__init__(
gl, d, order=order, xy_axes=xy_axes)
else:
raise ValueError("Transformation could not be created because "
+ " not enough parameters were specified.")
@classmethod
def downcast(cls, affine):
"""
Returns instance of this class that was obtained by downcasting
arg affine (instance of Affine, base class of this class).
Argument:
- affine: instance of Affine
"""
# copy gl and d, obligatory
new = cls(gl=affine.gl, d=affine.d, xy_axes=affine.xy_axes)
# copy attributes that are not obligatory
for name in ['order', 'resids', 'rank', 'singular', 'error', '_xPrime',
'_yPrime', 'q', 'p', 's', 'm', 'xy_axes']:
try:
value = getattr(affine, name)
setattr(new, name, value)
except AttributeError:
pass
return new
##############################################################
#
# Parameters
#
@classmethod
def identity(cls, ndim=2):
"""
Returns an identity object of this class, that is a transformation
that leaves all vectors invariant.
Argument ndim is ignored, it should be 2 here.
"""
obj = cls.__base__.identity(ndim=2)
return obj
@classmethod
def makeQ(cls, phi):
"""
Returns rotation matrix corresponding to angle phi
"""
q = numpy.array([[numpy.cos(phi), -numpy.sin(phi)],
[numpy.sin(phi), numpy.cos(phi)]])
return q
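# For example, makeQ(numpy.pi / 2) is (up to floating point) the 90 degree rotation
# [[0, -1], [1, 0]]; getAngle() below recovers phi from such a matrix via arctan2.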
@classmethod
def getAngle(cls, q):
"""
Returns angle corresponding to the rotation matrix specified by arg q
"""
res = numpy.arctan2(q[1,0], q[0,0])
return res
@classmethod
def makeS(cls, scale):
"""
Returns scale transformation corresponding to 1D array scale.
Argument:
- scale: can be given as an 1d array (or a list), or as a single
number in which case the scale is the same in all directions
"""
s = cls.__base__.makeS(scale=scale, ndim=2)
return s
@classmethod
def makeP(cls, parity, axis=-1):
"""
Returns parity matrix corresponding to arg parity.
If parity is -1, the element of the parity matrix corresponding to
axis is set to -1 (all other are 1).
Arguments:
- parity: can be 1 or -1
- axis: axis denoting parity element that can be -1
"""
p = cls.__base__.makeP(parity=parity, axis=axis, ndim=2)
return p
@classmethod
def makeM(cls, shear):
"""
Returns shear matrix corresponding to (arg) shear.
"""
m = numpy.array([[1, shear],
[0, 1]])
return m
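# For example, makeM(0.5) = [[1, 0.5], [0, 1]] maps a point (x, y) to
# (x + 0.5 * y, y), i.e. it shears along x while keeping y fixed.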
@classmethod
def makeD(cls, d, ndim=2):
"""
Returns d (translation) array corresponding to arg d.
Arguments:
- d: (single number) translation
"""
d = cls.__base__.makeD(d, ndim=ndim)
return d
def getPhi(self):
"""
Rotation angle of matrix self.q in radians.
"""
#try:
# qq = self.q
#except AttributeError:
# self.decompose(order='qpsm')
res = numpy.arctan2(self.q[1,0], self.q[0,0])
return res
def setPhi(self, phi):
"""
Sets transformation matrices related to phi (q and gl). Matrix gl is
calculated using the current values of other matrices (p, s and m).
"""
self.q = self.makeQ(phi)
try:
gg = self.gl
self.gl = self.composeGl()
except AttributeError:
pass
phi = property(fget=getPhi, fset=setPhi, doc='Rotation angle in radians')
def getPhiDeg(self):
"""
Rotation angle in degrees
"""
res = self.phi * 180 / numpy.pi
return res
def setPhiDeg(self, phi):
"""
Sets transformation matrices related to phi (q and gl). Matrix gl is
calculated using the current values of other matrices (p, s and m).
"""
phi_rad = phi * numpy.pi / 180
self.q = self.makeQ(phi_rad)
try:
gg = self.gl
self.gl = self.composeGl()
except AttributeError:
pass
phiDeg = property(fget=getPhiDeg, fset=setPhiDeg,
doc='Rotation angle in degrees')
def getUAngle(self):
"""
Returns angle alpha corresponding to rotation matrix self.u
"""
return self.getAngle(q=self.u)
def setUAngle(self, angle):
"""
Sets U matrix (as in usv decomposition) and adjusts gl.
"""
self.u = self.makeQ(angle)
self.gl = self.composeGl()
uAngle = property(fget=getUAngle, fset=setUAngle,
doc='Rotation angle corresponding to matrix U in radians')
def getUAngleDeg(self):
"""
Returns angle alpha corresponding to rotation matrix self.u
"""
res = self.getAngle(q=self.u) * 180 / numpy.pi
return res
def setUAngleDeg(self, angle):
"""
Sets U matrix (as in usv decomposition) and adjusts gl.
"""
angle_rad = angle * numpy.pi / 180
self.u = self.makeQ(angle_rad)
self.gl = self.composeGl()
uAngleDeg = property(fget=getUAngleDeg, fset=setUAngleDeg,
doc='Rotation angle corresponding to matrix U in degrees')
def getVAngle(self):
"""
Returns angle alpha corresponding to rotation matrix self.v
"""
return self.getAngle(q=self.v)
def setVAngle(self, angle):
"""
Sets V matrix (as in usv decomposition) and adjusts gl.
"""
self.v = self.makeQ(angle)
self.gl = self.composeGl()
vAngle = property(fget=getVAngle, fset=setVAngle,
doc='Rotation angle corresponding to matrix V in radians')
def getVAngleDeg(self):
"""
Returns angle alpha corresponding to rotation matrix self.v
"""
res = self.getAngle(q=self.v) * 180 / numpy.pi
return res
def setVAngleDeg(self, angle):
"""
Sets V matrix (as in usv decomposition) and adjusts gl.
"""
angle_rad = angle * numpy.pi / 180
self.v = self.makeQ(angle_rad)
self.gl = self.composeGl()
vAngleDeg = property(fget=getVAngleDeg, fset=setVAngleDeg,
doc='Rotation angle corresponding to matrix V in degrees')
def getScaleAngle(self):
"""
Returns angle (in rad) that corresponds to the scaling:
arccos(scale_smaller / scale_larger)
where scale_smaller and scale_larger are the smaller and larger scale
factors, respectively.
Rotation of a 2D object by this angle around the x-axis in 3D is equivalent
to scaling this object by self.scale (up to a common scale factor).
"""
ratio = self.scale[1] / self.scale[0]
if ratio > 1:
ratio = 1. / ratio
res = numpy.arccos(ratio)
return res
scaleAngle = property(
fget=getScaleAngle,
doc='Angle corresponding to the ratio of scales (in rad)')
def getScaleAngleDeg(self):
"""
Returns angle in degrees that corresponds to the scaling:
arccos(scale[1]/scale[0])
Rotation of a 2D object by this angle around the x-axis in 3D is equivalent
to scaling this object by self.scale (up to a common scale factor).
"""
return self.scaleAngle * 180 / numpy.pi
scaleAngleDeg = property(
fget=getScaleAngleDeg,
doc='Angle corresponding to the ratio of scales in degrees')
def getShear(self):
"""
Shear
"""
try:
mm = self.m
except AttributeError:
self.decompose()
res = self.m[0, 1]
return res
shear = property(fget=getShear, doc='Shear')
##############################################################
#
# Finding and applying transformations
#
@classmethod
def find(
cls, x, y, x_ref='cm', y_ref='cm', type_='gl', xy_axes='point_dim'):
"""
Finds affine transformation (general linear transformation followed by a
translation) that minimizes square error for transforming points x to
points y in 2D. The transformation has the form
y = gl x + d, (1)
and:
gl = q s p m for type_='gl'
gl = S q p for type_='rs'
where d is translation vector, q, s, p and m are rotation, scaling,
parity and shear matrices, respectively, and S is a scalar scale (same
for both directions)
In the default mode (x_ref='cm' and y_ref='cm') the parameters are
calculated by minimizing square error to get gl from:
y - y_cm = gl (x - x_cm) and d = y_cm - gl x_cm
where x_cm and y_cm are the centers of mass for x and y, respectively.
In this case the square error of eq 1 is minimized
In case args x_ref and y_ref are coordinates, gl is determined by
minimizing square error in:
y - y_ref = gl (x - x_ref) and d = y_ref - gl x_ref
Note that in this case the parameters found do not minimize the error
of eq 1.
In case type_='gl', general linear transformation (matrix gl) is
calculated using Affine.find which in turn uses scipy.linalg.lstsq.
Alternatively, if type_='rs', rotation angle parity and scale are
calculated using findRS() method.
Arguments:
- x, y: sets of points, both having shape (n_points, n_dim)
- x_ref, y_ref: (ndarray) coordinates of reference points, or 'cm' to
use center of mass
Returns the transformation found as an instance of class cls, with
following attributes:
- gl: general linear transformation matrix
- d: translation vector
- q, p, s, m: rotation, parity, scale and shear matrices
- error: difference between y and transformed x values
- resids, rank, singular: values returned from scipy.linalg.lstsq
- _xPrime: x - x_ref
- _yPrime: y - y_ref
- type_: type of the optimization, 'gl' to find Gl transformation
that optimizes the square error, or 'rs' to find the best rotation
and one scale (currently implemented for 2D transformations only)
"""
if type_ == 'gl':
# run Affine.base and downcast
base_inst = cls.__base__.find(
x=x, y=y, x_ref=x_ref, y_ref=y_ref, xy_axes=xy_axes)
inst = cls.downcast(affine=base_inst)
elif type_ == 'rs':
# call special method for 'rs' type in 2D
inst = cls.findRS(
x=x, y=y, x_ref=x_ref, y_ref=y_ref, xy_axes=xy_axes)
else:
raise ValueError("Argument type_: ", type_, "was not ",
"understood. Valid values are 'gl', and 'rs'.")
return inst
@classmethod
def findRS(cls, x, y, x_ref='cm', y_ref='cm', xy_axes='point_dim'):
"""
Finds transformation consisting of rotation, single scale factor and
translation in 2D that minimizes square error for transforming points
x to points y. The transformation has the form
y = gl x + d, gl = S q p (1)
where d is translation vector, q and p are rotation and parity
matrices, respectively, and S is a scalar scale (same for both
directions)
In the default mode (x_ref='cm' and y_ref='cm') the parameters are
calculated by minimizing square error to get gl from:
y - y_cm = gl (x - x_cm) and d = y_cm - gl x_cm
where x_cm and y_cm are the centers of mass for x and y, respectively.
In this case the square error of eq 1 is minimized
In case args x_ref and y_ref are coordinates, gl is determined by
minimizing square error in:
y - y_ref = gl (x - x_ref) and d = y_ref - gl x_ref
Note that in this case the parameters found do not minimize the error
of eq 1.
In center of mass coordinates, scale and parity are calculated
directly using:
S = sqrt( det(yx) / det(xx) )
P = sign( det(yx) / det(xx) )
Rotation angle is calculated so that the square error is minimized:
tan(phi + pi/2) = tr(y p x) / tr(y r0 p x)
where:
r0 = [[0, -1], [1, 0]]
Arguments:
- x, y: sets of points, both having shape (n_points, n_dim)
- x_ref, y_ref: (ndarray) coordinates of reference points, or 'cm' to
use center of mass
Returns the transformation found as an instance of class cls, with
following attributes:
- gl: general linear transformation matrix
- d: translation vector
- q, p, s, m: rotation, parity, scale and shear matrices
- error: difference between y and transformed x values
- resids, rank, singular: values returned from scipy.linalg.lstsq
Note: To be replaced by SVD based method
"""
# bring x and y to n_points x n_dim shape
if xy_axes == 'point_dim':
pass
elif xy_axes == 'dim_point':
x = x.transpose()
y = y.transpose()
else:
raise ValueError(
"Argument xy_axes was not understood. Possible values are: "
+ "'point_dim' and 'dim_point'.")
# bring x to reference frame
if isinstance(x_ref, basestring) and (x_ref == 'cm'):
x_ref = numpy.mean(x, axis=0)
elif isinstance(x_ref, (list, tuple, numpy.ndarray)):
pass
else:
raise ValueError(\
'Argument x_ref: ', x_ref, ' was not understood.',
" Allowed values are None, 'cm', or an array.")
x_prime = x - x_ref
# bring y to reference frame
if isinstance(y_ref, basestring) and (y_ref == 'cm'):
y_ref = numpy.mean(y, axis=0)
import re
import os
import argparse
import json
import random
import numpy as np
import torch
import torch.utils.data
from scipy.io.wavfile import read
from scipy.stats import betabinom
from audio_processing import TacotronSTFT
from text import text_to_sequence, cmudict, _clean_text, get_arpabet
def beta_binomial_prior_distribution(phoneme_count, mel_count):
P, M = phoneme_count, mel_count
x = np.arange(0, P)
import os
from datetime import datetime
import numpy as np
import pandas as pd
import pytest
import mikeio
from mikeio import Dataset, Dfsu, Dfs2, Dfs0
from mikeio.eum import EUMType, ItemInfo, EUMUnit
@pytest.fixture
def ds1():
nt = 10
ne = 7
d1 = np.zeros([nt, ne]) + 0.1
d2 = np.zeros([nt, ne]) + 0.2
data = [d1, d2]
time = pd.date_range(start=datetime(2000, 1, 1), freq="S", periods=nt)
items = [ItemInfo("Foo"), ItemInfo("Bar")]
return Dataset(data, time, items)
@pytest.fixture
def ds2():
nt = 10
ne = 7
d1 = np.zeros([nt, ne]) + 1.0
d2 = np.zeros([nt, ne]) + 2.0
data = [d1, d2]
time = pd.date_range(start=datetime(2000, 1, 1), freq="S", periods=nt)
items = [ItemInfo("Foo"), ItemInfo("Bar")]
return Dataset(data, time, items)
def test_create_wrong_data_type_error():
data = ["item 1", "item 2"]
nt = 2
time = pd.date_range(start=datetime(2000, 1, 1), freq="S", periods=nt)
with pytest.raises(TypeError, match="numpy"):
Dataset(data=data, time=time)
def test_get_names():
data = []
nt = 100
d = np.zeros([nt, 100, 30]) + 1.0
data.append(d)
time = pd.date_range(start=datetime(2000, 1, 1), freq="S", periods=nt)
items = [ItemInfo("Foo")]
ds = Dataset(data, time, items)
assert ds.items[0].name == "Foo"
assert ds.items[0].type == EUMType.Undefined
assert repr(ds.items[0].unit) == "undefined"
def test_select_subset_isel():
nt = 100
d1 = np.zeros([nt, 100, 30]) + 1.5
d2 = np.zeros([nt, 100, 30]) + 2.0
d1[0, 10, :] = 2.0
d2[0, 10, :] = 3.0
data = [d1, d2]
time = pd.date_range(start=datetime(2000, 1, 1), freq="S", periods=nt)
items = [ItemInfo("Foo"), ItemInfo("Bar")]
ds = Dataset(data, time, items)
selds = ds.isel(10, axis=1)
assert len(selds.items) == 2
assert len(selds.data) == 2
assert selds["Foo"].shape == (100, 30)
assert selds["Foo"][0, 0] == 2.0
assert selds["Bar"][0, 0] == 3.0
def test_select_subset_isel_axis_out_of_range_error(ds2):
assert len(ds2.shape) == 2
dss = ds2.isel(idx=0)
# After subsetting there is only one dimension
assert len(dss.shape) == 1
with pytest.raises(ValueError):
dss.isel(idx=0, axis="spatial")
def test_select_temporal_subset_by_idx():
nt = 100
d1 = np.zeros([nt, 100, 30]) + 1.5
d2 = np.zeros([nt, 100, 30]) + 2.0
d1[0, 10, :] = 2.0
d2[0, 10, :] = 3.0
data = [d1, d2]
time = pd.date_range(start=datetime(2000, 1, 1), freq="S", periods=nt)
items = [ItemInfo("Foo"), ItemInfo("Bar")]
ds = Dataset(data, time, items)
selds = ds.isel([0, 1, 2], axis=0)
assert len(selds) == 2
assert selds["Foo"].shape == (3, 100, 30)
def test_temporal_subset_fancy():
nt = (24 * 31) + 1
d1 = np.zeros([nt, 100, 30]) + 1.5
d2 = np.zeros([nt, 100, 30]) + 2.0
data = [d1, d2]
time = pd.date_range("2000-1-1", freq="H", periods=nt)
items = [ItemInfo("Foo"), ItemInfo("Bar")]
ds = Dataset(data, time, items)
assert ds.time[0].hour == 0
assert ds.time[-1].hour == 0
selds = ds["2000-01-01 00:00":"2000-01-02 00:00"]
assert len(selds) == 2
assert selds["Foo"].shape == (25, 100, 30)
selds = ds[:"2000-01-02 00:00"]
assert selds["Foo"].shape == (25, 100, 30)
selds = ds["2000-01-31 00:00":]
assert selds["Foo"].shape == (25, 100, 30)
selds = ds["2000-01-30":]
assert selds["Foo"].shape == (49, 100, 30)
def test_subset_with_datetime_is_not_supported():
nt = (24 * 31) + 1
d1 = np.zeros([nt, 100, 30]) + 1.5
d2 = np.zeros([nt, 100, 30]) + 2.0
data = [d1, d2]
time = pd.date_range("2000-1-2", freq="H", periods=nt)
items = [ItemInfo("Foo"), ItemInfo("Bar")]
ds = Dataset(data, time, items)
with pytest.raises(ValueError):
ds[datetime(2000, 1, 1)]
def test_select_item_by_name():
nt = 100
d1 = np.zeros([nt, 100, 30]) + 1.5
d2 = np.zeros([nt, 100, 30]) + 2.0
d1[0, 10, :] = 2.0
d2[0, 10, :] = 3.0
data = [d1, d2]
time = pd.date_range("2000-1-2", freq="H", periods=nt)
items = [ItemInfo("Foo"), ItemInfo("Bar")]
ds = Dataset(data, time, items)
foo_data = ds["Foo"]
assert foo_data[0, 10, 0] == 2.0
def test_select_multiple_items_by_name():
nt = 100
d1 = np.zeros([nt, 100, 30]) + 1.5
d2 = np.zeros([nt, 100, 30]) + 2.0
d3 = np.zeros([nt, 100, 30]) + 3.0
data = [d1, d2, d3]
time = pd.date_range("2000-1-2", freq="H", periods=nt)
# items = [ItemInfo("Foo"), ItemInfo("Bar"), ItemInfo("Baz")]
items = [ItemInfo(x) for x in ["Foo", "Bar", "Baz"]]
ds = Dataset(data, time, items)
assert len(ds) == 3 # Length of a dataset is the number of items
newds = ds[["Baz", "Foo"]]
assert newds.items[0].name == "Baz"
assert newds.items[1].name == "Foo"
assert newds["Foo"][0, 10, 0] == 1.5
assert len(newds) == 2
def test_select_multiple_items_by_index():
nt = 100
d1 = np.zeros([nt, 100, 30]) + 1.5
d2 = np.zeros([nt, 100, 30]) + 2.0
d3 = np.zeros([nt, 100, 30]) + 3.0
data = [d1, d2, d3]
time = pd.date_range("2000-1-2", freq="H", periods=nt)
items = [ItemInfo(x) for x in ["Foo", "Bar", "Baz"]]
ds = Dataset(data, time, items)
assert len(ds) == 3 # Length of a dataset is the number of items
newds = ds[[2, 0]]
assert newds.items[0].name == "Baz"
assert newds.items[1].name == "Foo"
assert newds["Foo"][0, 10, 0] == 1.5
assert len(newds) == 2
def test_select_item_by_iteminfo():
nt = 100
d1 = np.zeros([nt, 100, 30]) + 1.5
d2 = np.zeros([nt, 100, 30]) + 2.0
d1[0, 10, :] = 2.0
d2[0, 10, :] = 3.0
data = [d1, d2]
time = pd.date_range("2000-1-2", freq="H", periods=nt)
items = [ItemInfo("Foo"), ItemInfo("Bar")]
ds = Dataset(data, time, items)
foo_item = items[0]
foo_data = ds[foo_item]
assert foo_data[0, 10, 0] == 2.0
def test_select_subset_isel_multiple_idxs():
nt = 100
d1 = np.zeros([nt, 100, 30]) + 1.5
d2 = np.zeros([nt, 100, 30]) + 2.0
data = [d1, d2]
time = pd.date_range("2000-1-2", freq="H", periods=nt)
items = [ItemInfo("Foo"), ItemInfo("Bar")]
ds = Dataset(data, time, items)
selds = ds.isel([10, 15], axis=1)
assert len(selds.items) == 2
assert len(selds.data) == 2
assert selds["Foo"].shape == (100, 2, 30)
def test_decribe(ds1):
df = ds1.describe()
assert df.columns[0] == "Foo"
assert df.loc["mean"][1] == pytest.approx(0.2)
assert df.loc["max"][0] == pytest.approx(0.1)
def test_create_undefined():
nt = 100
d1 = np.zeros([nt])
d2 = np.zeros([nt])
data = [d1, d2]
time = pd.date_range("2000-1-2", freq="H", periods=nt)
# items = 2
ds = Dataset(data, time)
assert len(ds.items) == 2
assert len(ds.data) == 2
assert ds.items[0].name == "Item 1"
assert ds.items[0].type == EUMType.Undefined
def test_create_named_undefined():
nt = 100
d1 = np.zeros([nt])
d2 = np.zeros([nt])
data = [d1, d2]
time = pd.date_range("2000-1-2", freq="H", periods=nt)
ds = Dataset(data=data, time=time, items=["Foo", "Bar"])
assert len(ds.items) == 2
assert len(ds.data) == 2
assert ds.items[1].name == "Bar"
assert ds.items[1].type == EUMType.Undefined
def test_to_dataframe_single_timestep():
nt = 1
d1 = np.zeros([nt])
d2 = np.zeros([nt])
data = [d1, d2]
time = pd.date_range("2000-1-2", freq="H", periods=nt)
items = [ItemInfo("Foo"), ItemInfo("Bar")]
ds = Dataset(data, time, items)
df = ds.to_dataframe()
assert list(df.columns) == ["Foo", "Bar"]
assert isinstance(df.index, pd.DatetimeIndex)
def test_to_dataframe():
nt = 100
d1 = np.zeros([nt])
d2 = np.zeros([nt])
data = [d1, d2]
time = pd.date_range("2000-1-2", freq="H", periods=nt)
items = [ItemInfo("Foo"), ItemInfo("Bar")]
ds = Dataset(data, time, items)
df = ds.to_dataframe()
assert list(df.columns) == ["Foo", "Bar"]
assert isinstance(df.index, pd.DatetimeIndex)
def test_multidimensional_to_dataframe_no_supported():
nt = 100
d1 = np.zeros([nt, 2])
time = pd.date_range("2000-1-2", freq="H", periods=nt)
items = [ItemInfo("Foo")]
ds = Dataset([d1], time, items)
with pytest.raises(ValueError):
ds.to_dataframe()
def test_get_data():
data = []
nt = 100
d = np.zeros([nt, 100, 30]) + 1.0
data.append(d)
time = pd.date_range("2000-1-2", freq="H", periods=nt)
items = [ItemInfo("Foo")]
ds = Dataset(data, time, items)
assert ds.data[0].shape == (100, 100, 30)
def test_interp_time():
nt = 4
d = np.zeros([nt, 10, 3])
d[1] = 2.0
d[3] = 4.0
data = [d]
time = pd.date_range("2000-1-1", freq="D", periods=nt)
items = [ItemInfo("Foo")]
ds = Dataset(data, time, items)
assert ds.data[0].shape == (nt, 10, 3)
dsi = ds.interp_time(dt=3600)
assert ds.time[0] == dsi.time[0]
assert dsi.data[0].shape == (73, 10, 3)
def test_interp_time_to_other_dataset():
# Arrange
## Dataset 1
nt = 4
data = [np.zeros([nt, 10, 3])]
time = pd.date_range("2000-1-1", freq="D", periods=nt)
items = [ItemInfo("Foo")]
ds1 = Dataset(data, time, items)
assert ds1.data[0].shape == (nt, 10, 3)
## Dataset 2
nt = 12
data = [np.ones([nt, 10, 3])]
time = pd.date_range("2000-1-1", freq="H", periods=nt)
items = [ItemInfo("Foo")]
ds2 = Dataset(data, time, items)
# Act
## interp
dsi = ds1.interp_time(dt=ds2.time)
# Assert
assert dsi.time[0] == ds2.time[0]
assert dsi.time[-1] == ds2.time[-1]
assert len(dsi.time) == len(ds2.time)
assert dsi.data[0].shape[0] == ds2.data[0].shape[0]
# Accept dataset as argument
dsi2 = ds1.interp_time(ds2)
assert dsi2.time[0] == ds2.time[0]
def test_extrapolate():
# Arrange
## Dataset 1
nt = 2
data = [np.zeros([nt, 10, 3])]
time = pd.date_range("2000-1-1", freq="D", periods=nt)
items = [ItemInfo("Foo")]
ds1 = Dataset(data, time, items)
assert ds1.data[0].shape == (nt, 10, 3)
## Dataset 2 partly overlapping with Dataset 1
nt = 3
data = [np.ones([nt, 10, 3])]
time = pd.date_range("2000-1-2", freq="H", periods=nt)
items = [ItemInfo("Foo")]
ds2 = Dataset(data, time, items)
# Act
## interp
dsi = ds1.interp_time(dt=ds2.time, fill_value=1.0)
# Assert
assert dsi.time[0] == ds2.time[0]
assert dsi.time[-1] == ds2.time[-1]
assert len(dsi.time) == len(ds2.time)
assert dsi.data[0][0] == pytest.approx(0.0)
assert dsi.data[0][1] == pytest.approx(1.0) # filled
assert dsi.data[0][2] == pytest.approx(1.0) # filled
def test_extrapolate_not_allowed():
## Dataset 1
nt = 2
data = [np.zeros([nt, 10, 3])]
time = pd.date_range("2000-1-1", freq="D", periods=nt)
items = [ItemInfo("Foo")]
ds1 = Dataset(data, time, items)
assert ds1.data[0].shape == (nt, 10, 3)
## Dataset 2 partly overlapping with Dataset 1
nt = 3
data = [np.ones([nt, 10, 3])]
time = pd.date_range("2000-1-2", freq="H", periods=nt)
items = [ItemInfo("Foo")]
ds2 = Dataset(data, time, items)
with pytest.raises(ValueError):
dsi = ds1.interp_time(dt=ds2.time, fill_value=1.0, extrapolate=False)
def test_get_data_2():
nt = 100
data = []
d = np.zeros([nt, 100, 30]) + 1.0
data.append(d)
time = pd.date_range("2000-1-2", freq="H", periods=nt)
items = [ItemInfo("Foo")]
ds = Dataset(data, time, items)
assert data[0].shape == (100, 100, 30)
def test_get_data_name():
nt = 100
data = []
d = np.zeros([nt, 100, 30]) + 1.0
data.append(d)
time = pd.date_range("2000-1-2", freq="H", periods=nt)
items = [ItemInfo("Foo")]
ds = Dataset(data, time, items)
assert ds["Foo"].shape == (100, 100, 30)
def test_set_data_name():
nt = 100
time = pd.date_range("2000-1-2", freq="H", periods=nt)
items = [ItemInfo("Foo")]
ds = Dataset([np.zeros((nt, 10))], time, items)
assert ds["Foo"][0, 0] == 0.0
ds["Foo"] = np.zeros((nt, 10)) + 1.0
assert ds["Foo"][0, 0] == 1.0
ds[0] = np.zeros((nt, 10)) + 2.0 # Set using position
assert ds["Foo"][0, 0] == 2.0 # Read using name
with pytest.raises(ValueError):
ds[[0, 1]] = (
np.zeros((nt, 10)) + 2.0
) # You can't set data for several items (at least not yet)
def test_get_bad_name():
nt = 100
data = []
d = np.zeros([100, 100, 30]) + 1.0
data.append(d)
time = pd.date_range("2000-1-2", freq="H", periods=nt)
items = [ItemInfo("Foo")]
ds = Dataset(data, time, items)
with pytest.raises(Exception):
ds["BAR"]
def test_head():
nt = 100
data = []
d = np.zeros([nt, 100, 30]) + 1.0
data.append(d)
time = pd.date_range("2000-1-2", freq="H", periods=nt)
items = [ItemInfo("Foo")]
ds = Dataset(data, time, items)
dshead = ds.head()
assert len(dshead.time) == 5
assert ds.time[0] == dshead.time[0]
dshead10 = ds.head(n=10)
assert len(dshead10.time) == 10
def test_head_small_dataset():
nt = 2
data = []
d = np.zeros([nt, 100, 30]) + 1.0
data.append(d)
time = pd.date_range("2000-1-2", freq="H", periods=nt)
items = [ItemInfo("Foo")]
ds = Dataset(data, time, items)
dshead = ds.head()
assert len(dshead.time) == nt
def test_tail():
nt = 100
data = []
d = np.zeros([nt, 100, 30]) + 1.0
data.append(d)
time = pd.date_range("2000-1-2", freq="H", periods=nt)
items = [ItemInfo("Foo")]
ds = Dataset(data, time, items)
dstail = ds.tail()
assert len(dstail.time) == 5
assert ds.time[-1] == dstail.time[-1]
dstail10 = ds.tail(n=10)
assert len(dstail10.time) == 10
def test_thin():
nt = 100
data = []
d = np.zeros([nt, 100, 30]) + 1.0
data.append(d)
time = pd.date_range("2000-1-2", freq="H", periods=nt)
items = [ItemInfo("Foo")]
ds = Dataset(data, time, items)
dsthin = ds.thin(2)
assert len(dsthin.time) == 50
def test_tail_small_dataset():
nt = 2
data = []
d = np.zeros([nt, 100, 30]) + 1.0
data.append(d)
time = pd.date_range("2000-1-2", freq="H", periods=nt)
items = [ItemInfo("Foo")]
ds = Dataset(data, time, items)
dstail = ds.tail()
assert len(dstail.time) == nt
def test_flipud():
nt = 2
d = np.random.random([nt, 100, 30])
time = pd.date_range("2000-1-2", freq="H", periods=nt)
items = [ItemInfo("Foo")]
ds = Dataset([d], time, items)
dsud = ds.copy()
dsud.flipud()
assert dsud.shape == ds.shape
assert dsud["Foo"][0, 0, 0] == ds["Foo"][0, -1, 0]
def test_aggregation_workflows(tmpdir):
filename = "tests/testdata/HD2D.dfsu"
dfs = Dfsu(filename)
ds = dfs.read(["Surface elevation", "Current speed"])
ds2 = ds.max(axis=1)
outfilename = os.path.join(tmpdir.dirname, "max.dfs0")
ds2.to_dfs0(outfilename)
assert os.path.isfile(outfilename)
ds3 = ds.min(axis=1)
outfilename = os.path.join(tmpdir.dirname, "min.dfs0")
ds3.to_dfs0(outfilename)
assert os.path.isfile(outfilename)
def test_aggregations():
filename = "tests/testdata/gebco_sound.dfs2"
dfs = Dfs2(filename)
ds = dfs.read()
for axis in [0, 1, 2]:
ds.mean(axis=axis)
ds.nanmean(axis=axis)
ds.nanmin(axis=axis)
ds.nanmax(axis=axis)
assert ds.mean(axis=None).shape == (1,)
assert ds.mean(axis=(1, 2)).shape == (1,)
assert ds.mean(axis=(0, 1)).shape == (1, 216)
assert ds.mean(axis=(0, 2)).shape == (1, 264)
assert ds.mean(axis="spatial").shape == (1,)
assert ds.mean(axis="space").shape == (1,)
with pytest.raises(ValueError, match="space"):
ds.mean(axis="spaghetti")
def test_weighted_average(tmpdir):
filename = "tests/testdata/HD2D.dfsu"
dfs = Dfsu(filename)
ds = dfs.read(["Surface elevation", "Current speed"])
area = dfs.get_element_area()
ds2 = ds.average(weights=area, axis=1)
outfilename = os.path.join(tmpdir.dirname, "average.dfs0")
ds2.to_dfs0(outfilename)
assert os.path.isfile(outfilename)
def test_quantile_axis1(ds1):
dsq = ds1.quantile(q=0.345, axis=1)
assert dsq[0][0] == 0.1
assert dsq[1][0] == 0.2
assert dsq.n_items == ds1.n_items
assert dsq.n_timesteps == ds1.n_timesteps
# q as list
dsq = ds1.quantile(q=[0.25, 0.75], axis=1)
assert dsq.n_items == 2 * ds1.n_items
assert "Quantile 0.75, " in dsq.items[1].name
assert "Quantile 0.25, " in dsq.items[2].name
def test_quantile_axis0(ds1):
dsq = ds1.quantile(q=0.345) # axis=0 is default
assert dsq[0][0, 0] == 0.1
assert dsq[1][0, 0] == 0.2
assert dsq.n_items == ds1.n_items
assert dsq.n_timesteps == 1
assert dsq.shape[-1] == ds1.shape[-1]
# q as list
dsq = ds1.quantile(q=[0.25, 0.75], axis=0)
assert dsq[0][0, 0] == 0.1
assert dsq[1][0, 0] == 0.1
assert dsq[2][0, 0] == 0.2
assert dsq[3][0, 0] == 0.2
assert dsq.n_items == 2 * ds1.n_items
assert "Quantile 0.75, " in dsq.items[1].name
assert "Quantile 0.25, " in dsq.items[2].name
assert "Quantile 0.75, " in dsq.items[3].name
def test_nanquantile():
q = 0.99
fn = "tests/testdata/random.dfs0" # has delete value
ds = Dfs0(fn).read()
dsq1 = ds.quantile(q=q)
dsq2 = ds.nanquantile(q=q)
assert np.isnan(dsq1[0])
assert not np.isnan(dsq2[0])
qnt = np.quantile(ds[0], q=q)
nqnt = np.nanquantile(ds[0], q=q)
assert np.isnan(qnt)
assert dsq2[0] == nqnt
def test_copy():
nt = 100
d1 = np.zeros([nt, 100, 30]) + 1.5
d2 = np.zeros([nt, 100, 30]) + 2.0
data = [d1, d2]
time = pd.date_range("2000-1-2", freq="H", periods=nt)
items = [ItemInfo("Foo"), ItemInfo("Bar")]
ds = Dataset(data, time, items)
assert len(ds.items) == 2
assert len(ds.data) == 2
assert ds.items[0].name == "Foo"
ds2 = ds.copy()
ds2.items[0].name = "New name"
assert ds2.items[0].name == "New name"
assert ds.items[0].name == "Foo"
def test_dropna():
nt = 10
d1 = np.zeros([nt, 100, 30])
from __future__ import print_function, division, absolute_import
import functools
import sys
import warnings
# unittest only added in 3.4 self.subTest()
if sys.version_info[0] < 3 or sys.version_info[1] < 4:
import unittest2 as unittest
else:
import unittest
# unittest.mock is not available in 2.7 (though unittest2 might contain it?)
try:
import unittest.mock as mock
except ImportError:
import mock
import matplotlib
matplotlib.use('Agg') # fix execution of tests involving matplotlib on travis
import numpy as np
import six.moves as sm
import imgaug as ia
from imgaug import augmenters as iaa
from imgaug import parameters as iap
from imgaug import dtypes as iadt
from imgaug.testutils import (array_equal_lists, keypoints_equal, reseed,
runtest_pickleable_uint8_img)
import imgaug.augmenters.arithmetic as arithmetic_lib
import imgaug.augmenters.contrast as contrast_lib
class TestAdd(unittest.TestCase):
def setUp(self):
reseed()
def test___init___bad_datatypes(self):
# test exceptions for wrong parameter types
got_exception = False
try:
_ = iaa.Add(value="test")
except Exception:
got_exception = True
assert got_exception
got_exception = False
try:
_ = iaa.Add(value=1, per_channel="test")
except Exception:
got_exception = True
assert got_exception
def test_add_zero(self):
# no add, shouldn't change anything
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
images = np.array([base_img])
images_list = [base_img]
aug = iaa.Add(value=0)
aug_det = aug.to_deterministic()
observed = aug.augment_images(images)
expected = images
assert np.array_equal(observed, expected)
assert observed.shape == (1, 3, 3, 1)
observed = aug.augment_images(images_list)
expected = images_list
assert array_equal_lists(observed, expected)
observed = aug_det.augment_images(images)
expected = images
assert np.array_equal(observed, expected)
observed = aug_det.augment_images(images_list)
expected = images_list
assert array_equal_lists(observed, expected)
def test_add_one(self):
# add > 0
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
images = np.array([base_img])
images_list = [base_img]
aug = iaa.Add(value=1)
aug_det = aug.to_deterministic()
observed = aug.augment_images(images)
expected = images + 1
assert np.array_equal(observed, expected)
assert observed.shape == (1, 3, 3, 1)
observed = aug.augment_images(images_list)
expected = [images_list[0] + 1]
assert array_equal_lists(observed, expected)
observed = aug_det.augment_images(images)
expected = images + 1
assert np.array_equal(observed, expected)
observed = aug_det.augment_images(images_list)
expected = [images_list[0] + 1]
assert array_equal_lists(observed, expected)
def test_minus_one(self):
# add < 0
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
images = np.array([base_img])
images_list = [base_img]
aug = iaa.Add(value=-1)
aug_det = aug.to_deterministic()
observed = aug.augment_images(images)
expected = images - 1
assert np.array_equal(observed, expected)
observed = aug.augment_images(images_list)
expected = [images_list[0] - 1]
assert array_equal_lists(observed, expected)
observed = aug_det.augment_images(images)
expected = images - 1
assert np.array_equal(observed, expected)
observed = aug_det.augment_images(images_list)
expected = [images_list[0] - 1]
assert array_equal_lists(observed, expected)
def test_uint8_every_possible_value(self):
# uint8, every possible addition for base value 127
for value_type in [float, int]:
for per_channel in [False, True]:
for value in np.arange(-255, 255+1):
aug = iaa.Add(value=value_type(value), per_channel=per_channel)
expected = np.clip(127 + value_type(value), 0, 255)
img = np.full((1, 1), 127, dtype=np.uint8)
img_aug = aug.augment_image(img)
assert img_aug.item(0) == expected
img = np.full((1, 1, 3), 127, dtype=np.uint8)
img_aug = aug.augment_image(img)
assert np.all(img_aug == expected)
def test_add_floats(self):
# specific tests with floats
aug = iaa.Add(value=0.75)
img = np.full((1, 1), 1, dtype=np.uint8)
img_aug = aug.augment_image(img)
assert img_aug.item(0) == 2
img = np.full((1, 1), 1, dtype=np.uint16)
img_aug = aug.augment_image(img)
assert img_aug.item(0) == 2
aug = iaa.Add(value=0.45)
img = np.full((1, 1), 1, dtype=np.uint8)
img_aug = aug.augment_image(img)
assert img_aug.item(0) == 1
img = np.full((1, 1), 1, dtype=np.uint16)
img_aug = aug.augment_image(img)
assert img_aug.item(0) == 1
def test_stochastic_parameters_as_value(self):
# test other parameters
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
images = np.array([base_img])
aug = iaa.Add(value=iap.DiscreteUniform(1, 10))
observed = aug.augment_images(images)
assert 100 + 1 <= np.average(observed) <= 100 + 10
aug = iaa.Add(value=iap.Uniform(1, 10))
observed = aug.augment_images(images)
assert 100 + 1 <= np.average(observed) <= 100 + 10
aug = iaa.Add(value=iap.Clip(iap.Normal(1, 1), -3, 3))
observed = aug.augment_images(images)
assert 100 - 3 <= np.average(observed) <= 100 + 3
aug = iaa.Add(value=iap.Discretize(iap.Clip(iap.Normal(1, 1), -3, 3)))
observed = aug.augment_images(images)
assert 100 - 3 <= np.average(observed) <= 100 + 3
def test_keypoints_dont_change(self):
# keypoints shouldn't be changed
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
keypoints = [ia.KeypointsOnImage([ia.Keypoint(x=0, y=0), ia.Keypoint(x=1, y=1),
ia.Keypoint(x=2, y=2)], shape=base_img.shape)]
aug = iaa.Add(value=1)
aug_det = iaa.Add(value=1).to_deterministic()
observed = aug.augment_keypoints(keypoints)
expected = keypoints
assert keypoints_equal(observed, expected)
observed = aug_det.augment_keypoints(keypoints)
expected = keypoints
assert keypoints_equal(observed, expected)
def test_tuple_as_value(self):
# varying values
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
images = np.array([base_img])
aug = iaa.Add(value=(0, 10))
aug_det = aug.to_deterministic()
last_aug = None
last_aug_det = None
nb_changed_aug = 0
nb_changed_aug_det = 0
nb_iterations = 1000
for i in sm.xrange(nb_iterations):
observed_aug = aug.augment_images(images)
observed_aug_det = aug_det.augment_images(images)
if i == 0:
last_aug = observed_aug
last_aug_det = observed_aug_det
else:
if not np.array_equal(observed_aug, last_aug):
nb_changed_aug += 1
if not np.array_equal(observed_aug_det, last_aug_det):
nb_changed_aug_det += 1
last_aug = observed_aug
last_aug_det = observed_aug_det
assert nb_changed_aug >= int(nb_iterations * 0.7)
assert nb_changed_aug_det == 0
def test_per_channel(self):
# test channelwise
aug = iaa.Add(value=iap.Choice([0, 1]), per_channel=True)
observed = aug.augment_image(np.zeros((1, 1, 100), dtype=np.uint8))
uq = np.unique(observed)
assert observed.shape == (1, 1, 100)
assert 0 in uq
assert 1 in uq
assert len(uq) == 2
def test_per_channel_with_probability(self):
# test channelwise with probability
aug = iaa.Add(value=iap.Choice([0, 1]), per_channel=0.5)
seen = [0, 0]
for _ in sm.xrange(400):
observed = aug.augment_image(np.zeros((1, 1, 20), dtype=np.uint8))
assert observed.shape == (1, 1, 20)
uq = np.unique(observed)
per_channel = (len(uq) == 2)
if per_channel:
seen[0] += 1
else:
seen[1] += 1
assert 150 < seen[0] < 250
assert 150 < seen[1] < 250
def test_zero_sized_axes(self):
shapes = [
(0, 0),
(0, 1),
(1, 0),
(0, 1, 0),
(1, 0, 0),
(0, 1, 1),
(1, 0, 1)
]
for shape in shapes:
with self.subTest(shape=shape):
image = np.zeros(shape, dtype=np.uint8)
aug = iaa.Add(1)
image_aug = aug(image=image)
assert np.all(image_aug == 1)
assert image_aug.dtype.name == "uint8"
assert image_aug.shape == image.shape
def test_unusual_channel_numbers(self):
shapes = [
(1, 1, 4),
(1, 1, 5),
(1, 1, 512),
(1, 1, 513)
]
for shape in shapes:
with self.subTest(shape=shape):
image = np.zeros(shape, dtype=np.uint8)
aug = iaa.Add(1)
image_aug = aug(image=image)
assert np.all(image_aug == 1)
assert image_aug.dtype.name == "uint8"
assert image_aug.shape == image.shape
def test_get_parameters(self):
# test get_parameters()
aug = iaa.Add(value=1, per_channel=False)
params = aug.get_parameters()
assert isinstance(params[0], iap.Deterministic)
assert isinstance(params[1], iap.Deterministic)
assert params[0].value == 1
assert params[1].value == 0
def test_heatmaps(self):
# test heatmaps (not affected by augmenter)
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
aug = iaa.Add(value=10)
hm = ia.quokka_heatmap()
hm_aug = aug.augment_heatmaps([hm])[0]
assert np.allclose(hm.arr_0to1, hm_aug.arr_0to1)
def test_other_dtypes_bool(self):
image = np.zeros((3, 3), dtype=bool)
aug = iaa.Add(value=1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 1)
image = np.full((3, 3), True, dtype=bool)
aug = iaa.Add(value=1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 1)
image = np.full((3, 3), True, dtype=bool)
aug = iaa.Add(value=-1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 0)
image = np.full((3, 3), True, dtype=bool)
aug = iaa.Add(value=-2)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 0)
def test_other_dtypes_uint_int(self):
for dtype in [np.uint8, np.uint16, np.int8, np.int16]:
min_value, center_value, max_value = iadt.get_value_range_of_dtype(dtype)
image = np.full((3, 3), min_value, dtype=dtype)
aug = iaa.Add(1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == min_value + 1)
image = np.full((3, 3), min_value + 10, dtype=dtype)
aug = iaa.Add(11)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == min_value + 21)
image = np.full((3, 3), max_value - 2, dtype=dtype)
aug = iaa.Add(1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == max_value - 1)
image = np.full((3, 3), max_value - 1, dtype=dtype)
aug = iaa.Add(1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == max_value)
image = np.full((3, 3), max_value - 1, dtype=dtype)
aug = iaa.Add(2)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == max_value)
image = np.full((3, 3), min_value + 10, dtype=dtype)
aug = iaa.Add(-9)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == min_value + 1)
image = np.full((3, 3), min_value + 10, dtype=dtype)
aug = iaa.Add(-10)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == min_value)
image = np.full((3, 3), min_value + 10, dtype=dtype)
aug = iaa.Add(-11)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == min_value)
for _ in sm.xrange(10):
image = np.full((1, 1, 3), 20, dtype=dtype)
aug = iaa.Add(iap.Uniform(-10, 10))
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(10 <= image_aug, image_aug <= 30))
assert len(np.unique(image_aug)) == 1
image = np.full((1, 1, 100), 20, dtype=dtype)
aug = iaa.Add(iap.Uniform(-10, 10), per_channel=True)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(10 <= image_aug, image_aug <= 30))
assert len(np.unique(image_aug)) > 1
image = np.full((1, 1, 3), 20, dtype=dtype)
aug = iaa.Add(iap.DiscreteUniform(-10, 10))
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(10 <= image_aug, image_aug <= 30))
assert len(np.unique(image_aug)) == 1
image = np.full((1, 1, 100), 20, dtype=dtype)
aug = iaa.Add(iap.DiscreteUniform(-10, 10), per_channel=True)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(10 <= image_aug, image_aug <= 30))
assert len(np.unique(image_aug)) > 1
def test_other_dtypes_float(self):
# float
for dtype in [np.float16, np.float32]:
min_value, center_value, max_value = iadt.get_value_range_of_dtype(dtype)
if dtype == np.float16:
atol = 1e-3 * max_value
else:
atol = 1e-9 * max_value
_allclose = functools.partial(np.allclose, atol=atol, rtol=0)
image = np.full((3, 3), min_value, dtype=dtype)
aug = iaa.Add(1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, min_value + 1)
image = np.full((3, 3), min_value + 10, dtype=dtype)
aug = iaa.Add(11)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, min_value + 21)
image = np.full((3, 3), max_value - 2, dtype=dtype)
aug = iaa.Add(1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, max_value - 1)
image = np.full((3, 3), max_value - 1, dtype=dtype)
aug = iaa.Add(1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, max_value)
image = np.full((3, 3), max_value - 1, dtype=dtype)
aug = iaa.Add(2)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, max_value)
image = np.full((3, 3), min_value + 10, dtype=dtype)
aug = iaa.Add(-9)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, min_value + 1)
image = np.full((3, 3), min_value + 10, dtype=dtype)
aug = iaa.Add(-10)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, min_value)
image = np.full((3, 3), min_value + 10, dtype=dtype)
aug = iaa.Add(-11)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, min_value)
for _ in sm.xrange(10):
image = np.full((50, 1, 3), 0, dtype=dtype)
aug = iaa.Add(iap.Uniform(-10, 10))
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(-10 - 1e-2 < image_aug, image_aug < 10 + 1e-2))
assert np.allclose(image_aug[1:, :, 0], image_aug[:-1, :, 0])
assert np.allclose(image_aug[..., 0], image_aug[..., 1])
image = np.full((1, 1, 100), 0, dtype=dtype)
aug = iaa.Add(iap.Uniform(-10, 10), per_channel=True)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(-10 - 1e-2 < image_aug, image_aug < 10 + 1e-2))
assert not np.allclose(image_aug[:, :, 1:], image_aug[:, :, :-1])
image = np.full((50, 1, 3), 0, dtype=dtype)
aug = iaa.Add(iap.DiscreteUniform(-10, 10))
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(-10 - 1e-2 < image_aug, image_aug < 10 + 1e-2))
assert np.allclose(image_aug[1:, :, 0], image_aug[:-1, :, 0])
assert np.allclose(image_aug[..., 0], image_aug[..., 1])
image = np.full((1, 1, 100), 0, dtype=dtype)
aug = iaa.Add(iap.DiscreteUniform(-10, 10), per_channel=True)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(-10 - 1e-2 < image_aug, image_aug < 10 + 1e-2))
assert not np.allclose(image_aug[:, :, 1:], image_aug[:, :, :-1])
def test_pickleable(self):
aug = iaa.Add((0, 50), per_channel=True, random_state=1)
runtest_pickleable_uint8_img(aug, iterations=10)
class TestAddElementwise(unittest.TestCase):
def setUp(self):
reseed()
def test___init___bad_datatypes(self):
# test exceptions for wrong parameter types
got_exception = False
try:
_aug = iaa.AddElementwise(value="test")
except Exception:
got_exception = True
assert got_exception
got_exception = False
try:
_aug = iaa.AddElementwise(value=1, per_channel="test")
except Exception:
got_exception = True
assert got_exception
def test_add_zero(self):
        # no add, shouldn't change anything
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
images = np.array([base_img])
images_list = [base_img]
aug = iaa.AddElementwise(value=0)
aug_det = aug.to_deterministic()
observed = aug.augment_images(images)
expected = images
assert np.array_equal(observed, expected)
assert observed.shape == (1, 3, 3, 1)
observed = aug.augment_images(images_list)
expected = images_list
assert array_equal_lists(observed, expected)
observed = aug_det.augment_images(images)
expected = images
assert np.array_equal(observed, expected)
observed = aug_det.augment_images(images_list)
expected = images_list
assert array_equal_lists(observed, expected)
def test_add_one(self):
# add > 0
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
images = np.array([base_img])
images_list = [base_img]
aug = iaa.AddElementwise(value=1)
aug_det = aug.to_deterministic()
observed = aug.augment_images(images)
expected = images + 1
assert np.array_equal(observed, expected)
assert observed.shape == (1, 3, 3, 1)
observed = aug.augment_images(images_list)
expected = [images_list[0] + 1]
assert array_equal_lists(observed, expected)
observed = aug_det.augment_images(images)
expected = images + 1
assert np.array_equal(observed, expected)
observed = aug_det.augment_images(images_list)
expected = [images_list[0] + 1]
assert array_equal_lists(observed, expected)
def test_add_minus_one(self):
# add < 0
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
images = np.array([base_img])
images_list = [base_img]
aug = iaa.AddElementwise(value=-1)
aug_det = aug.to_deterministic()
observed = aug.augment_images(images)
expected = images - 1
assert np.array_equal(observed, expected)
observed = aug.augment_images(images_list)
expected = [images_list[0] - 1]
assert array_equal_lists(observed, expected)
observed = aug_det.augment_images(images)
expected = images - 1
assert np.array_equal(observed, expected)
observed = aug_det.augment_images(images_list)
expected = [images_list[0] - 1]
assert array_equal_lists(observed, expected)
def test_uint8_every_possible_value(self):
# uint8, every possible addition for base value 127
for value_type in [int]:
for per_channel in [False, True]:
for value in np.arange(-255, 255+1):
aug = iaa.AddElementwise(value=value_type(value), per_channel=per_channel)
expected = np.clip(127 + value_type(value), 0, 255)
img = np.full((1, 1), 127, dtype=np.uint8)
img_aug = aug.augment_image(img)
assert img_aug.item(0) == expected
img = np.full((1, 1, 3), 127, dtype=np.uint8)
img_aug = aug.augment_image(img)
assert np.all(img_aug == expected)
def test_stochastic_parameters_as_value(self):
# test other parameters
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
images = np.array([base_img])
aug = iaa.AddElementwise(value=iap.DiscreteUniform(1, 10))
observed = aug.augment_images(images)
assert np.min(observed) >= 100 + 1
assert np.max(observed) <= 100 + 10
aug = iaa.AddElementwise(value=iap.Uniform(1, 10))
observed = aug.augment_images(images)
assert np.min(observed) >= 100 + 1
assert np.max(observed) <= 100 + 10
aug = iaa.AddElementwise(value=iap.Clip(iap.Normal(1, 1), -3, 3))
observed = aug.augment_images(images)
assert np.min(observed) >= 100 - 3
assert np.max(observed) <= 100 + 3
aug = iaa.AddElementwise(value=iap.Discretize(iap.Clip(iap.Normal(1, 1), -3, 3)))
observed = aug.augment_images(images)
assert np.min(observed) >= 100 - 3
assert np.max(observed) <= 100 + 3
def test_keypoints_dont_change(self):
        # keypoints shouldn't be changed
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
keypoints = [ia.KeypointsOnImage([ia.Keypoint(x=0, y=0), ia.Keypoint(x=1, y=1),
ia.Keypoint(x=2, y=2)], shape=base_img.shape)]
aug = iaa.AddElementwise(value=1)
aug_det = iaa.AddElementwise(value=1).to_deterministic()
observed = aug.augment_keypoints(keypoints)
expected = keypoints
assert keypoints_equal(observed, expected)
observed = aug_det.augment_keypoints(keypoints)
expected = keypoints
assert keypoints_equal(observed, expected)
def test_tuple_as_value(self):
# varying values
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
images = np.array([base_img])
aug = iaa.AddElementwise(value=(0, 10))
aug_det = aug.to_deterministic()
last_aug = None
last_aug_det = None
nb_changed_aug = 0
nb_changed_aug_det = 0
nb_iterations = 1000
for i in sm.xrange(nb_iterations):
observed_aug = aug.augment_images(images)
observed_aug_det = aug_det.augment_images(images)
if i == 0:
last_aug = observed_aug
last_aug_det = observed_aug_det
else:
if not np.array_equal(observed_aug, last_aug):
nb_changed_aug += 1
if not np.array_equal(observed_aug_det, last_aug_det):
nb_changed_aug_det += 1
last_aug = observed_aug
last_aug_det = observed_aug_det
assert nb_changed_aug >= int(nb_iterations * 0.7)
assert nb_changed_aug_det == 0
def test_samples_change_by_spatial_location(self):
# values should change between pixels
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
images = np.array([base_img])
aug = iaa.AddElementwise(value=(-50, 50))
nb_same = 0
nb_different = 0
nb_iterations = 1000
for i in sm.xrange(nb_iterations):
observed_aug = aug.augment_images(images)
observed_aug_flat = observed_aug.flatten()
last = None
for j in sm.xrange(observed_aug_flat.size):
if last is not None:
v = observed_aug_flat[j]
if v - 0.0001 <= last <= v + 0.0001:
nb_same += 1
else:
nb_different += 1
last = observed_aug_flat[j]
assert nb_different > 0.9 * (nb_different + nb_same)
def test_per_channel(self):
# test channelwise
aug = iaa.AddElementwise(value=iap.Choice([0, 1]), per_channel=True)
observed = aug.augment_image(np.zeros((100, 100, 3), dtype=np.uint8))
sums = np.sum(observed, axis=2)
values = np.unique(sums)
assert all([(value in values) for value in [0, 1, 2, 3]])
def test_per_channel_with_probability(self):
# test channelwise with probability
aug = iaa.AddElementwise(value=iap.Choice([0, 1]), per_channel=0.5)
seen = [0, 0]
for _ in sm.xrange(400):
observed = aug.augment_image(np.zeros((20, 20, 3), dtype=np.uint8))
sums = np.sum(observed, axis=2)
values = np.unique(sums)
all_values_found = all([(value in values) for value in [0, 1, 2, 3]])
if all_values_found:
seen[0] += 1
else:
seen[1] += 1
assert 150 < seen[0] < 250
assert 150 < seen[1] < 250
def test_zero_sized_axes(self):
shapes = [
(0, 0),
(0, 1),
(1, 0),
(0, 1, 0),
(1, 0, 0),
(0, 1, 1),
(1, 0, 1)
]
for shape in shapes:
with self.subTest(shape=shape):
image = np.zeros(shape, dtype=np.uint8)
aug = iaa.AddElementwise(1)
image_aug = aug(image=image)
assert np.all(image_aug == 1)
assert image_aug.dtype.name == "uint8"
assert image_aug.shape == image.shape
def test_unusual_channel_numbers(self):
shapes = [
(1, 1, 4),
(1, 1, 5),
(1, 1, 512),
(1, 1, 513)
]
for shape in shapes:
with self.subTest(shape=shape):
image = np.zeros(shape, dtype=np.uint8)
aug = iaa.AddElementwise(1)
image_aug = aug(image=image)
assert np.all(image_aug == 1)
assert image_aug.dtype.name == "uint8"
assert image_aug.shape == image.shape
def test_get_parameters(self):
# test get_parameters()
aug = iaa.AddElementwise(value=1, per_channel=False)
params = aug.get_parameters()
assert isinstance(params[0], iap.Deterministic)
assert isinstance(params[1], iap.Deterministic)
assert params[0].value == 1
assert params[1].value == 0
def test_heatmaps_dont_change(self):
# test heatmaps (not affected by augmenter)
aug = iaa.AddElementwise(value=10)
hm = ia.quokka_heatmap()
hm_aug = aug.augment_heatmaps([hm])[0]
assert np.allclose(hm.arr_0to1, hm_aug.arr_0to1)
def test_other_dtypes_bool(self):
# bool
image = np.zeros((3, 3), dtype=bool)
aug = iaa.AddElementwise(value=1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 1)
image = np.full((3, 3), True, dtype=bool)
aug = iaa.AddElementwise(value=1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 1)
image = np.full((3, 3), True, dtype=bool)
aug = iaa.AddElementwise(value=-1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 0)
image = np.full((3, 3), True, dtype=bool)
aug = iaa.AddElementwise(value=-2)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 0)
def test_other_dtypes_uint_int(self):
# uint, int
for dtype in [np.uint8, np.uint16, np.int8, np.int16]:
min_value, center_value, max_value = iadt.get_value_range_of_dtype(dtype)
image = np.full((3, 3), min_value, dtype=dtype)
aug = iaa.AddElementwise(1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == min_value + 1)
image = np.full((3, 3), min_value + 10, dtype=dtype)
aug = iaa.AddElementwise(11)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == min_value + 21)
image = np.full((3, 3), max_value - 2, dtype=dtype)
aug = iaa.AddElementwise(1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == max_value - 1)
image = np.full((3, 3), max_value - 1, dtype=dtype)
aug = iaa.AddElementwise(1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == max_value)
image = np.full((3, 3), max_value - 1, dtype=dtype)
aug = iaa.AddElementwise(2)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == max_value)
image = np.full((3, 3), min_value + 10, dtype=dtype)
aug = iaa.AddElementwise(-9)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == min_value + 1)
image = np.full((3, 3), min_value + 10, dtype=dtype)
aug = iaa.AddElementwise(-10)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == min_value)
image = np.full((3, 3), min_value + 10, dtype=dtype)
aug = iaa.AddElementwise(-11)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == min_value)
for _ in sm.xrange(10):
image = np.full((5, 5, 3), 20, dtype=dtype)
aug = iaa.AddElementwise(iap.Uniform(-10, 10))
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(10 <= image_aug, image_aug <= 30))
assert len(np.unique(image_aug)) > 1
assert np.all(image_aug[..., 0] == image_aug[..., 1])
image = np.full((1, 1, 100), 20, dtype=dtype)
aug = iaa.AddElementwise(iap.Uniform(-10, 10), per_channel=True)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(10 <= image_aug, image_aug <= 30))
assert len(np.unique(image_aug)) > 1
image = np.full((5, 5, 3), 20, dtype=dtype)
aug = iaa.AddElementwise(iap.DiscreteUniform(-10, 10))
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(10 <= image_aug, image_aug <= 30))
assert len(np.unique(image_aug)) > 1
assert np.all(image_aug[..., 0] == image_aug[..., 1])
image = np.full((1, 1, 100), 20, dtype=dtype)
aug = iaa.AddElementwise(iap.DiscreteUniform(-10, 10), per_channel=True)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(10 <= image_aug, image_aug <= 30))
assert len(np.unique(image_aug)) > 1
def test_other_dtypes_float(self):
# float
for dtype in [np.float16, np.float32]:
min_value, center_value, max_value = iadt.get_value_range_of_dtype(dtype)
if dtype == np.float16:
atol = 1e-3 * max_value
else:
atol = 1e-9 * max_value
_allclose = functools.partial(np.allclose, atol=atol, rtol=0)
image = np.full((3, 3), min_value, dtype=dtype)
aug = iaa.AddElementwise(1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, min_value + 1)
image = np.full((3, 3), min_value + 10, dtype=dtype)
aug = iaa.AddElementwise(11)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, min_value + 21)
image = np.full((3, 3), max_value - 2, dtype=dtype)
aug = iaa.AddElementwise(1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, max_value - 1)
image = np.full((3, 3), max_value - 1, dtype=dtype)
aug = iaa.AddElementwise(1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, max_value)
image = np.full((3, 3), max_value - 1, dtype=dtype)
aug = iaa.AddElementwise(2)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, max_value)
image = np.full((3, 3), min_value + 10, dtype=dtype)
aug = iaa.AddElementwise(-9)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, min_value + 1)
image = np.full((3, 3), min_value + 10, dtype=dtype)
aug = iaa.AddElementwise(-10)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, min_value)
image = np.full((3, 3), min_value + 10, dtype=dtype)
aug = iaa.AddElementwise(-11)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, min_value)
for _ in sm.xrange(10):
image = np.full((50, 1, 3), 0, dtype=dtype)
aug = iaa.AddElementwise(iap.Uniform(-10, 10))
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(-10 - 1e-2 < image_aug, image_aug < 10 + 1e-2))
assert not np.allclose(image_aug[1:, :, 0], image_aug[:-1, :, 0])
assert np.allclose(image_aug[..., 0], image_aug[..., 1])
image = np.full((1, 1, 100), 0, dtype=dtype)
aug = iaa.AddElementwise(iap.Uniform(-10, 10), per_channel=True)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(-10 - 1e-2 < image_aug, image_aug < 10 + 1e-2))
assert not np.allclose(image_aug[:, :, 1:], image_aug[:, :, :-1])
image = np.full((50, 1, 3), 0, dtype=dtype)
aug = iaa.AddElementwise(iap.DiscreteUniform(-10, 10))
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(-10 - 1e-2 < image_aug, image_aug < 10 + 1e-2))
assert not np.allclose(image_aug[1:, :, 0], image_aug[:-1, :, 0])
assert np.allclose(image_aug[..., 0], image_aug[..., 1])
image = np.full((1, 1, 100), 0, dtype=dtype)
aug = iaa.AddElementwise(iap.DiscreteUniform(-10, 10), per_channel=True)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(-10 - 1e-2 < image_aug, image_aug < 10 + 1e-2))
assert not np.allclose(image_aug[:, :, 1:], image_aug[:, :, :-1])
def test_pickleable(self):
aug = iaa.AddElementwise((0, 50), per_channel=True, random_state=1)
runtest_pickleable_uint8_img(aug, iterations=2)
class AdditiveGaussianNoise(unittest.TestCase):
def setUp(self):
reseed()
def test_loc_zero_scale_zero(self):
        # no noise, shouldn't change anything
base_img = np.ones((16, 16, 1), dtype=np.uint8) * 128
images = np.array([base_img])
aug = iaa.AdditiveGaussianNoise(loc=0, scale=0)
observed = aug.augment_images(images)
expected = images
assert np.array_equal(observed, expected)
def test_loc_zero_scale_nonzero(self):
# zero-centered noise
base_img = np.ones((16, 16, 1), dtype=np.uint8) * 128
images = np.array([base_img])
images_list = [base_img]
keypoints = [ia.KeypointsOnImage([ia.Keypoint(x=0, y=0), ia.Keypoint(x=1, y=1),
ia.Keypoint(x=2, y=2)], shape=base_img.shape)]
aug = iaa.AdditiveGaussianNoise(loc=0, scale=0.2 * 255)
aug_det = aug.to_deterministic()
observed = aug.augment_images(images)
assert not np.array_equal(observed, images)
observed = aug_det.augment_images(images)
assert not np.array_equal(observed, images)
observed = aug.augment_images(images_list)
assert not array_equal_lists(observed, images_list)
observed = aug_det.augment_images(images_list)
assert not array_equal_lists(observed, images_list)
observed = aug.augment_keypoints(keypoints)
assert keypoints_equal(observed, keypoints)
observed = aug_det.augment_keypoints(keypoints)
assert keypoints_equal(observed, keypoints)
def test_std_dev_of_added_noise_matches_scale(self):
# std correct?
base_img = np.ones((16, 16, 1), dtype=np.uint8) * 128
aug = iaa.AdditiveGaussianNoise(loc=0, scale=0.2 * 255)
images = np.ones((1, 1, 1, 1), dtype=np.uint8) * 128
nb_iterations = 1000
values = []
for i in sm.xrange(nb_iterations):
images_aug = aug.augment_images(images)
values.append(images_aug[0, 0, 0, 0])
values = np.array(values)
assert np.min(values) == 0
assert 0.1 < np.std(values) / 255.0 < 0.4
def test_nonzero_loc(self):
# non-zero loc
base_img = np.ones((16, 16, 1), dtype=np.uint8) * 128
aug = iaa.AdditiveGaussianNoise(loc=0.25 * 255, scale=0.01 * 255)
images = np.ones((1, 1, 1, 1), dtype=np.uint8) * 128
nb_iterations = 1000
values = []
for i in sm.xrange(nb_iterations):
images_aug = aug.augment_images(images)
values.append(images_aug[0, 0, 0, 0] - 128)
values = np.array(values)
assert 54 < np.average(values) < 74 # loc=0.25 should be around 255*0.25=64 average
def test_tuple_as_loc(self):
# varying locs
base_img = np.ones((16, 16, 1), dtype=np.uint8) * 128
aug = iaa.AdditiveGaussianNoise(loc=(0, 0.5 * 255), scale=0.0001 * 255)
aug_det = aug.to_deterministic()
images = np.ones((1, 1, 1, 1), dtype=np.uint8) * 128
last_aug = None
last_aug_det = None
nb_changed_aug = 0
nb_changed_aug_det = 0
nb_iterations = 1000
for i in sm.xrange(nb_iterations):
observed_aug = aug.augment_images(images)
observed_aug_det = aug_det.augment_images(images)
if i == 0:
last_aug = observed_aug
last_aug_det = observed_aug_det
else:
if not np.array_equal(observed_aug, last_aug):
nb_changed_aug += 1
if not np.array_equal(observed_aug_det, last_aug_det):
nb_changed_aug_det += 1
last_aug = observed_aug
last_aug_det = observed_aug_det
assert nb_changed_aug >= int(nb_iterations * 0.95)
assert nb_changed_aug_det == 0
def test_stochastic_parameter_as_loc(self):
# varying locs by stochastic param
base_img = np.ones((16, 16, 1), dtype=np.uint8) * 128
aug = iaa.AdditiveGaussianNoise(loc=iap.Choice([-20, 20]), scale=0.0001 * 255)
images = np.ones((1, 1, 1, 1), dtype=np.uint8) * 128
seen = [0, 0]
for i in sm.xrange(200):
observed = aug.augment_images(images)
mean = np.mean(observed)
diff_m20 = abs(mean - (128-20))
diff_p20 = abs(mean - (128+20))
if diff_m20 <= 1:
seen[0] += 1
elif diff_p20 <= 1:
seen[1] += 1
else:
assert False
assert 75 < seen[0] < 125
assert 75 < seen[1] < 125
def test_tuple_as_scale(self):
# varying stds
base_img = np.ones((16, 16, 1), dtype=np.uint8) * 128
aug = iaa.AdditiveGaussianNoise(loc=0, scale=(0.01 * 255, 0.2 * 255))
aug_det = aug.to_deterministic()
images = np.ones((1, 1, 1, 1), dtype=np.uint8) * 128
last_aug = None
last_aug_det = None
nb_changed_aug = 0
nb_changed_aug_det = 0
nb_iterations = 1000
for i in sm.xrange(nb_iterations):
observed_aug = aug.augment_images(images)
observed_aug_det = aug_det.augment_images(images)
if i == 0:
last_aug = observed_aug
last_aug_det = observed_aug_det
else:
if not np.array_equal(observed_aug, last_aug):
nb_changed_aug += 1
if not np.array_equal(observed_aug_det, last_aug_det):
nb_changed_aug_det += 1
last_aug = observed_aug
last_aug_det = observed_aug_det
assert nb_changed_aug >= int(nb_iterations * 0.95)
assert nb_changed_aug_det == 0
def test_stochastic_parameter_as_scale(self):
# varying stds by stochastic param
base_img = np.ones((16, 16, 1), dtype=np.uint8) * 128
aug = iaa.AdditiveGaussianNoise(loc=0, scale=iap.Choice([1, 20]))
images = np.ones((1, 20, 20, 1), dtype=np.uint8) * 128
seen = [0, 0, 0]
for i in sm.xrange(200):
observed = aug.augment_images(images)
std = np.std(observed.astype(np.int32) - 128)
diff_1 = abs(std - 1)
diff_20 = abs(std - 20)
if diff_1 <= 2:
seen[0] += 1
elif diff_20 <= 5:
seen[1] += 1
else:
seen[2] += 1
assert seen[2] <= 5
assert 75 < seen[0] < 125
assert 75 < seen[1] < 125
def test___init___bad_datatypes(self):
# test exceptions for wrong parameter types
got_exception = False
try:
_ = iaa.AdditiveGaussianNoise(loc="test")
except Exception:
got_exception = True
assert got_exception
got_exception = False
try:
_ = iaa.AdditiveGaussianNoise(scale="test")
except Exception:
got_exception = True
assert got_exception
def test_heatmaps_dont_change(self):
# test heatmaps (not affected by augmenter)
base_img = np.ones((16, 16, 1), dtype=np.uint8) * 128
aug = iaa.AdditiveGaussianNoise(loc=0.5, scale=10)
hm = ia.quokka_heatmap()
hm_aug = aug.augment_heatmaps([hm])[0]
assert np.allclose(hm.arr_0to1, hm_aug.arr_0to1)
def test_pickleable(self):
aug = iaa.AdditiveGaussianNoise(scale=(0.1, 10), per_channel=True,
random_state=1)
runtest_pickleable_uint8_img(aug, iterations=2)
class TestDropout(unittest.TestCase):
def setUp(self):
reseed()
def test_p_is_zero(self):
        # no dropout, shouldn't change anything
base_img = np.ones((512, 512, 1), dtype=np.uint8) * 255
images = np.array([base_img])
images_list = [base_img]
aug = iaa.Dropout(p=0)
observed = aug.augment_images(images)
expected = images
assert np.array_equal(observed, expected)
observed = aug.augment_images(images_list)
expected = images_list
assert array_equal_lists(observed, expected)
# 100% dropout, should drop everything
aug = iaa.Dropout(p=1.0)
observed = aug.augment_images(images)
expected = np.zeros((1, 512, 512, 1), dtype=np.uint8)
assert np.array_equal(observed, expected)
observed = aug.augment_images(images_list)
expected = [np.zeros((512, 512, 1), dtype=np.uint8)]
assert array_equal_lists(observed, expected)
def test_p_is_50_percent(self):
# 50% dropout
base_img = np.ones((512, 512, 1), dtype=np.uint8) * 255
images = np.array([base_img])
images_list = [base_img]
keypoints = [ia.KeypointsOnImage([ia.Keypoint(x=0, y=0), ia.Keypoint(x=1, y=1),
ia.Keypoint(x=2, y=2)], shape=base_img.shape)]
aug = iaa.Dropout(p=0.5)
aug_det = aug.to_deterministic()
observed = aug.augment_images(images)
assert not np.array_equal(observed, images)
percent_nonzero = len(observed.flatten().nonzero()[0]) \
/ (base_img.shape[0] * base_img.shape[1] * base_img.shape[2])
assert 0.35 <= (1 - percent_nonzero) <= 0.65
observed = aug_det.augment_images(images)
        assert not np.array_equal(observed, images)
import os
import glob
import random
import numpy as np
import trimesh
import imageio
import struct
from mesh2tex.data.core import Field
# Make sure loading xlr works
imageio.plugins.freeimage.download()
# Basic index field
class IndexField(Field):
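    ''' Basic index field: simply returns the index of the queried model. '''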
def load(self, model_path, idx):
return idx
def check_complete(self, files):
return True
class MeshField(Field):
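    ''' Mesh field: loads 'model.off' from a model folder with trimesh and returns
    its vertices and faces (optionally transformed).
    '''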
def __init__(self, folder_name, transform=None):
self.folder_name = folder_name
self.transform = transform
def load(self, model_path, idx):
folder_path = os.path.join(model_path, self.folder_name)
file_path = os.path.join(folder_path, 'model.off')
mesh = trimesh.load(file_path, process=False)
if self.transform is not None:
mesh = self.transform(mesh)
data = {
'vertices': np.array(mesh.vertices),
'faces': np.array(mesh.faces),
}
return data
def check_complete(self, files):
complete = (self.folder_name in files)
return complete
# Image field
class ImagesField(Field):
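    ''' Image field: loads one rendered image (a random view by default) of a model
    from the given subfolder and file extension.
    '''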
def __init__(self, folder_name, transform=None,
extension='jpg', random_view=True,
with_camera=False,
imageio_kwargs=dict()):
self.folder_name = folder_name
self.transform = transform
self.extension = extension
self.random_view = random_view
self.with_camera = with_camera
        self.imageio_kwargs = imageio_kwargs
def load(self, model_path, idx):
folder = os.path.join(model_path, self.folder_name)
files = glob.glob(os.path.join(folder, '*.%s' % self.extension))
files.sort()
if self.random_view:
idx_img = random.randint(0, len(files)-1)
else:
idx_img = 0
filename = files[idx_img]
image = imageio.imread(filename, **self.imageio_kwargs)
        image = np.asarray(image)
#!/usr/bin/env python
import gzip
import logging
import os.path
import time
from typing import List, Tuple
import anndata
import numpy as np
import pandas as pd
import tables
from scipy.io import mmread
from scipy.sparse import csr_matrix, issparse
from . import Array2D, MemData
logger = logging.getLogger("pegasus")
def load_10x_h5_file_v2(h5_in: "tables.File", fn: str, ngene: int = None) -> "MemData":
"""Load 10x v2 format matrix from hdf5 file
Parameters
----------
h5_in : tables.File
An instance of tables.File class that is connected to a 10x v2 formatted hdf5 file.
fn : `str`
File name, can be used to indicate channel-specific name prefix.
ngene : `int`, optional (default: None)
Minimum number of genes to keep a barcode. Default is to keep all barcodes.
Returns
-------
An MemData object containing genome-Array2D pair per genome.
Examples
--------
>>> io.load_10x_h5_file_v2(h5_in)
"""
data = MemData()
for group in h5_in.list_nodes("/", "Group"):
genome = group._v_name
M, N = h5_in.get_node("/" + genome + "/shape").read()
mat = csr_matrix(
(
h5_in.get_node("/" + genome + "/data").read(),
h5_in.get_node("/" + genome + "/indices").read(),
h5_in.get_node("/" + genome + "/indptr").read(),
),
shape=(N, M),
)
barcodes = h5_in.get_node("/" + genome + "/barcodes").read().astype(str)
ids = h5_in.get_node("/" + genome + "/genes").read().astype(str)
names = h5_in.get_node("/" + genome + "/gene_names").read().astype(str)
array2d = Array2D(
{"barcodekey": barcodes}, {"featurekey": ids, "featurename": names}, mat
)
array2d.filter(ngene=ngene)
array2d.separate_channels(fn)
data.addData(genome, array2d)
return data
def load_10x_h5_file_v3(h5_in: "tables.File", fn: str, ngene: int = None) -> "MemData":
"""Load 10x v3 format matrix from hdf5 file
Parameters
----------
h5_in : tables.File
An instance of tables.File class that is connected to a 10x v3 formatted hdf5 file.
fn : `str`
File name, can be used to indicate channel-specific name prefix.
ngene : `int`, optional (default: None)
Minimum number of genes to keep a barcode. Default is to keep all barcodes.
Returns
-------
An MemData object containing genome-Array2D pair per genome.
Examples
--------
>>> io.load_10x_h5_file_v3(h5_in)
"""
M, N = h5_in.get_node("/matrix/shape").read()
bigmat = csr_matrix(
(
h5_in.get_node("/matrix/data").read(),
h5_in.get_node("/matrix/indices").read(),
h5_in.get_node("/matrix/indptr").read(),
),
shape=(N, M),
)
barcodes = h5_in.get_node("/matrix/barcodes").read().astype(str)
genomes = h5_in.get_node("/matrix/features/genome").read().astype(str)
ids = h5_in.get_node("/matrix/features/id").read().astype(str)
names = h5_in.get_node("/matrix/features/name").read().astype(str)
data = MemData()
for genome in np.unique(genomes):
idx = genomes == genome
barcode_metadata = {"barcodekey": barcodes}
feature_metadata = {"featurekey": ids[idx], "featurename": names[idx]}
mat = bigmat[:, idx].copy()
array2d = Array2D(barcode_metadata, feature_metadata, mat)
array2d.filter(ngene)
array2d.separate_channels(fn)
data.addData(genome, array2d)
return data
def load_10x_h5_file(input_h5: str, ngene: int = None) -> "MemData":
"""Load 10x format matrix (either v2 or v3) from hdf5 file
Parameters
----------
input_h5 : `str`
The matrix in 10x v2 or v3 hdf5 format.
ngene : `int`, optional (default: None)
Minimum number of genes to keep a barcode. Default is to keep all barcodes.
Returns
-------
An MemData object containing genome-Array2D pair per genome.
Examples
--------
>>> io.load_10x_h5_file('example_10x.h5')
"""
fn = os.path.basename(input_h5)[:-3]
data = None
with tables.open_file(input_h5) as h5_in:
try:
node = h5_in.get_node("/matrix")
data = load_10x_h5_file_v3(h5_in, fn, ngene)
except tables.exceptions.NoSuchNodeError:
data = load_10x_h5_file_v2(h5_in, fn, ngene)
return data
def determine_file_name(
path: str, names: List[str], errmsg: str, fname: str = None, exts: List[str] = None
) -> str:
""" Try several file name options and determine which one is correct.
"""
for name in names:
file_name = os.path.join(path, name)
if os.path.isfile(file_name):
return file_name
if fname is not None:
for ext in exts:
file_name = fname + ext
if os.path.isfile(file_name):
return file_name
raise ValueError(errmsg)
def load_one_mtx_file(path: str, ngene: int = None, fname: str = None) -> "Array2D":
"""Load one gene-count matrix in mtx format into an Array2D object
"""
mtx_file = determine_file_name(
path,
["matrix.mtx.gz", "matrix.mtx"],
"Expression matrix in mtx format is not found",
fname=fname,
exts=[".mtx"],
)
mat = csr_matrix(mmread(mtx_file).T)
barcode_file = determine_file_name(
path,
["cells.tsv.gz", "barcodes.tsv.gz", "barcodes.tsv"],
"Barcode metadata information is not found",
fname=fname,
exts=["_barcode.tsv", ".cells.tsv"],
)
feature_file = determine_file_name(
path,
["genes.tsv.gz", "features.tsv.gz", "genes.tsv"],
"Feature metadata information is not found",
fname=fname,
exts=["_gene.tsv", ".genes.tsv"],
)
barcode_base = os.path.basename(barcode_file)
feature_base = os.path.basename(feature_file)
if barcode_base == "cells.tsv.gz" and feature_base == "genes.tsv.gz":
format_type = "HCA DCP"
elif barcode_base == "barcodes.tsv.gz" and feature_base == "features.tsv.gz":
format_type = "10x v3"
elif barcode_base == "barcodes.tsv" and feature_base == "genes.tsv":
format_type = "10x v2"
elif barcode_base.endswith("_barcode.tsv") and feature_base.endswith("_gene.tsv"):
format_type = "scumi"
elif barcode_base.endswith(".cells.tsv") and feature_base.endswith(".genes.tsv"):
format_type = "dropEst"
else:
raise ValueError("Unknown format type")
if format_type == "HCA DCP":
barcode_metadata = pd.read_csv(barcode_file, sep="\t", header=0)
assert "cellkey" in barcode_metadata
barcode_metadata.rename(columns={"cellkey": "barcodekey"}, inplace=True)
feature_metadata = pd.read_csv(feature_file, sep="\t", header=0)
else:
barcode_metadata = pd.read_csv(
barcode_file, sep="\t", header=None, names=["barcodekey"]
)
if format_type == "10x v3":
feature_metadata = pd.read_csv(
feature_file,
sep="\t",
header=None,
names=["featurekey", "featurename", "featuretype"],
)
elif format_type == "10x v2":
feature_metadata = pd.read_csv(
feature_file, sep="\t", header=None, names=["featurekey", "featurename"]
)
elif format_type == "scumi":
values = (
pd.read_csv(feature_file, sep="\t", header=None)
.iloc[:, 0]
.values.astype(str)
)
arr = np.array(np.char.split(values, sep="_", maxsplit=1).tolist())
feature_metadata = pd.DataFrame(
data={"featurekey": arr[:, 0], "featurename": arr[:, 1]}
)
elif format_type == "dropEst":
feature_metadata = pd.read_csv(
feature_file, sep="\t", header=None, names=["featurekey"]
)
feature_metadata["featurename"] = feature_metadata["featurekey"]
else:
raise ValueError("Unknown format type")
array2d = Array2D(barcode_metadata, feature_metadata, mat)
array2d.filter(ngene=ngene)
if format_type == "10x v3" or format_type == "10x v2":
array2d.separate_channels("") # fn == '' refers to 10x mtx format
return array2d
def load_mtx_file(path: str, genome: str = None, ngene: int = None) -> "MemData":
"""Load gene-count matrix from Market Matrix files (10x v2, v3 and HCA DCP formats)
Parameters
----------
path : `str`
        Path to mtx files. The directory implied by path should either contain matrix, feature and barcode information, or folders containing this information.
genome : `str`, optional (default: None)
Genome name of the matrix. If None, genome will be inferred from path.
ngene : `int`, optional (default: None)
Minimum number of genes to keep a barcode. Default is to keep all barcodes.
Returns
-------
An MemData object containing a genome-Array2D pair.
Examples
--------
    >>> io.load_mtx_file('example_10x_mtx_folder')
"""
orig_file = None
if not os.path.isdir(path):
orig_file = path
path = os.path.dirname(path)
data = MemData()
if (
os.path.isfile(os.path.join(path, "matrix.mtx.gz"))
or os.path.isfile(os.path.join(path, "matrix.mtx"))
or (orig_file is not None and os.path.isfile(orig_file))
):
if genome is None:
genome = os.path.basename(path)
data.addData(
genome,
load_one_mtx_file(
path,
ngene=ngene,
fname=None if orig_file is None else os.path.splitext(orig_file)[0],
),
)
else:
for dir_entry in os.scandir(path):
if dir_entry.is_dir():
data.addData(
dir_entry.name, load_one_mtx_file(dir_entry.path, ngene=ngene)
)
return data
def load_csv_file(
input_csv: str, genome: str, sep: str = ",", ngene: int = None
) -> "MemData":
"""Load count matrix from a CSV-style file, such as CSV file or DGE style tsv file.
Parameters
----------
input_csv : `str`
The CSV file, gzipped or not, containing the count matrix.
genome : `str`
The genome reference.
sep: `str`, optional (default: ',')
Separator between fields, either ',' or '\t'.
ngene : `int`, optional (default: None)
Minimum number of genes to keep a barcode. Default is to keep all barcodes.
Returns
-------
An MemData object containing a genome-Array2D pair.
Examples
--------
>>> io.load_csv_file('example_ADT.csv', genome = 'GRCh38')
>>> io.load_csv_file('example.umi.dge.txt.gz', genome = 'GRCh38', sep = '\t')
"""
path = os.path.dirname(input_csv)
base = os.path.basename(input_csv)
is_hca_csv = base == "expression.csv"
if sep == "\t":
        # DGE file: columns are cells (typically thousands of them), so pandas.read_csv is fast enough
df = pd.read_csv(input_csv, header=0, index_col=0, sep=sep)
mat = csr_matrix(df.values.T)
barcode_metadata = {"barcodekey": df.columns.values}
feature_metadata = {
"featurekey": df.index.values,
"featurename": df.index.values,
}
else:
# For CSV files, wide columns prevent fast pd.read_csv loading
converter = (
float if base.startswith("expression") else int
) # If expression -> float otherwise int
barcodes = []
names = []
stacks = []
with (
gzip.open(input_csv, mode="rt")
if input_csv.endswith(".gz")
else open(input_csv)
) as fin:
barcodes = next(fin).strip().split(sep)[1:]
for line in fin:
fields = line.strip().split(sep)
names.append(fields[0])
stacks.append([converter(x) for x in fields[1:]])
        mat = csr_matrix(np.stack(stacks, axis=1 if not is_hca_csv else 0))
import numpy as np
import os
# Point exposure for Wall Depositions
#Automation Additions
HPC_toggle = False
multi_run_folder_toggle = False# single or multi folder (True is multi,False is single)
desktop_toggle = True
date = "/20_8_2021/"
# Parameters Geom
voxel_size = 0.3; # voxel size in nanometers (0.27 nm is appr. one atom of Si)(Smith uses 0.25nm for Tungsten(W))
size_x= 401 # horizontal size in the x direction in voxels (now for +/- x)
size_y = 401 # horizontal size in the y direction in voxels (now for +/- y)
size_z = 1001 # vertical size in voxels
volume = size_x*size_y*size_z # total voxel volume
voxel_size_pm = int(voxel_size*1000)
# Parameters Pri
num_x = int(11) # number of pillars in the x direction
num_y = int(21) #number of pillars in the y direction
num_x_range = np.linspace(-(num_x-1)/2,(num_x-1)/2,num_x)
num_y_range = np.linspace(-(num_y-1)/2,(num_y-1)/2,num_y)
x = size_x*voxel_size/2 # starting x
y = size_y*voxel_size/2 # starting y
z = 1 # starting z in nm
N = int(1e3) # Number of electrons per pillar
seq_lines = 100
energy = 1000 # Beam energy, in eV
sigma_beam = 1 # Beam standard deviation in nm
sigma_beam_pm = round(sigma_beam*1000) # Beam standard deviation in pm
tot_e = int(num_x*num_y*N)
#line_pitch_x_list = [6,6.5,7]
#line_pitch_x_list = [3,3.5,4]
line_pitch_x_list = [4.5]
#line_pitch_x_list = np.around(np.linspace(3,4,num=10),decimals=1)
#line_pitch_y_list = [1.7,1.7,1.7] #space between points nm
line_pitch_y_list = np.ones(len(line_pitch_x_list))*1.7
#line_pitch_list = np.around(np.linspace(1,5,num=20),decimals=1) #space between points nm
dep_strat = "seq_ll" #seq_ll (sequential layered lines), seq_d (sequential diagonals)
# This is a numpy datatype that corresponds to pri files
dt = np.dtype([
('x', np.float32), ('y', np.float32), ('z', np.float32), # Starting position
('dx', np.float32), ('dy', np.float32), ('dz', np.float32), # Starting direction
('K', np.float32), # Starting energy
('px', np.uint32), ('py', np.uint32)]) # Pixel index
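# Sketch (not part of the original script; file name is a placeholder): a .pri file
# written below with array.tofile() can be read back into the same structured dtype:
#   records = np.fromfile("example.pri", dtype=dt)
#   start_xy = np.stack([records['x'], records['y']], axis=1)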
total_runs = len(line_pitch_y_list)
# For now only iterates over line_pitches --> may update to include any possible combination of parameters
#Folder Location
if multi_run_folder_toggle:
#Multi-Runs - Run all files in folder (for multi-runs exectuable)
if HPC_toggle:
file_path = "/home/richarddejong/nebula_test_files/vox_tri_pri/multi_runs"+date
elif desktop_toggle:
file_path = "C:/Users/Richard/source/repos/Nebula/nebula_test_files/vox_tri_pri/multi_runs"+date
else:
file_path = "C:/Users/richa/Documents/repos/nebula_test_files/vox_tri_pri/multi_runs"+date
else:
#Single Runs - Run a single file (for single-run executable)
if HPC_toggle:
file_path = "/home/richarddejong/nebula_test_files/vox_tri_pri/single_runs"+date
elif desktop_toggle:
file_path = "C:/Users/Richard/source/repos/Nebula/nebula_test_files/vox_tri_pri/single_runs"+date
else:
file_path = "C:/Users/richa/Documents/repos/nebula_test_files/vox_tri_pri/single_runs"+date
if dep_strat == "seq_d":
for i in range(total_runs):
# Update Iterative Parameters
line_pitch_x = line_pitch_x_list[i]
line_pitch_x_pm = int(line_pitch_x*1000)
line_pitch_y = line_pitch_y_list[i]
line_pitch_y_pm = int(line_pitch_y*1000)
#title creation
title_pri = str(int(energy/1000))+"keV_"+str(num_x)+"_"+str(num_y)+"_"+str(int(N/1000))+"kpp_pitchx_"+str(line_pitch_x_pm)+"_pitchy_"+str(line_pitch_y_pm)+"_"+dep_strat+"_"
title_geom = str(size_x)+"_"+str(size_y)+"_"+str(size_z)+"_sb_"+str(sigma_beam_pm)+"_vs_"+str(voxel_size_pm)
title = title_pri+title_geom+".pri"
x_p = []
y_p = []
inputx = np.zeros(tot_e)
inputy = np.zeros(tot_e)
for k in num_x_range:
for j in num_y_range:
startx = k*line_pitch_x+x
starty = j*line_pitch_y+y
xj = np.random.normal(startx, sigma_beam, N)
yj = np.random.normal(starty, sigma_beam, N)
x_p.append(xj)
y_p.append(yj)
for j in range(num_y*num_x):
inputx[j*N:N*(j+1)] = x_p[j]
inputy[j*N:N*(j+1)] = y_p[j]
# Open file
if not os.path.exists(file_path):
os.makedirs(file_path)
with open(file_path+title, 'wb') as file:
# Allocate numpy buffer
array = np.empty(tot_e, dtype=dt)
# Fill with data
array['x'] = inputx
array['y'] = inputy
array['z'] = z
array['dx'] = 0
array['dy'] = 0
array['dz'] = 1
array['K'] = energy
array['px'] = 0
array['py'] = 0
# Write buffer to file
array.tofile(file)
#Progress Tracker
if i%10 == 0:
print("Creating Primary Files :" + str(round(i/total_runs*100)) + "%")
if dep_strat == "seq_ll":
for i in range(total_runs):
# Update Iterative Parameters
line_pitch_x = line_pitch_x_list[i]
line_pitch_x_pm = int(line_pitch_x*1000)
line_pitch_y = line_pitch_y_list[i]
line_pitch_y_pm = int(line_pitch_y*1000)
#title creation
title_pri = str(int(energy/1000))+"keV_"+str(num_x)+"_"+str(num_y)+"_"+str(int(N/1000))+"kpp_pitchx_"+str(line_pitch_x_pm)+"_pitchy_"+str(line_pitch_y_pm)+"_"+dep_strat+"_"
title_geom = str(size_x)+"_"+str(size_y)+"_"+str(size_z)+"_sb_"+str(sigma_beam_pm)+"_vs_"+str(voxel_size_pm)
title = title_pri+title_geom+".pri"
x_p = []
y_p = []
inputx = np.zeros(tot_e)
        inputy = np.zeros(tot_e)
''' CONFIDENTIAL
Copyright (c) 2021 <NAME>,
Department of Remote Sensing and Photogrammetry,
Finnish Geospatial Research Institute (FGI), National Land Survey of Finland (NLS)
PERMISSION IS HEREBY LIMITED TO FGI'S INTERNAL USE ONLY. THE CODE
MAY BE RE-LICENSED, SHARED, OR TAKEN INTO OTHER USE ONLY WITH
A WRITTEN CONSENT FROM THE HEAD OF THE DEPARTMENT.
The software is provided "as is", without warranty of any kind, express or
implied, including but not limited to the warranties of merchantability,
fitness for a particular purpose and noninfringement. In no event shall the
authors or copyright holders be liable for any claim, damages or other
liability, whether in an action of contract, tort or otherwise, arising from,
out of or in connection with the software or the use or other dealings in the
software.
'''
import numpy as np
import math
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from matplotlib.widgets import Slider, Button, RadioButtons, CheckButtons
try:
import pcl
from pyquaternion import Quaternion
except:
print('cannot import pcl -> change python version')
import matplotlib.cm as cmx
from scipy.spatial import distance_matrix
from scipy.optimize import leastsq
import matplotlib
import matplotlib.animation as animation
import open3d as o3d
import glob
import cv2
import cv2.aruco as aruco
import os
from mpl_toolkits.mplot3d.proj3d import proj_transform
from matplotlib.text import Annotation
import pickle
from matplotlib.lines import Line2D
import pandas as pd
import random
from scipy.spatial import ConvexHull
from math import sqrt
from math import atan2, cos, sin, pi
from collections import namedtuple
from matplotlib.patches import Circle
import mpl_toolkits.mplot3d.art3d as art3d
from pyquaternion import Quaternion
np.set_printoptions(suppress=True)
def eulerAnglesToRotationMatrix2(theta):
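    '''Build a 3x3 rotation matrix from Euler angles theta = [rx, ry, rz] (radians),
    composed as R = Rz @ Ry @ Rx (rotation about x applied first).'''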
R_x = np.array([[1, 0, 0],
[0, math.cos(theta[0]), -math.sin(theta[0])],
[0, math.sin(theta[0]), math.cos(theta[0])]
])
R_y = np.array([[math.cos(theta[1]), 0, math.sin(theta[1])],
[0, 1, 0],
[-math.sin(theta[1]), 0, math.cos(theta[1])]
])
R_z = np.array([[math.cos(theta[2]), -math.sin(theta[2]), 0],
[math.sin(theta[2]), math.cos(theta[2]), 0],
[0, 0, 1]
])
R = np.dot(R_z, np.dot(R_y, R_x))
return R
Rot_matrix = eulerAnglesToRotationMatrix2([0, 0, np.deg2rad(-90)])
InitLidar = True
InitLidar = False
global globalTrigger
globalTrigger = True
stereoRectify = False# True
#stereoRectify = True
class Annotation3D(Annotation):
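    '''Matplotlib Annotation anchored to a 3D point: the stored xyz is re-projected to
    2D display coordinates on every draw.'''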
def __init__(self, s, xyz, *args, **kwargs):
Annotation.__init__(self, s, xy=(0, 0), *args, **kwargs)
self._verts3d = xyz
def draw(self, renderer):
xs3d, ys3d, zs3d = self._verts3d
xs, ys, zs = proj_transform(xs3d, ys3d, zs3d, renderer.M)
self.xy = (xs, ys)
Annotation.draw(self, renderer)
def save_obj(obj, name):
with open('/home/eugeniu/catkin_ws/src/testNode/CAMERA_CALIBRATION/data/' + name + '.pkl', 'wb') as f:
pickle.dump(obj, f, protocol=2)
print('{}.pkl Object saved'.format(name))
def load_obj(name):
with open('/home/eugeniu/Desktop/my_data/CameraCalibration/data/saved_files/' + name + '.pkl', 'rb') as f:
return pickle.load(f)
def showErros(_3DErros, IMageNames):
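    '''Plot the per-axis 3D errors (converted to mm) as grouped X/Y/Z bars, one group per image.'''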
print('len(_3DErros)->{}'.format(np.shape(_3DErros)))
if len(_3DErros)>1:
_3DErros = np.array(_3DErros).squeeze()
# norm_total = np.array(_3DErros[:,0]).squeeze()
norm_axis = np.array(_3DErros).squeeze() * 1000
index, bar_width = np.arange(len(IMageNames)), 0.24
fig, ax = plt.subplots()
X = ax.bar(index, norm_axis[:, 0], bar_width, label="X")
Y = ax.bar(index + bar_width, norm_axis[:, 1], bar_width, label="Y")
Z = ax.bar(index + bar_width + bar_width, norm_axis[:, 2], bar_width, label="Z")
ax.set_xlabel('images')
ax.set_ylabel('errors in mm')
ax.set_title('3D error')
ax.set_xticks(index + bar_width / 3)
ax.set_xticklabels(IMageNames)
ax.legend()
plt.show()
def triangulation(kp1, kp2, T_1w, T_2w):
"""Triangulation to get 3D points
Args:
kp1 (Nx2): keypoint in view 1 (normalized)
kp2 (Nx2): keypoints in view 2 (normalized)
T_1w (4x4): pose of view 1 w.r.t i.e. T_1w (from w to 1)
T_2w (4x4): pose of view 2 w.r.t world, i.e. T_2w (from w to 2)
Returns:
X (3xN): 3D coordinates of the keypoints w.r.t world coordinate
X1 (3xN): 3D coordinates of the keypoints w.r.t view1 coordinate
X2 (3xN): 3D coordinates of the keypoints w.r.t view2 coordinate
"""
kp1_3D = np.ones((3, kp1.shape[0]))
kp2_3D = np.ones((3, kp2.shape[0]))
kp1_3D[0], kp1_3D[1] = kp1[:, 0].copy(), kp1[:, 1].copy()
kp2_3D[0], kp2_3D[1] = kp2[:, 0].copy(), kp2[:, 1].copy()
X = cv2.triangulatePoints(T_1w[:3], T_2w[:3], kp1_3D[:2], kp2_3D[:2])
X /= X[3]
X1 = T_1w[:3].dot(X)
X2 = T_2w[:3].dot(X)
return X[:3].T, X1.T, X2.T
def triangulate(R1,R2,t1,t2,K1,K2,D1,D2, pts1, pts2):
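    '''Linearly triangulate corresponding pixel points pts1/pts2 given each camera's pose
    (R, t) in world coordinates and intrinsics K1/K2 (D1/D2 are accepted but unused);
    returns an Nx3 array of 3D points.'''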
P1 = np.hstack([R1.T, -R1.T.dot(t1)])
P2 = np.hstack([R2.T, -R2.T.dot(t2)])
P1 = K1.dot(P1)
P2 = K2.dot(P2)
# Triangulate
_3d_points = []
for i,point in enumerate(pts1):
point3D = cv2.triangulatePoints(P1, P2, pts1[i], pts2[i]).T
point3D = point3D[:, :3] / point3D[:, 3:4]
_3d_points.append(point3D)
print('Triangulate _3d_points -> {}'.format(np.shape(_3d_points)))
return np.array(_3d_points).squeeze()
def mai(R1,R2,t1,t2,imagePoint1,imagePoint2, K2=None,K1=None, D2=None,D1=None):
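    '''Triangulate a single left/right pixel correspondence, reproject it into both views
    and print the reprojection errors; hard-coded example values are used when K1 is None.'''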
# Set up two cameras near each other
if K1 is None:
        K1 = K2 = np.array([
[718.856, 0., 607.1928],
[0., 718.856, 185.2157],
[0., 0., 1.],
])
R1 = np.array([
[1., 0., 0.],
[0., 1., 0.],
[0., 0., 1.]
])
R2 = np.array([
[0.99999183, -0.00280829, -0.00290702],
[0.0028008, 0.99999276, -0.00257697],
[0.00291424, 0.00256881, 0.99999245]
])
t1 = np.array([[0.], [0.], [0.]])
t2 = np.array([[-0.02182627], [0.00733316], [0.99973488]])
# Corresponding image points
imagePoint1 = np.array([371.91915894, 221.53485107])
imagePoint2 = np.array([368.26071167, 224.86262512])
P1 = np.hstack([R1.T, -R1.T.dot(t1)])
P2 = np.hstack([R2.T, -R2.T.dot(t2)])
P1 = K1.dot(P1)
P2 = K2.dot(P2)
# Triangulate
point3D = cv2.triangulatePoints(P1, P2, imagePoint1, imagePoint2).T
point3D = point3D[:, :3] / point3D[:, 3:4]
print('Triangulate point3D -> {}'.format(point3D))
# Reproject back into the two cameras
rvec1, _ = cv2.Rodrigues(R1.T) # Change
rvec2, _ = cv2.Rodrigues(R2.T) # Change
p1, _ = cv2.projectPoints(point3D, rvec1, -t1, K1, distCoeffs=D1) # Change
p2, _ = cv2.projectPoints(point3D, rvec2, -t2, K2, distCoeffs=D2) # Change
# measure difference between original image point and reporjected image point
reprojection_error1 = np.linalg.norm(imagePoint1 - p1[0, :])
reprojection_error2 = np.linalg.norm(imagePoint2 - p2[0, :])
print('difference between original image point and reporjected image point')
print(reprojection_error1, reprojection_error2)
return p1,p2
class PointCloud_filter(object):
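    '''Collects 2D/3D correspondences for camera calibration: loads a point-cloud file and
    one or two camera images, detects a chessboard or ChArUco board in the image(s),
    estimates the board pose, and (in the stereo case) computes a per-corner depth from
    the left/right disparity.'''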
def __init__(self, file, img_file=None, img_file2=None, debug=True):
self.debug = debug
self.img_file = img_file
self.img_file2 = img_file2
self.name = os.path.basename(file).split('.')[0]
self.file = file
self.useVoxel, self.voxel_size = False, 0.15
self.lowerTemplate, self.showImage = False, True
self.showError = False
self.points_correspondences = None
self.OK = False
        self.useInitialPointCloud = False  # use all points to fit or only the margins
self.chessBoard = False
self.applyICP_directly = False
self.s = .1 # scale
self.plotInit, self.axis_on, self.colour, self.Annotate = False, True, False, False
self.chess, self.corn, self.p1, self.p2, self.p3, self.ICP_finetune_plot = None, None, None, None, None, None
if self.showImage:
b = 1
self.pts = np.float32([[0, b, 0], [b, b, 0], [b, 0, 0], [-0.03, -0.03, 0]])
self.ImageNames = []
self._3DErros = []
self.criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 100, 0.0001)
self.axis = np.float32([[1, 0, 0], [0, 1, 0], [0, 0, -1]]).reshape(-1, 3)
self.objp = np.zeros((7 * 10, 3), np.float32)
self.objp[:, :2] = np.mgrid[0:10, 0:7].T.reshape(-1, 2) * self.s
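        # 10x7 grid of chessboard corner positions in board coordinates (z = 0), spaced self.s units apart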
self.fig = plt.figure(figsize=plt.figaspect(0.5))
self.fig.suptitle('Data collection', fontsize=16)
self.ax = self.fig.add_subplot(1, 2, 1, projection='3d')
#self.ax = self.fig.add_subplot(1, 2, 2, projection='3d')
self.readCameraIntrin()
self.QueryImg = cv2.imread(img_file)
self.ImageNames.append(os.path.basename(img_file))
if self.img_file2: # use stereo case
self.QueryImg2 = cv2.imread(img_file2)
if stereoRectify:
self.QueryImg = cv2.remap(src=self.QueryImg, map1=self.leftMapX, map2=self.leftMapY,
interpolation=cv2.INTER_LINEAR, dst=None, borderMode=cv2.BORDER_CONSTANT)
self.QueryImg2 = cv2.remap(src=self.QueryImg2, map1=self.rightMapX, map2=self.rightMapY,
interpolation=cv2.INTER_LINEAR, dst=None, borderMode=cv2.BORDER_CONSTANT)
gray_left = cv2.cvtColor(self.QueryImg, cv2.COLOR_BGR2GRAY)
ret_left, corners_left = cv2.findChessboardCorners(gray_left, (10, 7), None)
gray_right = cv2.cvtColor(self.QueryImg2, cv2.COLOR_BGR2GRAY)
ret_right, corners_right = cv2.findChessboardCorners(gray_right, (10, 7), None)
if ret_right and ret_left:
print('Found chessboard in both images')
self.chessBoard = True
corners2_left = cv2.cornerSubPix(gray_left, corners_left, (11, 11), (-1, -1), self.criteria)
self.corners2 = corners2_left
cv2.drawChessboardCorners(self.QueryImg, (10, 7), self.corners2, ret_left)
ret, self.rvecs, self.tvecs = cv2.solvePnP(self.objp, self.corners2, self.K_left, self.D_left)
imgpts, jac = cv2.projectPoints(self.axis, self.rvecs, self.tvecs, self.K_left, self.D_left)
self.QueryImg = self.draw(self.QueryImg, corners=corners2_left, imgpts=imgpts)
self.pixelsPoints = np.asarray(corners2_left).squeeze()
self.pixels_left = np.asarray(corners2_left).squeeze()
corners2_right = cv2.cornerSubPix(gray_right, corners_right, (11, 11), (-1, -1), self.criteria)
cv2.drawChessboardCorners(self.QueryImg2, (10, 7), corners2_right, ret_right)
self.pixels_right = np.asarray(corners2_right).squeeze()
self.T = np.array([-0.977, 0.004, 0.215])[:, np.newaxis]
angles = np.array([np.deg2rad(1.044), np.deg2rad(22.632), np.deg2rad(-.95)])
self.R = euler_matrix(angles)
#self.baseline =
self.T = np.array([-1.07, 0.004, 0.215])[:, np.newaxis]
self.baseline = abs(self.T[0])
print('baseline:{} m'.format(self.baseline))
self.focal_length, self.cx, self.cy = self.K[0, 0], self.K[0, 2], self.K[1, 2]
self.x_left, self.x_right = self.pixels_left, self.pixels_right
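                # Descriptive note: "disparity" below is the per-corner L1 pixel offset
                # |dx| + |dy| between matched left/right corners (sqrt of a square is abs).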
disparity = np.sum(np.sqrt((self.x_left - self.x_right) ** 2), axis=1)
# depth = baseline (meter) * focal length (pixel) / disparity-value (pixel) -> meter
self.depth = (self.baseline * self.focal_length / disparity)
print('depth:{}'.format(np.shape(self.depth)))
self.fxypxy = [self.K[0, 0], self.K[1, 1], self.cx, self.cy]
'''print('TRIANGULATE HERE==========================================')
P_1 = np.vstack((np.hstack((np.eye(3), np.zeros(3)[:, np.newaxis])), [0, 0, 0, 1])) # left camera
P_2 = np.vstack((np.hstack((self.R, self.T)), [0, 0, 0, 1])) # right camera
print('P1_{}, P_2{}, x_left:{}, x_right:{}'.format(np.shape(P_1), np.shape(P_2),
np.shape(self.x_left), np.shape(self.x_right)))
X_w, X1, X2 = triangulation(self.x_left,self.x_right,P_1,P_2)
print('X_w:{}, X1:{}, X2:{}, '.format(np.shape(X_w), np.shape(X1), np.shape(X2)))
print(X_w[0])
print(X1[0])
print(X2[0])'''
'''R1 = np.eye(3)
R2 = self.R
t1 = np.array([[0.], [0.], [0.]])
t2 = self.T
# Corresponding image points
imagePoint1 = np.array([371.91915894, 221.53485107])
imagePoint2 = np.array([368.26071167, 224.86262512])
imagePoint1 = self.x_left[0]
imagePoint2 = self.x_right[0]
print('imagePoint1:{}, imagePoint2:{}'.format(np.shape(imagePoint1), np.shape(imagePoint2)))
print('self.K_left ')
print(self.K_left)
print('self.K_right ')
print(self.K_right)
p1,p2 = test(R1,R2,t1,t2,imagePoint1,imagePoint2,K1=self.K_left,K2=self.K_right, D1=self.D_left,D2=self.D_right)
p1 = np.array(p1).squeeze().astype(int)
p2 = np.array(p2).squeeze().astype(int)
print('p1:{}, p2:{}'.format(np.shape(p1), np.shape(p2)))
#d2 = distance_matrix(X_w, X_w)
#print('d2:{}'.format(d2))
cv2.circle(self.QueryImg, (p1[0],p1[1]), 7, (255, 0, 0), 7)
cv2.circle(self.QueryImg2, (p2[0], p2[1]), 7, (255, 0, 0), 7)
cv2.imshow('QueryImg', cv2.resize(self.QueryImg,None,fx=.5,fy=.5))
cv2.imshow('QueryImg2', cv2.resize(self.QueryImg2, None, fx=.5, fy=.5))
cv2.waitKey(0)
cv2.destroyAllWindows()'''
else:
self.chessBoard = False
self.useVoxel = False
print('No chessboard ')
corners2_left, ids_left, rejectedImgPoints = aruco.detectMarkers(gray_left, self.ARUCO_DICT)
corners2_left, ids_left, _, _ = aruco.refineDetectedMarkers(image=gray_left,
board=self.calibation_board,
detectedCorners=corners2_left,
detectedIds=ids_left,
rejectedCorners=rejectedImgPoints,
cameraMatrix=self.K_left,
distCoeffs=self.D_left)
corners2_right, ids_right, rejectedImgPoints = aruco.detectMarkers(gray_right, self.ARUCO_DICT)
corners2_right, ids_right, _, _ = aruco.refineDetectedMarkers(image=gray_right,
board=self.calibation_board,
detectedCorners=corners2_right,
detectedIds=ids_right,
rejectedCorners=rejectedImgPoints,
cameraMatrix=self.K_right,
distCoeffs=self.D_right)
if np.all(ids_left != None) and np.all(ids_right != None):
print('found charuco board, in both images')
retval_left, self.rvecs, self.tvecs = aruco.estimatePoseBoard(corners2_left, ids_left,
self.calibation_board,
self.K_left, self.D_left, None,
None)
retval_right, self.rvecs_right, self.tvecs_right = aruco.estimatePoseBoard(corners2_right,
ids_right,
self.calibation_board,
self.K_right,
self.D_right, None,
None)
if retval_left and retval_right:
self.QueryImg = aruco.drawAxis(self.QueryImg, self.K_left, self.D_left, self.rvecs,
self.tvecs, 0.3)
self.QueryImg = aruco.drawDetectedMarkers(self.QueryImg, corners2_left, ids_left,
borderColor=(0, 0, 255))
b = 1
imgpts, _ = cv2.projectPoints(self.pts, self.rvecs_right, self.tvecs_right, self.K_right,
self.D_right)
self.corners2_right = np.append(imgpts, np.mean(imgpts, axis=0)).reshape(-1, 2)
self.dst, jacobian = cv2.Rodrigues(self.rvecs)
a, circle_tvec, b = .49, [], 1
circle_tvec.append(
np.asarray(self.tvecs).squeeze() + np.dot(self.dst, np.asarray([a, a, 0])))
circle_tvec = np.mean(circle_tvec, axis=0)
self.QueryImg = aruco.drawAxis(self.QueryImg, self.K_left, self.D_left, self.rvecs,
circle_tvec, 0.2)
imgpts, _ = cv2.projectPoints(self.pts, self.rvecs, self.tvecs, self.K_left, self.D_left)
self.corners2 = np.append(imgpts, np.mean(imgpts, axis=0)).reshape(-1, 2)
self.pt_dict = {}
for i in range(len(self.pts)):
self.pt_dict[tuple(self.pts[i])] = tuple(imgpts[i].ravel())
top_right = self.pt_dict[tuple(self.pts[0])]
bot_right = self.pt_dict[tuple(self.pts[1])]
bot_left = self.pt_dict[tuple(self.pts[2])]
top_left = self.pt_dict[tuple(self.pts[3])]
cv2.circle(self.QueryImg, top_right, 4, (0, 0, 255), 5)
cv2.circle(self.QueryImg, bot_right, 4, (0, 0, 255), 5)
cv2.circle(self.QueryImg, bot_left, 4, (0, 0, 255), 5)
cv2.circle(self.QueryImg, top_left, 4, (0, 0, 255), 5)
self.QueryImg = cv2.line(self.QueryImg, top_right, bot_right, (0, 255, 0), 4)
self.QueryImg = cv2.line(self.QueryImg, bot_right, bot_left, (0, 255, 0), 4)
self.QueryImg = cv2.line(self.QueryImg, bot_left, top_left, (0, 255, 0), 4)
self.QueryImg = cv2.line(self.QueryImg, top_left, top_right, (0, 255, 0), 4)
else:
print('Cannot estimate the board pose from both charuco images')
self.pixelsPoints = self.corners2.squeeze()
self.pixels_left = self.pixelsPoints
self.pixels_right = self.corners2_right.squeeze()
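# The R/T below are hard-coded stereo extrinsics (presumably hand-tuned); they replace whatever was
# loaded from the saved camera model, and self.T is overwritten again a few lines further down.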
self.T = np.array([-0.977, 0.004, 0.215])[:, np.newaxis]
angles = np.array([np.deg2rad(1.044), np.deg2rad(22.632), np.deg2rad(-.95)])
self.R = euler_matrix(angles)
# self.baseline =
self.T = np.array([-1.07, 0.004, 0.215])[:, np.newaxis]
self.baseline = abs(self.T[0])
print('baseline:{} m'.format(self.baseline))
self.focal_length, self.cx, self.cy = self.K[0, 0], self.K[0, 2], self.K[1, 2]
self.x_left, self.x_right = self.pixels_left, self.pixels_right
disparity = np.sum(np.sqrt((self.x_left - self.x_right) ** 2), axis=1)
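# Note: this sums the per-axis absolute differences (|du| + |dv|); after rectification the vertical
# offset should be close to zero, so it approximates the horizontal disparity used in the depth formula below.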
print('disparity:{}'.format(np.shape(disparity)))
# depth = baseline (meter) * focal length (pixel) / disparity-value (pixel) -> meter
self.depth = (self.baseline * self.focal_length / disparity)
print('depth:{}'.format(np.shape(self.depth)))
self.fxypxy = [self.K[0, 0], self.K[1, 1], self.cx, self.cy]
else:
print('No board found in either image!')
else:
# Undistortion
h, w = self.QueryImg.shape[:2]
newcameramtx, roi = cv2.getOptimalNewCameraMatrix(self.K, self.D, (w, h), 1, (w, h))
dst = cv2.undistort(self.QueryImg, self.K, self.D, None, newcameramtx)
x, y, w, h = roi
self.QueryImg = dst[y:y + h, x:x + w]
gray = cv2.cvtColor(self.QueryImg, cv2.COLOR_BGR2GRAY)
ret, corners = cv2.findChessboardCorners(gray, (10, 7), None)
if ret: # found chessboard
print('Found chessboard')
self.chessBoard = True
self.corners2 = cv2.cornerSubPix(gray, corners, (11, 11), (-1, -1), self.criteria)
cv2.drawChessboardCorners(self.QueryImg, (10, 7), corners, ret)
ret, self.rvecs, self.tvecs = cv2.solvePnP(self.objp, self.corners2, self.K, self.D)
# ret, self.rvecs, self.tvecs, inliers = cv2.solvePnPRansac(self.objp, self.corners2, self.K, self.D)
self.imgpts, jac = cv2.projectPoints(self.axis, self.rvecs, self.tvecs, self.K, self.D)
self.QueryImg = self.draw(self.QueryImg, self.corners2, self.imgpts)
self.pixelsPoints = np.asarray(self.corners2).squeeze()
else: # check for charuco
self.chessBoard = False
self.useVoxel = False
corners, ids, rejectedImgPoints = aruco.detectMarkers(gray, self.ARUCO_DICT)
corners, ids, rejectedImgPoints, recoveredIds = aruco.refineDetectedMarkers(
image=gray, board=self.calibation_board, detectedCorners=corners, detectedIds=ids,
rejectedCorners=rejectedImgPoints, cameraMatrix=self.K, distCoeffs=self.D)
if np.all(ids != None):
print('found charuco board, ids:{}'.format(np.shape(ids)))
self.chessBoard = False
if len(ids) > 0:
retval, self.rvecs, self.tvecs = aruco.estimatePoseBoard(corners, ids,
self.calibation_board, self.K,
self.D, None, None)
if retval:
self.QueryImg = aruco.drawAxis(self.QueryImg, self.K, self.D, self.rvecs, self.tvecs,
0.3)
self.QueryImg = aruco.drawDetectedMarkers(self.QueryImg, corners, ids,
borderColor=(0, 0, 255))
self.dst, jacobian = cv2.Rodrigues(self.rvecs)
a, circle_tvec, b = .49, [], 1
circle_tvec.append(
np.asarray(self.tvecs).squeeze() + np.dot(self.dst, np.asarray([a, a, 0])))
circle_tvec = np.mean(circle_tvec, axis=0)
self.QueryImg = aruco.drawAxis(self.QueryImg, self.K, self.D, self.rvecs, circle_tvec,
0.2)
imgpts, _ = cv2.projectPoints(self.pts, self.rvecs, self.tvecs, self.K, self.D)
self.corners2 = np.append(imgpts, np.mean(imgpts, axis=0)).reshape(-1, 2)
self.pt_dict = {}
for i in range(len(self.pts)):
self.pt_dict[tuple(self.pts[i])] = tuple(imgpts[i].ravel())
top_right = self.pt_dict[tuple(self.pts[0])]
bot_right = self.pt_dict[tuple(self.pts[1])]
bot_left = self.pt_dict[tuple(self.pts[2])]
top_left = self.pt_dict[tuple(self.pts[3])]
cv2.circle(self.QueryImg, top_right, 4, (0, 0, 255), 5)
cv2.circle(self.QueryImg, bot_right, 4, (0, 0, 255), 5)
cv2.circle(self.QueryImg, bot_left, 4, (0, 0, 255), 5)
cv2.circle(self.QueryImg, top_left, 4, (0, 0, 255), 5)
self.QueryImg = cv2.line(self.QueryImg, top_right, bot_right, (0, 255, 0), 4)
self.QueryImg = cv2.line(self.QueryImg, bot_right, bot_left, (0, 255, 0), 4)
self.QueryImg = cv2.line(self.QueryImg, bot_left, top_left, (0, 255, 0), 4)
self.QueryImg = cv2.line(self.QueryImg, top_left, top_right, (0, 255, 0), 4)
else:
print('No board Found')
self.image_ax = self.fig.add_subplot(1, 2, 2)
#self.image_ax = self.fig.add_subplot(1, 2, 1)
self.image_ax.imshow(self.QueryImg)
self.image_ax.set_axis_off()
self.image_ax.set_xlabel('Y')
self.image_ax.set_ylabel('Z')
else:
self.fig = plt.figure()
self.ax = self.fig.add_subplot(111, projection="3d")
self.ax.set_xlabel('X', fontsize=10)
self.ax.set_ylabel('Y', fontsize=10)
self.ax.set_zlabel('Z', fontsize=10)
self.fig.tight_layout()
plt.subplots_adjust(left=.15, bottom=0.2)
#plt.subplots_adjust( bottom=0.2)
self.Rx, self.Ry, self.Rz = [np.deg2rad(-90), 0, np.deg2rad(-40)] if self.chessBoard else [0, 0, 0]
self.Tx, self.Ty, self.Tz = 0, 0, 0
self.board_origin = [self.Tx, self.Ty, self.Tz]
self.savePoints = Button(plt.axes([0.03, 0.45, 0.15, 0.04], ), 'filter points', color='white')
self.savePoints.on_clicked(self.getClosestPoints)
self.resetBtn = Button(plt.axes([0.03, 0.25, 0.15, 0.04], ), 'reset', color='white')
self.resetBtn.on_clicked(self.reset)
self.X_btn = Button(plt.axes([0.03, 0.9, 0.024, 0.04], ), 'X', color='red')
self.X_btn.on_clicked(self.Close)
self.OK_btn = Button(plt.axes([0.03, 0.83, 0.074, 0.04], ), 'OK', color='green')
self.OK_btn.on_clicked(self.OK_btnClick)
self.not_OK_btn = Button(plt.axes([0.105, 0.83, 0.074, 0.04], ), 'not OK', color='red')
self.not_OK_btn.on_clicked(self.not_OK_btnClick)
self.saveCorrespondences = Button(plt.axes([0.03, 0.76, 0.15, 0.04], ), 'Save points', color='white')
self.saveCorrespondences.on_clicked(self.savePointsCorrespondences)
self.fitChessboard = Button(plt.axes([0.03, 0.66, 0.15, 0.04], ), 'auto fit', color='white')
self.fitChessboard.on_clicked(self.auto_fitBoard)
# set up sliders
self.Rx_Slider = Slider(plt.axes([0.25, 0.15, 0.65, 0.03]), 'Rx', -180, 180.0, valinit=np.degrees(self.Rx))
self.Ry_Slider = Slider(plt.axes([0.25, 0.1, 0.65, 0.03]), 'Ry', -180, 180.0, valinit=np.degrees(self.Ry))
self.Rz_Slider = Slider(plt.axes([0.25, 0.05, 0.65, 0.03]), 'Rz', -180, 180.0, valinit=np.degrees(self.Rz))
self.Rx_Slider.on_changed(self.update_R)
self.Ry_Slider.on_changed(self.update_R)
self.Rz_Slider.on_changed(self.update_R)
self.check = CheckButtons(plt.axes([0.03, 0.3, 0.15, 0.12]), ('Axes', 'Black', 'Annotate'),
(self.axis_on, self.colour, self.Annotate))
self.check.on_clicked(self.func_CheckButtons)
# set up translation buttons
self.step = .1 # m
self.trigger = True
self.Tx_btn_plus = Button(plt.axes([0.05, 0.15, 0.04, 0.045]), '+Tx', color='white')
self.Tx_btn_plus.on_clicked(self.Tx_plus)
self.Tx_btn_minus = Button(plt.axes([0.12, 0.15, 0.04, 0.045]), '-Tx', color='white')
self.Tx_btn_minus.on_clicked(self.Tx_minus)
self.Ty_btn_plus = Button(plt.axes([0.05, 0.1, 0.04, 0.045]), '+Ty', color='white')
self.Ty_btn_plus.on_clicked(self.Ty_plus)
self.Ty_btn_minus = Button(plt.axes([0.12, 0.1, 0.04, 0.045]), '-Ty', color='white')
self.Ty_btn_minus.on_clicked(self.Ty_minus)
self.Tz_btn_plus = Button(plt.axes([0.05, 0.05, 0.04, 0.045]), '+Tz', color='white')
self.Tz_btn_plus.on_clicked(self.Tz_plus)
self.Tz_btn_minus = Button(plt.axes([0.12, 0.05, 0.04, 0.045]), '-Tz', color='white')
self.Tz_btn_minus.on_clicked(self.Tz_minus)
self.Tx_flip = Button(plt.axes([0.17, 0.15, 0.04, 0.045]), 'FlipX', color='white')
self.Tx_flip.on_clicked(self.flipX)
self.Ty_flip = Button(plt.axes([0.17, 0.1, 0.04, 0.045]), 'FlipY', color='white')
self.Ty_flip.on_clicked(self.flipY)
self.Tz_flip = Button(plt.axes([0.17, 0.05, 0.04, 0.045]), 'FlipZ', color='white')
self.Tz_flip.on_clicked(self.flipZ)
self.radio = RadioButtons(plt.axes([0.03, 0.5, 0.15, 0.15], ), ('Final', 'Init'), active=0)
self.radio.on_clicked(self.colorfunc)
self.tag = None
self.circle_center = None
self.errors = {0: "Improper input parameters were entered.",
1: "The solution converged.",
2: "The number of calls to function has "
"reached maxfev = %d.",
3: "xtol=%f is too small, no further improvement "
"in the approximate\n solution "
"is possible.",
4: "The iteration is not making good progress, as measured "
"by the \n improvement from the last five "
"Jacobian evaluations.",
5: "The iteration is not making good progress, "
"as measured by the \n improvement from the last "
"ten iterations.",
'unknown': "An error occurred."}
self.legend_elements = [
Line2D([0], [0], marker='o', color='w', label='Original pointcloud', markerfacecolor='g', markersize=4),
Line2D([0], [0], marker='o', color='w', label='Corners', markerfacecolor='k', markersize=4),
Line2D([0], [0], marker='o', color='w', label='Margins', markerfacecolor='r', markersize=4),
]
def setUp(self):
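# Scene setup: load the LiDAR cloud, equalise the 3D axes around its centroid, draw the board
# template and, when an image pair is shown, compare inside/outside depths and fit the LiDAR plane.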
self.getPointCoud()
self.axisEqual3D(centers=np.mean(self.point_cloud, axis=0))
self.board()
self.ax.legend(handles=self.legend_elements, loc='best')
if self.showImage:
self.getDepth_Inside_Outside()
self.fitNewPlan()
def auto_fitBoard(self, args):
# estimate 3D-R and 3D-t between chess and PointCloud
# Initial guess of the transformation
x0 = np.array([np.degrees(self.Rx), np.degrees(self.Ry), np.degrees(self.Rz), self.Tx, self.Ty, self.Tz])
report = {"error": [], "template": []}
def f_min(x):
self.Rx, self.Ry, self.Rz = np.deg2rad(x[0]), np.deg2rad(x[1]), np.deg2rad(x[2])
self.Tx, self.Ty, self.Tz = x[3], x[4], x[5]
template = self.board(plot=False)
if self.useInitialPointCloud:
dist_mat = distance_matrix(template, self.point_cloud)
else:
dist_mat = distance_matrix(template, self.corners_)
err_func = dist_mat.sum(axis=1) # N x 1
# err_func = dist_mat.sum(axis=0) # N x 1
if self.debug:
print('errors = {}, dist_mat:{}, err_func:{}'.format(round(np.sum(err_func), 2), np.shape(dist_mat),
np.shape(err_func)))
report["error"].append(np.sum(err_func))
report["template"].append(template)
return err_func
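# leastsq then minimises these per-corner residuals over the 6-DoF pose (Rx, Ry, Rz, Tx, Ty, Tz);
# each residual is the summed distance from one template corner to every reference point.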
maxIters = 700
sol, status = leastsq(f_min, x0, ftol=1.49012e-07, xtol=1.49012e-07, maxfev=maxIters)
print('sol:{}, status:{}'.format(sol, status))
print(self.errors[status])
if self.chess:
self.chess.remove()
if self.corn:
self.corn.remove()
if self.ICP_finetune_plot:
self.ICP_finetune_plot.remove()
self.lowerTemplate = False
self.board()
point_cloud = np.asarray(self.point_cloud, dtype=np.float32)
template = np.asarray(report["template"][0], dtype=np.float32) if self.applyICP_directly else np.asarray(
self.template_cloud, dtype=np.float32)
converged, self.transf, estimate, fitness = self.ICP_finetune(template, point_cloud)
# converged, self.transf, estimate, fitness = self.ICP_finetune(point_cloud,template)
self.estimate = np.array(estimate)
if self.chessBoard:
self.ICP_finetune_plot = self.ax.scatter(self.estimate[:, 0], self.estimate[:, 1], self.estimate[:, 2],
c='k', marker='o', alpha=0.8, s=4)
else:
idx = np.arange(start=0, stop=100, step=1)
idx = np.delete(idx, [44, 45, 54, 55])
cornersToPLot = self.estimate[idx, :]
self.ICP_finetune_plot = self.ax.scatter(cornersToPLot[:, 0], cornersToPLot[:, 1], cornersToPLot[:, 2],
c='k', marker='o', alpha=0.8, s=4)
self.trigger = False
# set values of sol to Sliders
self.Rx_Slider.set_val(np.rad2deg(self.Rx))
self.Ry_Slider.set_val(np.rad2deg(self.Ry))
self.Rz_Slider.set_val(np.rad2deg(self.Rz))
if self.chess:
self.chess.remove()
if self.corn:
self.corn.remove()
self.trigger = True
self.board()
self.AnnotateEdges()
self.fig.canvas.draw_idle()
if self.showError:
print('min error:{} , at index:{}'.format(np.min(report["error"]), np.argmin(report["error"])))
rep = plt.figure(figsize=(15, 8))
plt.xlim(0, len(report["error"]) + 1)
plt.xlabel('Iteration')
plt.ylabel('RMSE')
plt.yticks(color='w')
plt.plot(np.arange(len(report["error"])) + 1, report["error"])
print('Start animation gif')
def update_graph(num):
data = np.asarray(report["template"][num])
graph._offsets3d = (data[:, 0], data[:, 1], data[:, 2])
title.set_text('Iteration {}'.format(num))
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
title = ax.set_title('3D Test')
data = report["template"][0]
graph = ax.scatter(data[:, 0], data[:, 1], data[:, 2])
ax.scatter(self.point_cloud[:, 0], self.point_cloud[:, 1], self.point_cloud[:, 2])
ani = animation.FuncAnimation(fig, update_graph, 101, interval=2, blit=False, repeat=False)
ani.save('myAnimation.gif', writer='imagemagick', fps=30)
print('Animation done')
plt.show()
def flipX(self, event):
self.Rx_Slider.set_val(np.rad2deg(self.Rx + np.pi))
self.update_R(0)
def flipY(self, event):
self.Ry_Slider.set_val(np.rad2deg(self.Ry + np.pi))
self.update_R(0)
def flipZ(self, event):
self.Rz_Slider.set_val(np.rad2deg(self.Rz + np.pi))
self.update_R(0)
def update_R(self, val):
if self.trigger:
if self.chess:
self.chess.remove()
if self.corn:
self.corn.remove()
self.Rx = np.deg2rad(self.Rx_Slider.val)
self.Ry = np.deg2rad(self.Ry_Slider.val)
self.Rz = np.deg2rad(self.Rz_Slider.val)
self.board()
self.fig.canvas.draw_idle()
def board(self, plot=True, given_origin=None, angle=None):
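# Builds the planar board template: a regular grid of corner points placed at the current origin,
# scaled by self.s and rotated about its own centroid by the Euler angles (Rx, Ry, Rz).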
self.board_origin = [self.Tx, self.Ty, self.Tz] if given_origin is None else given_origin
if self.chessBoard:
self.nCols, self.nRows, org = 7 + 2, 10 + 2, np.asarray(self.board_origin)
#org[0] -= self.nCols / 2
#org[1] -= self.nRows / 2
org[0] -= 4
org[1] -= 6
#org = np.zeros(3)
if self.lowerTemplate:
nrCols, nrRows = 2, 3
else:
nrCols, nrRows = self.nCols, self.nRows
#nrCols, nrRows = self.nCols+1, self.nRows+1 #remove later
print('org:{}, self.nCols - >{}, nrCols:{}'.format(org,self.nCols,nrCols))
# span (nCols - 1) and (nRows - 1) square widths so the grid spacing matches the physical board
X, Y = np.linspace(org[0], org[0] + self.nCols - 1, num=nrCols), np.linspace(org[1], org[1] + self.nRows - 1, num=nrRows)
print('X:{}'.format(X))
X, Y = np.meshgrid(X, Y)
Z = np.full(np.shape(X), org[2])
colors, colortuple = np.empty(X.shape, dtype=str), ('k', 'w')
for y in range(nrCols):
for x in range(nrRows):
colors[x, y] = colortuple[(x + y) % len(colortuple)]
colors[0, 0] = 'r'
alpha = 0.65
else:
self.nCols, self.nRows, org = 10, 10, np.asarray(self.board_origin)
org[0] -= self.nCols / 2
org[1] -= self.nRows / 2
# nrCols, nrRows = 4, 4
nrCols, nrRows = self.nCols, self.nRows
# nrCols, nrRows = 20, 20
X, Y = np.linspace(org[0], org[0] + self.nCols, num=nrCols), np.linspace(org[1], org[1] + self.nRows,
num=nrRows)
X, Y = np.meshgrid(X, Y)
Z = np.full(np.shape(X), org[2])
alpha = 0.25
angles = np.array([self.Rx, self.Ry, self.Rz]) if angle is None else np.array(angle)
Rot_matrix = self.eulerAnglesToRotationMatrix(angles)
X, Y, Z = X * self.s, Y * self.s, Z * self.s
corners = np.transpose(np.array([X, Y, Z]), (1, 2, 0))
init = corners.reshape(-1, 3)
print('corners-----------------------------------------------------')
#print(init)
print('corners -> {}'.format(np.shape(init)))
dist_Lidar = distance_matrix(init, init)
print('dist_Lidar corners---------------------------------------------------------')
print(dist_Lidar[0, :11])
translation = np.mean(init, axis=0) # get the mean point
corners = np.subtract(corners, translation)  # subtract it from all the other points
X, Y, Z = np.transpose(np.add(np.dot(corners, Rot_matrix), translation), (2, 0, 1))
# corners = np.transpose(np.array([X, Y, Z]), (1, 2, 0)).reshape(-1, 3)
corners = np.transpose(np.array([X, Y, Z]), (2, 1, 0)).reshape(-1, 3)
if plot:
if self.chessBoard:
self.chess = self.ax.plot_surface(X, Y, Z, facecolors=colors, linewidth=0.2, cmap='gray', alpha=alpha)
else:
self.chess = self.ax.plot_surface(X, Y, Z, linewidth=0.2, cmap='gray', alpha=alpha)
idx = np.arange(start=0, stop=100, step=1)
idx = np.delete(idx, [44, 45, 54, 55])
cornersToPLot = corners[idx, :]
self.corn = self.ax.scatter(cornersToPLot[:, 0], cornersToPLot[:, 1], cornersToPLot[:, 2], c='tab:blue',
marker='o', s=5)
self.template_cloud = corners
return np.array(corners)
def getPointCoud(self, colorsMap='jet', skip=1, useRing = True):
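# Loads the LiDAR scan (X, Y, Z, intensity, ring). When rings are used, the first/last point of each
# ring gives the board edge points (plus candidate circle points for the charuco target); the cloud is
# then optionally voxel-downsampled, its dominant plane segmented with RANSAC and its PCA axes drawn.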
# X, Y, Z, intensity, ring
if useRing:
originalCloud = np.array(np.load(self.file, mmap_mode='r'))[:,:5]
if InitLidar:
xyz = originalCloud[:, 0:3]
new_xyz = np.dot(xyz, Rot_matrix)
originalCloud[:, 0:3] = new_xyz
#mean_x = np.mean(originalCloud[:, 0])
#originalCloud[:, 0] = mean_x
df = pd.DataFrame(data=originalCloud, columns=["X", "Y", "Z","intens","ring"])
gp = df.groupby('ring')
keys = gp.groups.keys()
#groups = gp.groups
coolPoints, circlePoints = [],[]
for i in keys:
line = np.array(gp.get_group(i), dtype=np.float)
first,last = np.array(line[0], dtype=np.float)[:3],np.array(line[-1], dtype=np.float)[:3]
coolPoints.append(first)
coolPoints.append(last)
if self.chessBoard == False:
if len(line) > 50:
l = line[:,:3]
for i in range(2,len(l)-2,1):
d = np.linalg.norm(l[i]-l[i+1])
if d > 0.08: #half of the circle
circlePoints.append(l[i])
circlePoints.append(l[i+1])
self.coolPoints = np.array(coolPoints).squeeze()
self.ax.scatter(*self.coolPoints.T, color='r', marker='o', alpha=1, s=2)
print('coolPoints:{}, circlePoints:{}'.format(np.shape(self.coolPoints), np.shape(circlePoints)))
circlePoints = np.array(circlePoints)
if len(circlePoints)>0:
self.ax.scatter(*circlePoints.T, color='r', marker='o', alpha=1, s=5)
self.fitCircle(circlePoints)
#self.point_cloud = np.array(self.coolPoints, dtype=np.float32)
self.point_cloud = np.array(np.load(self.file, mmap_mode='r')[::skip, :3], dtype=np.float32)
if InitLidar:
xyz = self.point_cloud[:, 0:3]
new_xyz = np.dot(xyz, Rot_matrix)
self.point_cloud[:, 0:3] = new_xyz
# center the point_cloud
#mean_x = np.mean(self.point_cloud[:, 0])
#self.point_cloud[:, 0] = mean_x
self.point_cloud_mean = np.mean(self.point_cloud, axis=0)
self.Tx, self.Ty, self.Tz = self.point_cloud_mean
# self.point_cloud = self.point_cloud - self.point_cloud_mean
self.point_cloud_colors = np.array(np.load(self.file, mmap_mode='r'))[::skip, 3]
if self.plotInit:
cm = plt.get_cmap(colorsMap)
cNorm = matplotlib.colors.Normalize(vmin=min(self.point_cloud_colors), vmax=max(self.point_cloud_colors))
scalarMap = cmx.ScalarMappable(norm=cNorm, cmap=cm)
self.p1 = self.ax.scatter(self.point_cloud[:, 0], self.point_cloud[:, 1], self.point_cloud[:, 2],
color=scalarMap.to_rgba(self.point_cloud_colors), s=0.2)
else:
self.p = pcl.PointCloud(self.point_cloud)
inlier, outliner, coefficients = self.do_ransac_plane_segmentation(self.p, pcl.SACMODEL_PLANE,
pcl.SAC_RANSAC, 0.01)
#self.planeEquation(coef=np.array(coefficients).squeeze())
self.point_cloud_init = self.point_cloud.copy()
if self.useVoxel:
pcd = o3d.geometry.PointCloud()
pcd.points = o3d.utility.Vector3dVector(self.point_cloud)
self.point_cloud = np.array(pcd.voxel_down_sample(voxel_size=self.voxel_size).points)
# self.p1 = self.ax.scatter(outliner[:, 0], outliner[:, 1], outliner[:, 2], c='y', s=0.2)
self.p2 = self.ax.scatter(inlier[:, 0], inlier[:, 1], inlier[:, 2], c='g', s=0.2)
w, v = self.PCA(inlier)
point = np.mean(inlier, axis=0)
if self.chessBoard == False and self.circle_center:
#point[1:] = self.circle_center
point[[0,2]]= self.circle_center
w *= 2
if self.chessBoard==False and self.circle_center:
p = Circle(self.circle_center, self.circle_radius, alpha = .3, color='tab:blue')
self.ax.add_patch(p)
art3d.pathpatch_2d_to_3d(p, z=point[1], zdir="y")
self.p3 = self.ax.quiver([point[0]], [point[1]], [point[2]], [v[0, :] * np.sqrt(w[0])],
[v[1, :] * np.sqrt(w[0])],
[v[2, :] * np.sqrt(w[0])], linewidths=(1.8,))
def axisEqual3D(self, centers=None):
extents = np.array([getattr(self.ax, 'get_{}lim'.format(dim))() for dim in 'xyz'])
sz = extents[:, 1] - extents[:, 0]
# centers = np.mean(extents, axis=1) if centers is None
maxsize = max(abs(sz))
r = maxsize / 2
for ctr, dim in zip(centers, 'xyz'):
getattr(self.ax, 'set_{}lim'.format(dim))(ctr - r, ctr + r)
def planeEquation(self, coef):
a, b, c, d = coef
mean = np.mean(self.point_cloud, axis=0)
normal = [a, b, c]
d2 = -mean.dot(normal)
# print('d2:{}'.format(d2))
# print('mean:{}'.format(mean))
# print('The equation is {0}x + {1}y + {2}z = {3}'.format(a, b, c, d))
# plot the normal vector
startX, startY, startZ = mean[0], mean[1], mean[2]
startZ = (-normal[0] * startX - normal[1] * startY - d) * 1. / normal[2]
self.ax.quiver([startX], [startY], [startZ], [normal[0]], [normal[1]], [normal[2]], linewidths=(3,),edgecolor="red")
def PCA(self, data, correlation=False, sort=True):
# data = nx3
mean = np.mean(data, axis=0)
data_adjust = data - mean
#: the data is transposed due to np.cov/corrcoef syntax
if correlation:
matrix = np.corrcoef(data_adjust.T)
else:
matrix = np.cov(data_adjust.T)
eigenvalues, eigenvectors = np.linalg.eig(matrix)
if sort:
#: sort eigenvalues and eigenvectors
sort = eigenvalues.argsort()[::-1]
eigenvalues = eigenvalues[sort]
eigenvectors = eigenvectors[:, sort]
return eigenvalues, eigenvectors
def eulerAnglesToRotationMatrix(self, theta):
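# Composes R = Rz @ Ry @ Rx from theta = (rx, ry, rz), i.e. the rotation about x is applied first.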
R_x = np.array([[1, 0, 0],
[0, math.cos(theta[0]), -math.sin(theta[0])],
[0, math.sin(theta[0]), math.cos(theta[0])]
])
R_y = np.array([[math.cos(theta[1]), 0, math.sin(theta[1])],
[0, 1, 0],
[-math.sin(theta[1]), 0, math.cos(theta[1])]
])
R_z = np.array([[math.cos(theta[2]), -math.sin(theta[2]), 0],
[math.sin(theta[2]), math.cos(theta[2]), 0],
[0, 0, 1]
])
R = np.dot(R_z, np.dot(R_y, R_x))
return R
def do_ransac_plane_segmentation(self, pcl_data, pcl_sac_model_plane, pcl_sac_ransac, max_distance):
"""
Create the segmentation object
:param pcl_data: point cloud data subscriber
:param pcl_sac_model_plane: used to determine plane models
:param pcl_sac_ransac: RANdom SAmple Consensus
:param max_distance: max distance for a point to be considered fitting the model
:return: segmentation object
"""
seg = pcl_data.make_segmenter()
seg.set_model_type(pcl_sac_model_plane)
seg.set_method_type(pcl_sac_ransac)
seg.set_distance_threshold(max_distance)
inliers, coefficients = seg.segment()
inlier_object = pcl_data.extract(inliers, negative=False)
outlier_object = pcl_data.extract(inliers, negative=True)
if len(inliers) <= 1:
outlier_object = [0, 0, 0]
inlier_object, outlier_object = np.array(inlier_object), np.array(outlier_object)
return inlier_object, outlier_object, coefficients
def func_CheckButtons(self, label):
if label == 'Axes':
if self.axis_on:
self.ax.set_axis_off()
self.axis_on = False
else:
self.ax.set_axis_on()
self.axis_on = True
elif label == 'Black':
if self.colour:
self.colour = False
self.ax.set_facecolor((1, 1, 1))
else:
self.colour = True
self.ax.set_facecolor((0, 0, 0))
elif label == 'Annotate':
self.Annotate = not self.Annotate
self.AnnotateEdges()
self.fig.canvas.draw_idle()
def ICP_finetune(self, points_in, points_out):
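# Point-to-point ICP via python-pcl: aligns points_in (the board template) to points_out (the LiDAR
# cloud) and returns the convergence flag, the 4x4 transform, the transformed template and a fitness score.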
cloud_in = pcl.PointCloud()
cloud_out = pcl.PointCloud()
cloud_in.from_array(points_in)
cloud_out.from_array(points_out)
# icp = cloud_in.make_IterativeClosestPoint()
icp = cloud_out.make_IterativeClosestPoint()
converged, transf, estimate, fitness = icp.icp(cloud_in, cloud_out)
print('fitness:{}, converged:{}, transf:{}, estimate:{}'.format(fitness, converged, np.shape(transf),
np.shape(estimate)))
return converged, transf, estimate, fitness
def colorfunc(self, label):
if label == 'Init':
self.plotInit = True
else:
self.plotInit = False
self.reset(0)
def OK_btnClick(self, args):
self.OK = True
plt.close()
def not_OK_btnClick(self, args):
self.OK = False
plt.close()
def Close(self, args):
global globalTrigger
globalTrigger = False
plt.close()
def reset(self, args):
self.ax.cla()
self.getPointCoud()
self.axisEqual3D(centers=np.mean(self.point_cloud, axis=0))
self.Rx, self.Ry, self.Rz = 0, 0, 0
self.Tx, self.Ty, self.Tz = 0, 0, 0
self.board_origin = [self.Tx, self.Ty, self.Tz]
self.board()
self.fig.canvas.draw_idle()
def getClosestPoints(self, arg):
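# For every template corner, take the nearest point of the original LiDAR cloud (column-wise argsort
# of the full distance matrix) and plot the resulting template/LiDAR correspondences.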
dist_mat = distance_matrix(self.template_cloud, self.point_cloud_init)
self.neighbours = np.argsort(dist_mat, axis=1)[:, 0]
self.finaPoints = np.asarray(self.point_cloud_init[self.neighbours, :]).squeeze()
if self.chess:
self.chess.remove()
if self.corn:
self.corn.remove()
if self.p3:
self.p3.remove()
if self.p2:
self.p2.remove()
if self.p1:
self.p1.remove()
self.scatter_finalPoints = self.ax.scatter(self.finaPoints[:, 0], self.finaPoints[:, 1], self.finaPoints[:, 2],
c='k', marker='x', s=1)
self.corn = self.ax.scatter(self.template_cloud[:, 0], self.template_cloud[:, 1], self.template_cloud[:, 2],
c='blue', marker='o', s=5)
self.fig.canvas.draw_idle()
def Tz_plus(self, event):
self.Tz += self.step
self.update_R(0)
def Tz_minus(self, event):
self.Tz -= self.step
self.update_R(0)
def Ty_plus(self, event):
self.Ty += self.step
self.update_R(0)
def Ty_minus(self, event):
self.Ty -= self.step
self.update_R(0)
def Tx_plus(self, event):
self.Tx += self.step
self.update_R(0)
def Tx_minus(self, event):
self.Tx -= self.step
self.update_R(0)
def readCameraIntrin(self):
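# Loads the saved stereo camera model, overrides R/T with hand-tuned extrinsics, recomputes the
# rectification maps with cv2.stereoRectify/initUndistortRectifyMap, and builds the custom 4-marker
# 5x5 aruco dictionary plus the GridBoard used as the marker calibration target ('charuco' elsewhere in the code).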
name = 'outside'  # use the 'outside' calibration; switch to 'inside' if needed
self.camera_model = load_obj('{}_combined_camera_model'.format(name))
self.camera_model_rectify = load_obj('{}_combined_camera_model_rectify'.format(name))
self.K_left = self.camera_model['K_left']
self.K_right = self.camera_model['K_right']
self.D_left = self.camera_model['D_left']
self.D_right = self.camera_model['D_right']
# self.K_left = self.camera_model['K_right']
# self.K_right = self.camera_model['K_left']
# self.D_left = self.camera_model['D_right']
# self.D_right = self.camera_model['D_left']
# print('K_left')
# print(self.K_left)
# print('K_right')
# print(self.K_right)
self.R = self.camera_model['R']
self.T = self.camera_model['T']
self.T = np.array([-0.977, 0.004, 0.215])[:, np.newaxis]
angles = np.array([np.deg2rad(1.044), np.deg2rad(22.632), np.deg2rad(-.95)])
self.R = euler_matrix(angles)
#self.T = np.array([-0.98, 0., 0.12])[:, np.newaxis]
#self.T = np.array([-.75, 0., 0.])[:, np.newaxis]
#print('self T after {}'.format(np.shape(self.T)))
#angles = np.array([np.deg2rad(0.68), np.deg2rad(22.66), np.deg2rad(-1.05)])
#self.R = euler_matrix(angles)
#Q = self.camera_model_rectify['Q']
#roi_left, roi_right = self.camera_model_rectify['roi_left'], self.camera_model_rectify['roi_right']
self.leftMapX, self.leftMapY = self.camera_model_rectify['leftMapX'], self.camera_model_rectify['leftMapY']
self.rightMapX, self.rightMapY = self.camera_model_rectify['rightMapX'], self.camera_model_rectify['rightMapY']
img_shape = (1936, 1216)
print('img_shape:{}'.format(img_shape))
R1, R2, P1, P2, Q, roi_left, roi_right = cv2.stereoRectify(self.K_left, self.D_left, self.K_right, self.D_right,
imageSize=img_shape,
R=self.camera_model['R'], T=self.camera_model['T'],
flags=cv2.CALIB_ZERO_DISPARITY,
alpha=-1
#alpha=0
)
self.leftMapX, self.leftMapY = cv2.initUndistortRectifyMap(
self.K_left, self.D_left, R1,
P1, img_shape, cv2.CV_32FC1)
self.rightMapX, self.rightMapY = cv2.initUndistortRectifyMap(
self.K_right, self.D_right, R2,
P2, img_shape, cv2.CV_32FC1)
self.K = self.K_right
self.D = self.D_right
try:
N = 5
aruco_dict = aruco.custom_dictionary(0, N, 1)
aruco_dict.bytesList = np.empty(shape=(4, N - 1, N - 1), dtype=np.uint8)
A = np.array([[0, 0, 1, 0, 0], [0, 1, 0, 1, 0], [0, 1, 0, 1, 0], [0, 1, 1, 1, 0], [0, 1, 0, 1, 0]],
dtype=np.uint8)
aruco_dict.bytesList[0] = aruco.Dictionary_getByteListFromBits(A)
R = np.array([[1, 1, 1, 1, 0], [1, 0, 0, 1, 0], [1, 1, 1, 0, 0], [1, 0, 0, 1, 0], [1, 0, 0, 0, 1]],
dtype=np.uint8)
aruco_dict.bytesList[1] = aruco.Dictionary_getByteListFromBits(R)
V = np.array([[1, 0, 0, 0, 1], [1, 0, 0, 0, 1], [1, 0, 0, 0, 1], [0, 1, 0, 1, 0], [0, 0, 1, 0, 0]],
dtype=np.uint8)
O = np.array([[0, 1, 1, 1, 0], [1, 0, 0, 0, 1], [1, 0, 0, 0, 1], [1, 0, 0, 0, 1], [0, 1, 1, 1, 0]],
dtype=np.uint8)
aruco_dict.bytesList[2] = aruco.Dictionary_getByteListFromBits(O)
aruco_dict.bytesList[3] = aruco.Dictionary_getByteListFromBits(V)
self.ARUCO_DICT = aruco_dict
self.calibation_board = aruco.GridBoard_create(
markersX=2, markersY=2,
markerLength=0.126, markerSeparation=0.74,
dictionary=self.ARUCO_DICT)
except:
print('Install Aruco')
def draw(self, img, corners, imgpts):
corner = tuple(corners[0].ravel())
cv2.line(img, corner, tuple(imgpts[0].ravel()), (255, 0, 0), 5)
cv2.line(img, corner, tuple(imgpts[1].ravel()), (0, 255, 0), 5)
cv2.line(img, corner, tuple(imgpts[2].ravel()), (0, 0, 255), 5)
return img
def annotate3D(self, ax, s, *args, **kwargs):
self.tag = Annotation3D(s, *args, **kwargs)
ax.add_artist(self.tag)
def AnnotateEdges(self, giveAX=None, givenPoints=None):
if self.Annotate:
# add vertices annotation.
if giveAX is None:
if self.lowerTemplate or self.chessBoard == False:
if self.chessBoard == False:
pts = np.asarray(self.template_cloud.copy()).reshape(self.nCols, self.nRows, 3)
idx = np.array([44, 45, 54, 55])
center = np.mean(self.template_cloud[idx], axis=0)
self.templatePoints = [pts[0, -1, :], pts[-1, -1, :], pts[-1, 0, :], pts[0, 0, :], center]
self.templatePoints = np.array(self.templatePoints).reshape(-1, 3)
cornersToPLot = self.estimate[idx, :]
for j, xyz_ in enumerate(self.templatePoints):
self.annotate3D(self.ax, s=str(j), xyz=xyz_, fontsize=12, xytext=(-1, 1),
textcoords='offset points', ha='right', va='bottom')
else:
for j, xyz_ in enumerate(self.template_cloud):
self.annotate3D(self.ax, s=str(j), xyz=xyz_, fontsize=8, xytext=(-1, 1),
textcoords='offset points', ha='right', va='bottom')
else:
try:
templatePoints = np.asarray(self.template_cloud.copy()).reshape(self.nCols, self.nRows, 3)[
1:self.nCols - 1, 1:self.nRows - 1, :]
except:
templatePoints = np.asarray(self.template_cloud.copy()).reshape(self.nCols+1, self.nRows+1, 3)[
1:self.nCols - 1, 1:self.nRows - 1, :]
# templatePoints = np.asarray(self.template_cloud.copy()).reshape(self.nRows,self.nCols, 3)[1:self.nRows-1,1:self.nCols-1,:]
self.templatePoints = np.array(templatePoints).reshape(-1, 3)
for j, xyz_ in enumerate(self.templatePoints):
self.annotate3D(self.ax, s=str(j), xyz=xyz_, fontsize=8, xytext=(-3, 3),
textcoords='offset points', ha='right', va='bottom')
else:
for j, xyz_ in enumerate(givenPoints):
self.annotate3D(giveAX, s=str(j), xyz=xyz_, fontsize=10, xytext=(-3, 3),
textcoords='offset points', ha='right', va='bottom')
if self.showImage:
# annotate image
points = np.asarray(self.corners2).squeeze()
font, lineType = cv2.FONT_HERSHEY_SIMPLEX, 2 if self.chessBoard else 10
for i, point in enumerate(points):
point = tuple(point.ravel())
cv2.putText(self.QueryImg, '{}'.format(i), point, font, 1 if self.chessBoard else 3, (0, 0, 0)
if self.chessBoard else (255, 0, 0), lineType)
self.image_ax.imshow(self.QueryImg)
def getCamera_XYZ_Stereo(self):
#cam_rot, jac = cv2.Rodrigues(self.rvecs)
#mR = np.matrix(cam_rot)
#mT = np.matrix(self.tvecs)
#cam_trans = -mR * mT
_3DPoints = []
for i, pixel in enumerate(self.x_left):
u, v = pixel.ravel()
u, v = int(u), int(v)
distance = self.depth[i]
pt = np.array([u, v, distance])
pt[0] = pt[2] * (pt[0] - self.fxypxy[2]) / self.fxypxy[0]
pt[1] = pt[2] * (pt[1] - self.fxypxy[3]) / self.fxypxy[1]
# pt = pt.dot(cam_rot.T) + self.tvecs
_3DPoints.append(pt)
print('_3DPoints {}'.format(np.shape(_3DPoints)))
print('tvec : {}'.format(np.asarray(self.tvecs).squeeze()))
print('Camera_XYZ_Stereo mean {}'.format(np.mean(_3DPoints, axis=0)))
_3DPoints = np.array(_3DPoints).squeeze()
print('from disparity getCamera_XYZ_Stereo ')
d = distance_matrix(_3DPoints,_3DPoints)
print(d)
return _3DPoints
def getCamera_XYZ(self):
R_mtx, jac = cv2.Rodrigues(self.rvecs)
inv_R_mtx = np.linalg.inv(R_mtx)
inv_K = np.linalg.inv(self.K)
def compute_XYZ(u, v): # from 2D pixels to 3D world
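# Back-projection with the scale factor fixed to 1: XYZ = R^-1 (K^-1 [u, v, 1]^T - t).
# Without a known depth per pixel this is only correct up to that assumed scale.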
uv_ = np.array([[u, v, 1]], dtype=np.float32).T
suv_ = uv_
xyz_ = inv_K.dot(suv_) - self.tvecs
XYZ = inv_R_mtx.dot(xyz_)
pred = XYZ.T[0]
return pred
Camera_XYZ = []
for i, point in enumerate(self.pixelsPoints):
xyz = compute_XYZ(u=point[0], v=point[1])
# print 'xyz:{}'.format(xyz)
Camera_XYZ.append(xyz)
Camera_XYZ = np.array(Camera_XYZ)
print('init tvec : {}'.format(np.asarray(self.tvecs).squeeze()))
print('Camera_XYZ mean {}'.format(np.mean(Camera_XYZ, axis=0)))
if self.img_file2 is None:
for i, point in enumerate(Camera_XYZ):
imgpts, jac = cv2.projectPoints(point, self.rvecs, self.tvecs, self.K, self.D)
imgpts = np.asarray(imgpts).squeeze()
cv2.circle(self.QueryImg, (int(imgpts[0]), int(imgpts[1])), 7, (255, 0, 0), 7)
self.image_ax.imshow(self.QueryImg)
return Camera_XYZ
def getImagePixels(self):
img = cv2.imread(self.img_file) #left image
img2 = cv2.imread(self.img_file2)  # right image
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
gray2 = cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY)
pixelsPoints,pixelsPoints2, _3DreconstructedBoard = [],[],[]
if self.chessBoard:
ret, corners = cv2.findChessboardCorners(gray, (10, 7), None)
ret2, corners2 = cv2.findChessboardCorners(gray2, (10, 7), None)
if ret and ret2: # found chessboard
print('Found chessboard')
corners_2 = cv2.cornerSubPix(gray, corners, (11, 11), (-1, -1), self.criteria)
corners2_2 = cv2.cornerSubPix(gray2, corners2, (11, 11), (-1, -1), self.criteria)
pixelsPoints = np.asarray(corners_2).squeeze()
pixelsPoints2 = np.asarray(corners2_2).squeeze()
cv2.drawChessboardCorners(img, (10, 7), corners_2, ret)
cv2.drawChessboardCorners(img2, (10, 7), corners2_2, ret)
# Find the rotation and translation vectors.
success, rvecs, tvecs, inliers = cv2.solvePnPRansac(self.objp, corners_2, self.K, self.D)
rvecs, _ = cv2.Rodrigues(rvecs)
_3Dpoints = self.objp
# project 3D points to image plane
_2Dpoints, jac = cv2.projectPoints(_3Dpoints, rvecs, tvecs, self.K, self.D)
_2Dpoints = np.array(_2Dpoints, dtype=np.float32).squeeze()
print('_2Dpoints -> {}'.format(np.shape(_2Dpoints)))
for i in range(len(_2Dpoints)):
cv2.circle(img, tuple(_2Dpoints[i]), 5, (0, 255, 0), 3)
_3Dpoints = rvecs.dot(_3Dpoints.T) + tvecs
_3Dpoints = _3Dpoints.T
print('_3Dpoints->{}'.format(np.shape(_3Dpoints)))
dist_mat = distance_matrix(_3Dpoints, _3Dpoints)
print('dist_mat for OpencvReconstructed')
print(dist_mat[0, :11])
_3DreconstructedBoard = _3Dpoints
else:
return None,None
else:
corners, ids, rejectedImgPoints = aruco.detectMarkers(gray, self.ARUCO_DICT)
corners, ids, rejectedImgPoints, recoveredIds = aruco.refineDetectedMarkers(
image=gray, board=self.calibation_board, detectedCorners=corners, detectedIds=ids,
rejectedCorners=rejectedImgPoints, cameraMatrix=self.K, distCoeffs=self.D)
corners2, ids2, rejectedImgPoints2 = aruco.detectMarkers(gray2, self.ARUCO_DICT)
corners2, ids2, rejectedImgPoints2, recoveredIds2 = aruco.refineDetectedMarkers(
image=gray2, board=self.calibation_board, detectedCorners=corners2, detectedIds=ids2,
rejectedCorners=rejectedImgPoints2, cameraMatrix=self.K, distCoeffs=self.D)
if np.all(ids != None) and np.all(ids2 != None):
print('found charuco board, ids:{}'.format(np.shape(ids)))
if len(ids) and len(ids2) > 0:
retval, self.rvecs, self.tvecs = aruco.estimatePoseBoard(corners, ids,
self.calibation_board, self.K,
self.D, None, None)
retval2, self.rvecs2, self.tvecs2 = aruco.estimatePoseBoard(corners2, ids2,
self.calibation_board, self.K,
self.D, None, None)
img = aruco.drawDetectedMarkers(img, corners, ids,borderColor=(0, 0, 255))
img2 = aruco.drawDetectedMarkers(img2, corners2, ids2, borderColor=(0, 0, 255))
if retval and retval2:
self.dst, jacobian = cv2.Rodrigues(self.rvecs)
self.dst2, jacobian = cv2.Rodrigues(self.rvecs2)
#self.pts = np.float32([[0, b, 0], [b, b, 0], [b, 0, 0], [-0.03, -0.03, 0]])
b = 1
self.pts = np.float32([[0, b, 0], [b, b, 0], [b, 0, 0], [-0.03, -0.03, 0],[.5,.5,0]])
_3Dpoints = self.dst.T.dot(np.array(self.pts).squeeze().T) + self.tvecs
_3Dpoints = _3Dpoints.T
print('_3Dpoints->{}'.format(np.shape(_3Dpoints)))
dist_mat = distance_matrix(_3Dpoints, _3Dpoints)
print('dist_mat for OpencvReconstructed')
print(dist_mat)
_3DreconstructedBoard = _3Dpoints
imgpts, _ = cv2.projectPoints(self.pts, self.rvecs, self.tvecs, self.K, self.D)
#corners2 = np.append(imgpts, np.mean(imgpts, axis=0)).reshape(-1, 2)
corners2 = np.array(imgpts).squeeze()
self.pt_dict = {}
for i in range(len(self.pts)):
self.pt_dict[tuple(self.pts[i])] = tuple(imgpts[i].ravel())
top_right = self.pt_dict[tuple(self.pts[0])]
bot_right = self.pt_dict[tuple(self.pts[1])]
bot_left = self.pt_dict[tuple(self.pts[2])]
top_left = self.pt_dict[tuple(self.pts[3])]
img = cv2.line(img, top_right, bot_right, (0, 255, 0), 4)
img = cv2.line(img, bot_right, bot_left, (0, 255, 0), 4)
img = cv2.line(img, bot_left, top_left, (0, 255, 0), 4)
img = cv2.line(img, top_left, top_right, (0, 255, 0), 4)
cv2.circle(img, tuple(corners2[-1]), 5, (0, 255, 0), 3)
cv2.circle(img, tuple(corners2[-2]), 5, (0, 0, 255), 3)
pixelsPoints = np.asarray(corners2).squeeze()
imgpts, _ = cv2.projectPoints(self.pts, self.rvecs2, self.tvecs2, self.K, self.D)
#corners2 = np.append(imgpts, np.mean(imgpts, axis=0)).reshape(-1, 2)
corners2 = np.array(imgpts).squeeze()
self.pt_dict = {}
for i in range(len(self.pts)):
self.pt_dict[tuple(self.pts[i])] = tuple(imgpts[i].ravel())
top_right = self.pt_dict[tuple(self.pts[0])]
bot_right = self.pt_dict[tuple(self.pts[1])]
bot_left = self.pt_dict[tuple(self.pts[2])]
top_left = self.pt_dict[tuple(self.pts[3])]
img2 = cv2.line(img2, top_right, bot_right, (0, 255, 0), 4)
img2 = cv2.line(img2, bot_right, bot_left, (0, 255, 0), 4)
img2 = cv2.line(img2, bot_left, top_left, (0, 255, 0), 4)
img2 = cv2.line(img2, top_left, top_right, (0, 255, 0), 4)
cv2.circle(img2, tuple(corners2[-1]), 5, (0, 255, 0), 3)
#cv2.circle(img2, tuple(corners2[-2]), 5, (0, 0, 255), 3)
pixelsPoints2 = np.asarray(corners2).squeeze()
else:
return None,None
else:
return None,None
else:
return None,None
scale = .4
_horizontal = np.hstack(
(cv2.resize(img, None, fx=scale, fy=scale), cv2.resize(img2, None, fx=scale, fy=scale)))
cv2.imshow('_horizontal', _horizontal)
cv2.waitKey(0)
cv2.destroyAllWindows()
return pixelsPoints,pixelsPoints2, _3DreconstructedBoard
def savePointsCorrespondences(self, args):
display = True
fig = plt.figure(figsize=plt.figaspect(1))
ax = plt.axes(projection='3d')
ax.set_xlabel('X')
ax.set_ylabel('Y')
ax.set_zlabel('Z')
if self.chessBoard:
legend_elements = [
Line2D([0], [0], marker='o', label='board template', markerfacecolor='tab:blue', markersize=6),
Line2D([0], [0], marker='o', label='ICP finetuned', markerfacecolor='green', markersize=6),
Line2D([0], [0], marker='o', label='closest lidar points', markerfacecolor='k', markersize=6),
Line2D([0], [0], marker='o', label='Camera_XYZ', markerfacecolor='red', markersize=6),
]
board_template = self.template_cloud
board_template_ICP_finetuned = self.estimate
closest_lidar_points = self.finaPoints
try:
icp_finetuned_inside = np.asarray(self.estimate).reshape(self.nCols, self.nRows, 3)[1:self.nCols - 1,
1:self.nRows - 1, :]
board_template_inside = board_template.reshape(self.nCols, self.nRows, 3)[1:self.nCols - 1,
1:self.nRows - 1, :]
closest_lidar_points_inside = closest_lidar_points.reshape(self.nCols, self.nRows, 3)[1:self.nCols - 1,
1:self.nRows - 1, :]
except:
print('Second-----------------------------')
icp_finetuned_inside = np.asarray(self.estimate).reshape(self.nCols+1, self.nRows+1, 3)[1:self.nCols - 1,
1:self.nRows - 1, :]
board_template_inside = board_template.reshape(self.nCols+1, self.nRows+1, 3)[1:self.nCols - 1,
1:self.nRows - 1, :]
closest_lidar_points_inside = closest_lidar_points.reshape(self.nCols+1, self.nRows+1, 3)[1:self.nCols - 1,
1:self.nRows - 1, :]
icp_finetuned_inside = np.array(icp_finetuned_inside).reshape(-1, 3)
board_template_inside = np.array(board_template_inside).reshape(-1, 3)
print('board_template_inside-----------------------------------------------------')
print(board_template_inside)
print('board_template_inside -> {}'.format(np.shape(board_template_inside)))
dist_Lidar = distance_matrix(board_template_inside, board_template_inside)
print('dist_Lidar---------------------------------------------------------')
print(dist_Lidar[0, :11])
closest_lidar_points_inside = np.array(closest_lidar_points_inside).reshape(-1, 3)
Camera_XYZ = self.getCamera_XYZ()
if self.img_file2:
Camera_XYZ_Stereo = self.getCamera_XYZ_Stereo()
else:
Camera_XYZ_Stereo = np.array([[0, 0, 0]])
display = True
if display:
print('board_template:{}'.format(np.shape(board_template)))
print('board_template_ICP_finetuned:{}'.format(np.shape(board_template_ICP_finetuned)))
print('icp_finetuned_inside:{}'.format(np.shape(icp_finetuned_inside)))
print('board_template_inside:{}'.format(np.shape(board_template_inside)))
print('closest_lidar_points:{}'.format(np.shape(closest_lidar_points)))
print('closest_lidar_points_inside:{}'.format(np.shape(closest_lidar_points_inside)))
print('Camera_XYZ:{}'.format(np.shape(Camera_XYZ)))
print('Camera_XYZ_Stereo:{}'.format(np.shape(Camera_XYZ_Stereo)))
#dist = distance_matrix(Camera_XYZ_Stereo, Camera_XYZ_Stereo)
#print('distance matrix Camera_XYZ_Stereo:{}'.format(dist))
ax.scatter(*board_template.T, color='b', marker='o', alpha=.5, s=8)
ax.scatter(*board_template_ICP_finetuned.T, color='r', marker='o', alpha=.5, s=8)
ax.scatter(*board_template_inside.T, color='tab:blue', marker='x', alpha=1, s=10)
ax.scatter(*icp_finetuned_inside.T, color='g', marker='x', alpha=1, s=10)
ax.scatter(*closest_lidar_points.T, color='r', marker='x', alpha=.8, s=10)
ax.scatter(*closest_lidar_points_inside.T, color='k', marker='x', alpha=1, s=20)
ax.scatter(*Camera_XYZ.T, color='k', marker='x', alpha=1, s=30)
ax.scatter(*Camera_XYZ_Stereo.T, color='r', marker='o', alpha=1, s=3)
self.AnnotateEdges(giveAX=ax, givenPoints=board_template_inside)
extents = np.array([getattr(ax, 'get_{}lim'.format(dim))() for dim in 'xyz'])
sz = extents[:, 1] - extents[:, 0]
centers = np.mean(board_template, axis=0)
# centers = np.mean(Camera_XYZ_Stereo, axis=0) if self.img_file2 is not None else np.mean(board_template,axis=0)
maxsize = max(abs(sz))
r = maxsize / 2
for ctr, dim in zip(centers, 'xyz'):
getattr(ax, 'set_{}lim'.format(dim))(ctr - r, ctr + r)
self.pixelsPointsLeft, self.pixelsPointsRight, _3DreconstructedBoard = self.getImagePixels()
print('_3DreconstructedBoard -> {}'.format(np.shape(_3DreconstructedBoard)))
if len(self.pixelsPointsLeft)<=0:
print('Cannot get pixel points!')
self.points_correspondences = dict([
('board_template', board_template),
('board_template_ICP_finetuned', board_template_ICP_finetuned),
('board_template_inside', board_template_inside),
('icp_finetuned_inside', icp_finetuned_inside),
('closest_lidar_points', closest_lidar_points),
('closest_lidar_points_inside', closest_lidar_points_inside),
('pixelsPointsLeft', self.pixelsPointsLeft),
('pixelsPointsRight', self.pixelsPointsRight),
('Camera_XYZ_Stereo', Camera_XYZ_Stereo),
('_3DreconstructedBoard',_3DreconstructedBoard),
('Camera_XYZ', Camera_XYZ)])
# save_obj(self.points_correspondences, self.name)
else:
legend_elements = [
Line2D([0], [0], marker='o', label='board template all', markerfacecolor='b', markersize=6),
Line2D([0], [0], marker='o', label='ICP finetuned', markerfacecolor='red', markersize=6),
Line2D([0], [0], marker='o', label='board template inside', markerfacecolor='tab:blue', markersize=6),
Line2D([0], [0], marker='o', label='closest lidar points', markerfacecolor='red', markersize=6),
]
pts = np.asarray(self.template_cloud.copy()).reshape(self.nCols, self.nRows, 3)
idx = np.array([44, 45, 54, 55])
center = np.mean(self.template_cloud[idx], axis=0)
board_template = np.array([pts[0, -1, :], pts[-1, -1, :], pts[-1, 0, :], pts[0, 0, :], center]).reshape(-1,
3)
pts = np.asarray(self.estimate.copy()).reshape(self.nCols, self.nRows, 3)
center = np.mean(self.estimate[idx], axis=0)
board_template_ICP_finetuned = np.array(
[pts[0, -1, :], pts[-1, -1, :], pts[-1, 0, :], pts[0, 0, :], center]).reshape(-1, 3)
board_template_inside = self.templatePoints
pts = np.asarray(self.finaPoints.copy()).reshape(self.nCols, self.nRows, 3)
center = np.mean(self.finaPoints[idx], axis=0)
closest_lidar_points = np.array(
[pts[0, -1, :], pts[-1, -1, :], pts[-1, 0, :], pts[0, 0, :], center]).reshape(-1, 3)
if self.img_file2:
Camera_XYZ_Stereo = self.getCamera_XYZ_Stereo()
else:
Camera_XYZ_Stereo = np.array([[0, 0, 0]])
if display:
print('board_template:{}'.format(np.shape(board_template)))
print('board_template_ICP_finetuned:{}'.format(np.shape(board_template_ICP_finetuned)))
print('board_template_inside:{}'.format(np.shape(board_template_inside)))
print('closest_lidar_points:{}'.format(np.shape(closest_lidar_points)))
print('Camera_XYZ_Stereo:{}'.format(np.shape(Camera_XYZ_Stereo)))
ax.scatter(*board_template.T, color='b', marker='o', alpha=.5, s=8)
ax.scatter(*board_template_ICP_finetuned.T, color='r', marker='o', alpha=.5, s=8)
ax.scatter(*board_template_inside.T, color='tab:blue', marker='x', alpha=1, s=10)
ax.scatter(*closest_lidar_points.T, color='r', marker='x', alpha=.8, s=10)
ax.scatter(*Camera_XYZ_Stereo.T, color='r', marker='o', alpha=.8, s=20)
self.AnnotateEdges(giveAX=ax, givenPoints=board_template_inside)
extents = np.array([getattr(ax, 'get_{}lim'.format(dim))() for dim in 'xyz'])
sz = extents[:, 1] - extents[:, 0]
centers = np.mean(board_template, axis=0)
# centers = np.mean(Camera_XYZ, axis=0) if self.img_file2 is not None else np.mean(board_template, axis=0)
maxsize = max(abs(sz))
r = maxsize / 2
for ctr, dim in zip(centers, 'xyz'):
getattr(ax, 'set_{}lim'.format(dim))(ctr - r, ctr + r)
self.pixelsPointsLeft, self.pixelsPointsRight, _3DreconstructedBoard = self.getImagePixels()
_3DreconstructedBoard = np.array(_3DreconstructedBoard).squeeze()
print('_3DreconstructedBoard -> {}'.format(np.shape(_3DreconstructedBoard)))
if len(self.pixelsPointsLeft) <= 0:
print('Cannot get pixel points!')
ax.scatter(*_3DreconstructedBoard.T, color='b', marker='x', alpha=1, s=20)
print('pixelsPointsLeft:{}'.format(np.shape(self.pixelsPointsLeft)))
print('pixelsPointsRight:{}'.format(np.shape(self.pixelsPointsRight)))
print('_3DreconstructedBoard:{}'.format(np.shape(_3DreconstructedBoard)))
self.points_correspondences = dict([
('board_template', board_template),
('board_template_ICP_finetuned', board_template_ICP_finetuned),
('board_template_inside', board_template_inside),
('pixelsPointsLeft', self.pixelsPointsLeft),
('pixelsPointsRight', self.pixelsPointsRight),
('_3DreconstructedBoard',_3DreconstructedBoard),
('Camera_XYZ_Stereo', Camera_XYZ_Stereo),
('closest_lidar_points', closest_lidar_points)])
# save_obj(self.points_correspondences, self.name)
ax.legend(handles=legend_elements, loc='best')
plt.show()
def getDepth_Inside_Outside(self):
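# Triangulates the chessboard corners with both the 'inside' and 'outside' stereo calibrations and
# stores the per-axis discrepancy between the two reconstructions as a consistency error estimate.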
calibrations = ['inside', 'outside']
output = []
for calib in calibrations:
camera_model = load_obj('{}_combined_camera_model'.format(calib))
camera_model_rectify = load_obj('{}_combined_camera_model_rectify'.format(calib))
K_left = camera_model['K_right']
D_left = camera_model['D_right']
T = camera_model['T']
leftMapX, leftMapY = camera_model_rectify['leftMapX'], camera_model_rectify['leftMapY']
rightMapX, rightMapY = camera_model_rectify['rightMapX'], camera_model_rectify['rightMapY']
imgleft = cv2.imread(self.img_file)
imgright = cv2.imread(self.img_file2)
if stereoRectify:
imgleft = cv2.remap(src=imgleft, map1=leftMapX, map2=leftMapY, interpolation=cv2.INTER_LINEAR, dst=None,borderMode=cv2.BORDER_CONSTANT)
imgright = cv2.remap(src=imgright, map1=rightMapX, map2=rightMapY, interpolation=cv2.INTER_LINEAR, dst=None,borderMode=cv2.BORDER_CONSTANT)
gray_left = cv2.cvtColor(imgleft, cv2.COLOR_BGR2GRAY)
ret_left, corners_left = cv2.findChessboardCorners(gray_left, (10, 7), None)
gray_right = cv2.cvtColor(imgright, cv2.COLOR_BGR2GRAY)
ret_right, corners_right = cv2.findChessboardCorners(gray_right, (10, 7), None)
if ret_left and ret_right: # found chessboard
corners2_left = cv2.cornerSubPix(gray_left, corners_left, (11, 11), (-1, -1), self.criteria)
x_left = np.asarray(corners2_left).squeeze()
corners2_right = cv2.cornerSubPix(gray_right, corners_right, (11, 11), (-1, -1), self.criteria)
x_right = np.asarray(corners2_right).squeeze()
baseline = abs(T[0])
focal_length, cx, cy = K_left[0, 0], K_left[0, 2], K_left[1, 2]
disparity = np.sum(np.sqrt((x_left - x_right) ** 2), axis=1)
# depth = baseline (meter) * focal length (pixel) / disparity-value (pixel) -> meter
depth = (baseline * focal_length / disparity) # .reshape(10,7)
fxypxy = [K_left[0, 0], K_left[1, 1], cx, cy]
print('{} fx:{}, fy:{}'.format(calib, round(K_left[0, 0],2), round(K_left[1, 1],2)))
_3DPoints = []
for i, pixel in enumerate(x_left):
u, v = pixel.ravel()
u, v = int(u), int(v)
distance = depth[i]
# print('u:{},v:{},distance:{}'.format(u,v, distance))
pt = np.array([u, v, distance])
pt[0] = pt[2] * (pt[0] - fxypxy[2]) / fxypxy[0]
pt[1] = pt[2] * (pt[1] - fxypxy[3]) / fxypxy[1]
_3DPoints.append(pt)
_3DPoints = np.array(_3DPoints)
output.append(_3DPoints)
else:
print('cannot detect board in both images')
if len(output)>1:
inside_3D = np.array(output[0]).squeeze()
outisde_3D = np.array(output[1]).squeeze()
#get the error for each point
a_min_b = inside_3D - outisde_3D
norm_total = np.linalg.norm(a_min_b)/70
norm_axis = np.linalg.norm(a_min_b, axis=0)/70
print('norm_total:{}, norm_axis:{}'.format(norm_total,norm_axis))
self._3DErros.append(norm_axis)
def fitNewPlan(self):
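# Fits a minimum-area bounding rectangle to the per-ring edge points in order to recover the board
# corners directly from the LiDAR data (plus the fitted circle centre for the charuco target).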
coolPoints = self.coolPoints
def minimum_bounding_rectangle(points):
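# Rotating-calipers style search: rotate the convex hull by every edge angle, keep the axis-aligned
# box with the smallest area, and snap the box corners back to the nearest original ring points.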
pi2 = np.pi / 2.
# get the convex hull for the points
hull = ConvexHull(points)
hull_points = points[hull.vertices]
y_saved = []
for simplex in hull.simplices:
y = coolPoints[simplex,1]
x = points[simplex, 0]
z = points[simplex, 1]
self.ax.plot(x, y, z, 'k-', alpha = .5)
y_saved.append(y)
y_saved = np.array(y_saved)
# calculate edge angles
edges = hull_points[1:] - hull_points[:-1]
angles = np.arctan2(edges[:, 1], edges[:, 0])
angles = np.abs(np.mod(angles, pi2))
angles = np.unique(angles)
rotations = np.vstack([
np.cos(angles),np.cos(angles - pi2),
np.cos(angles + pi2),np.cos(angles)]).T
rotations = rotations.reshape((-1, 2, 2))
# apply rotations to the hull
rot_points = np.dot(rotations, hull_points.T)
# find the bounding points
min_x = np.nanmin(rot_points[:, 0], axis=1)
max_x = np.nanmax(rot_points[:, 0], axis=1)
min_y = np.nanmin(rot_points[:, 1], axis=1)
max_y = np.nanmax(rot_points[:, 1], axis=1)
# find the box with the best area
areas = (max_x - min_x) * (max_y - min_y)
best_idx = np.argmin(areas)
# return the best box
x1 = max_x[best_idx]
x2 = min_x[best_idx]
y1 = max_y[best_idx]
y2 = min_y[best_idx]
r = rotations[best_idx]
rval = np.zeros((4, 2))
rval[0] = np.dot([x1, y2], r)
rval[1] = np.dot([x2, y2], r)
rval[2] = np.dot([x2, y1], r)
rval[3] = np.dot([x1, y1], r)
rval = np.array(rval)
d_matrix = distance_matrix(rval, points)
neighbours = np.argsort(d_matrix, axis=1)[:, 0]
rval2 = np.asarray(coolPoints[neighbours, 1]).squeeze()
return rval, rval2
points = list(self.coolPoints[:, [0, -1]])
y = np.mean(self.coolPoints[:, 1])
c, c2 = minimum_bounding_rectangle(np.array(points))
self.corners_ = []
for i,point in enumerate(c):
#self.corners_.append([point[0],y, point[1]])
self.corners_.append([point[0],c2[i], point[1]])
if self.chessBoard==False and self.circle_center:
self.corners_.append([self.circle_center[0],y,self.circle_center[1]])
self.corners_ = np.array(self.corners_)
self.ax.scatter(*self.corners_.T, color='k', marker='x', alpha=1, s=50)
def fitCircle(self, points):
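# Fits a circle to the detected hole points in the x-z plane with both an algebraic ('hyper') fit and
# a geometric least-squares fit, keeping whichever solution has the smaller residual.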
if len(points)>0:
def calc_R(x, y, xc, yc):
"""calculate the distance of each 2D points from the center (xc, yc)"""
return np.sqrt((x - xc) ** 2 + (y - yc) ** 2)
def f(c, x, y):
"""calculate the algebraic distance between the data points
and the mean circle centered at c=(xc, yc)"""
Ri = calc_R(x, y, *c)
return Ri - Ri.mean()
def sigma(coords, x, y, r):
"""Computes Sigma for circle fit."""
dx, dy, sum_ = 0., 0., 0.
for i in range(len(coords)):
dx = coords[i][1] - x
dy = coords[i][0] - y
sum_ += (sqrt(dx * dx + dy * dy) - r) ** 2
return sqrt(sum_ / len(coords))
def hyper_fit(coords, IterMax=99, verbose=False):
"""
Fits coords to circle using hyperfit algorithm.
Inputs:
- coords, list or numpy array with len>2 of the form:
[
[x_coord, y_coord],
...,
[x_coord, y_coord]
]
or numpy array of shape (n, 2)
Outputs:
- xc : x-coordinate of solution center (float)
- yc : y-coordinate of solution center (float)
- R : Radius of solution (float)
- residu : s, sigma - variance of data wrt solution (float)
"""
X, Y = None, None
if isinstance(coords, np.ndarray):
X = coords[:, 0]
Y = coords[:, 1]
elif isinstance(coords, list):
X = np.array([x[0] for x in coords])
Y = np.array([x[1] for x in coords])
else:
raise Exception("Parameter 'coords' is an unsupported type: " + str(type(coords)))
n = X.shape[0]
Xi = X - X.mean()
Yi = Y - Y.mean()
Zi = Xi * Xi + Yi * Yi
# compute moments
Mxy = (Xi * Yi).sum() / n
Mxx = (Xi * Xi).sum() / n
Myy = (Yi * Yi).sum() / n
Mxz = (Xi * Zi).sum() / n
Myz = (Yi * Zi).sum() / n
Mzz = (Zi * Zi).sum() / n
# computing the coefficients of characteristic polynomial
Mz = Mxx + Myy
Cov_xy = Mxx * Myy - Mxy * Mxy
Var_z = Mzz - Mz * Mz
A2 = 4 * Cov_xy - 3 * Mz * Mz - Mzz
A1 = Var_z * Mz + 4. * Cov_xy * Mz - Mxz * Mxz - Myz * Myz
A0 = Mxz * (Mxz * Myy - Myz * Mxy) + Myz * (Myz * Mxx - Mxz * Mxy) - Var_z * Cov_xy
A22 = A2 + A2
# finding the root of the characteristic polynomial
y = A0
x = 0.
for i in range(IterMax):
Dy = A1 + x * (A22 + 16. * x * x)
xnew = x - y / Dy
if xnew == x or not np.isfinite(xnew):
break
ynew = A0 + xnew * (A1 + xnew * (A2 + 4. * xnew * xnew))
if abs(ynew) >= abs(y):
break
x, y = xnew, ynew
det = x * x - x * Mz + Cov_xy
Xcenter = (Mxz * (Myy - x) - Myz * Mxy) / det / 2.
Ycenter = (Myz * (Mxx - x) - Mxz * Mxy) / det / 2.
x = Xcenter + X.mean()
y = Ycenter + Y.mean()
r = sqrt(abs(Xcenter ** 2 + Ycenter ** 2 + Mz))
s = sigma(coords, x, y, r)
iter_ = i
if verbose:
print('Regression complete in {} iterations.'.format(iter_))
print('Sigma computed: ', s)
return x, y, r, s
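# This appears to be the algebraic 'hyper' circle fit (Al-Sharadqah/Chernov style); its residual is
# compared against the geometric least-squares fit below and the better of the two is used.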
def least_squares_circle(coords):
"""Circle fit using least-squares solver.
Inputs:
- coords, list or numpy array with len>2 of the form:
[
[x_coord, y_coord],
...,
[x_coord, y_coord]
]
or numpy array of shape (n, 2)
Outputs:
- xc : x-coordinate of solution center (float)
- yc : y-coordinate of solution center (float)
- R : Radius of solution (float)
- residu : MSE of solution against training data (float)
"""
x, y = None, None
if isinstance(coords, np.ndarray):
x = coords[:, 0]
y = coords[:, 1]
elif isinstance(coords, list):
x = np.array([point[0] for point in coords])
y = np.array([point[1] for point in coords])
else:
raise Exception("Parameter 'coords' is an unsupported type: " + str(type(coords)))
# coordinates of the barycenter
x_m = np.mean(x)
y_m = np.mean(y)
center_estimate = x_m, y_m
center, _ = leastsq(f, center_estimate, args=(x, y))
xc, yc = center
Ri = calc_R(x, y, *center)
R = Ri.mean()
residu = np.sum((Ri - R) ** 2)
return xc, yc, R, residu
def plot_data_circle(x, y, xc, yc, R):
"""
Plot data and a fitted circle.
Inputs:
x : data, x values (array)
y : data, y values (array)
xc : fit circle center (x-value) (float)
yc : fit circle center (y-value) (float)
R : fit circle radius (float)
Output:
None (generates matplotlib plot).
"""
f = plt.figure(facecolor='white')
plt.axis('equal')
theta_fit = np.linspace(-pi, pi, 180)
x_fit = xc + R * np.cos(theta_fit)
y_fit = yc + R * np.sin(theta_fit)
plt.plot(x_fit, y_fit, 'b-', label="fitted circle", lw=2)
plt.plot([xc], [yc], 'bD', mec='y', mew=1)
plt.xlabel('x')
plt.ylabel('y')
# plot data
plt.scatter(x, y, c='red', label='data')
plt.legend(loc='best', labelspacing=0.1)
plt.grid()
plt.title('Fit Circle')
x1, y1, r1, resid1 = hyper_fit(points[:,[0,2]])
x2, y2, r2, resid2 = least_squares_circle(points[:,[0,2]])
#plot_data_circle(points[:,1], points[:,2],x,y,r)
if resid1>resid2:
x, y, r = x2, y2, r2
else:
x, y, r = x1, y1, r1
self.circle_center = (x, y)
self.circle_radius = r
def getData(chess=True):
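# Iterates over the recorded LiDAR/image triples, runs the interactive PointCloud_filter tool on each,
# and collects the accepted point correspondences together with the 3D reconstruction errors.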
pcl_files = glob.glob('/home/eugeniu/catkin_ws/src/testNode/CAMERA_CALIBRATION/data/{}/*.npy'.format('chess' if chess else 'charuco'))
imgleft_files = glob.glob('/home/eugeniu/catkin_ws/src/testNode/CAMERA_CALIBRATION/data/{}/left/*.png'.format('chess' if chess else 'charuco'))
imgright_files = glob.glob('/home/eugeniu/catkin_ws/src/testNode/CAMERA_CALIBRATION/data/{}/right/*.png'.format('chess' if chess else 'charuco'))
pcl_files.sort()
imgleft_files.sort()
imgright_files.sort()
GoodPoints,_3DErros, IMageNames = [],[],[]
for i, file in enumerate(pcl_files):
if globalTrigger:
print('work with {}'.format(file))
image_left = imgleft_files[i]
image_right = imgright_files[i]
filt = PointCloud_filter(file=file, img_file=image_left, img_file2=image_right, debug=False)
filt.setUp()
plt.show()
plt.close()
print('\n OK:{}, Save points_correspondences : {}'.format(filt.OK, np.shape(filt.points_correspondences)))
if filt.OK:
GoodPoints.append(filt.points_correspondences)
print('save data {} '.format(np.shape(GoodPoints)))
_3DErros.append(filt._3DErros)
IMageNames.append(os.path.basename(image_left))
else:
print('Close')
break
#save_obj(GoodPoints, 'GoodPoints2_{}'.format('chess' if chess else 'charuco'))
print('Collected {} good correspondence sets (saving to disk is currently commented out)'.format(len(GoodPoints)))
showErros(_3DErros, IMageNames)
def euler_from_matrix(R):
beta = -np.arcsin(R[2, 0])
alpha = np.arctan2(R[2, 1] / np.cos(beta), R[2, 2] / np.cos(beta))
gamma = np.arctan2(R[1, 0] / np.cos(beta), R[0, 0] / np.cos(beta))
return np.array((alpha, beta, gamma))
def euler_matrix(theta):
R = np.array([[np.cos(theta[1]) * np.cos(theta[2]),
np.sin(theta[0]) * np.sin(theta[1]) * np.cos(theta[2]) - np.sin(theta[2]) * np.cos(theta[0]),
np.sin(theta[1]) * np.cos(theta[0]) * np.cos(theta[2]) + np.sin(theta[0]) * np.sin(
theta[2])],
[np.sin(theta[2]) * np.cos(theta[1]),
np.sin(theta[0]) * np.sin(theta[1]) * np.sin(theta[2]) + np.cos(theta[0]) * np.cos(theta[2]),
np.sin(theta[1]) * np.sin(theta[2]) * np.cos(theta[0]) - np.sin(theta[0]) * np.cos(
theta[2])],
[-np.sin(theta[1]), np.sin(theta[0]) * np.cos(theta[1]),
np.cos(theta[0]) * np.cos(theta[1])]])
return R
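# Quick consistency check (illustrative, not part of the original file): euler_matrix
# and euler_from_matrix should round-trip for angles away from the pitch = +/-90 deg
# singularity.
def _check_euler_roundtrip():
    theta = np.array([0.1, 0.2, 0.3])
    recovered = euler_from_matrix(euler_matrix(theta))
    assert np.allclose(theta, recovered), (theta, recovered)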
class LiDAR_Camera_Calibration(object):
def __init__(self, file, chess = True, debug=True):
self.criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 100, 0.0001)
self.objp = np.zeros((7 * 10, 3), np.float32)
self.objp[:, :2] = np.mgrid[0:10, 0:7].T.reshape(-1, 2) * .1
self.debug = debug
self.file = file
self.chess = chess
if chess:
self.data_key = ['board_template','board_template_ICP_finetuned','board_template_inside',
'icp_finetuned_inside','closest_lidar_points','closest_lidar_points_inside',
'pixelsPoints','Camera_XYZ_Stereo','Camera_XYZ']
else:
self.data_key = ['board_template','board_template_ICP_finetuned','board_template_inside','pixelsPoints',
'Camera_XYZ_Stereo','closest_lidar_points']
self.readIntrinsics()
try:
self.load_points()
except:
print('cannot load data points')
'''self.Rotation = np.array([[ 0.94901505, 0.01681284, 0.3147821 ],
[-0.01003801, 0.99968204, -0.02313113],
[-0.31507091, 0.018792, 0.94888207]]).squeeze()
self.Translation = np.array([[-0.98078971],
[ 0.00600202],
[ 0.19497569]]).squeeze()
#self.Translation[0] = -.64
euler = euler_from_matrix(self.Rotation)
# print('euler1->{}'.format(euler))
angles = euler_from_matrix(self.Rotation)
print('rotation1: ', [(180.0 / math.pi) * i for i in angles])
euler[1] = np.deg2rad(22.598)
self.Rotation = euler_matrix(euler)'''
def rmse(self, objp, imgp, K, D, rvec, tvec):
print('objp:{}, imgp:{}'.format(np.shape(objp), np.shape(imgp)))
predicted, _ = cv2.projectPoints(objp, rvec, tvec, K, D)
print('rmse=====================================================')
print('predicted -> {}, type - >{}'.format(np.shape(predicted), type(predicted)))
predicted = cv2.undistortPoints(predicted, K, D, P=K)
predicted = predicted.squeeze()
pix_serr = []
for i in range(len(predicted)):
xp = predicted[i, 0]
yp = predicted[i, 1]
xo = imgp[i, 0]
yo = imgp[i, 1]
pix_serr.append((xp - xo) ** 2 + (yp - yo) ** 2)
ssum = sum(pix_serr)
return math.sqrt(ssum / len(pix_serr))
def readIntrinsics(self):
name = 'inside'
name = 'outside'
self.camera_model = load_obj('{}_combined_camera_model'.format(name))
self.camera_model_rectify = load_obj('{}_combined_camera_model_rectify'.format(name))
self.K_right = self.camera_model['K_left']
self.K_left = self.camera_model['K_right']
self.D_right = self.camera_model['D_left']
self.D_left = self.camera_model['D_right']
print(' self.K_right')
print( self.K_right)
print(' self.K_left')
print(self.K_left)
self.R = self.camera_model['R']
self.T = self.camera_model['T']
self.K = self.K_right
self.D = self.D_right
print('self T before {}'.format(np.shape(self.T)))
self.T = np.array([-0.96, 0., 0.12])[:, np.newaxis]
print('self T after {}'.format(np.shape(self.T)))
angles = np.array([np.deg2rad(0.68), np.deg2rad(22.66), np.deg2rad(-1.05)])
self.R = euler_matrix(angles)
#-----------------------------------------------------
self.T = np.array([-0.977, 0.004, 0.215])[:, np.newaxis]
angles = np.array([np.deg2rad(1.044), np.deg2rad(22.632), np.deg2rad(-.95)])
self.R = euler_matrix(angles)
#print(self.R)
print('translation is {}-----------------------------'.format(self.T))
img_shape = (1936, 1216)
print('img_shape:{}'.format(img_shape))
R1, R2, P1, P2, Q, roi_left, roi_right = cv2.stereoRectify(self.K_left, self.D_left, self.K_right, self.D_right,
imageSize=img_shape,
R=self.camera_model['R'], T=self.camera_model['T'],
flags=cv2.CALIB_ZERO_DISPARITY,
alpha=-1
#alpha=0
)
#print('R1:{}'.format(R1))
#print('R2:{}'.format(R2))
# print('euler1->{}'.format(euler))
angles = euler_from_matrix(self.R)
print('self.R: ', [(180.0 / math.pi) * i for i in angles])
euler = euler_from_matrix(R1)
#print('euler1->{}'.format(euler))
angles = euler_from_matrix(R1)
#print('rotation1: ', [(180.0 / math.pi) * i for i in angles])
euler = euler_from_matrix(R2)
#print('euler2->{}'.format(euler))
angles = euler_from_matrix(R2)
#print('rotation2: ', [(180.0 / math.pi) * i for i in angles])
self.R1 = R1
self.R2 = R2
self.P1 = P1
self.leftMapX, self.leftMapY = cv2.initUndistortRectifyMap(
self.K_left, self.D_left, R1,
P1, img_shape, cv2.CV_32FC1)
self.rightMapX, self.rightMapY = cv2.initUndistortRectifyMap(
self.K_right, self.D_right, R2,
P2, img_shape, cv2.CV_32FC1)
print('Got camera intrinsic')
print('Got camera-lidar extrinsics')
def load_points(self):
self.Lidar_3D, self.Image_2D,self.Image_2D2, self.Image_3D,self.Camera_XYZ = [],[],[],[],[]
with open(self.file, 'rb') as f:
self.dataPoinst = pickle.load(f, encoding='latin1')
#with open(self.file,'rb') as f:
#self.dataPoinst = pickle.load(f)
self.N = len(self.dataPoinst)
print('Got {} data views'.format(self.N))
#self.N = 1
for i in range(self.N):
try:
dictionary_data = self.dataPoinst[i]
LiDAR_3D_points = dictionary_data['board_template_inside'] #N x 3
#pixelsPoints = dictionary_data['pixelsPoints'] #N x 2
#StereoCam_3D_points = dictionary_data['Camera_XYZ_Stereo'] #N x 3
pixelsPointsLeft = dictionary_data['pixelsPointsLeft']
pixelsPointsRight = dictionary_data['pixelsPointsRight']
StereoCam_3D_points = dictionary_data['_3DreconstructedBoard'] #N x 3
self.Lidar_3D.append(LiDAR_3D_points)
self.Image_2D.append(pixelsPointsLeft)
self.Image_2D2.append(pixelsPointsRight)
self.Image_3D.append(StereoCam_3D_points)
if self.chess:
self.Camera_XYZ.append(dictionary_data['Camera_XYZ'])
except:
#print('Cannot read data')
pass
#self.Lidar_3D = np.array(self.Lidar_3D).reshape(-1,3)
#self.Image_2D = np.array(self.Image_2D).reshape(-1,2)
#self.Image_3D = np.array( self.Image_3D).reshape(-1,3)
print('Lidar_3D:{}, Image_2D:{}, Image_2D2:{}, Image_3D:{}'.format(np.shape(self.Lidar_3D),
np.shape(self.Image_2D),np.shape(self.Image_2D2),
np.shape(self.Image_3D)))
def plotData(self):
self.fig = plt.figure(figsize=plt.figaspect(0.33))
self.fig.tight_layout()
for i in range(self.N):
print('{}/{}'.format(i+1,self.N))
ax1 = self.fig.add_subplot(1, 3, 1, projection='3d')
#ax1.set_title('3D LiDAR')
ax1.set_xlabel('X', fontsize=8)
ax1.set_ylabel('Y', fontsize=8)
ax1.set_zlabel('Z', fontsize=8)
ax2 = self.fig.add_subplot(1, 3, 2, projection='3d')
ax2.set_title('3D Stereo cameras')
ax2.set_xlabel('X', fontsize=8)
ax2.set_ylabel('Y', fontsize=8)
ax2.set_zlabel('Z', fontsize=8)
ax3 = self.fig.add_subplot(1, 3, 3, projection='3d')
ax3.set_title('2D pixels')
ax3.set_xlabel('X', fontsize=8)
ax3.set_ylabel('Y', fontsize=8)
ax3.set_zlabel('Z', fontsize=8)
_3d_LIDAR = np.array(self.Lidar_3D[i])
ax1.scatter(*_3d_LIDAR.T)
self.axisEqual3D(ax1, _3d_LIDAR)
_3d_cam = np.array(self.Image_3D[i])
ax2.scatter(*_3d_cam.T, c='r')
self.axisEqual3D(ax2,_3d_cam)
_2d_cam = np.array(self.Image_2D[i])
ax3.scatter(*_2d_cam.T, c='g')
self.axisEqual3D(ax3, _2d_cam)
plt.show()
def axisEqual3D(self,ax,data):
extents = np.array([getattr(ax, 'get_{}lim'.format(dim))() for dim in 'xyz'])
sz = extents[:, 1] - extents[:, 0]
centers = np.mean(data, axis=0)
maxsize = max(abs(sz))
r = maxsize / 2
for ctr, dim in zip(centers, 'xyz'):
getattr(ax, 'set_{}lim'.format(dim))(ctr - r, ctr + r)
def get3D_3D_homography(self, src, dst):  # both are Nx3 matrices
    src_mean = np.mean(src, axis=0)
    dst_mean = np.mean(dst, axis=0)
    # Cross-covariance of the centred point sets (Kabsch/SVD rigid alignment)
    H = np.zeros((3, 3))
    for a, b in zip(src - src_mean, dst - dst_mean):
        H += np.outer(a, b)
    u, s, v = np.linalg.svd(H)
    R = v.T.dot(u.T)  # Rotation
    T = -R.dot(src_mean) + dst_mean  # Translation
    H = np.hstack((R, T[:, np.newaxis]))
    return H, R.T, T
def calibrate_3D_3D_old(self):
print('3D-3D ========================================================================================')
file = '/home/eugeniu/catkin_ws/src/testNode/CAMERA_CALIBRATION/data/GoodPoints_3D3D_{}.pkl'.format('chess')
file = '/home/eugeniu/catkin_ws/src/testNode/CAMERA_CALIBRATION/data/GoodPoints_{}.pkl'.format('chess')
self.Lidar_3D, self.Image_2D, self.Image_3D, self.Camera_XYZ = [], [], [], []
with open(file, 'rb') as f:
self.dataPoinst = pickle.load(f)
self.N = len(self.dataPoinst)
print('Got {} data views'.format(self.N))
for i in range(self.N):
try:
dictionary_data = self.dataPoinst[i]
LiDAR_3D_points = dictionary_data['board_template_inside'] # N x 3
pixelsPoints = dictionary_data['pixelsPoints'] # N x 2
StereoCam_3D_points = dictionary_data['Camera_XYZ_Stereo'] # N x 3
#StereoCam_3D_points = dictionary_data['point3D_trianguate']
self.Lidar_3D.append(LiDAR_3D_points)
self.Image_2D.append(pixelsPoints)
self.Image_3D.append(StereoCam_3D_points)
if self.chess:
self.Camera_XYZ.append(dictionary_data['Camera_XYZ'])
except:
print('Cannot read data===================================================')
break
print('Lidar_3D:{}, Image_2D:{}, Image_3D:{}'.format(np.shape(self.Lidar_3D),
np.shape(self.Image_2D),
np.shape(self.Image_3D)))
Lidar_3D = np.array(self.Lidar_3D).reshape(-1, 3)
Image_3D = np.array( self.Image_3D).reshape(-1,3)
print('Lidar_3D:{}, Image_3D:{}'.format(np.shape(Lidar_3D),np.shape(Image_3D)))
#-------------------------------------#-------------------------------------
c_, R_, t_ = self.estimate(Lidar_3D,Image_3D)
#import superpose3d as super
#(RMSD, R_, t_, c_) = super.Superpose3D(Lidar_3D, Image_3D)
#print('RMSD -> {}, t_{}, c_->{}'.format(RMSD, t_, c_))
# -------------------------------------#-------------------------------------
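# The nested helper below computes a closed-form similarity transform (rotation,
# translation and isotropic scale) between two point sets from the SVD of their
# cross-covariance, with a reflection correction via the determinant sign
# (Umeyama-style estimate).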
def similarity_transform(from_points, to_points):
assert len(from_points.shape) == 2, \
"from_points must be a m x n array"
assert from_points.shape == to_points.shape, \
"from_points and to_points must have the same shape"
N, m = from_points.shape
mean_from = from_points.mean(axis=0)
mean_to = to_points.mean(axis=0)
delta_from = from_points - mean_from # N x m
delta_to = to_points - mean_to # N x m
sigma_from = (delta_from * delta_from).sum(axis=1).mean()
sigma_to = (delta_to * delta_to).sum(axis=1).mean()
cov_matrix = delta_to.T.dot(delta_from) / N
U, d, V_t = np.linalg.svd(cov_matrix, full_matrices=True)
cov_rank = np.linalg.matrix_rank(cov_matrix)
S = np.eye(m)
if cov_rank >= m - 1 and np.linalg.det(cov_matrix) < 0:
S[m - 1, m - 1] = -1
elif cov_rank < m - 1:
raise ValueError("collinearity detected in covariance matrix:\n{}".format(cov_matrix))
R = U.dot(S).dot(V_t)
c = (d * S.diagonal()).sum() / sigma_from
t = mean_to - c * R.dot(mean_from)
print('R:{},t:{},c:{}'.format(R,t,c))
return c * R, t
print('similarity_transform===============================')
from_points = Lidar_3D
to_points = Image_3D
M_ans, t_ans = similarity_transform(from_points, to_points)
H, R, T = self.get3D_3D_homography(src = Lidar_3D, dst=Image_3D)
print('H:{}, R:{}, T:{}'.format(np.shape(H), np.shape(R), np.shape(T)))
print(H)
self.fig = plt.figure(figsize=plt.figaspect(1.))
ax1 = self.fig.add_subplot(1, 1, 1, projection='3d')
#ax1.set_title('3D LiDAR')
ax1.set_xlabel('X', fontsize=8)
ax1.set_ylabel('Y', fontsize=8)
ax1.set_zlabel('Z', fontsize=8)
ax1.set_axis_off()
_3d_LIDAR = self.Lidar_3D[0]
ax1.scatter(*_3d_LIDAR.T, label = 'LiDAR')
_3d_Image = self.Image_3D[0]
ax1.scatter(*_3d_Image.T, s=25, label = 'Stereo Cam')
T = _3d_LIDAR.dot(c_ * R_) + t_
print('T -> {}'.format(np.shape(T)))
ax1.scatter(*T.T, marker='x', label='T')
d2 = distance_matrix(_3d_Image,_3d_Image)
print('d2:{}'.format(d2))
print('d2 shape :{}'.format(np.shape(d2)))
ones = np.ones(len(_3d_LIDAR))[:, np.newaxis]
transformed_ = np.hstack((_3d_LIDAR,ones))
transformed = np.dot(H, transformed_.T).T #transformation estimated with SVD
print(np.shape(transformed))
ax1.scatter(*transformed.T, s=25, label = 'ICP sol')
#ax1.set_axis_off()
primary = Lidar_3D# _3d_LIDAR
secondary = Image_3D# _3d_Image
pad = lambda x: np.hstack([x, np.ones((x.shape[0], 1))])
unpad = lambda x: x[:, :-1]
X = pad(primary)
Y = pad(secondary)
# Solve the least squares problem X * A = Y # to find our transformation matrix A
A, res, rank, s = np.linalg.lstsq(X, Y, rcond=None)
transform = lambda x: unpad(np.dot(pad(x), A))
#print transform(primary)
print("Max error:", np.abs(secondary - transform(primary)).max())
trns2 = transform(_3d_LIDAR) #transformation estimated with LS
ax1.scatter(*trns2.T, label = 'least square sol')
to_points = M_ans.dot(_3d_LIDAR.T).T + t_ans
print('to_points ->{}'.format(np.shape(to_points)))
ax1.scatter(*to_points.T, label = 'to_points')
self.axisEqual3D(ax1, transformed)
ax1.legend()
plt.show()
#----------------------------------
if True:
img = cv2.imread('/home/eugeniu/catkin_ws/src/testNode/CAMERA_CALIBRATION/data/chess/left/left_4.png')
img2 = cv2.imread('/home/eugeniu/catkin_ws/src/testNode/CAMERA_CALIBRATION/data/chess/right/right_4.png')
cloud_file = '/home/eugeniu/catkin_ws/src/testNode/CAMERA_CALIBRATION/data/chess/cloud_4.npy'
else:
img = cv2.imread('/home/eugeniu/catkin_ws/src/testNode/CAMERA_CALIBRATION/data/charuco/left/left_4.png')
img2 = cv2.imread('/home/eugeniu/catkin_ws/src/testNode/CAMERA_CALIBRATION/data/charuco/right/right_4.png')
cloud_file = '/home/eugeniu/catkin_ws/src/testNode/CAMERA_CALIBRATION/data/charuco/cloud_4.npy'
i = 12
l = '/home/eugeniu/catkin_ws/src/testNode/CAMERA_CALIBRATION/cool/left_{}.png'.format(i)
r = '/home/eugeniu/catkin_ws/src/testNode/CAMERA_CALIBRATION/cool/right_{}.png'.format(i)
#img, img2 = cv2.imread(l), cv2.imread(r)
#cloud_file = '/home/eugeniu/catkin_ws/src/testNode/CAMERA_CALIBRATION/cool/cloud_{}.npy'.format(i)
if stereoRectify and True:
img = cv2.remap(src=img, map1=self.leftMapX, map2=self.leftMapY,
interpolation=cv2.INTER_LINEAR, dst=None, borderMode=cv2.BORDER_CONSTANT)
img2 = cv2.remap(src=img2, map1=self.rightMapX, map2=self.rightMapY,
interpolation=cv2.INTER_LINEAR, dst=None, borderMode=cv2.BORDER_CONSTANT)
#Points in LiDAR frame
LiDAR_points3D = np.array(np.load(cloud_file, mmap_mode='r'), dtype=np.float32)[:, :3] #
print('LiDAR_points3D:{}'.format(np.shape(LiDAR_points3D)))
#converted in camera frame
ones = np.ones(len(LiDAR_points3D))[:, np.newaxis]
transformed_ = np.hstack((LiDAR_points3D, ones))
Camera_points3D = np.dot(H, transformed_.T).T
#Camera_points3D = transform(LiDAR_points3D)
#print('Camera_points3D:{}'.format(np.shape(Camera_points3D)))
#Camera_points3D = LiDAR_points3D.dot(c_ * R_) + t_
#Camera_points3D = LiDAR_points3D.dot(R_) + t_
#Camera_points3D = transform(LiDAR_points3D) #transformation estimated with LS
print('Camera_points3D -> {}'.format(Camera_points3D))
rvec, _ = cv2.Rodrigues(np.eye(3))
tvec = np.zeros(3)
#Camera_points3D = LiDAR_points3D#.dot(R_) + t_
#rvec = R_
#tran = t_
#tran[0] = -0.02
#tran[1] = -0.03
print('rvec -> {}, tvec->{}'.format(np.shape(rvec),np.shape(tvec)))
print('Camera_points3D -> {}'.format(np.shape(Camera_points3D)))
# Reproject back into the two cameras
rvec1, _ = cv2.Rodrigues(np.eye(3).T) # Change
rvec2, _ = cv2.Rodrigues(self.R.T) # Change
t1 = np.array([[0.], [0.], [0.]])
t2 = self.T
p1, _ = cv2.projectPoints(Camera_points3D[:, :3], rvec1, -t1, self.K, distCoeffs=self.D) # Change
p2, _ = cv2.projectPoints(Camera_points3D[:, :3], rvec2, -t2, self.K, distCoeffs=self.D) # Change
#points2D = [cv2.projectPoints(point, rvec, tvec, self.K, self.D)[0] for point in Camera_points3D[:, :3]]
points2D, _ = cv2.projectPoints(Camera_points3D[:, :3], np.identity(3), np.array([0., 0., 0.]), self.K, self.D)
points2D = np.asarray(points2D).squeeze()
points2D = np.asarray(p1).squeeze()
print('points2D:{}, img.shape[1]:{}'.format(np.shape(points2D), img.shape[1]))
inrange = np.where(
(points2D[:, 0] >= 0) &
(points2D[:, 1] >= 0) &
(points2D[:, 0] < img.shape[1]) &
(points2D[:, 1] < img.shape[0])
)
points2D = points2D[inrange[0]].round().astype('int')
# Draw the projected 2D points
for i in range(len(points2D)):
cv2.circle(img, tuple(points2D[i]), 2, (0, 255, 0), -1)
#cv2.circle(img2, tuple(points2D[i]), 2, (0, 255, 0), -1)
print('rvec -> {}, tvec->{}'.format(np.shape(rvec),np.shape(tvec)))
T_01 = np.vstack((np.hstack((np.eye(3), tvec[:,np.newaxis])), [0, 0, 0, 1])) # from lidar to right camera
T_12 = np.vstack((np.hstack((self.R, self.T)), [0, 0, 0, 1])) # between cameras
T_final = np.dot(T_01,T_12)
rotation, translation = T_final[:3, :3], T_final[:3, -1]
points2D = [cv2.projectPoints(point, rotation, translation, self.K, self.D)[0] for point in Camera_points3D[:, :3]]
points2D = np.asarray(points2D).squeeze()
points2D = np.asarray(p2).squeeze()
print('points2D:{}, img.shape[1]:{}'.format(np.shape(points2D), img.shape[1]))
inrange = np.where(
(points2D[:, 0] >= 0) &
(points2D[:, 1] >= 0) &
(points2D[:, 0] < img.shape[1]) &
(points2D[:, 1] < img.shape[0])
)
points2D = points2D[inrange[0]].round().astype('int')
# Draw the projected 2D points
for i in range(len(points2D)):
cv2.circle(img2, tuple(points2D[i]), 2, (0, 255, 0), -1)
cv2.imshow('left', cv2.resize(img,None, fx=.4, fy=.4))
cv2.imshow('right', cv2.resize(img2, None, fx=.4, fy=.4))
cv2.waitKey()
cv2.destroyAllWindows()
def drawCharuco(self, QueryImg):
points2D = np.array(self.Image_2D[0]).reshape(-1, 2)
for p in points2D:
cv2.circle(QueryImg, tuple(p), 4, (0, 0, 255), 5)
return QueryImg
def calibrate_3D_2D(self, userRansac = False):
points3D = np.array(self.Lidar_3D).reshape(-1, 3)
points2D = np.array(self.Image_2D).reshape(-1,2)
print('points3D:{}, points2D:{}'.format(np.shape(points3D),np.shape(points2D)))
# Estimate extrinsics
if userRansac:
success, rotation_vector, translation_vector, inliers = cv2.solvePnPRansac(points3D,
points2D, self.K, self.D,
flags=cv2.SOLVEPNP_ITERATIVE)
print('success:{},rotation_vector:{},translation_vector:{},inliers:{}'.format(success, np.shape(rotation_vector),
np.shape(translation_vector), np.shape(inliers)))
# Compute re-projection error.
points2D_reproj = cv2.projectPoints(points3D, rotation_vector,
translation_vector, self.K, self.D)[0].squeeze(1)
error = (points2D_reproj - points2D)[inliers] # Compute error only over inliers.
error = np.asarray(error).squeeze()
print('points2D_reproj:{}, points2D:{},error:{}'.format(np.shape(points2D_reproj), np.shape(points2D), np.shape(error)))
rmse = np.sqrt(np.mean(error[:, 0] ** 2 + error[:, 1] ** 2))
print('Re-projection error before LM refinement (RMSE) in px: ' + str(rmse))
# Refine estimate using LM
if not success:
print('Initial estimation unsuccessful, skipping refinement')
elif not hasattr(cv2, 'solvePnPRefineLM'):
print('solvePnPRefineLM requires OpenCV >= 4.1.1, skipping refinement')
else:
assert len(inliers) >= 3, 'LM refinement requires at least 3 inlier points'
rotation_vector, translation_vector = cv2.solvePnPRefineLM(points3D[inliers],
points2D[inliers], self.K, self.D,
rotation_vector, translation_vector)
# Compute re-projection error.
points2D_reproj = cv2.projectPoints(points3D, rotation_vector,
translation_vector, self.K, self.D)[0].squeeze(1)
assert (points2D_reproj.shape == points2D.shape)
error = (points2D_reproj - points2D)[inliers] # Compute error only over inliers.
error = np.array(error).squeeze()
rmse = np.sqrt(np.mean(error[:, 0] ** 2 + error[:, 1] ** 2))
print('Re-projection error after LM refinement (RMSE) in px: ' + str(rmse))
# Convert rotation vector
#from tf.transformations import euler_from_matrix
rotation_matrix = cv2.Rodrigues(rotation_vector)[0]
euler = euler_from_matrix(rotation_matrix)
# Save extrinsics
np.savez('extrinsics{}.npz'.format('chess' if self.chess else 'charuco'),euler=euler,Rodrigues=rotation_matrix, R=rotation_vector, T=translation_vector)
# Display results
print('Euler angles (RPY):', euler)
print('Rotation Matrix Rodrigues :', rotation_matrix)
print('rotation_vector:', rotation_vector)
print('Translation Offsets:', translation_vector)
points2D = cv2.projectPoints(points3D, rotation_vector, translation_vector, self.K, self.D)[0].squeeze(1)
print('========points3D:{}, points2D:{}=================================================='.format(np.shape(points3D),np.shape(points2D)))
else:
#-------------------------------------------------------------------------------------------------
imgp = np.array([points2D], dtype=np.float32).squeeze()
objp = np.array([points3D], dtype=np.float32).squeeze()
retval, rvec, tvec = cv2.solvePnP(objp, imgp, self.K, self.D, flags=cv2.SOLVEPNP_ITERATIVE)
rmat, jac = cv2.Rodrigues(rvec)
q = Quaternion(matrix=rmat)
print("Transform from camera to laser")
print("T = ")
print(tvec)
print("R = ")
print(rmat)
print("Quaternion = ")
print(q)
print("RMSE in pixel = %f" % self.rmse(objp, imgp, self.K, self.D, rvec, tvec))
result_file = 'solvePnP_extrinsics{}.npz'.format('chess' if self.chess else 'charuco')
with open(result_file, 'w') as f:
f.write("%f %f %f %f %f %f %f" % (q.x, q.y, q.z, q.w, tvec[0], tvec[1], tvec[2]))
print("Result output format: qx qy qz qw tx ty tz")
#refine results
print('refine results------------------------------------>')
rvec, tvec = cv2.solvePnPRefineLM(objp,imgp, self.K, self.D, rvec, tvec)
rmat, jac = cv2.Rodrigues(rvec)
q = Quaternion(matrix=rmat)
print("Transform from camera to laser")
print("T = ")
print(tvec)
print("R = ")
print(rmat)
print("Quaternion = ")
print(q)
print('Euler angles')
angles = euler_from_matrix(rmat)
print(angles)
print('euler angles ', [(180.0 / math.pi) * i for i in angles])
print("RMSE in pixel = %f" % self.rmse(objp, imgp, self.K, self.D, rvec, tvec))
result_file = 'refined_solvePnP_extrinsics{}.npz'.format('chess' if self.chess else 'charuco')
with open(result_file, 'w') as f:
f.write("%f %f %f %f %f %f %f" % (q.x, q.y, q.z, q.w, tvec[0], tvec[1], tvec[2]))
def get_z(self, T_cam_world, T_world_pc, K):
R = T_cam_world[:3, :3]
t = T_cam_world[:3, 3]
proj_mat = np.dot(K, np.hstack((R, t[:, np.newaxis])))
xyz_hom = np.hstack((T_world_pc, np.ones((T_world_pc.shape[0], 1))))
xy_hom = np.dot(proj_mat, xyz_hom.T).T
z = xy_hom[:, -1]
z = np.asarray(z).squeeze()
return z
def callback_solvePnP(self, img, cloud_file):
# init calibration
calib_file = '/home/eugeniu/catkin_ws/src/testNode/CAMERA_CALIBRATION/solvePnP_extrinsics{}.npz'.format(
'chess' if self.chess else 'charuco')
calib_file_ = '/home/eugeniu/catkin_ws/src/testNode/CAMERA_CALIBRATION/combined_extrinsics{}.npz'
with open(calib_file, 'r') as f:
data = f.read().split()
#print('data:{}'.format(data))
qx = float(data[0])
qy = float(data[1])
qz = float(data[2])
qw = float(data[3])
tx = float(data[4])
ty = float(data[5])
tz = float(data[6])
q = Quaternion(qw, qx, qy, qz).transformation_matrix
q[0, 3] = tx
q[1, 3] = ty
q[2, 3] = tz
print("Extrinsic parameter - camera to laser")
print(q)
tvec = q[:3, 3]
rot_mat = q[:3, :3]
rvec, _ = cv2.Rodrigues(rot_mat)
try:
objPoints = np.array(np.load(cloud_file, mmap_mode='r'), dtype=np.float32)[:, :3]
print('objPoints:{}'.format(np.shape(objPoints)))
Z = self.get_z(q, objPoints, self.K)
objPoints = objPoints[Z > 0]
#print('objPoints:{}'.format(objPoints))
img_points, _ = cv2.projectPoints(objPoints, rvec, tvec, self.K, self.D)
img_points = np.squeeze(img_points)
for i in range(len(img_points)):
try:
cv2.circle(img, (int(round(img_points[i][0])), int(round(img_points[i][1]))), 3,
(0, 255, 0), 1)
except OverflowError:
continue
if self.chess:
cv2.drawChessboardCorners(img, (10, 7), np.array(self.Image_2D).reshape(-1,2), True)
else:
self.drawCharuco(img)
except:
print('callback_solvePnP - error')
image = cv2.resize(img, None, fx=.6, fy=.6)
return image
def callback_solvePnP_Ransac(self, img, cloud_file):
points3D = np.array(np.load(cloud_file, mmap_mode='r'), dtype=np.float32)[:, :3]
print('points3D:{}'.format(np.shape(points3D)))
file = np.load('extrinsics{}.npz'.format('chess' if self.chess else 'charuco'))
euler = np.array(file["euler"])
rotation_matrix = np.array(file["Rodrigues"])
rotation_vector = np.array(file["R"])
translation_vector = np.array(file["T"])
print('Euler angles (RPY):', euler)
print('Rotation Matrix Rodrigues :', rotation_matrix)
print('rotation_vector:', rotation_vector)
print('Translation Offsets:', translation_vector)
rvec = rotation_matrix
#rvec, _ = cv2.Rodrigues(rotation_matrix)
print('========points3D:{}=================================================='.format(
np.shape(points3D)))
#points2D = cv2.projectPoints(points3D, rotation_vector, translation_vector, self.K, self.D)[0].squeeze(1)
#print('points2D:{}'.format(np.shape(points2D)))
points2D = [cv2.projectPoints(point, rvec, translation_vector, self.K, self.D)[0] for point in points3D[:, :3]]
points2D = np.asarray(points2D).squeeze()
print('points2D:{}, img.shape[1]:{}'.format(np.shape(points2D),img.shape[1]))
inrange = np.where(
(points2D[:, 0] >= 0) &
(points2D[:, 1] >= 0) &
(points2D[:, 0] < img.shape[1]) &
(points2D[:, 1] < img.shape[0])
)
points2D = points2D[inrange[0]].round().astype('int')
# Draw the projected 2D points
for i in range(len(points2D)):
cv2.circle(img, tuple(points2D[i]), 2, (0, 255, 0), -1)
if self.chess:
cv2.drawChessboardCorners(img, (10, 7), np.array(self.Image_2D).reshape(-1,2), True)
else:
self.drawCharuco(img)
image = cv2.resize(img, None, fx=.6, fy=.6)
return image
def callback(self):
if self.chess:
img = cv2.imread('/home/eugeniu/catkin_ws/src/testNode/CAMERA_CALIBRATION/data/chess/left/left_0.png')
cloud_file = '/home/eugeniu/catkin_ws/src/testNode/CAMERA_CALIBRATION/data/chess/cloud_0.npy'
else:
img = cv2.imread('/home/eugeniu/catkin_ws/src/testNode/CAMERA_CALIBRATION/data/charuco/left/left_0.png')
cloud_file = '/home/eugeniu/catkin_ws/src/testNode/CAMERA_CALIBRATION/data/charuco/cloud_0.npy'
#img = cv2.imread('/home/eugeniu/catkin_ws/src/testNode/CAMERA_CALIBRATION/data/charuco/left/left_0.png')
#cloud_file = '/home/eugeniu/catkin_ws/src/testNode/CAMERA_CALIBRATION/data/charuco/cloud_0.npy'
#img = cv2.imread('/home/eugeniu/catkin_ws/src/testNode/CAMERA_CALIBRATION/data/chess/left/left_0.png')
#cloud_file = '/home/eugeniu/catkin_ws/src/testNode/CAMERA_CALIBRATION/data/chess/cloud_0.npy'
#solvePnP_Ransac_image = self.callback_solvePnP_Ransac(img=img.copy(),cloud_file=cloud_file)
cv2.imshow('solvePnP_Ransac', cv2.resize(img,None,fx=.4,fy=.4))
cv2.waitKey()
solvePnP_image = self.callback_solvePnP(img=img.copy(),cloud_file=cloud_file)
cv2.imshow('solvePnP', solvePnP_image)
cv2.waitKey()
cv2.destroyAllWindows()
def combine_both_boards_and_train(self):
#get data from chessboard
name = 'chess'
self.file = '/home/eugeniu/catkin_ws/src/testNode/CAMERA_CALIBRATION/data/GoodPoints_{}.pkl'.format(name)
self.load_points()
Lidar_3D, Image_2D, Image_3D = np.array(self.Lidar_3D).reshape(-1,3), np.array(self.Image_2D).reshape(-1,2), np.array(self.Image_3D).reshape(-1,3)
#get data from charuco
name = 'charuco'
self.file = '/home/eugeniu/catkin_ws/src/testNode/CAMERA_CALIBRATION/data/GoodPoints_{}.pkl'.format(name)
self.load_points()
Lidar_3D, Image_2D = np.vstack((Lidar_3D, np.array(self.Lidar_3D).reshape(-1,3))), np.vstack((Image_2D, np.array(self.Image_2D).reshape(-1,2)))
print('Lidar_3D:->{}, Image_2D:->{}'.format(np.shape(Lidar_3D), np.shape(Image_2D)))
imgp = np.array([Image_2D], dtype=np.float32).squeeze()
objp = np.array([Lidar_3D], dtype=np.float32).squeeze()
retval, rvec, tvec = cv2.solvePnP(objp, imgp, self.K, self.D, flags=cv2.SOLVEPNP_ITERATIVE)
print('tvec -> {}'.format(tvec.ravel()))
rmat, jac = cv2.Rodrigues(rvec)
q = Quaternion(matrix=rmat)
angles = euler_from_matrix(rmat)
print(angles)
print('euler angles ', [(180.0 / math.pi) * i for i in angles])
print("RMSE in pixel = %f" % self.rmse(objp, imgp, self.K, self.D, rvec, tvec))
result_file = 'combined_extrinsics{}.npz'
with open(result_file, 'w') as f:
f.write("%f %f %f %f %f %f %f" % (q.x, q.y, q.z, q.w, tvec[0], tvec[1], tvec[2]))
print('Combined calibration done!!!')
def computeTransformation(self):
i = 5
l = '/home/eugeniu/catkin_ws/src/testNode/CAMERA_CALIBRATION/cool/left_{}.png'.format(i)
r = '/home/eugeniu/catkin_ws/src/testNode/CAMERA_CALIBRATION/cool/right_{}.png'.format(i)
img1 = cv2.imread(l)
img2 = cv2.imread(r)
#sift = cv2.SIFT_create()
sift = cv2.xfeatures2d.SIFT_create()
# find the keypoints and descriptors with SIFT
kp1, des1 = sift.detectAndCompute(img1, None)
kp2, des2 = sift.detectAndCompute(img2, None)
# FLANN parameters
FLANN_INDEX_KDTREE = 1
index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
search_params = dict(checks=50)
flann = cv2.FlannBasedMatcher(index_params, search_params)
matches = flann.knnMatch(des1, des2, k=2)
pts1 = []
pts2 = []
# ratio test as per Lowe's paper
for i, (m, n) in enumerate(matches):
if m.distance < 0.8 * n.distance:
pts2.append(kp2[m.trainIdx].pt)
pts1.append(kp1[m.queryIdx].pt)
pts1 = np.int32(pts1)
pts2 = np.int32(pts2)
#F, mask = cv2.findFundamentalMat(pts1, pts2, cv2.FM_LMEDS)
E, mask = cv2.findEssentialMat(pts1, pts2, self.K, cv2.RANSAC, 0.999, 1.0, None)
print(E)
points, R, t, mask = cv2.recoverPose(E, pts1, pts2, self.K)
print('R')
print(R)
angles = euler_from_matrix(R)
print('rotation angles: ', [(180.0 / math.pi) * i for i in angles])
print('t')
print(t)
for pt1, pt2 in zip(pts1, pts2):
color = tuple(np.random.randint(0, 255, 3).tolist())
img1 = cv2.circle(img1, tuple(pt1), 5, color, -1)
img2 = cv2.circle(img2, tuple(pt2), 5, color, -1)
cv2.imshow('imgL', cv2.resize(img1, None, fx=.4, fy=.4))
cv2.imshow('imgR', cv2.resize(img2, None, fx=.4, fy=.4))
cv2.waitKey(0)
cv2.destroyAllWindows()
def write_ply(self, fn, verts, colors):
ply_header = '''ply
format ascii 1.0
element vertex %(vert_num)d
property float x
property float y
property float z
property uchar red
property uchar green
property uchar blue
end_header
'''
out_colors = colors.copy()
verts = verts.reshape(-1, 3)
verts = np.hstack([verts, out_colors])
with open(fn, 'wb') as f:
f.write((ply_header % dict(vert_num=len(verts))).encode('utf-8'))
np.savetxt(f, verts, fmt='%f %f %f %d %d %d ')
def view(self):
import glob
import open3d
file = glob.glob('/home/eugeniu/catkin_ws/src/testNode/CAMERA_CALIBRATION/*.ply')
pcda = []
for i, file_path in enumerate(file):
print("{} Load a ply point cloud, print it, and render it".format(file_path))
pcd = open3d.io.read_point_cloud(file_path)
pcda.append(pcd)
open3d.visualization.draw_geometries([pcd])
#o3d.visualization.draw_geometries([pcda[1], pcda[-1]])
def reproject_on_3D(self, useUnique = True):
def readCalibrationExtrinsic():
calib_file = '/home/eugeniu/catkin_ws/src/testNode/CAMERA_CALIBRATION/solvePnP_extrinsics{}.npz'.format(
'chess' if self.chess else 'charuco')
calib_file = '/home/eugeniu/catkin_ws/src/testNode/CAMERA_CALIBRATION/combined_extrinsics{}.npz'
with open(calib_file, 'r') as f:
data = f.read().split()
#print('data:{}'.format(data))
qx = float(data[0])
qy = float(data[1])
qz = float(data[2])
qw = float(data[3])
tx = float(data[4])
ty = float(data[5])
tz = float(data[6])
q = Quaternion(qw, qx, qy, qz).transformation_matrix
q[0, 3],q[1, 3],q[2, 3] = tx,ty,tz
tvec = q[:3, 3]
rot_mat = q[:3, :3]
#rvec, _ = cv2.Rodrigues(rot_mat)
rvec = rot_mat
print('tvec -> {}'.format(tvec))
return rvec, tvec, q
rvec, tvec, q = readCalibrationExtrinsic()
print(self.K)
print(self.D)
print(rvec)
print(tvec)
i=1
i=11
l = '/home/eugeniu/catkin_ws/src/testNode/CAMERA_CALIBRATION/cool/left_{}.png'.format(i)
r = '/home/eugeniu/catkin_ws/src/testNode/CAMERA_CALIBRATION/cool/right_{}.png'.format(i)
imgLeft, imgRight = cv2.imread(l),cv2.imread(r)
cloud_file = '/home/eugeniu/catkin_ws/src/testNode/CAMERA_CALIBRATION/cool/cloud_{}.npy'.format(i)
_3DPoints = np.array(np.load(cloud_file, mmap_mode='r'), dtype=np.float32)[:, :3]
#Left image--------------------------------------------------------------------------------------------
objPoints_left = _3DPoints.copy()
Z = self.get_z(q, objPoints_left, self.K)
objPoints_left = objPoints_left[Z > 0]
print('objPoints_left:{}'.format(np.shape(objPoints_left)))
points2D_left, _ = cv2.projectPoints(objPoints_left, rvec, tvec, self.K, self.D)
points2D_left = np.squeeze(points2D_left)
print('objPoints_left -> {}, points2D_left -> {}, '.format(np.shape(objPoints_left), np.shape(points2D_left)))
inrange_left = np.where((points2D_left[:, 0] > 0) & (points2D_left[:, 1] > 0) &
(points2D_left[:, 0] < imgLeft.shape[1]-1) & (points2D_left[:, 1] < imgLeft.shape[0]-1))
print('inrange_left : {}'.format(np.shape(inrange_left)))
points2D_left = points2D_left[inrange_left[0]].round().astype('int')
print('points2D_left:{}, '.format(np.shape(points2D_left)))
#Right image ----------------------------------------------------------------------------------------
objPoints_right = _3DPoints.copy()
Z = self.get_z(q, objPoints_right, self.K_left)
objPoints_right = objPoints_right[Z > 0]
T_01 = np.vstack((np.hstack((rvec, tvec[:, np.newaxis])), [0,0,0,1])) #from lidar to right camera
T_12 = np.vstack((np.hstack((self.R, self.T)), [0,0,0,1])) #between cameras
T_final = np.dot(T_12, T_01)
rotation, translation = T_final[:3,:3], T_final[:3,-1]
points2D_right, _ = cv2.projectPoints(objPoints_right, rotation, translation, self.K_left, self.D_left)
points2D_right = np.squeeze(points2D_right)
inrange_right = np.where((points2D_right[:, 0] >= 0) &(points2D_right[:, 1] >= 0) &
(points2D_right[:, 0] < imgRight.shape[1]-1) &(points2D_right[:, 1] < imgRight.shape[0]-1))
print('points2D_right init ->{}'.format(np.shape(points2D_right)))
points2D_right = points2D_right[inrange_right[0]].round().astype('int')
print('points2D_right now ->{}'.format(np.shape(points2D_right)))
#columns=["X", "Y", "Z","intens","ring"]
colors = np.array(np.load(cloud_file, mmap_mode='r'))[:, 3] #
# Color map for the points
colors = colors[inrange_left[0]]
cmap = matplotlib.cm.get_cmap('hsv')
colors = cmap(colors / np.max(colors))
print('colors -> {}, min:{}, max:{}'.format(np.shape(colors), np.min(colors), np.max(colors)))
colorImageLeft,colorImageRight = imgLeft.copy(),imgRight.copy()
fig, axs = plt.subplots(1, 2)
fig.set_size_inches(20, 10.5, forward=True)
axs[0].imshow(imgLeft)
#axs[0].scatter(points2D_left[:,0],points2D_left[:,1], s=.1, c='green')
axs[0].scatter(points2D_left[:,0],points2D_left[:,1], s=.3, c=colors)
axs[0].set_title("Left image")
axs[1].set_title("Right image")
axs[1].imshow(imgRight)
#axs[1].scatter(points2D_right[:,0],points2D_right[:,1], s=.1, c='red')
# Color map for the points
colors = np.array(np.load(cloud_file, mmap_mode='r'))[:, 3] #
colors = colors[inrange_right[0]]
colors = cmap(colors / np.max(colors))
print('points2D_right->{}, colors->{}'.format(np.shape(points2D_right), np.shape(colors)))
axs[1].scatter(points2D_right[:,0],points2D_right[:,1], s=.1, c=colors)
fig.tight_layout()
plt.show()
points_left = objPoints_left[inrange_left[0]]
points_right = objPoints_right[inrange_right[0]]
print('points_left -> {}, colorImageLeft->{}'.format(np.shape(points_left), np.shape(colorImageLeft)))
print('points_right -> {}, colorImageRight->{}'.format(np.shape(points_right), np.shape(colorImageRight)))
colors_left = colorImageLeft[points2D_left[:, 1], points2D_left[:, 0], :]
colors_right = colorImageRight[points2D_right[:, 1], points2D_right[:, 0], :]
print('colors_left -> {}'.format(np.shape(colors_left)))
# Copyright (c) OpenMMLab. All rights reserved.
import copy
import tempfile
import numpy as np
import pytest
from mmcv import Config
from mmpose.datasets import DATASETS
from mmpose.datasets.builder import build_dataset
def test_body3d_h36m_dataset():
# Test Human3.6M dataset
dataset = 'Body3DH36MDataset'
dataset_class = DATASETS.get(dataset)
dataset_info = Config.fromfile(
'configs/_base_/datasets/h36m.py').dataset_info
# test single-frame input
data_cfg = dict(
num_joints=17,
seq_len=1,
seq_frame_interval=1,
joint_2d_src='pipeline',
joint_2d_det_file=None,
causal=False,
need_camera_param=True,
camera_param_file='tests/data/h36m/cameras.pkl')
_ = dataset_class(
ann_file='tests/data/h36m/test_h36m_body3d.npz',
img_prefix='tests/data/h36m',
data_cfg=data_cfg,
dataset_info=dataset_info,
pipeline=[],
test_mode=False)
custom_dataset = dataset_class(
ann_file='tests/data/h36m/test_h36m_body3d.npz',
img_prefix='tests/data/h36m',
data_cfg=data_cfg,
dataset_info=dataset_info,
pipeline=[],
test_mode=True)
assert custom_dataset.dataset_name == 'h36m'
assert custom_dataset.test_mode is True
_ = custom_dataset[0]
results = []
for result in custom_dataset:
results.append({
'preds': result['target'][None, ...],
'target_image_paths': [result['target_image_path']],
})
metrics = ['mpjpe', 'p-mpjpe', 'n-mpjpe']
infos = custom_dataset.evaluate(results, metric=metrics)
np.testing.assert_almost_equal(infos['MPJPE'], 0.0)
#!/usr/bin/env python3
from os import mkdir, rename
from os.path import isdir, isfile
import time
import numpy as np
from matplotlib import pyplot as plt
import h5py
from scipy.optimize import minimize
from LoLIM.utilities import v_air, RTD, processed_data_dir, logger
from LoLIM.IO.raw_tbb_IO import MultiFile_Dal1, filePaths_by_stationName, read_antenna_delays, read_station_delays, read_bad_antennas, read_antenna_pol_flips
from LoLIM.findRFI import FindRFI, window_and_filter
from LoLIM.signal_processing import half_hann_window, remove_saturation, num_double_zeros
from LoLIM.antenna_response import LBA_antenna_model
from LoLIM.interferometry import impulsive_imager_tools as II_tools
def do_nothing(*A, **B):
pass
#### some algorithms for choosing pairs of antennas ####
def pairData_NumAntPerStat(input_files, num_ant_per_stat=1, bad_antennas=[]):
"""return the antenna-pair data for choosing one antenna per station"""
num_stations = len(input_files)
even_bad_antennas = [A[0] for A in bad_antennas]
antennas = []
for file_index in range(num_stations):
ant_names = input_files[file_index].get_antenna_names()
num_antennas_in_station = len( ant_names )
for x in range(num_ant_per_stat):
if x*2 < num_antennas_in_station and ant_names[x*2] not in even_bad_antennas:
antennas.append( [file_index,x*2] )
return np.array(antennas, dtype=int)
def pairData_NumAntPerStat_PolO(input_files, num_ant_per_stat=1, bad_antennas=[]):
"""return the antenna-pair data for choosing one antenna per station"""
num_stations = len(input_files)
even_bad_antennas = [A[0] for A in bad_antennas]
antennas = []
for file_index in range(num_stations):
ant_names = input_files[file_index].get_antenna_names()
num_antennas_in_station = len( ant_names )
for x in range(num_ant_per_stat):
if x*2 < num_antennas_in_station and ant_names[x*2] not in even_bad_antennas:
antennas.append( [file_index,(x*2)+1] )
return np.array(antennas, dtype=int)
def pairData_NumAntPerStat_DualPol(input_files, num_ant_per_stat=1, bad_antennas=[]):
"""return the antenna-pair data for choosing one antenna per station"""
num_stations = len(input_files)
even_bad_antennas = [A[0] for A in bad_antennas]
antennas = []
for file_index in range(num_stations):
ant_names = input_files[file_index].get_antenna_names()
num_antennas_in_station = len( ant_names )
for x in range(num_ant_per_stat):
if x*2 < num_antennas_in_station and ant_names[x*2] not in even_bad_antennas:
antennas.append( [file_index,x*2] )
antennas.append( [file_index,x*2+1] )
return np.array(antennas, dtype=int)
import numpy as np
import h5py
import os
import time
import cPickle as pkl
import atexit, signal
from time import strftime, sleep
from itertools import cycle, product
from multiprocessing import Process, Queue, sharedctypes
from dataset import Dataset
from trajectory import traj_iou, object_trajectory_proposal
from baseline import *
class SharedArray(object):
"""Numpy array that uses sharedctypes to store data"""
def __init__(self, shape, dtype=np.float32):
# Compute total number of elements
size = np.prod(shape)
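# A minimal sketch of how the shared buffer could be allocated (assumption: the
# class exposes a ctypes-backed array as numpy data; names below are illustrative):
#     self._shared = sharedctypes.RawArray('f', int(size))
#     self.data = np.frombuffer(self._shared, dtype=dtype).reshape(shape)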
"""
Linear State Beam Element Class
"""
from sharpy.linear.utils.ss_interface import BaseElement, linear_system, LinearVector
import sharpy.linear.src.lingebm as lingebm
import numpy as np
import sharpy.utils.settings as settings
import sharpy.utils.algebra as algebra
@linear_system
class LinearBeam(BaseElement):
r"""
State space member
Define class for linear state-space realisation of GEBM flexible-body
equations from SHARPy ``timestep_info`` class and with the nonlinear structural information.
State-space models can be defined in continuous or discrete time (dt
required). Modal projection, either on the damped or undamped modal shapes,
is also avaiable.
Notes on the settings:
a. ``modal_projection={True,False}``: determines whether to project the states
onto modal coordinates. Projection over damped or undamped modal
shapes can be obtained selecting:
- ``proj_modes = {'damped','undamped'}``
while
- ``inout_coords={'modes','nodal'}``
determines whether the modal state-space inputs/outputs are modal
coords or nodal degrees-of-freedom. If ``modes`` is selected, the
``Kin`` and ``Kout`` gain matrices are generated to transform nodal to modal
dofs
b. ``dlti={True,False}``: if true, generates discrete-time system.
The continuous to discrete transformation method is determined by::
discr_method={ 'newmark', # Newmark-beta
'zoh', # Zero-order hold
'bilinear'} # Bilinear (Tustin) transformation
DLTIs can be obtained directly using the Newmark-:math:`\beta` method
``discr_method='newmark'``
``newmark_damp=xx`` with ``xx<<1.0``
for full-states descriptions (``modal_projection=False``) and modal projection
over the undamped structural modes (``modal_projection=True`` and ``proj_modes``).
The Zero-order holder and bilinear methods, instead, work in all
descriptions, but require the continuous state-space equations.
"""
sys_id = "LinearBeam"
settings_types = dict()
settings_default = dict()
settings_description = dict()
settings_options = dict()
settings_default['modal_projection'] = True
settings_types['modal_projection'] = 'bool'
settings_description['modal_projection'] = 'Use modal projection'
settings_default['inout_coords'] = 'nodes'
settings_types['inout_coords'] = 'str'
settings_description['inout_coords'] = 'Beam state space input/output coordinates'
settings_options['inout_coords'] = ['nodes', 'modes']
settings_types['num_modes'] = 'int'
settings_default['num_modes'] = 10
settings_description['num_modes'] = 'Number of modes to retain'
settings_default['discrete_time'] = True
settings_types['discrete_time'] = 'bool'
settings_description['discrete_time'] = 'Assemble beam in discrete time'
settings_default['dt'] = 0.001
settings_types['dt'] = 'float'
settings_description['dt'] = 'Discrete time system integration time step'
settings_default['proj_modes'] = 'undamped'
settings_types['proj_modes'] = 'str'
settings_description['proj_modes'] = 'Use ``undamped`` or ``damped`` modes'
settings_options['proj_modes'] = ['damped', 'undamped']
settings_default['discr_method'] = 'newmark'
settings_types['discr_method'] = 'str'
settings_description['discr_method'] = 'Discrete time assembly system method:'
settings_options['discr_method'] = ['newmark', 'zoh', 'bilinear']
settings_default['newmark_damp'] = 1e-4
settings_types['newmark_damp'] = 'float'
settings_description['newmark_damp'] = 'Newmark damping value. For systems assembled using ``newmark``'
settings_default['use_euler'] = False
settings_types['use_euler'] = 'bool'
settings_description['use_euler'] = 'Use euler angles for rigid body parametrisation'
settings_default['print_info'] = True
settings_types['print_info'] = 'bool'
settings_description['print_info'] = 'Display information on screen'
settings_default['gravity'] = False
settings_types['gravity'] = 'bool'
settings_description['gravity'] = 'Linearise gravitational forces'
settings_types['remove_dofs'] = 'list(str)'
settings_default['remove_dofs'] = []
settings_description['remove_dofs'] = 'Remove desired degrees of freedom'
settings_options['remove_dofs'] = ['eta', 'V', 'W', 'orient']
settings_types['remove_sym_modes'] = 'bool'
settings_default['remove_sym_modes'] = False
settings_description['remove_sym_modes'] = 'Remove symmetric modes if wing is clamped'
settings_table = settings.SettingsTable()
__doc__ += settings_table.generate(settings_types, settings_default, settings_description, settings_options)
def __init__(self):
self.sys = None # The actual object
self.ss = None # The state space object
self.clamped = None
self.tsstruct0 = None
self.settings = dict()
self.state_variables = None
self.linearisation_vectors = dict()
def initialise(self, data, custom_settings=None):
if custom_settings:
self.settings = custom_settings
else:
try:
self.settings = data.settings['LinearAssembler']['linear_system_settings']
except KeyError:
pass
settings.to_custom_types(self.settings, self.settings_types, self.settings_default,
self.settings_options, no_ctype=True)
beam = lingebm.FlexDynamic(data.linear.tsstruct0, data.structure, self.settings)
self.sys = beam
self.tsstruct0 = data.linear.tsstruct0
# State variables
num_dof_flex = self.sys.structure.num_dof.value
num_dof_rig = self.sys.Mstr.shape[0] - num_dof_flex
state_db = {'eta': [0, num_dof_flex],
'V_bar': [num_dof_flex, num_dof_flex + 3],
'W_bar': [num_dof_flex + 3, num_dof_flex + 6],
'orient_bar': [num_dof_flex + 6, num_dof_flex + num_dof_rig],
'dot_eta': [num_dof_flex + num_dof_rig, 2 * num_dof_flex + num_dof_rig],
'V': [2 * num_dof_flex + num_dof_rig, 2 * num_dof_flex + num_dof_rig + 3],
'W': [2 * num_dof_flex + num_dof_rig + 3, 2 * num_dof_flex + num_dof_rig + 6],
'orient': [2 * num_dof_flex + num_dof_rig + 6, 2 * num_dof_flex + 2 * num_dof_rig]}
self.state_variables = LinearVector(state_db, self.sys_id)
if num_dof_rig == 0:
self.clamped = True
self.linearisation_vectors['eta'] = self.tsstruct0.q
self.linearisation_vectors['eta_dot'] = self.tsstruct0.dqdt
self.linearisation_vectors['forces_struct'] = self.tsstruct0.steady_applied_forces.reshape(-1, order='C')
def assemble(self, t_ref=None):
"""
Assemble the beam state-space system.
Args:
t_ref (float): Scaling factor to non-dimensionalise the beam's time step.
Returns:
"""
if self.settings['gravity']:
self.sys.linearise_gravity_forces()
if self.settings['remove_dofs']:
self.trim_nodes(self.settings['remove_dofs'])
if self.settings['modal_projection'] and self.settings['remove_sym_modes'] and self.clamped:
self.remove_symmetric_modes()
if t_ref is not None:
self.sys.scale_system_normalised_time(t_ref)
# import sharpy.linear.assembler.linearthrust as linearthrust
# engine = linearthrust.LinearThrust()
# engine.initialise()
# K_thrust = engine.generate(self.tsstruct0, self.sys)
#
# self.sys.Kstr += K_thrust
self.sys.assemble()
# TODO: remove integrals of the rigid body modes (and change mode shapes to account for this in the coupling matrices)
# Option to remove certain dofs via dict: i.e. dofs to remove
# Map dofs to equations
# Boundary conditions
# Same with modal, remove certain modes. Need to specify that modes to keep refer to flexible ones only
if self.sys.SSdisc:
self.ss = self.sys.SSdisc
elif self.sys.SScont:
self.ss = self.sys.SScont
return self.ss
def x0(self):
x = np.concatenate((self.tsstruct0.q, self.tsstruct0.dqdt))
return x
def trim_nodes(self, trim_list=list):
num_dof_flex = self.sys.structure.num_dof.value
num_dof_rig = self.sys.Mstr.shape[0] - num_dof_flex
# Dictionary containing DOFs and corresponding equations
dof_db = {'eta': [0, num_dof_flex, 1],
'V': [num_dof_flex, num_dof_flex + 3, 2],
'W': [num_dof_flex + 3, num_dof_flex + 6, 3],
'orient': [num_dof_flex + 6, num_dof_flex + num_dof_rig, 4],
'yaw': [num_dof_flex + 8, num_dof_flex + num_dof_rig, 1]}
# -----------------------------------------------------------------------
# Better to place in a function available to all elements since it will equally apply
# Therefore, the dof_db should be a class attribute
# Take away alongside the vector variable class
# All variables
vec_db = dict()
for item in dof_db:
vector_var = VectorVariable(item, dof_db[item], 'LinearBeam')
vec_db[item] = vector_var
used_vars_db = vec_db.copy()
# Variables to remove
removed_dofs = 0
removed_db = dict()
for item in trim_list:
removed_db[item] = vec_db[item]
removed_dofs += vec_db[item].size
del used_vars_db[item]
# Update variables position
for rem_item in removed_db:
for item in used_vars_db:
if used_vars_db[item].rows_loc[0] < removed_db[rem_item].first_pos:
continue
else:
# Update order and position
used_vars_db[item].first_pos -= removed_db[rem_item].size
used_vars_db[item].end_pos -= removed_db[rem_item].size
self.state_variables = used_vars_db
# TODO: input and output variables
### ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
# Map dofs to equations
trim_matrix = np.zeros((num_dof_rig+num_dof_flex, num_dof_flex+num_dof_rig-removed_dofs))
for item in used_vars_db:
trim_matrix[used_vars_db[item].rows_loc, used_vars_db[item].cols_loc] = 1
# Update matrices
self.sys.Mstr = trim_matrix.T.dot(self.sys.Mstr.dot(trim_matrix))
self.sys.Cstr = trim_matrix.T.dot(self.sys.Cstr.dot(trim_matrix))
self.sys.Kstr = trim_matrix.T.dot(self.sys.Kstr.dot(trim_matrix))
def remove_symmetric_modes(self):
"""
Removes symmetric modes when the wing is clamped at the midpoint.
It will force the wing tip displacements in ``z`` to be positive for all modes.
Updates the mode shapes matrix, the natural frequencies and the number of modes.
"""
# Group modes into symmetric and anti-symmetric modes
modes_sym = np.zeros_like(self.sys.U) # grouped modes
total_modes = self.sys.num_modes
for i in range(total_modes//2):
je = 2*i
jo = 2*i + 1
modes_sym[:, je] = 1./np.sqrt(2)*(self.sys.U[:, je] + self.sys.U[:, jo])
modes_sym[:, jo] = 1./np.sqrt(2)*(self.sys.U[:, je] - self.sys.U[:, jo])
self.sys.U = modes_sym
# Remove anti-symmetric modes
# Wing 1 and 2 nodes
# z-displacement index
ind_w1 = [6*i + 2 for i in range(self.sys.structure.num_node // 2)] # Wing 1 nodes are in the first half rows
ind_w1_x = [6*i for i in range(self.sys.structure.num_node // 2)] # Wing 1 nodes are in the first half rows
ind_w1_y = [6*i + 1 for i in range(self.sys.structure.num_node // 2)] # Wing 1 nodes are in the first half rows
ind_w2 = [6*i + 2 for i in range(self.sys.structure.num_node // 2, self.sys.structure.num_node - 1)] # Wing 2 nodes are in the second half rows
sym_mode_index = []
for i in range(self.sys.num_modes//2):
found_symmetric = False
for j in range(2):
ind = 2*i + j
# Maximum z displacement for wings 1 and 2
ind_max_w1 = np.argmax(np.abs(modes_sym[ind_w1, ind]))
ind_max_w2 = np.argmax(np.abs(modes_sym[ind_w2, ind]))
z_max_w1 = modes_sym[ind_w1, ind][ind_max_w1]
z_max_w2 = modes_sym[ind_w2, ind][ind_max_w2]
z_max_diff = np.abs(z_max_w1 - z_max_w2)
if z_max_diff < np.abs(z_max_w1 + z_max_w2):
    # mark this mode as the symmetric one of the pair
    sym_mode_index.append(ind)
    found_symmetric = True
"""Rangeland Production Model."""
import os
import logging
import tempfile
import shutil
from builtins import range
import re
import math
import pickle
import numpy
import pandas
from osgeo import ogr
from osgeo import osr
from osgeo import gdal
import pygeoprocessing
from rangeland_production import utils
from rangeland_production import validation
LOGGER = logging.getLogger('rangeland_production.forage')
# we only have these types of soils
SOIL_TYPE_LIST = ['clay', 'silt', 'sand']
# temporary directory to store intermediate files
PROCESSING_DIR = None
# user-supplied crude protein of vegetation
CRUDE_PROTEIN = None
# state variables and parameters take their names from Century
# _SITE_STATE_VARIABLE_FILES contains state variables that are a
# property of the site, including:
# carbon in each soil compartment
# (structural, metabolic, som1, som2, som3) and layer (1=surface, 2=soil)
# e.g., som2c_2 = carbon in soil som2;
# N and P in each soil layer and compartment (1=N, 2=P)
# e.g., som2e_1_1 = N in surface som2, som2e_1_2 = P in surface som2;
# water in each soil layer, asmos_<layer>
# state variables fully described in this table:
# https://docs.google.com/spreadsheets/d/1TGCDOJS4nNsJpzTWdiWed390NmbhQFB2uUoMs9oTTYo/edit?usp=sharing
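# Example of the naming scheme above: in 'som2e_1_2' the pool is som2, the 'e' suffix
# marks the element (N/P) pools as opposed to the 'c' carbon pools, the first index is
# the layer (1=surface, 2=soil) and the second index is the element (1=N, 2=P), i.e.
# phosphorus in surface som2.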
_SITE_STATE_VARIABLE_FILES = {
'metabc_1_path': 'metabc_1.tif',
'metabc_2_path': 'metabc_2.tif',
'som1c_1_path': 'som1c_1.tif',
'som1c_2_path': 'som1c_2.tif',
'som2c_1_path': 'som2c_1.tif',
'som2c_2_path': 'som2c_2.tif',
'som3c_path': 'som3c.tif',
'strucc_1_path': 'strucc_1.tif',
'strucc_2_path': 'strucc_2.tif',
'strlig_1_path': 'strlig_1.tif',
'strlig_2_path': 'strlig_2.tif',
'metabe_1_1_path': 'metabe_1_1.tif',
'metabe_2_1_path': 'metabe_2_1.tif',
'som1e_1_1_path': 'som1e_1_1.tif',
'som1e_2_1_path': 'som1e_2_1.tif',
'som2e_1_1_path': 'som2e_1_1.tif',
'som2e_2_1_path': 'som2e_2_1.tif',
'som3e_1_path': 'som3e_1.tif',
'struce_1_1_path': 'struce_1_1.tif',
'struce_2_1_path': 'struce_2_1.tif',
'metabe_1_2_path': 'metabe_1_2.tif',
'metabe_2_2_path': 'metabe_2_2.tif',
'plabil_path': 'plabil.tif',
'secndy_2_path': 'secndy_2.tif',
'parent_2_path': 'parent_2.tif',
'occlud_path': 'occlud.tif',
'som1e_1_2_path': 'som1e_1_2.tif',
'som1e_2_2_path': 'som1e_2_2.tif',
'som2e_1_2_path': 'som2e_1_2.tif',
'som2e_2_2_path': 'som2e_2_2.tif',
'som3e_2_path': 'som3e_2.tif',
'struce_1_2_path': 'struce_1_2.tif',
'struce_2_2_path': 'struce_2_2.tif',
'asmos_1_path': 'asmos_1.tif',
'asmos_2_path': 'asmos_2.tif',
'asmos_3_path': 'asmos_3.tif',
'asmos_4_path': 'asmos_4.tif',
'asmos_5_path': 'asmos_5.tif',
'asmos_6_path': 'asmos_6.tif',
'asmos_7_path': 'asmos_7.tif',
'asmos_8_path': 'asmos_8.tif',
'asmos_9_path': 'asmos_9.tif',
'avh2o_3_path': 'avh2o_3.tif',
'minerl_1_1_path': 'minerl_1_1.tif',
'minerl_2_1_path': 'minerl_2_1.tif',
'minerl_3_1_path': 'minerl_3_1.tif',
'minerl_4_1_path': 'minerl_4_1.tif',
'minerl_5_1_path': 'minerl_5_1.tif',
'minerl_6_1_path': 'minerl_6_1.tif',
'minerl_7_1_path': 'minerl_7_1.tif',
'minerl_8_1_path': 'minerl_8_1.tif',
'minerl_9_1_path': 'minerl_9_1.tif',
'minerl_10_1_path': 'minerl_10_1.tif',
'minerl_1_2_path': 'minerl_1_2.tif',
'minerl_2_2_path': 'minerl_2_2.tif',
'minerl_3_2_path': 'minerl_3_2.tif',
'minerl_4_2_path': 'minerl_4_2.tif',
'minerl_5_2_path': 'minerl_5_2.tif',
'minerl_6_2_path': 'minerl_6_2.tif',
'minerl_7_2_path': 'minerl_7_2.tif',
'minerl_8_2_path': 'minerl_8_2.tif',
'minerl_9_2_path': 'minerl_9_2.tif',
'minerl_10_2_path': 'minerl_10_2.tif',
'snow_path': 'snow.tif',
'snlq_path': 'snlq.tif',
}
# _PFT_STATE_VARIABLES contains state variables that are a
# property of a PFT, including:
# carbon, nitrogen, and phosphorous in aboveground biomass
# where 1=N, 2=P
# e.g. aglivc = C in aboveground live biomass,
# aglive_1 = N in aboveground live biomass;
# carbon, nitrogen, and phosphorous in aboveground standing dead
# biomass, stdedc and stdede;
# carbon, nitrogen and phosphorous in belowground live biomass,
# aglivc and aglive
# state variables fully described in this table:
# https://docs.google.com/spreadsheets/d/1TGCDOJS4nNsJpzTWdiWed390NmbhQFB2uUoMs9oTTYo/edit?usp=sharing
_PFT_STATE_VARIABLES = [
'aglivc', 'bglivc', 'stdedc', 'aglive_1', 'bglive_1',
'stdede_1', 'aglive_2', 'bglive_2', 'stdede_2', 'avh2o_1',
'crpstg_1', 'crpstg_2',
]
# intermediate parameters that do not change between timesteps,
# including field capacity and wilting point of each soil layer,
# coefficients describing effect of soil texture on decomposition
# rates
_PERSISTENT_PARAMS_FILES = {
'afiel_1_path': 'afiel_1.tif',
'afiel_2_path': 'afiel_2.tif',
'afiel_3_path': 'afiel_3.tif',
'afiel_4_path': 'afiel_4.tif',
'afiel_5_path': 'afiel_5.tif',
'afiel_6_path': 'afiel_6.tif',
'afiel_7_path': 'afiel_7.tif',
'afiel_8_path': 'afiel_8.tif',
'afiel_9_path': 'afiel_9.tif',
'awilt_1_path': 'awilt_1.tif',
'awilt_2_path': 'awilt_2.tif',
'awilt_3_path': 'awilt_3.tif',
'awilt_4_path': 'awilt_4.tif',
'awilt_5_path': 'awilt_5.tif',
'awilt_6_path': 'awilt_6.tif',
'awilt_7_path': 'awilt_7.tif',
'awilt_8_path': 'awilt_8.tif',
'awilt_9_path': 'awilt_9.tif',
'wc_path': 'wc.tif',
'eftext_path': 'eftext.tif',
'p1co2_2_path': 'p1co2_2.tif',
'fps1s3_path': 'fps1s3.tif',
'orglch_path': 'orglch.tif',
'fps2s3_path': 'fps2s3.tif',
'rnewas_1_1_path': 'rnewas_1_1.tif',
'rnewas_2_1_path': 'rnewas_2_1.tif',
'rnewas_1_2_path': 'rnewas_1_2.tif',
'rnewas_2_2_path': 'rnewas_2_2.tif',
'rnewbs_1_1_path': 'rnewbs_1_1.tif',
'rnewbs_1_2_path': 'rnewbs_1_2.tif',
'rnewbs_2_1_path': 'rnewbs_2_1.tif',
'rnewbs_2_2_path': 'rnewbs_2_2.tif',
'vlossg_path': 'vlossg.tif',
}
# site-level values that are updated once per year
_YEARLY_FILES = {
'annual_precip_path': 'annual_precip.tif',
'baseNdep_path': 'baseNdep.tif',
}
# pft-level values that are updated once per year
_YEARLY_PFT_FILES = ['pltlig_above', 'pltlig_below']
# intermediate values for each plant functional type that are shared
# between submodels, but do not need to be saved as output
_PFT_INTERMEDIATE_VALUES = [
'h2ogef_1', 'tgprod_pot_prod',
'cercrp_min_above_1', 'cercrp_min_above_2',
'cercrp_max_above_1', 'cercrp_max_above_2',
'cercrp_min_below_1', 'cercrp_min_below_2',
'cercrp_max_below_1', 'cercrp_max_below_2',
'tgprod', 'rtsh', 'flgrem', 'fdgrem']
# intermediate site-level values that are shared between submodels,
# but do not need to be saved as output
_SITE_INTERMEDIATE_VALUES = [
'amov_1', 'amov_2', 'amov_3', 'amov_4', 'amov_5', 'amov_6', 'amov_7',
'amov_8', 'amov_9', 'amov_10', 'snowmelt', 'bgwfunc', 'diet_sufficiency']
# fixed parameters for each grazing animal type are adapted from the GRAZPLAN
# model as described by Freer et al. 2012, "The GRAZPLAN animal biology model
# for sheep and cattle and the GrazFeed decision support tool"
_FREER_PARAM_DICT = {
'b_indicus': {
'CN1': 0.0115,
'CN2': 0.27,
'CN3': 0.4,
'CI1': 0.025,
'CI2': 1.7,
'CI8': 62,
'CI9': 1.7,
'CI15': 0.5,
'CI19': 0.416,
'CI20': 1.5,
'CR1': 0.8,
'CR2': 0.17,
'CR3': 1.7,
'CR4': 0.00078,
'CR5': 0.6,
'CR6': 0.00074,
'CR7': 0.5,
'CR12': 0.8,
'CR13': 0.35,
'CK1': 0.5,
'CK2': 0.02,
'CK3': 0.85,
'CK5': 0.4,
'CK6': 0.02,
'CK8': 0.133,
'CL0': 0.375,
'CL1': 4,
'CL2': 30,
'CL3': 0.6,
'CL5': 0.94,
'CL6': 3.1,
'CL15': 0.032,
'CM1': 0.09,
'CM2': 0.31,
'CM3': 0.00008,
'CM4': 0.84,
'CM6': 0.0025,
'CM7': 0.9,
'CM16': 0.0026,
'CRD1': 0.3,
'CRD2': 0.25,
'CRD4': 0.007,
'CRD5': 0.005,
'CRD6': 0.35,
'CRD7': 0.1,
'CA1': 0.05,
'CA2': 0.85,
'CA3': 5.5,
'CA4': 0.178,
'CA6': 1,
'CA7': 0.6,
'CP1': 285,
'CP4': 0.33,
'CP5': 1.8,
'CP6': 2.42,
'CP7': 1.16,
'CP8': 4.11,
'CP9': 343.5,
'CP10': 0.0164,
'CP15': 0.07,
},
'b_taurus': {
'CN1': 0.0115,
'CN2': 0.27,
'CN3': 0.4,
'CI1': 0.025,
'CI2': 1.7,
'CI8': 62,
'CI9': 1.7,
'CI15': 0.5,
'CI19': 0.416,
'CI20': 1.5,
'CR1': 0.8,
'CR2': 0.17,
'CR3': 1.7,
'CR4': 0.00078,
'CR5': 0.6,
'CR6': 0.00074,
'CR7': 0.5,
'CR12': 0.8,
'CR13': 0.35,
'CK1': 0.5,
'CK2': 0.02,
'CK3': 0.85,
'CK5': 0.4,
'CK6': 0.02,
'CK8': 0.133,
'CL0': 0.375,
'CL1': 4,
'CL2': 30,
'CL3': 0.6,
'CL5': 0.94,
'CL6': 3.1,
'CL15': 0.032,
'CM1': 0.09,
'CM2': 0.36,
'CM3': 0.00008,
'CM4': 0.84,
'CM6': 0.0025,
'CM7': 0.9,
'CM16': 0.0026,
'CRD1': 0.3,
'CRD2': 0.25,
'CRD4': 0.007,
'CRD5': 0.005,
'CRD6': 0.35,
'CRD7': 0.1,
'CA1': 0.05,
'CA2': 0.85,
'CA3': 5.5,
'CA4': 0.178,
'CA6': 1,
'CA7': 0.6,
'CP1': 285,
'CP4': 0.33,
'CP5': 1.8,
'CP6': 2.42,
'CP7': 1.16,
'CP8': 4.11,
'CP9': 343.5,
'CP10': 0.0164,
'CP15': 0.07,
},
'indicus_x_taurus': {
'CN1': 0.0115,
'CN2': 0.27,
'CN3': 0.4,
'CI1': 0.025,
'CI2': 1.7,
'CI8': 62,
'CI9': 1.7,
'CI15': 0.5,
'CI19': 0.416,
'CI20': 1.5,
'CR1': 0.8,
'CR2': 0.17,
'CR3': 1.7,
'CR4': 0.00078,
'CR5': 0.6,
'CR6': 0.00074,
'CR7': 0.5,
'CR12': 0.8,
'CR13': 0.35,
'CK1': 0.5,
'CK2': 0.02,
'CK3': 0.85,
'CK5': 0.4,
'CK6': 0.02,
'CK8': 0.133,
'CL0': 0.375,
'CL1': 4,
'CL2': 30,
'CL3': 0.6,
'CL5': 0.94,
'CL6': 3.1,
'CL15': 0.032,
'CM1': 0.09,
'CM2': 0.335,
'CM3': 0.00008,
'CM4': 0.84,
'CM6': 0.0025,
'CM7': 0.9,
'CM16': 0.0026,
'CRD1': 0.3,
'CRD2': 0.25,
'CRD4': 0.007,
'CRD5': 0.005,
'CRD6': 0.35,
'CRD7': 0.1,
'CA1': 0.05,
'CA2': 0.85,
'CA3': 5.5,
'CA4': 0.178,
'CA6': 1,
'CA7': 0.6,
'CP1': 285,
'CP4': 0.33,
'CP5': 1.8,
'CP6': 2.42,
'CP7': 1.16,
'CP8': 4.11,
'CP9': 343.5,
'CP10': 0.0164,
'CP15': 0.07,
},
'sheep': {
'CN1': 0.0157,
'CN2': 0.27,
'CN3': 0.4,
'CI1': 0.04,
'CI2': 1.7,
'CI8': 28,
'CI9': 1.4,
'CI12': 0.15,
'CI13': 0.02,
'CI14': 0.002,
'CI20': 1.5,
'CR1': 0.8,
'CR2': 0.17,
'CR3': 1.7,
'CR4': 0.00112,
'CR5': 0.6,
'CR6': 0.00112,
'CR7': 0,
'CR12': 0.8,
'CR13': 0.35,
'CK1': 0.5,
'CK2': 0.02,
'CK3': 0.85,
'CK5': 0.4,
'CK6': 0.02,
'CK8': 0.133,
'CL0': 0.486,
'CL1': 2,
'CL2': 22,
'CL3': 1,
'CL5': 0.94,
'CL6': 4.7,
'CL15': 0.045,
'CM1': 0.09,
'CM2': 0.26,
'CM3': 0.00008,
'CM4': 0.84,
'CM6': 0.02,
'CM7': 0.9,
'CM16': 0.0026,
'CRD1': 0.3,
'CRD2': 0.25,
'CRD4': 0.007,
'CRD5': 0.005,
'CRD6': 0.35,
'CRD7': 0.1,
'CA1': 0.05,
'CA2': 0.85,
'CA3': 5.5,
'CA4': 0.178,
'CA6': 1,
'CA7': 0.6,
'CW1': 24,
'CW2': 0.004,
'CW3': 0.7,
'CW5': 0.25,
'CW6': 0.072,
'CW7': 1.35,
'CW8': 0.016,
'CW9': 1,
'CW12': 0.025,
'CP1': 150,
'CP4': 0.33,
'CP5': 1.43,
'CP6': 3.38,
'CP7': 0.91,
'CP8': 4.33,
'CP9': 4.37,
'CP10': 0.965,
'CP15': 0.1,
},
}
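# Example (illustrative): Freer parameters are keyed first by animal type and
# then by coefficient name, e.g. _FREER_PARAM_DICT['b_taurus']['CN1'] -> 0.0115
# and _FREER_PARAM_DICT['sheep']['CP1'] -> 150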
# _TARGET_NODATA is for general rasters that are positive, and _IC_NODATA is
# for rasters that can take any range of values
_TARGET_NODATA = -1.0
_IC_NODATA = float(numpy.finfo('float32').min)
# SV_NODATA is for state variables
_SV_NODATA = -1.0
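# Note (illustrative): nodata in input rasters is typically screened with
# numpy.isclose, e.g. valid_mask = ~numpy.isclose(block, raster_nodata), while
# rasters generated internally and filled with _TARGET_NODATA are often
# compared with strict equality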
def execute(args):
"""InVEST Forage Model.
    Calculate forage production and diet sufficiency of grazing animals by
    coupling a gridded implementation of the Century ecosystem model with a
    ruminant diet and intake model adapted from GRAZPLAN (Freer et al. 2012).
Parameters:
args['workspace_dir'] (string): path to target output workspace.
args['results_suffix'] (string): (optional) string to append to any
output file names
        args['starting_month'] (int): the month in which to start reporting,
            where the range 1..12 is equivalent to Jan..Dec.
        args['starting_year'] (int): the year in which to start the model run.
            This value is used to notate outputs in the form
            [month_int]_[year]
args['n_months'] (int): number of months to run model, the model run
will start reporting in `args['starting_month']`.
args['aoi_path'] (string): path to polygon vector indicating the
desired spatial extent of the model. This has the effect of
clipping the computational area of the input datasets to be the
area intersected by this polygon.
args['management_threshold'] (float): biomass in kg/ha required to be
left standing at each model step after offtake by grazing animals
args['proportion_legume_path'] (string): path to raster containing
fraction of pasture that is legume, by weight
args['bulk_density_path'] (string): path to bulk density raster.
args['ph_path'] (string): path to soil pH raster.
args['clay_proportion_path'] (string): path to raster representing
per-pixel proportion of soil component that is clay
args['silt_proportion_path'] (string): path to raster representing
per-pixel proportion of soil component that is silt
args['sand_proportion_path'] (string): path to raster representing
per-pixel proportion of soil component that is sand
args['precip_dir'] (string): path to a directory containing monthly
precipitation rasters. The model requires at least 12 months of
precipitation and expects to find a precipitation file input for
every month of the simulation, so the number of precipitation
files should be the maximum of 12 and `n_months`. The file name of
each precipitation raster must end with the year, followed by an
underscore, followed by the month number. E.g., Precip_2016_1.tif
for January of 2016.
args['min_temp_dir'] (string): path to a directory containing monthly
minimum temperature rasters. The model requires one minimum
temperature raster for each month of the year, or each month that
the model is run, whichever is smaller. The file name of each
minimum temperature raster must end with the month number. E.g.,
Min_temperature_1.tif for January.
args['max_temp_dir'] (string): path to a directory containing monthly
maximum temperature rasters. The model requires one maximum
temperature raster for each month of the year, or each month that
the model is run, whichever is smaller. The file name of each
maximum temperature raster must end with the month number. E.g.,
Max_temperature_1.tif for January.
args['site_param_table'] (string): path to csv file giving site
parameters. This file must contain a column named "site" that
contains unique integers. These integer values correspond to site
type identifiers which are values in the site parameter spatial
index raster. Other required fields for this table are site and
"fixed" parameters from the Century model, i.e., the parameters
in the Century input files site.100 and fix.100.
args['site_param_spatial_index_path'] (string): path to a raster file
that indexes site parameters, indicating which set of site
parameter values should apply at each pixel in the raster. The
raster should be composed of integers that correspond to values in
the field "site" in `site_param_table`.
args['veg_trait_path'] (string): path to csv file giving vegetation
traits for each plant functional type available for grazing. This
file must contain a column named "PFT" that contains unique
integers. These integer values correspond to PFT identifiers of
veg spatial composition rasters. Other required fields for this
table are vegetation input parameters from the Century model, for
example maximum intrinsic growth rate, optimum temperature for
production, minimum C/N ratio, etc.
args['veg_spatial_composition_path_pattern'] (string): path to
vegetation rasters, one per plant functional type available for
grazing, where <PFT> can be replaced with an integer that is
indexed in the veg trait csv.
Example: if this value is given as `./vegetation/pft_<PFT>.tif`
and the directory `./vegetation/` contains these files:
"pft_1.tif"
"pft_12.tif"
"pft_50.tif",
then the "PFT" field in the vegetation trait table must contain
the values 1, 12, and 50.
args['animal_trait_path'] (string): path to csv file giving animal
traits for each animal type - number - duration combination. This
table must contain a column named "animal_id" that contains unique
integers. These integer values correspond to features in the
animal management layer.
Other required fields in this table are:
type (allowable values: b_indicus, b_taurus,
indicus_x_taurus, sheep, camelid, hindgut_fermenter)
sex (allowable values: entire_m, castrate, breeding_female,
NA)
age (days)
weight (kg)
SRW (standard reference weight, kg; the weight of a mature
female in median condition)
SFW (standard fleece weight, kg; the average weight of fleece
of a mature adult; for sheep only)
birth_weight (kg)
grz_months (a string of integers, separated by ','; months of
the simulation when animals are present,
relative to `starting_month`. For example, if `n_months`
is 3, and animals are present during the entire simulation
period, `grz_months` should be "1,2,3")
args['animal_grazing_areas_path'] (string): path to animal vector
inputs giving the location of grazing animals. Must have a field
named "animal_id", containing unique integers that correspond to
the values in the "animal_id" column of the animal trait csv, and
a field named "num_animal" giving the number of animals grazing
inside each polygon feature.
args['initial_conditions_dir'] (string): optional input, path to
directory containing initial conditions. If this directory is not
supplied, a site_initial_table and pft_initial_table must be
supplied. If supplied, this directory must contain a series of
rasters with initial values for each PFT and for the site.
Required rasters for each PFT:
initial variables that are a property of PFT in the table
https://docs.google.com/spreadsheets/d/1TGCDOJS4nNsJpzTWdiWed390NmbhQFB2uUoMs9oTTYo/edit?usp=sharing
e.g., aglivc_<PFT>.tif
Required for the site:
initial variables that are a property of site in the table
https://docs.google.com/spreadsheets/d/1TGCDOJS4nNsJpzTWdiWed390NmbhQFB2uUoMs9oTTYo/edit?usp=sharing
args['site_initial_table'] (string): optional input, path to table
containing initial conditions for each site state variable. If an
initial conditions directory is not supplied, this table must be
supplied. This table must contain a value for each site code and
each state variable listed in the following table:
https://docs.google.com/spreadsheets/d/1TGCDOJS4nNsJpzTWdiWed390NmbhQFB2uUoMs9oTTYo/edit?usp=sharing
args['pft_initial_table'] (string): optional input, path to table
containing initial conditions for each plant functional type state
variable. If an initial conditions directory is not supplied, this
table must be supplied. This table must contain a value for each
plant functional type index and each state variable listed in the
following table:
https://docs.google.com/spreadsheets/d/1TGCDOJS4nNsJpzTWdiWed390NmbhQFB2uUoMs9oTTYo/edit?usp=sharing
args['save_sv_rasters'] (boolean): optional input, default false.
Should rasters containing all state variables be saved for each
model time step?
        args['animal_density'] (string): optional input, path to raster giving
            the density of grazing animals in animals per hectare.
args['crude_protein'] (float): optional input, crude protein
concentration of forage for the purposes of animal diet selection.
Should be a value between 0-1. If included, this value is
substituted for N content of forage when calculating digestibility
and "ingestibility" of forage, and protein content of the diet, for
grazing animals.
Returns:
None.
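    Example:
        A hypothetical call (all paths and values below are illustrative
        placeholders, not shipped sample data):

            args = {
                'workspace_dir': 'rpm_workspace',
                'starting_month': 1,
                'starting_year': 2016,
                'n_months': 12,
                'aoi_path': 'aoi.shp',
                'management_threshold': 300,
                'proportion_legume_path': 'legume.tif',
                'bulk_density_path': 'bulkd.tif',
                'ph_path': 'ph.tif',
                'clay_proportion_path': 'clay.tif',
                'silt_proportion_path': 'silt.tif',
                'sand_proportion_path': 'sand.tif',
                'precip_dir': 'precip',
                'min_temp_dir': 'min_temp',
                'max_temp_dir': 'max_temp',
                'site_param_table': 'site_parameters.csv',
                'site_param_spatial_index_path': 'site_index.tif',
                'veg_trait_path': 'pft_traits.csv',
                'veg_spatial_composition_path_pattern': 'veg/pft_<PFT>.tif',
                'animal_trait_path': 'animal_traits.csv',
                'animal_grazing_areas_path': 'grazing_areas.shp',
                'site_initial_table': 'site_initial.csv',
                'pft_initial_table': 'pft_initial.csv',
            }
            execute(args)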
"""
LOGGER.info("model execute: %s", args)
starting_month = int(args['starting_month'])
starting_year = int(args['starting_year'])
n_months = int(args['n_months'])
try:
delete_sv_folders = not args['save_sv_rasters']
except KeyError:
delete_sv_folders = True
try:
global CRUDE_PROTEIN
CRUDE_PROTEIN = args['crude_protein']
except KeyError:
pass
    if 'animal_density' not in args:
        args['animal_density'] = None
# this set will build up the integer months that are used so we can index
# them with temperature later
temperature_month_set = set()
# this dict will be used to build the set of input rasters associated with
# a reasonable lookup ID so we can have a nice dataset to align for raster
# stack operations
base_align_raster_path_id_map = {}
precip_dir_list = [
os.path.join(args['precip_dir'], f) for f in
os.listdir(args['precip_dir'])]
for month_index in range(n_months):
month_i = (starting_month + month_index - 1) % 12 + 1
temperature_month_set.add(month_i)
year = starting_year + (starting_month + month_index - 1) // 12
year_month_match = re.compile(
r'.*[^\d]%d_%d\.[^.]+$' % (year, month_i))
file_list = [
month_file_path for month_file_path in precip_dir_list if
year_month_match.match(month_file_path)]
if len(file_list) == 0:
raise ValueError(
"No precipitation data found for year %d, month %d" %
(year, month_i))
if len(file_list) > 1:
raise ValueError(
"Ambiguous set of files found for year %d, month %d: %s" %
(year, month_i, file_list))
base_align_raster_path_id_map[
'precip_{}'.format(month_index)] = file_list[0]
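    # Example (illustrative): with starting_year=2016 and starting_month=1,
    # month_index 0 yields the pattern r'.*[^\d]2016_1\.[^.]+$', which matches
    # a file named e.g. 'Precip_2016_1.tif'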
# the model requires 12 months of precipitation data to calculate
# atmospheric N deposition and potential production from annual precip
n_precip_months = int(args['n_months'])
if n_precip_months < 12:
m_index = int(args['n_months'])
while n_precip_months < 12:
month_i = (starting_month + m_index - 1) % 12 + 1
year = starting_year + (starting_month + m_index - 1) // 12
year_month_match = re.compile(
r'.*[^\d]%d_%d\.[^.]+$' % (year, month_i))
file_list = [
month_file_path for month_file_path in precip_dir_list if
year_month_match.match(month_file_path)]
if len(file_list) == 0:
break
if len(file_list) > 1:
raise ValueError(
"Ambiguous set of files found for year %d, month %d: %s" %
(year, month_i, file_list))
base_align_raster_path_id_map[
'precip_%d' % m_index] = file_list[0]
n_precip_months = n_precip_months + 1
m_index = m_index + 1
if n_precip_months < 12:
raise ValueError("At least 12 months of precipitation data required")
# collect monthly temperature data
min_temp_dir_list = [
os.path.join(args['min_temp_dir'], f) for f in
os.listdir(args['min_temp_dir'])]
for month_i in temperature_month_set:
month_file_match = re.compile(r'.*[^\d]%d\.[^.]+$' % month_i)
file_list = [
month_file_path for month_file_path in min_temp_dir_list if
month_file_match.match(month_file_path)]
if len(file_list) == 0:
raise ValueError(
"No minimum temperature data found for month %d" % month_i)
if len(file_list) > 1:
raise ValueError(
"Ambiguous set of files found for month %d: %s" %
(month_i, file_list))
base_align_raster_path_id_map[
'min_temp_%d' % month_i] = file_list[0]
max_temp_dir_list = [
os.path.join(args['max_temp_dir'], f) for f in
os.listdir(args['max_temp_dir'])]
for month_i in temperature_month_set:
month_file_match = re.compile(r'.*[^\d]%d\.[^.]+$' % month_i)
file_list = [
month_file_path for month_file_path in max_temp_dir_list if
month_file_match.match(month_file_path)]
if len(file_list) == 0:
raise ValueError(
"No maximum temperature data found for month %d" % month_i)
if len(file_list) > 1:
raise ValueError(
"Ambiguous set of files found for month %d: %s" %
(month_i, file_list))
base_align_raster_path_id_map[
'max_temp_%d' % month_i] = file_list[0]
# lookup to provide path to soil percent given soil type
for soil_type in SOIL_TYPE_LIST:
base_align_raster_path_id_map[soil_type] = (
args['%s_proportion_path' % soil_type])
if not os.path.exists(base_align_raster_path_id_map[soil_type]):
raise ValueError(
"Couldn't find %s for %s" % (
base_align_raster_path_id_map[soil_type], soil_type))
base_align_raster_path_id_map['bulk_d_path'] = args['bulk_density_path']
base_align_raster_path_id_map['ph_path'] = args['ph_path']
# make sure site initial conditions and parameters exist for each site
# identifier
base_align_raster_path_id_map['site_index'] = (
args['site_param_spatial_index_path'])
n_bands = pygeoprocessing.get_raster_info(
args['site_param_spatial_index_path'])['n_bands']
if n_bands > 1:
raise ValueError(
'Site spatial index raster must contain only one band')
site_datatype = pygeoprocessing.get_raster_info(
args['site_param_spatial_index_path'])['datatype']
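    # GDAL datatype codes 1 through 5 are the integer types (Byte, UInt16,
    # Int16, UInt32, Int32)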
if site_datatype not in [1, 2, 3, 4, 5]:
raise ValueError('Site spatial index raster must be integer type')
# get unique values in site param raster
site_index_set = set()
for offset_map, raster_block in pygeoprocessing.iterblocks(
(args['site_param_spatial_index_path'], 1)):
site_index_set.update(numpy.unique(raster_block))
site_nodata = pygeoprocessing.get_raster_info(
args['site_param_spatial_index_path'])['nodata'][0]
if site_nodata in site_index_set:
site_index_set.remove(site_nodata)
site_param_table = utils.build_lookup_from_csv(
args['site_param_table'], 'site')
    missing_site_index_list = list(
        site_index_set.difference(site_param_table.keys()))
    if missing_site_index_list:
        raise ValueError(
            "Couldn't find parameter values for the following site "
            "indices: " + ", ".join(
                str(site_i) for site_i in missing_site_index_list))
# make sure plant functional type parameters exist for each pft raster
pft_dir = os.path.dirname(args['veg_spatial_composition_path_pattern'])
pft_basename = os.path.basename(
args['veg_spatial_composition_path_pattern'])
files = [
f for f in os.listdir(pft_dir) if os.path.isfile(
os.path.join(pft_dir, f))]
pft_regex = re.compile(pft_basename.replace('<PFT>', r'(\d+)'))
pft_matches = [
m for m in [pft_regex.search(f) for f in files] if m is not None]
pft_id_set = set([int(m.group(1)) for m in pft_matches])
for pft_i in pft_id_set:
pft_path = args['veg_spatial_composition_path_pattern'].replace(
'<PFT>', '%d' % pft_i)
base_align_raster_path_id_map['pft_%d' % pft_i] = pft_path
veg_trait_table = utils.build_lookup_from_csv(
args['veg_trait_path'], 'PFT')
    missing_pft_trait_list = pft_id_set.difference(veg_trait_table.keys())
    if missing_pft_trait_list:
        raise ValueError(
            "Couldn't find trait values for the following plant functional "
            "types: " + ", ".join(
                str(pft_i) for pft_i in missing_pft_trait_list))
frtcindx_set = set([
pft_i['frtcindx'] for pft_i in veg_trait_table.values()])
if frtcindx_set.difference(set([0, 1])):
raise ValueError("frtcindx parameter contains invalid values")
base_align_raster_path_id_map['proportion_legume_path'] = args[
'proportion_legume_path']
# track separate state variable files for each PFT
pft_sv_dict = {}
for pft_i in pft_id_set:
for sv in _PFT_STATE_VARIABLES:
pft_sv_dict['{}_{}_path'.format(
sv, pft_i)] = '{}_{}.tif'.format(sv, pft_i)
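    # e.g. for pft_i = 1, this adds entries such as
    # {'aglivc_1_path': 'aglivc_1.tif', 'bglivc_1_path': 'bglivc_1.tif', ...}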
# make sure animal traits exist for each feature in animal management
# layer
anim_id_list = []
driver = ogr.GetDriverByName('ESRI Shapefile')
datasource = driver.Open(args['animal_grazing_areas_path'], 0)
layer = datasource.GetLayer()
for feature in layer:
anim_id_list.append(feature.GetField('animal_id'))
input_animal_trait_table = utils.build_lookup_from_csv(
args['animal_trait_path'], 'animal_id')
    missing_animal_trait_list = set(
        anim_id_list).difference(input_animal_trait_table.keys())
    if missing_animal_trait_list:
        raise ValueError(
            "Couldn't find trait values for the following animal "
            "ids: " + ", ".join(
                str(animal_id) for animal_id in missing_animal_trait_list))
# if animal density is supplied, align inputs to match its resolution
# otherwise, match resolution of precipitation rasters
if args['animal_density']:
target_pixel_size = pygeoprocessing.get_raster_info(
args['animal_density'])['pixel_size']
base_align_raster_path_id_map['animal_density'] = args[
'animal_density']
else:
target_pixel_size = pygeoprocessing.get_raster_info(
base_align_raster_path_id_map['precip_0'])['pixel_size']
LOGGER.info(
"pixel size of aligned inputs: %s", target_pixel_size)
# temporary directory for intermediate files
global PROCESSING_DIR
PROCESSING_DIR = os.path.join(args['workspace_dir'], "temporary_files")
if not os.path.exists(PROCESSING_DIR):
os.makedirs(PROCESSING_DIR)
# set up a dictionary that uses the same keys as
# 'base_align_raster_path_id_map' to point to the clipped/resampled
# rasters to be used in raster calculations for the model.
aligned_raster_dir = os.path.join(
args['workspace_dir'], 'aligned_inputs')
if os.path.exists(aligned_raster_dir):
shutil.rmtree(aligned_raster_dir)
os.makedirs(aligned_raster_dir)
aligned_inputs = dict([(key, os.path.join(
aligned_raster_dir, 'aligned_%s' % os.path.basename(path)))
for key, path in base_align_raster_path_id_map.items()])
# align all the base inputs to be the minimum known pixel size and to
# only extend over their combined intersections
source_input_path_list = [
base_align_raster_path_id_map[k] for k in sorted(
base_align_raster_path_id_map.keys())]
aligned_input_path_list = [
aligned_inputs[k] for k in sorted(aligned_inputs.keys())]
pygeoprocessing.align_and_resize_raster_stack(
source_input_path_list, aligned_input_path_list,
['near'] * len(source_input_path_list),
target_pixel_size, 'intersection',
base_vector_path_list=[args['aoi_path']],
vector_mask_options={'mask_vector_path': args['aoi_path']})
_check_pft_fractional_cover_sum(aligned_inputs, pft_id_set)
file_suffix = utils.make_suffix_string(args, 'results_suffix')
# create animal trait spatial index raster from management polygon
aligned_inputs['animal_index'] = os.path.join(
aligned_raster_dir, 'animal_spatial_index.tif')
pygeoprocessing.new_raster_from_base(
aligned_inputs['site_index'], aligned_inputs['animal_index'],
gdal.GDT_Int32, [_TARGET_NODATA], fill_value_list=[_TARGET_NODATA])
pygeoprocessing.rasterize(
args['animal_grazing_areas_path'], aligned_inputs['animal_index'],
option_list=["ATTRIBUTE=animal_id"])
# create uniform animal density raster, if not supplied as input
if not args['animal_density']:
aligned_inputs['animal_density'] = os.path.join(
aligned_raster_dir, 'animal_density.tif')
_animal_density(aligned_inputs, args['animal_grazing_areas_path'])
# Initialization
sv_dir = os.path.join(args['workspace_dir'], 'state_variables_m-1')
os.makedirs(sv_dir)
initial_conditions_dir = None
try:
initial_conditions_dir = args['initial_conditions_dir']
except KeyError:
pass
if initial_conditions_dir:
# check that a raster for each required state variable is supplied
missing_initial_values = []
# set _SV_NODATA from initial rasters
state_var_nodata = set([])
# align initial state variables to resampled inputs
resample_initial_path_map = {}
for sv in _SITE_STATE_VARIABLE_FILES:
sv_path = os.path.join(
initial_conditions_dir, _SITE_STATE_VARIABLE_FILES[sv])
state_var_nodata.update(
set([pygeoprocessing.get_raster_info(sv_path)['nodata'][0]]))
resample_initial_path_map[sv] = sv_path
if not os.path.exists(sv_path):
missing_initial_values.append(sv_path)
for pft_i in pft_id_set:
for sv in _PFT_STATE_VARIABLES:
sv_key = '{}_{}_path'.format(sv, pft_i)
sv_path = os.path.join(
initial_conditions_dir, '{}_{}.tif'.format(sv, pft_i))
state_var_nodata.update(
set([pygeoprocessing.get_raster_info(sv_path)['nodata']
[0]]))
resample_initial_path_map[sv_key] = sv_path
if not os.path.exists(sv_path):
missing_initial_values.append(sv_path)
if missing_initial_values:
raise ValueError(
"Couldn't find the following required initial values: " +
"\n\t".join(missing_initial_values))
if len(state_var_nodata) > 1:
raise ValueError(
"Initial state variable rasters contain >1 nodata value")
global _SV_NODATA
_SV_NODATA = list(state_var_nodata)[0]
# align initial values with inputs
initial_path_list = (
[aligned_inputs['precip_0']] +
[resample_initial_path_map[key] for key in sorted(
resample_initial_path_map.keys())])
aligned_initial_path_list = (
[os.path.join(PROCESSING_DIR, 'aligned_input_template.tif')] +
[os.path.join(
sv_dir, os.path.basename(resample_initial_path_map[key])) for
key in sorted(resample_initial_path_map.keys())])
pygeoprocessing.align_and_resize_raster_stack(
initial_path_list, aligned_initial_path_list,
['near'] * len(initial_path_list),
target_pixel_size, 'intersection',
base_vector_path_list=[args['aoi_path']], raster_align_index=0,
vector_mask_options={'mask_vector_path': args['aoi_path']})
sv_reg = dict(
[(key, os.path.join(sv_dir, os.path.basename(path)))
for key, path in resample_initial_path_map.items()])
else:
# create initialization rasters from tables
try:
site_initial_conditions_table = utils.build_lookup_from_csv(
args['site_initial_table'], 'site')
except KeyError:
raise ValueError(
"If initial conditions rasters are not supplied, initial " +
"conditions tables must be supplied")
        missing_site_index_list = list(
            site_index_set.difference(site_initial_conditions_table.keys()))
        if missing_site_index_list:
            raise ValueError(
                "Couldn't find initial conditions values for the following "
                "site indices: " + ", ".join(
                    str(site_i) for site_i in missing_site_index_list))
try:
pft_initial_conditions_table = utils.build_lookup_from_csv(
args['pft_initial_table'], 'PFT')
except KeyError:
raise ValueError(
"If initial conditions rasters are not supplied, initial " +
"conditions tables must be supplied")
        missing_pft_index_list = pft_id_set.difference(
            pft_initial_conditions_table.keys())
        if missing_pft_index_list:
            raise ValueError(
                "Couldn't find initial condition values for the following "
                "plant functional types: " + ", ".join(
                    str(pft_i) for pft_i in missing_pft_index_list))
sv_reg = initial_conditions_from_tables(
aligned_inputs, sv_dir, pft_id_set, site_initial_conditions_table,
pft_initial_conditions_table)
# calculate persistent intermediate parameters that do not change during
# the simulation
persist_param_dir = os.path.join(
args['workspace_dir'], 'intermediate_parameters')
utils.make_directories([persist_param_dir])
pp_reg = utils.build_file_registry(
[(_PERSISTENT_PARAMS_FILES, persist_param_dir)], file_suffix)
# calculate derived animal traits that do not change during the simulation
freer_parameter_df = pandas.DataFrame.from_dict(
_FREER_PARAM_DICT, orient='index')
freer_parameter_df['type'] = freer_parameter_df.index
animal_trait_table = calc_derived_animal_traits(
input_animal_trait_table, freer_parameter_df)
# calculate maximum potential intake of each animal type
for animal_id in animal_trait_table.keys():
revised_animal_trait_dict = calc_max_intake(
animal_trait_table[animal_id])
animal_trait_table[animal_id] = revised_animal_trait_dict
# calculate field capacity and wilting point
LOGGER.info("Calculating field capacity and wilting point")
_afiel_awilt(
aligned_inputs['site_index'], site_param_table,
sv_reg['som1c_2_path'], sv_reg['som2c_2_path'], sv_reg['som3c_path'],
aligned_inputs['sand'], aligned_inputs['silt'],
aligned_inputs['clay'], aligned_inputs['bulk_d_path'], pp_reg)
# calculate other persistent parameters
LOGGER.info("Calculating persistent parameters")
_persistent_params(
aligned_inputs['site_index'], site_param_table,
aligned_inputs['sand'], aligned_inputs['clay'], pp_reg)
# calculate required ratios for decomposition of structural material
LOGGER.info("Calculating required ratios for structural decomposition")
_structural_ratios(
aligned_inputs['site_index'], site_param_table, sv_reg, pp_reg)
# make yearly directory for values that are updated every twelve months
year_dir = tempfile.mkdtemp(dir=PROCESSING_DIR)
year_reg = dict(
[(key, os.path.join(year_dir, path)) for key, path in
_YEARLY_FILES.items()])
for pft_i in pft_id_set:
for file in _YEARLY_PFT_FILES:
year_reg['{}_{}'.format(file, pft_i)] = os.path.join(
year_dir, '{}_{}.tif'.format(file, pft_i))
# make monthly directory for monthly intermediate parameters that are
# shared between submodels, but do not need to be saved as output
month_temp_dir = tempfile.mkdtemp(dir=PROCESSING_DIR)
month_reg = {}
for pft_i in pft_id_set:
for val in _PFT_INTERMEDIATE_VALUES:
month_reg['{}_{}'.format(
val, pft_i)] = os.path.join(
month_temp_dir, '{}_{}.tif'.format(val, pft_i))
for val in _SITE_INTERMEDIATE_VALUES:
month_reg[val] = os.path.join(month_temp_dir, '{}.tif'.format(val))
output_dir = os.path.join(args['workspace_dir'], "output")
if not os.path.exists(output_dir):
os.makedirs(output_dir)
# provisional state variable registry contains provisional biomass in
# absence of grazing
provisional_sv_dir = tempfile.mkdtemp(dir=PROCESSING_DIR)
provisional_sv_reg = utils.build_file_registry(
[(_SITE_STATE_VARIABLE_FILES, provisional_sv_dir),
(pft_sv_dict, provisional_sv_dir)], file_suffix)
intermediate_sv_dir = tempfile.mkdtemp(dir=PROCESSING_DIR)
# Main simulation loop
# for each step in the simulation
for month_index in range(n_months):
if (month_index % 12) == 0:
# Update yearly quantities
_yearly_tasks(
aligned_inputs, site_param_table, veg_trait_table, month_index,
pft_id_set, year_reg)
current_month = (starting_month + month_index - 1) % 12 + 1
current_year = starting_year + (starting_month + month_index - 1) // 12
# track state variables from previous step
prev_sv_reg = sv_reg
for animal_id in animal_trait_table.keys():
if animal_trait_table[animal_id]['sex'] == 'breeding_female':
revised_animal_trait_dict = update_breeding_female_status(
animal_trait_table[animal_id], month_index)
animal_trait_table[animal_id] = revised_animal_trait_dict
revised_animal_trait_dict = calc_max_intake(
animal_trait_table[animal_id])
animal_trait_table[animal_id] = revised_animal_trait_dict
# enforce absence of grazing as zero biomass removed
for pft_i in pft_id_set:
pygeoprocessing.new_raster_from_base(
aligned_inputs['pft_{}'.format(pft_i)],
month_reg['flgrem_{}'.format(pft_i)], gdal.GDT_Float32,
[_TARGET_NODATA], fill_value_list=[0])
pygeoprocessing.new_raster_from_base(
aligned_inputs['pft_{}'.format(pft_i)],
month_reg['fdgrem_{}'.format(pft_i)], gdal.GDT_Float32,
[_TARGET_NODATA], fill_value_list=[0])
# populate provisional_sv_reg with provisional biomass in absence of
# grazing
_potential_production(
aligned_inputs, site_param_table, current_month, month_index,
pft_id_set, veg_trait_table, prev_sv_reg, pp_reg, month_reg)
_root_shoot_ratio(
aligned_inputs, site_param_table, current_month, pft_id_set,
veg_trait_table, prev_sv_reg, year_reg, month_reg)
_soil_water(
aligned_inputs, site_param_table, veg_trait_table, current_month,
month_index, prev_sv_reg, pp_reg, pft_id_set, month_reg,
provisional_sv_reg)
_decomposition(
aligned_inputs, current_month, month_index, pft_id_set,
site_param_table, year_reg, month_reg, prev_sv_reg, pp_reg,
provisional_sv_reg)
_death_and_partition(
'stded', aligned_inputs, site_param_table, current_month,
year_reg, pft_id_set, veg_trait_table, prev_sv_reg,
provisional_sv_reg)
_death_and_partition(
'bgliv', aligned_inputs, site_param_table, current_month,
year_reg, pft_id_set, veg_trait_table, prev_sv_reg,
provisional_sv_reg)
_shoot_senescence(
pft_id_set, veg_trait_table, prev_sv_reg, month_reg, current_month,
provisional_sv_reg)
intermediate_sv_reg = copy_intermediate_sv(
pft_id_set, provisional_sv_reg, intermediate_sv_dir)
delta_agliv_dict = _new_growth(
pft_id_set, aligned_inputs, site_param_table, veg_trait_table,
month_reg, current_month, provisional_sv_reg)
_apply_new_growth(delta_agliv_dict, pft_id_set, provisional_sv_reg)
# estimate grazing offtake by animals relative to provisional biomass
# at an intermediate step, after senescence but before new growth
_calc_grazing_offtake(
aligned_inputs, args['aoi_path'], args['management_threshold'],
intermediate_sv_reg, pft_id_set, aligned_inputs['animal_index'],
animal_trait_table, veg_trait_table, current_month, month_reg)
# estimate actual biomass production for this step, integrating impacts
# of grazing
sv_dir = os.path.join(
args['workspace_dir'], 'state_variables_m%d' % month_index)
utils.make_directories([sv_dir])
sv_reg = utils.build_file_registry(
[(_SITE_STATE_VARIABLE_FILES, sv_dir),
(pft_sv_dict, sv_dir)], file_suffix)
_potential_production(
aligned_inputs, site_param_table, current_month, month_index,
pft_id_set, veg_trait_table, prev_sv_reg, pp_reg, month_reg)
_root_shoot_ratio(
aligned_inputs, site_param_table, current_month, pft_id_set,
veg_trait_table, prev_sv_reg, year_reg, month_reg)
_soil_water(
aligned_inputs, site_param_table, veg_trait_table, current_month,
month_index, prev_sv_reg, pp_reg, pft_id_set, month_reg, sv_reg)
_decomposition(
aligned_inputs, current_month, month_index, pft_id_set,
site_param_table, year_reg, month_reg, prev_sv_reg, pp_reg, sv_reg)
_death_and_partition(
'stded', aligned_inputs, site_param_table, current_month,
year_reg, pft_id_set, veg_trait_table, prev_sv_reg, sv_reg)
_death_and_partition(
'bgliv', aligned_inputs, site_param_table, current_month,
year_reg, pft_id_set, veg_trait_table, prev_sv_reg, sv_reg)
_shoot_senescence(
pft_id_set, veg_trait_table, prev_sv_reg, month_reg, current_month,
sv_reg)
delta_agliv_dict = _new_growth(
pft_id_set, aligned_inputs, site_param_table, veg_trait_table,
month_reg, current_month, sv_reg)
_animal_diet_sufficiency(
sv_reg, pft_id_set, aligned_inputs, animal_trait_table,
veg_trait_table, current_month, month_reg)
_grazing(
aligned_inputs, site_param_table, month_reg, animal_trait_table,
pft_id_set, sv_reg)
_apply_new_growth(delta_agliv_dict, pft_id_set, sv_reg)
_leach(aligned_inputs, site_param_table, month_reg, sv_reg)
_write_monthly_outputs(
aligned_inputs, provisional_sv_reg, sv_reg, month_reg, pft_id_set,
current_year, current_month, output_dir, file_suffix)
# summary results
summary_output_dir = os.path.join(output_dir, 'summary_results')
os.makedirs(summary_output_dir)
summary_shp_path = os.path.join(
summary_output_dir,
'grazing_areas_results_rpm{}.shp'.format(file_suffix))
create_vector_copy(
args['animal_grazing_areas_path'], summary_shp_path)
field_pickle_map, field_header_order_list = aggregate_and_pickle_results(
output_dir, summary_shp_path)
_add_fields_to_shapefile(
field_pickle_map, field_header_order_list, summary_shp_path)
# clean up
shutil.rmtree(persist_param_dir)
shutil.rmtree(PROCESSING_DIR)
if delete_sv_folders:
for month_index in range(-1, n_months):
shutil.rmtree(
os.path.join(
args['workspace_dir'],
'state_variables_m%d' % month_index))
def raster_multiplication(
raster1, raster1_nodata, raster2, raster2_nodata, target_path,
target_path_nodata):
"""Multiply raster1 by raster2.
    Multiply raster1 by raster2 element-wise. In any pixel where raster1 or
    raster2 is nodata, the result is nodata. The result is always of float
    datatype.
    Parameters:
        raster1 (string): path to one raster operand
        raster1_nodata (float or int): nodata value in raster1
        raster2 (string): path to second raster operand
        raster2_nodata (float or int): nodata value in raster2
        target_path (string): path to location to store the product
        target_path_nodata (float or int): nodata value for the result raster
    Side effects:
        modifies or creates the raster indicated by `target_path`
    Returns:
        None
"""
def raster_multiply_op(raster1, raster2):
"""Multiply two rasters."""
valid_mask = (
(~numpy.isclose(raster1, raster1_nodata)) &
(~numpy.isclose(raster2, raster2_nodata)))
result = numpy.empty(raster1.shape, dtype=numpy.float32)
result[:] = target_path_nodata
result[valid_mask] = raster1[valid_mask] * raster2[valid_mask]
return result
pygeoprocessing.raster_calculator(
[(path, 1) for path in [raster1, raster2]],
raster_multiply_op, target_path, gdal.GDT_Float32,
target_path_nodata)
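# Example usage (illustrative; paths and the `pft_nodata` value are
# hypothetical): weight a state variable raster by fractional cover of a
# plant functional type:
#   raster_multiplication(
#       'aglivc_1.tif', _SV_NODATA, 'aligned_pft_1.tif', pft_nodata,
#       'aglivc_weighted_1.tif', _TARGET_NODATA)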
def raster_division(
raster1, raster1_nodata, raster2, raster2_nodata, target_path,
target_path_nodata):
"""Divide raster1 by raster2.
    Divide raster1 by raster2 element-wise. In any pixel where raster1 or
    raster2 is nodata, the result is nodata. The result is always of float
    datatype; division by zero yields nodata (or zero where both are zero).
    Parameters:
        raster1 (string): path to raster containing the dividend
        raster1_nodata (float or int): nodata value in raster1
        raster2 (string): path to raster containing the divisor
        raster2_nodata (float or int): nodata value in raster2
        target_path (string): path to location to store the quotient
        target_path_nodata (float or int): nodata value for the result raster
    Side effects:
        modifies or creates the raster indicated by `target_path`
    Returns:
        None
"""
def raster_divide_op(raster1, raster2):
"""Divide raster1 by raster2."""
valid_mask = (
(~numpy.isclose(raster1, raster1_nodata)) &
(~numpy.isclose(raster2, raster2_nodata)))
raster1 = raster1.astype(numpy.float32)
raster2 = raster2.astype(numpy.float32)
result = numpy.empty(raster1.shape, dtype=numpy.float32)
result[:] = target_path_nodata
error_mask = ((raster1 != 0) & (raster2 == 0.) & valid_mask)
zero_mask = ((raster1 == 0.) & (raster2 == 0.) & valid_mask)
nonzero_mask = ((raster2 != 0.) & valid_mask)
result[error_mask] = target_path_nodata
result[zero_mask] = 0.
result[nonzero_mask] = raster1[nonzero_mask] / raster2[nonzero_mask]
return result
pygeoprocessing.raster_calculator(
[(path, 1) for path in [raster1, raster2]],
raster_divide_op, target_path, gdal.GDT_Float32,
target_path_nodata)
def raster_list_sum(
raster_list, input_nodata, target_path, target_nodata,
nodata_remove=False):
"""Calculate the sum per pixel across rasters in a list.
Sum the rasters in `raster_list` element-wise, allowing nodata values
in the rasters to propagate to the result or treating nodata as zero. If
nodata is treated as zero, areas where all inputs are nodata will be nodata
in the output.
Parameters:
raster_list (list): list of paths to rasters to sum
input_nodata (float or int): nodata value in the input rasters
target_path (string): path to location to store the result
target_nodata (float or int): nodata value for the result raster
nodata_remove (bool): if true, treat nodata values in input
rasters as zero. If false, the sum in a pixel where any input
raster is nodata is nodata.
Side effects:
modifies or creates the raster indicated by `target_path`
Returns:
None
"""
def raster_sum_op(*raster_list):
"""Add the rasters in raster_list without removing nodata values."""
invalid_mask = numpy.any(
numpy.isclose(numpy.array(raster_list), input_nodata), axis=0)
for r in raster_list:
numpy.place(r, numpy.isclose(r, input_nodata), [0])
sum_of_rasters = numpy.sum(raster_list, axis=0)
sum_of_rasters[invalid_mask] = target_nodata
return sum_of_rasters
def raster_sum_op_nodata_remove(*raster_list):
"""Add the rasters in raster_list, treating nodata as zero."""
invalid_mask = numpy.all(
numpy.isclose(numpy.array(raster_list), input_nodata), axis=0)
for r in raster_list:
numpy.place(r, numpy.isclose(r, input_nodata), [0])
sum_of_rasters = numpy.sum(raster_list, axis=0)
sum_of_rasters[invalid_mask] = target_nodata
return sum_of_rasters
if nodata_remove:
pygeoprocessing.raster_calculator(
[(path, 1) for path in raster_list], raster_sum_op_nodata_remove,
target_path, gdal.GDT_Float32, target_nodata)
else:
pygeoprocessing.raster_calculator(
[(path, 1) for path in raster_list], raster_sum_op,
target_path, gdal.GDT_Float32, target_nodata)
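# Example usage (illustrative): sum a list of per-PFT weighted rasters,
# treating nodata as zero so that pixels covered by only some PFTs still
# receive a value:
#   raster_list_sum(
#       weighted_path_list, _TARGET_NODATA, weighted_sum_path, _TARGET_NODATA,
#       nodata_remove=True)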
def raster_sum(
raster1, raster1_nodata, raster2, raster2_nodata, target_path,
target_nodata, nodata_remove=False):
"""Add raster 1 and raster2.
Add raster1 and raster2, allowing nodata values in the rasters to
propagate to the result or treating nodata as zero.
Parameters:
raster1 (string): path to one raster operand
raster1_nodata (float or int): nodata value in raster1
raster2 (string): path to second raster operand
raster2_nodata (float or int): nodata value in raster2
target_path (string): path to location to store the sum
target_nodata (float or int): nodata value for the result raster
nodata_remove (bool): if true, treat nodata values in input
rasters as zero. If false, the sum in a pixel where any
input raster is nodata is nodata.
Side effects:
modifies or creates the raster indicated by `target_path`
Returns:
None
"""
def raster_sum_op(raster1, raster2):
"""Add raster1 and raster2 without removing nodata values."""
valid_mask = (
(~numpy.isclose(raster1, raster1_nodata)) &
(~numpy.isclose(raster2, raster2_nodata)))
result = numpy.empty(raster1.shape, dtype=numpy.float32)
result[:] = target_nodata
result[valid_mask] = raster1[valid_mask] + raster2[valid_mask]
return result
def raster_sum_op_nodata_remove(raster1, raster2):
"""Add raster1 and raster2, treating nodata as zero."""
numpy.place(raster1, numpy.isclose(raster1, raster1_nodata), [0])
numpy.place(raster2, numpy.isclose(raster2, raster2_nodata), [0])
result = raster1 + raster2
return result
if nodata_remove:
pygeoprocessing.raster_calculator(
[(path, 1) for path in [raster1, raster2]],
raster_sum_op_nodata_remove, target_path, gdal.GDT_Float32,
target_nodata)
else:
pygeoprocessing.raster_calculator(
[(path, 1) for path in [raster1, raster2]],
raster_sum_op, target_path, gdal.GDT_Float32,
target_nodata)
def raster_difference(
raster1, raster1_nodata, raster2, raster2_nodata, target_path,
target_nodata, nodata_remove=False):
"""Subtract raster2 from raster1.
Subtract raster2 from raster1 element-wise, allowing nodata values in the
rasters to propagate to the result or treating nodata as zero.
Parameters:
raster1 (string): path to raster from which to subtract raster2
raster1_nodata (float or int): nodata value in raster1
raster2 (string): path to raster which should be subtracted from
raster1
raster2_nodata (float or int): nodata value in raster2
target_path (string): path to location to store the difference
target_nodata (float or int): nodata value for the result raster
nodata_remove (bool): if true, treat nodata values in input
rasters as zero. If false, the difference in a pixel where any
input raster is nodata is nodata.
Side effects:
modifies or creates the raster indicated by `target_path`
Returns:
None
"""
def raster_difference_op(raster1, raster2):
"""Subtract raster2 from raster1 without removing nodata values."""
valid_mask = (
(~numpy.isclose(raster1, raster1_nodata)) &
(~numpy.isclose(raster2, raster2_nodata)))
result = numpy.empty(raster1.shape, dtype=numpy.float32)
result[:] = target_nodata
result[valid_mask] = raster1[valid_mask] - raster2[valid_mask]
return result
def raster_difference_op_nodata_remove(raster1, raster2):
"""Subtract raster2 from raster1, treating nodata as zero."""
numpy.place(raster1, numpy.isclose(raster1, raster1_nodata), [0])
numpy.place(raster2, numpy.isclose(raster2, raster2_nodata), [0])
result = raster1 - raster2
return result
if nodata_remove:
pygeoprocessing.raster_calculator(
[(path, 1) for path in [raster1, raster2]],
raster_difference_op_nodata_remove, target_path, gdal.GDT_Float32,
target_nodata)
else:
pygeoprocessing.raster_calculator(
[(path, 1) for path in [raster1, raster2]],
raster_difference_op, target_path, gdal.GDT_Float32,
target_nodata)
def reclassify_nodata(target_path, new_nodata_value):
"""Reclassify the nodata value of a raster to a new value.
Convert all areas of nodata in the target raster to the new nodata
value, which must be an integer.
Parameters:
target_path (string): path to target raster
new_nodata_value (integer): new value to set as nodata
Side effects:
modifies the raster indicated by `target_path`
Returns:
None
"""
def reclassify_op(target_raster):
reclassified_raster = numpy.copy(target_raster)
reclassify_mask = (target_raster == previous_nodata_value)
reclassified_raster[reclassify_mask] = new_nodata_value
return reclassified_raster
fd, temp_path = tempfile.mkstemp(dir=PROCESSING_DIR)
shutil.copyfile(target_path, temp_path)
previous_nodata_value = pygeoprocessing.get_raster_info(
target_path)['nodata'][0]
pygeoprocessing.raster_calculator(
[(temp_path, 1)], reclassify_op, target_path, gdal.GDT_Float32,
new_nodata_value)
# clean up
os.close(fd)
os.remove(temp_path)
def weighted_state_variable_sum(
sv, sv_reg, aligned_inputs, pft_id_set, weighted_sum_path):
"""Calculate weighted sum of state variable across plant functional types.
To sum a state variable across PFTs within a grid cell, the state variable
must be weighted by the fractional cover of each PFT inside the grid cell.
First multiply the state variable by its fractional cover, and then add up
the weighted products.
Parameters:
sv (string): state variable to be summed across plant functional types
sv_reg (dict): map of key, path pairs giving paths to state variables,
including sv, the state variable to be summed
aligned_inputs (dict): map of key, path pairs indicating paths
to aligned model inputs, including fractional cover of each plant
functional type
pft_id_set (set): set of integers identifying plant functional types
weighted_sum_path (string): path to raster that should contain the
weighted sum across PFTs
Side effects:
modifies or creates the raster indicated by `weighted_sum_path`
Returns:
None
"""
temp_dir = tempfile.mkdtemp(dir=PROCESSING_DIR)
temp_val_dict = {}
for pft_i in pft_id_set:
val = '{}_weighted'.format(sv)
temp_val_dict['{}_{}'.format(val, pft_i)] = os.path.join(
temp_dir, '{}_{}.tif'.format(val, pft_i))
weighted_path_list = []
for pft_i in pft_id_set:
target_path = temp_val_dict['{}_weighted_{}'.format(sv, pft_i)]
pft_nodata = pygeoprocessing.get_raster_info(
aligned_inputs['pft_{}'.format(pft_i)])['nodata'][0]
raster_multiplication(
sv_reg['{}_{}_path'.format(sv, pft_i)], _SV_NODATA,
aligned_inputs['pft_{}'.format(pft_i)], pft_nodata,
target_path, _TARGET_NODATA)
weighted_path_list.append(target_path)
raster_list_sum(
weighted_path_list, _TARGET_NODATA, weighted_sum_path, _TARGET_NODATA,
nodata_remove=True)
# clean up temporary files
shutil.rmtree(temp_dir)
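# Example usage (illustrative, hypothetical target path): sum aboveground live
# carbon across plant functional types, weighted by fractional cover:
#   weighted_state_variable_sum(
#       'aglivc', sv_reg, aligned_inputs, pft_id_set, 'sum_aglivc.tif')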
def _check_pft_fractional_cover_sum(aligned_inputs, pft_id_set):
"""Check the sum of fractional cover across plant functional types.
Parameters:
aligned_inputs (dict): map of key, path pairs indicating paths
to aligned model inputs, including fractional cover of each plant
functional type
pft_id_set (set): set of integers identifying plant functional types
Raises:
ValueError if the pixel-wise sum of fractional cover values across
plant functional types exceeds 1
Returns:
None
"""
with tempfile.NamedTemporaryFile(
prefix='cover_sum', dir=PROCESSING_DIR) as cover_sum_temp_file:
cover_sum_path = cover_sum_temp_file.name
with tempfile.NamedTemporaryFile(
prefix='operand_temp', dir=PROCESSING_DIR) as operand_temp_file:
operand_temp_path = operand_temp_file.name
# initialize sum to zero
pygeoprocessing.new_raster_from_base(
aligned_inputs['site_index'], cover_sum_path, gdal.GDT_Float32,
[_TARGET_NODATA], fill_value_list=[0])
for pft_i in pft_id_set:
shutil.copyfile(cover_sum_path, operand_temp_path)
pft_nodata = pygeoprocessing.get_raster_info(
aligned_inputs['pft_{}'.format(pft_i)])['nodata'][0]
raster_sum(
aligned_inputs['pft_{}'.format(pft_i)], pft_nodata,
operand_temp_path, _TARGET_NODATA,
cover_sum_path, _TARGET_NODATA)
# get maximum sum of fractional cover
max_cover = 0.
for offset_map, raster_block in pygeoprocessing.iterblocks(
(cover_sum_path, 1)):
valid_mask = (raster_block != _TARGET_NODATA)
if raster_block[valid_mask].size > 0:
max_cover = max(max_cover, numpy.amax(raster_block[valid_mask]))
if max_cover > 1:
raise ValueError(
"Fractional cover across plant functional types exceeds 1")
# clean up
os.remove(cover_sum_path)
def initial_conditions_from_tables(
aligned_inputs, sv_dir, pft_id_set, site_initial_conditions_table,
pft_initial_conditions_table):
"""Generate initial state variable registry from initial conditions tables.
Parameters:
aligned_inputs (dict): map of key, path pairs indicating paths
to aligned model inputs, including site spatial index raster and
fractional cover of each plant functional type
sv_dir (string): path to directory where initial state variable rasters
should be stored
pft_id_set (set): set of integers identifying plant functional types
site_initial_conditions_table (dict): map of site spatial index to
dictionaries that contain initial values for site-level state
variables
pft_initial_conditions_table (dict): map of plant functional type index
to dictionaries that contain initial values for plant functional
type-level state variables
Returns:
initial_sv_reg, map of key, path pairs giving paths to initial state
variable rasters
"""
def full_masked(pft_cover, fill_val):
"""Create a constant raster masked by pft fractional cover.
Parameters:
pft_cover (numpy.ndarray): input, fractional cover of the plant
functional type
fill_val (float): constant value with which to fill raster in areas
where fractional cover > 0
Returns:
full_masked, a raster containing `fill_val` in areas where
`pft_cover` > 0
"""
valid_mask = (
(~numpy.isclose(pft_cover, _SV_NODATA)) &
(pft_cover > 0))
full_masked = numpy.empty(pft_cover.shape, dtype=numpy.float32)
full_masked[:] = _SV_NODATA
full_masked[valid_mask] = fill_val
return full_masked
initial_sv_reg = {}
# site-level state variables
# check for missing state variable values
required_site_state_var = set(
[sv_key[:-5] for sv_key in _SITE_STATE_VARIABLE_FILES.keys()])
for site_code in site_initial_conditions_table.keys():
missing_site_state_var = required_site_state_var.difference(
site_initial_conditions_table[site_code].keys())
if missing_site_state_var:
raise ValueError(
"The following state variables were not found in the site " +
"initial conditions table: \n\t" + "\n\t".join(
missing_site_state_var))
for sv_key, basename in _SITE_STATE_VARIABLE_FILES.items():
state_var = sv_key[:-5]
site_to_val = dict(
[(site_code, float(table[state_var])) for (
site_code, table) in
site_initial_conditions_table.items()])
target_path = os.path.join(sv_dir, basename)
initial_sv_reg[sv_key] = target_path
pygeoprocessing.reclassify_raster(
(aligned_inputs['site_index'], 1), site_to_val, target_path,
gdal.GDT_Float32, _SV_NODATA)
# PFT-level state variables
for pft_i in pft_id_set:
# check for missing values
missing_pft_state_var = set(_PFT_STATE_VARIABLES).difference(
pft_initial_conditions_table[pft_i].keys())
if missing_pft_state_var:
raise ValueError(
"The following state variables were not found in the plant " +
"functional type initial conditions table: \n\t" + "\n\t".join(
missing_pft_state_var))
for state_var in _PFT_STATE_VARIABLES:
fill_val = pft_initial_conditions_table[pft_i][state_var]
pft_cover_path = aligned_inputs['pft_{}'.format(pft_i)]
target_path = os.path.join(
sv_dir, '{}_{}.tif'.format(state_var, pft_i))
sv_key = '{}_{}_path'.format(state_var, pft_i)
initial_sv_reg[sv_key] = target_path
pygeoprocessing.raster_calculator(
[(pft_cover_path, 1), (fill_val, 'raw')],
full_masked, target_path, gdal.GDT_Float32, _SV_NODATA)
return initial_sv_reg
def _calc_ompc(
som1c_2_path, som2c_2_path, som3c_path, bulkd_path, edepth_path,
ompc_path):
"""Estimate total soil organic matter.
Total soil organic matter is the sum of soil carbon across
slow, active, and passive compartments, weighted by bulk
density and total modeled soil depth. Lines 220-222, Prelim.f
Parameters:
som1c_2_path (string): path to active organic soil carbon raster
som2c_2_path (string): path to slow organic soil carbon raster
som3c_path (string): path to passive organic soil carbon raster
bulkd_path (string): path to bulk density of soil raster
        edepth_path (string): path to depth of soil raster
ompc_path (string): path to result, total soil organic matter
Side effects:
modifies or creates the raster indicated by `ompc_path`
Returns:
None
"""
def ompc_op(som1c_2, som2c_2, som3c, bulkd, edepth):
"""Estimate total soil organic matter.
Total soil organic matter is the sum of soil carbon across
slow, active, and passive compartments, weighted by bulk
density and total modeled soil depth. Lines 220-222, Prelim.f
Parameters:
            som1c_2 (numpy.ndarray): state variable, active organic soil
                carbon
            som2c_2 (numpy.ndarray): state variable, slow organic soil carbon
            som3c (numpy.ndarray): state variable, passive organic soil carbon
            bulkd (numpy.ndarray): input, bulk density of soil
            edepth (numpy.ndarray): parameter, depth of soil for this
                calculation
Returns:
ompc, total soil organic matter weighted by bulk
density.
"""
ompc = numpy.empty(som1c_2.shape, dtype=numpy.float32)
ompc[:] = _TARGET_NODATA
valid_mask = (
(~numpy.isclose(som1c_2, _SV_NODATA)) &
(~numpy.isclose(som2c_2, _SV_NODATA)) &
(~numpy.isclose(som3c, _SV_NODATA)) &
(~numpy.isclose(bulkd, bulkd_nodata)) &
(edepth != _IC_NODATA))
ompc[valid_mask] = (
(som1c_2[valid_mask] + som2c_2[valid_mask] +
som3c[valid_mask]) * 1.724 /
(10000. * bulkd[valid_mask] * edepth[valid_mask]))
return ompc
bulkd_nodata = pygeoprocessing.get_raster_info(bulkd_path)['nodata'][0]
pygeoprocessing.raster_calculator(
[(path, 1) for path in [
som1c_2_path, som2c_2_path, som3c_path,
bulkd_path, edepth_path]],
ompc_op, ompc_path, gdal.GDT_Float32, _TARGET_NODATA)
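# Illustrative check of ompc_op (hypothetical values): with
# som1c_2 + som2c_2 + som3c = 1000, bulkd = 1.5 and edepth = 0.2,
# ompc = 1000 * 1.724 / (10000. * 1.5 * 0.2) ~= 0.57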
def _calc_afiel(
sand_path, silt_path, clay_path, ompc_path, bulkd_path, afiel_path):
"""Calculate field capacity for one soil layer.
Parameters:
sand_path (string): path to proportion sand in soil raster
silt_path (string): path to proportion silt in soil raster
clay_path (string): path to proportion clay in soil raster
ompc_path (string): path to estimated total soil organic matter raster
bulkd_path (string): path to bulk density of soil raster
afiel_path (string): path to result raster, field capacity for this
soil layer
Side effects:
creates the raster indicated by `afiel_path`
Returns:
None
"""
def afiel_op(sand, silt, clay, ompc, bulkd):
"""Calculate field capacity for one soil layer.
Field capacity, maximum soil moisture retention capacity,
        from Gupta and Larson 1979, 'Estimating soil and water
retention characteristics from particle size distribution,
organic matter percent and bulk density'. Water Resources
Research 15:1633.
Parameters:
            sand (numpy.ndarray): input, proportion sand in soil
            silt (numpy.ndarray): input, proportion silt in soil
            clay (numpy.ndarray): input, proportion clay in soil
            ompc (numpy.ndarray): derived, estimated total soil organic matter
            bulkd (numpy.ndarray): input, bulk density of soil
Returns:
afiel, field capacity for this soil layer
"""
afiel = numpy.empty(sand.shape, dtype=numpy.float32)
afiel[:] = _TARGET_NODATA
valid_mask = (
(~numpy.isclose(sand, sand_nodata)) &
(~numpy.isclose(silt, silt_nodata)) &
(~numpy.isclose(clay, clay_nodata)) &
(ompc != _TARGET_NODATA) &
(~numpy.isclose(bulkd, bulkd_nodata)))
afiel[valid_mask] = (
0.3075 * sand[valid_mask] + 0.5886 * silt[valid_mask] +
0.8039 * clay[valid_mask] + 2.208E-03 * ompc[valid_mask] +
-0.1434 * bulkd[valid_mask])
return afiel
sand_nodata = pygeoprocessing.get_raster_info(sand_path)['nodata'][0]
silt_nodata = pygeoprocessing.get_raster_info(silt_path)['nodata'][0]
clay_nodata = pygeoprocessing.get_raster_info(clay_path)['nodata'][0]
bulkd_nodata = pygeoprocessing.get_raster_info(bulkd_path)['nodata'][0]
pygeoprocessing.raster_calculator(
[(path, 1) for path in [
sand_path, silt_path, clay_path, ompc_path, bulkd_path]],
afiel_op, afiel_path, gdal.GDT_Float32, _TARGET_NODATA)
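# Illustrative check of afiel_op (hypothetical values): with sand = 0.4,
# silt = 0.4, clay = 0.2, ompc = 1.0 and bulkd = 1.4,
# afiel = 0.3075*0.4 + 0.5886*0.4 + 0.8039*0.2 + 2.208E-03*1.0 - 0.1434*1.4
#       ~= 0.32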
def _calc_awilt(
sand_path, silt_path, clay_path, ompc_path, bulkd_path, awilt_path):
"""Calculate wilting point for one soil layer.
Wilting point, minimum soil water required by plants before
wilting, from Gupta and Larson 1979, 'Estimating soil and
water retention characteristics from particle size distribution,
organic matter percent and bulk density'. Water Resources
Research 15:1633.
Parameters:
sand_path (string): path to proportion sand in soil raster
silt_path (string): path to proportion silt in soil raster
clay_path (string): path to proportion clay in soil raster
ompc_path (string): path to estimated total soil organic matter raster
bulkd_path (string): path to bulk density of soil raster
awilt_path (string): path to result raster, wilting point for this
soil layer
Side effects:
creates the raster indicated by `awilt_path`
Returns:
None
"""
def awilt_op(sand, silt, clay, ompc, bulkd):
"""Calculate wilting point for one soil layer.
Wilting point, minimum soil water required by plants before
wilting, from Gupta and Larson 1979, 'Estimating soil and
water retention characteristics from particle size distribution,
organic matter percent and bulk density'. Water Resources
Research 15:1633.
Parameters:
            sand (numpy.ndarray): input, proportion sand in soil
            silt (numpy.ndarray): input, proportion silt in soil
            clay (numpy.ndarray): input, proportion clay in soil
            ompc (numpy.ndarray): derived, estimated total soil organic matter
            bulkd (numpy.ndarray): input, bulk density of soil
Returns:
awilt, wilting point for this soil layer
"""
awilt = numpy.empty(sand.shape, dtype=numpy.float32)
awilt[:] = _TARGET_NODATA
valid_mask = (
(~numpy.isclose(sand, sand_nodata)) &
(~numpy.isclose(silt, silt_nodata)) &
(~numpy.isclose(clay, clay_nodata)) &
(ompc != _TARGET_NODATA) &
(~numpy.isclose(bulkd, bulkd_nodata)))
awilt[valid_mask] = (
-0.0059 * sand[valid_mask] + 0.1142 * silt[valid_mask] +
0.5766 * clay[valid_mask] + 2.228E-03 * ompc[valid_mask] +
0.02671 * bulkd[valid_mask])
return awilt
sand_nodata = pygeoprocessing.get_raster_info(sand_path)['nodata'][0]
silt_nodata = pygeoprocessing.get_raster_info(silt_path)['nodata'][0]
clay_nodata = pygeoprocessing.get_raster_info(clay_path)['nodata'][0]
bulkd_nodata = pygeoprocessing.get_raster_info(bulkd_path)['nodata'][0]
pygeoprocessing.raster_calculator(
[(path, 1) for path in [
sand_path, silt_path, clay_path, ompc_path, bulkd_path]],
awilt_op, awilt_path, gdal.GDT_Float32, _TARGET_NODATA)
def _afiel_awilt(
site_index_path, site_param_table, som1c_2_path, som2c_2_path,
som3c_path, sand_path, silt_path, clay_path, bulk_d_path, pp_reg):
"""Calculate field capacity and wilting point for each soil layer.
Computations based on Gupta and Larson 1979, 'Estimating soil and water
retention characteristics from particle size distribution, organic
matter percent and bulk density'. Water Resources Research 15:1633.
Field capacity is calculated for -0.33 bar; wilting point is
calculated for water content at -15 bars.
Parameters:
site_index_path (string): path to site spatial index raster
site_param_table (dict): map of site spatial index to dictionaries
that contain site-level parameters including 'edepth' field
som1c_2_path (string): path to the state variable 'som1c_2',
active organic soil carbon
som2c_2_path (string): path to the state variable 'som2c_2',
slow organic soil carbon
som3c_path (string): path to the state variable 'som3c',
passive organic soil carbon
sand_path (string): path to raster containing proportion sand in soil
silt_path (string): path to raster containing proportion silt in soil
clay_path (string): path to raster containing proportion clay in soil
bulk_d_path (string): path to raster containing bulk density of soil
pp_reg (dict): map of key, path pairs giving paths to persistent
intermediate parameters that do not change over the course of
the simulation
Modifies the rasters pp_reg['afiel_<layer>'] and pp_reg['awilt_<layer>']
for all soil layers.
Returns:
None
"""
def decrement_ompc(ompc_orig_path, ompc_dec_path):
"""Decrease estimated organic matter to 85% of its value.
In each subsequent soil layer, estimated organic matter is decreased
by 15%, to 85% of its previous value.
Parameters:
ompc_orig_path (string): path to estimated soil organic matter
raster
ompc_dec_path (string): path to result raster, estimated soil
organic matter decreased to 85% of its previous value
Side effects:
modifies or creates the raster indicated by `ompc_dec_path`
Returns:
None
"""
def decrement_op(ompc_orig):
"""Reduce organic matter to 85% of its previous value."""
ompc_dec = numpy.empty(ompc_orig.shape, dtype=numpy.float32)
ompc_dec[:] = _TARGET_NODATA
valid_mask = (ompc_orig != _TARGET_NODATA)
ompc_dec[valid_mask] = ompc_orig[valid_mask] * 0.85
return ompc_dec
pygeoprocessing.raster_calculator(
[(ompc_orig_path, 1)], decrement_op, ompc_dec_path,
gdal.GDT_Float32, _TARGET_NODATA)
# temporary intermediate rasters for calculating field capacity and
# wilting point
temp_dir = tempfile.mkdtemp(dir=PROCESSING_DIR)
edepth_path = os.path.join(temp_dir, 'edepth.tif')
ompc_path = os.path.join(temp_dir, 'ompc.tif')
site_to_edepth = dict(
[(site_code, float(table['edepth'])) for
(site_code, table) in site_param_table.items()])
pygeoprocessing.reclassify_raster(
(site_index_path, 1), site_to_edepth, edepth_path, gdal.GDT_Float32,
_IC_NODATA)
# estimate total soil organic matter
_calc_ompc(
som1c_2_path, som2c_2_path, som3c_path, bulk_d_path, edepth_path,
ompc_path)
# calculate field capacity and wilting point for each soil layer,
# decreasing organic matter content by 85% with each layer
for lyr in range(1, 10):
afiel_path = pp_reg['afiel_{}_path'.format(lyr)]
awilt_path = pp_reg['awilt_{}_path'.format(lyr)]
_calc_afiel(
sand_path, silt_path, clay_path, ompc_path, bulk_d_path,
afiel_path)
_calc_awilt(
sand_path, silt_path, clay_path, ompc_path, bulk_d_path,
awilt_path)
ompc_dec_path = os.path.join(temp_dir, 'ompc{}.tif'.format(lyr))
decrement_ompc(ompc_path, ompc_dec_path)
ompc_path = ompc_dec_path
# clean up temporary files
shutil.rmtree(temp_dir)
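# Illustrative sketch (not called by the model): because ompc is decremented
# to 85% of its value after each layer, the loop above evaluates layer `lyr`
# with organic matter ompc * 0.85**(lyr - 1), e.g.
#   >>> [round(0.85 ** (lyr - 1), 2) for lyr in (1, 2, 9)]
#   [1.0, 0.85, 0.27]
# so layer 9 sees roughly 27% of the surface organic matter estimate.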
def _persistent_params(
site_index_path, site_param_table, sand_path, clay_path, pp_reg):
"""Calculate persistent parameters.
The calculated values do not change over the course of the simulation.
Parameters:
site_index_path (string): path to site spatial index raster
site_param_table (dict): map of site spatial index to dictionaries
that contain site-level parameters
sand_path (string): path to raster containing proportion sand in soil
clay_path (string): path to raster containing proportion clay in soil
pp_reg (dict): map of key, path pairs giving paths to persistent
intermediate parameters that do not change over the course of
the simulation.
Modifies the persistent parameter rasters indexed by the following
keys:
pp_reg['wc_path']
pp_reg['eftext_path']
pp_reg['p1co2_2_path']
pp_reg['fps1s3_path']
pp_reg['fps2s3_path']
pp_reg['orglch_path']
pp_reg['vlossg_path']
Returns:
None
"""
sand_nodata = pygeoprocessing.get_raster_info(sand_path)['nodata'][0]
clay_nodata = pygeoprocessing.get_raster_info(clay_path)['nodata'][0]
# temporary intermediate rasters for persistent parameters calculation
temp_dir = tempfile.mkdtemp(dir=PROCESSING_DIR)
param_val_dict = {}
    for val in [
'peftxa', 'peftxb', 'p1co2a_2', 'p1co2b_2', 'ps1s3_1',
'ps1s3_2', 'ps2s3_1', 'ps2s3_2', 'omlech_1', 'omlech_2', 'vlossg']:
target_path = os.path.join(temp_dir, '{}.tif'.format(val))
param_val_dict[val] = target_path
site_to_val = dict(
[(site_code, float(table[val])) for (
site_code, table) in site_param_table.items()])
pygeoprocessing.reclassify_raster(
(site_index_path, 1), site_to_val, target_path, gdal.GDT_Float32,
_IC_NODATA)
def calc_wc(afiel_1, awilt_1):
"""Calculate water content of soil layer 1."""
return afiel_1 - awilt_1
pygeoprocessing.raster_calculator(
[(path, 1) for path in [
pp_reg['afiel_1_path'], pp_reg['awilt_1_path']]],
calc_wc, pp_reg['wc_path'], gdal.GDT_Float32, _TARGET_NODATA)
def calc_eftext(peftxa, peftxb, sand):
"""Calculate effect of soil texture on microbial decomposition.
Use an empirical regression to estimate the effect of soil
sand content on the microbe decomposition rate. Line 359 Prelim.f
Parameters:
peftxa (numpy.ndarray): parameter, regression intercept
peftxb (numpy.ndarray): parameter, regression slope
sand (numpy.ndarray): input, proportion sand in soil
Returns:
eftext, coefficient that modifies microbe decomposition rate.
"""
eftext = numpy.empty(sand.shape, dtype=numpy.float32)
eftext[:] = _IC_NODATA
valid_mask = (
(peftxa != _IC_NODATA) &
(peftxb != _IC_NODATA) &
(~numpy.isclose(sand, sand_nodata)))
eftext[valid_mask] = (
peftxa[valid_mask] + (peftxb[valid_mask] * sand[valid_mask]))
return eftext
pygeoprocessing.raster_calculator(
[(path, 1) for path in [
param_val_dict['peftxa'], param_val_dict['peftxb'], sand_path]],
calc_eftext, pp_reg['eftext_path'], gdal.GDT_Float32, _IC_NODATA)
def calc_p1co2_2(p1co2a_2, p1co2b_2, sand):
"""Calculate the fraction of carbon lost to CO2 from som1c_2.
During decomposition from active organic soil carbon, a fraction
of decomposing material is lost to CO2 as the soil respires.
Line 366 Prelim.f
Parameters:
p1co2a_2 (numpy.ndarray): parameter, intercept of regression
predicting loss to CO2 from active organic soil carbon
p1co2b_2 (numpy.ndarray): parameter, slope of regression
predicting loss to CO2 from active organic soil carbon
sand (numpy.ndarray): input, proportion sand in soil
Returns:
p1co2_2, fraction of carbon that flows to CO2 from active
organic soil carbon
"""
p1co2_2 = numpy.empty(sand.shape, dtype=numpy.float32)
p1co2_2[:] = _IC_NODATA
valid_mask = (
(p1co2a_2 != _IC_NODATA) &
(p1co2b_2 != _IC_NODATA) &
(~numpy.isclose(sand, sand_nodata)))
p1co2_2[valid_mask] = (
p1co2a_2[valid_mask] + (p1co2b_2[valid_mask] * sand[valid_mask]))
return p1co2_2
pygeoprocessing.raster_calculator(
[(path, 1) for path in [
param_val_dict['p1co2a_2'],
param_val_dict['p1co2b_2'], sand_path]],
calc_p1co2_2, pp_reg['p1co2_2_path'], gdal.GDT_Float32, _IC_NODATA)
def calc_fps1s3(ps1s3_1, ps1s3_2, clay):
"""Calculate effect of clay content on decomposition from som1c_2.
Use an empirical regression to estimate the effect of clay content
of soil on flow from soil organic matter with fast turnover to
soil organic matter with slow turnover. Line 370 Prelim.f
Parameters:
ps1s3_1 (numpy.ndarray): parameter, regression intercept
ps1s3_2 (numpy.ndarray): parameter, regression slope
clay (numpy.ndarray): input, proportion clay in soil
Returns:
fps1s3, coefficient that modifies rate of decomposition
from som1c_2
"""
fps1s3 = numpy.empty(clay.shape, dtype=numpy.float32)
fps1s3[:] = _IC_NODATA
valid_mask = (
(ps1s3_1 != _IC_NODATA) &
(ps1s3_2 != _IC_NODATA) &
(~numpy.isclose(clay, clay_nodata)))
fps1s3[valid_mask] = (
ps1s3_1[valid_mask] + (ps1s3_2[valid_mask] * clay[valid_mask]))
return fps1s3
pygeoprocessing.raster_calculator(
[(path, 1) for path in [
param_val_dict['ps1s3_1'], param_val_dict['ps1s3_2'], clay_path]],
calc_fps1s3, pp_reg['fps1s3_path'], gdal.GDT_Float32, _IC_NODATA)
def calc_fps2s3(ps2s3_1, ps2s3_2, clay):
"""Calculate effect of clay content on decomposition from som2c_2.
Use an empirical regression to estimate the effect of clay content
of soil on flow from slow soil organic carbon to soil passive organic
carbon. Line 371 Prelim.f
Parameters:
ps2s3_1 (numpy.ndarray): parameter, regression intercept
ps2s3_2 (numpy.ndarray): parameter, regression slope
clay (numpy.ndarray): input, proportion clay in soil
Returns:
fps2s3, coefficient that modifies rate of decomposition from
som2c_2 to som3c
"""
fps2s3 = numpy.empty(clay.shape, dtype=numpy.float32)
fps2s3[:] = _IC_NODATA
valid_mask = (
(ps2s3_1 != _IC_NODATA) &
(ps2s3_2 != _IC_NODATA) &
(~numpy.isclose(clay, clay_nodata)))
fps2s3[valid_mask] = (
ps2s3_1[valid_mask] + (ps2s3_2[valid_mask] * clay[valid_mask]))
return fps2s3
pygeoprocessing.raster_calculator(
[(path, 1) for path in [
param_val_dict['ps2s3_1'], param_val_dict['ps2s3_2'], clay_path]],
calc_fps2s3, pp_reg['fps2s3_path'], gdal.GDT_Float32, _IC_NODATA)
def calc_orglch(omlech_1, omlech_2, sand):
"""Calculate the effect of sand content on leaching from soil.
Use an empirical regression to estimate the effect of sand content
of soil on rate of organic leaching from soil when there is drainage
of soil water from soil layer 1 to soil layer 2. Line 110 Predec.f
Parameters:
omlech_1 (numpy.ndarray): parameter, regression intercept
omlech_2 (numpy.ndarray): parameter, regression slope
sand (numpy.ndarray): input, proportion sand in soil
Returns:
orglch, the fraction of organic compounds leaching from soil
with drainage from soil layer 1 to layer 2
"""
orglch = numpy.empty(sand.shape, dtype=numpy.float32)
orglch[:] = _IC_NODATA
valid_mask = (
(omlech_1 != _IC_NODATA) &
(omlech_2 != _IC_NODATA) &
(~numpy.isclose(sand, sand_nodata)))
orglch[valid_mask] = (
omlech_1[valid_mask] + (omlech_2[valid_mask] * sand[valid_mask]))
return orglch
pygeoprocessing.raster_calculator(
[(path, 1) for path in [
param_val_dict['omlech_1'], param_val_dict['omlech_2'],
sand_path]],
calc_orglch, pp_reg['orglch_path'], gdal.GDT_Float32, _IC_NODATA)
def calc_vlossg(vlossg_param, clay):
"""Calculate proportion of gross mineralized N that is volatized.
During decomposition, some N is lost to volatilization. This is a
function of the gross mineralized N and is calculated according to this
multiplier, which varies with soil clay content.
Parameters:
vlossg (numpy.ndarray): parameter, volatilization loss multiplier
clay (numpy.ndarray): input, proportion clay in soil
Returns:
vlossg, proportion of gross mineralized N that is volatized
"""
valid_mask = (
(vlossg_param != _IC_NODATA) &
(~numpy.isclose(clay, clay_nodata)))
vlossg = numpy.empty(vlossg_param.shape, dtype=numpy.float32)
vlossg[:] = _IC_NODATA
max_mask = ((clay > 0.3) & valid_mask)
min_mask = ((clay < 0.1) & valid_mask)
vlossg[valid_mask] = -0.1 * (clay[valid_mask] - 0.3) + 0.01
vlossg[max_mask] = 0.01
vlossg[min_mask] = 0.03
vlossg[valid_mask] = vlossg[valid_mask] * vlossg_param[valid_mask]
return vlossg
pygeoprocessing.raster_calculator(
[(path, 1) for path in [param_val_dict['vlossg'], clay_path]],
calc_vlossg, pp_reg['vlossg_path'], gdal.GDT_Float32, _IC_NODATA)
# clean up temporary files
shutil.rmtree(temp_dir)
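# Illustrative sketch (hypothetical values): most persistent parameters above
# are linear functions of texture, e.g. eftext = peftxa + peftxb * sand, while
# calc_vlossg clamps its clay response. Before scaling by the vlossg
# parameter, that clay response is
#   >>> [round(min(max(-0.1 * (c - 0.3) + 0.01, 0.01), 0.03), 3)
#   ...  for c in (0.05, 0.2, 0.4)]
#   [0.03, 0.02, 0.01]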
def _aboveground_ratio(anps, tca, pcemic_1, pcemic_2, pcemic_3):
"""Calculate C/<iel> ratios of decomposing aboveground material.
This ratio is used to test whether there is sufficient <iel> (N or P)
in aboveground material for the material to decompose. Agdrat.f
Parameters:
anps (numpy.ndarray): state variable, N or P in the donor material
tca (numpy.ndarray): state variable, total C in the donor material
pcemic_1 (numpy.ndarray): parameter, maximum C/<iel> of new material
pcemic_2 (numpy.ndarray): parameter, minimum C/<iel> of new material
pcemic_3 (numpy.ndarray): parameter, minimum <iel> content of
decomposing material that gives minimum C/<iel> of new material
Returns:
agdrat, the C/<iel> ratio of new material
"""
valid_mask = (
(~numpy.isclose(anps, _SV_NODATA)) &
(~numpy.isclose(tca, _SV_NODATA)) &
(pcemic_1 != _IC_NODATA) &
(pcemic_2 != _IC_NODATA) &
(pcemic_3 != _IC_NODATA))
cemicb = numpy.empty(anps.shape, dtype=numpy.float32)
cemicb[:] = _IC_NODATA
cemicb[valid_mask] = (
(pcemic_2[valid_mask] - pcemic_1[valid_mask]) /
pcemic_3[valid_mask])
econt = numpy.empty(anps.shape, dtype=numpy.float32)
econt[:] = _TARGET_NODATA
econt[valid_mask] = 0
decompose_mask = ((tca > 0.) & valid_mask)
econt[decompose_mask] = anps[decompose_mask] / (tca[decompose_mask] * 2.5)
agdrat = numpy.empty(anps.shape, dtype=numpy.float32)
agdrat[:] = _TARGET_NODATA
agdrat[valid_mask] = pcemic_2[valid_mask]
compute_mask = ((econt <= pcemic_3) & valid_mask)
agdrat[compute_mask] = (
pcemic_1[compute_mask] + econt[compute_mask] * cemicb[compute_mask])
return agdrat
def _belowground_ratio(aminrl, varat_1_iel, varat_2_iel, varat_3_iel):
"""Calculate C/<iel> ratios of decomposing belowground material.
This ratio is used to test whether there is sufficient <iel> (N or P)
    in soil metabolic material for the material to decompose. Bgdrat.f
Parameters:
aminrl (numpy.ndarray): derived, average surface mineral <iel>
varat_1_iel (numpy.ndarray): parameter, maximum C/<iel> ratio for
newly decomposed material
varat_2_iel (numpy.ndarray): parameter, minimum C/<iel> ratio
varat_3_iel (numpy.ndarray): parameter, amount of <iel> present
when minimum ratio applies
Returns:
bgdrat, the C/<iel> ratio of new material
"""
valid_mask = (
(~numpy.isclose(aminrl, _SV_NODATA)) &
(varat_1_iel != _IC_NODATA) &
(varat_2_iel != _IC_NODATA) &
(varat_3_iel != _IC_NODATA))
bgdrat = numpy.empty(aminrl.shape, dtype=numpy.float32)
bgdrat[:] = _TARGET_NODATA
bgdrat[valid_mask] = (
(1. - aminrl[valid_mask] / varat_3_iel[valid_mask]) *
(varat_1_iel[valid_mask] - varat_2_iel[valid_mask]) +
varat_2_iel[valid_mask])
max_mask = ((aminrl <= 0) & valid_mask)
bgdrat[max_mask] = varat_1_iel[max_mask]
min_mask = ((aminrl > varat_3_iel) & valid_mask)
bgdrat[min_mask] = varat_2_iel[min_mask]
return bgdrat
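# Illustrative sketch (hypothetical parameter values): _belowground_ratio
# interpolates linearly between the maximum and minimum C/<iel> ratios. With
# varat_1_iel=40, varat_2_iel=12 and varat_3_iel=2, the ratio falls from 40
# (no mineral nutrient) to 12 (aminrl >= 2):
#   >>> v1, v2, v3 = 40., 12., 2.
#   >>> [(1. - aminrl / v3) * (v1 - v2) + v2 for aminrl in (0., 1., 2.)]
#   [40.0, 26.0, 12.0]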
def _structural_ratios(site_index_path, site_param_table, sv_reg, pp_reg):
"""Calculate maximum C/N and C/P ratios for structural material.
These ratios limit decomposition of structural material (i.e., material
containing lignin). Lines 31-77 Predec.f
Parameters:
site_index_path (string): path to site spatial index raster
site_param_table (dict): map of site spatial index to dictionaries
that contain site-level parameters
sv_reg (dict): map of key, path pairs giving paths to state
variables for the current month
pp_reg (dict): map of key, path pairs giving paths to persistent
intermediate parameters that do not change over the course of
the simulation.
Modifies the persistent parameter rasters indexed by the following
keys:
pp_reg['rnewas_1_1_path']
pp_reg['rnewas_1_2_path']
pp_reg['rnewas_2_1_path']
pp_reg['rnewas_2_2_path']
pp_reg['rnewbs_1_1_path']
pp_reg['rnewbs_1_2_path']
pp_reg['rnewbs_2_1_path']
pp_reg['rnewbs_2_2_path']
Returns:
None
"""
# temporary parameter rasters for structural ratios calculations
temp_dir = tempfile.mkdtemp(dir=PROCESSING_DIR)
param_val_dict = {}
for iel in [1, 2]:
        for val in [
'pcemic1_2', 'pcemic1_1', 'pcemic1_3', 'pcemic2_2',
'pcemic2_1', 'pcemic2_3', 'rad1p_1', 'rad1p_2',
'rad1p_3', 'varat1_1', 'varat22_1']:
target_path = os.path.join(temp_dir, '{}_{}.tif'.format(val, iel))
param_val_dict['{}_{}'.format(val, iel)] = target_path
site_to_val = dict(
[(site_code, float(table['{}_{}'.format(val, iel)])) for
(site_code, table) in site_param_table.items()])
pygeoprocessing.reclassify_raster(
(site_index_path, 1), site_to_val, target_path,
gdal.GDT_Float32, _IC_NODATA)
def calc_rnewas_som2(
pcemic2_2, pcemic2_1, pcemic2_3, struce_1, strucc_1, rad1p_1,
rad1p_2, rad1p_3, pcemic1_2, rnewas1):
"""Calculate C/<iel> ratio for decomposition into som2.
This ratio is calculated separately for each nutrient (i.e., N, P).
When material decomposes into the surface slow organic pool, the
C/<iel> ratio of decomposing material must be smaller than or equal to
this ratio. A portion of the ratio of material entering som1, the
surface active pool, is also added to som2 and calculated here.
Parameters:
pcemic2_2 (numpy.ndarray): parameter, minimum C/<iel> ratio for
surface slow organic pool
pcemic2_1 (numpy.ndarray): parameter, maximum C/<iel> ratio for
surface slow organic pool
            pcemic2_3 (numpy.ndarray): parameter, minimum <iel> content of
decomposing aboveground material, above which the C/<iel>
ratio of the surface slow organic pool equals pcemic1_2
struce_1 (numpy.ndarray): state variable, <iel> in surface
structural material
strucc_1 (numpy.ndarray): state variable, C in surface
structural material
rad1p_1 (numpy.ndarray): parameter, intercept of regression used
to calculate addition of <iel> from surface active pool
rad1p_2 (numpy.ndarray): parameter, slope of regression used
to calculate addition of <iel> from surface active pool
rad1p_3 (numpy.ndarray): parameter, minimum allowable C/<iel>
used to calculate addition term for C/<iel> ratio of som2
formed from surface active pool
pcemic1_2 (numpy.ndarray): parameter, minimum C/<iel> ratio for
surface active organic pool
rnewas1 (numpy.ndarray): derived, C/<iel> ratio for decomposition
into som1
Returns:
rnewas2, required ratio for decomposition of structural material
into som2 for one nutrient
"""
valid_mask = (
(pcemic2_2 != _IC_NODATA) &
(pcemic2_1 != _IC_NODATA) &
(pcemic2_3 != _IC_NODATA) &
(~numpy.isclose(struce_1, _SV_NODATA)) &
(~numpy.isclose(strucc_1, _SV_NODATA)) &
(rad1p_1 != _IC_NODATA) &
(rad1p_2 != _IC_NODATA) &
(rad1p_3 != _IC_NODATA) &
(pcemic1_2 != _IC_NODATA) &
(rnewas1 != _TARGET_NODATA))
rnewas2 = _aboveground_ratio(
struce_1, strucc_1, pcemic2_1, pcemic2_2, pcemic2_3)
radds1 = numpy.empty(strucc_1.shape, dtype=numpy.float32)
radds1[:] = _TARGET_NODATA
radds1[valid_mask] = (
rad1p_1[valid_mask] + rad1p_2[valid_mask] *
(rnewas1[valid_mask] - pcemic1_2[valid_mask]))
rnewas2[valid_mask] = rnewas1[valid_mask] + radds1[valid_mask]
rnewas2[valid_mask] = numpy.maximum(
rnewas2[valid_mask], rad1p_3[valid_mask])
return rnewas2
for iel in [1, 2]:
# calculate rnewas_iel_1 - aboveground material to SOM1
pygeoprocessing.raster_calculator(
[(path, 1) for path in [
sv_reg['struce_1_{}_path'.format(iel)],
sv_reg['strucc_1_path'],
param_val_dict['pcemic1_1_{}'.format(iel)],
param_val_dict['pcemic1_2_{}'.format(iel)],
param_val_dict['pcemic1_3_{}'.format(iel)]]],
_aboveground_ratio, pp_reg['rnewas_{}_1_path'.format(iel)],
gdal.GDT_Float32, _TARGET_NODATA)
# calculate rnewas_iel_2 - aboveground material to SOM2
pygeoprocessing.raster_calculator(
[(path, 1) for path in [
param_val_dict['pcemic2_2_{}'.format(iel)],
param_val_dict['pcemic2_1_{}'.format(iel)],
param_val_dict['pcemic2_3_{}'.format(iel)],
sv_reg['struce_1_{}_path'.format(iel)],
sv_reg['strucc_1_path'],
param_val_dict['rad1p_1_{}'.format(iel)],
param_val_dict['rad1p_2_{}'.format(iel)],
param_val_dict['rad1p_3_{}'.format(iel)],
param_val_dict['pcemic1_2_{}'.format(iel)],
pp_reg['rnewas_{}_1_path'.format(iel)]]],
calc_rnewas_som2, pp_reg['rnewas_{}_2_path'.format(iel)],
gdal.GDT_Float32, _TARGET_NODATA)
# calculate rnewbs_iel_1 - belowground material to SOM1
site_to_varat1_1 = dict([
(site_code, float(table['varat1_1_{}'.format(iel)])) for
(site_code, table) in site_param_table.items()])
pygeoprocessing.reclassify_raster(
(site_index_path, 1), site_to_varat1_1,
pp_reg['rnewbs_{}_1_path'.format(iel)],
gdal.GDT_Float32, _TARGET_NODATA)
# calculate rnewbs_iel_2 - belowground material to SOM2
# rnewbs(iel,2) = varat22(1,iel)
site_to_varat22_1 = dict([
(site_code, float(table['varat22_1_{}'.format(iel)])) for
(site_code, table) in site_param_table.items()])
pygeoprocessing.reclassify_raster(
(site_index_path, 1), site_to_varat22_1,
pp_reg['rnewbs_{}_2_path'.format(iel)],
gdal.GDT_Float32, _TARGET_NODATA)
# clean up temporary files
shutil.rmtree(temp_dir)
def _yearly_tasks(
aligned_inputs, site_param_table, veg_trait_table, month_index,
pft_id_set, year_reg):
"""Calculate quantities that remain static for 12 months.
These quantities are annual precipitation, annual atmospheric N
deposition, and the fraction of plant residue which is lignin for each pft.
Century also calculates non-symbiotic soil N fixation once yearly, but here
those were moved to monthly tasks. Century uses precipitation in the future
12 months (prcgrw) to predict root:shoot ratios, but here we instead use
the sum of monthly precipitation in 12 months including the current one, if
data for 12 future months are not available.
Lines 79-82, 164 Eachyr.f
Parameters:
aligned_inputs (dict): map of key, path pairs indicating paths
to aligned model inputs, including monthly precipitation and site
spatial index raster
site_param_table (dict): map of site spatial index to dictionaries
that contain site-level parameters
veg_trait_table (dict): map of pft id to dictionaries containing
plant functional type parameters
month_index (int): current monthly step, relative to 0 so that
month_index=0 at first monthly time step
pft_id_set (set): set of integers identifying plant functional types
year_reg (dict): map of key, path pairs giving paths to the annual
precipitation and N deposition rasters
Side effects:
modifies or creates the rasters indicated by:
year_reg['annual_precip_path']
year_reg['baseNdep_path']
year_reg['pltlig_above_<pft>'] for each pft
year_reg['pltlig_below_<pft>'] for each pft
Returns:
None
Raises:
ValueError if fewer than 12 monthly precipitation rasters can be found
"""
def calc_base_N_dep(epnfa_1, epnfa_2, prcann):
"""Calculate base annual atmospheric N deposition.
Parameters:
epnfa_1 (numpy.ndarray): parameter, intercept of regression
predicting atmospheric N deposition from precipitation
epnfa_2 (numpy.ndarray): parameter, slope of regression predicting
atmospheric N deposition from precipitation
prcann (numpy.ndarray): derived, annual precipitation
Returns:
baseNdep, annual atmospheric N deposition
"""
baseNdep = numpy.empty(prcann.shape, dtype=numpy.float32)
baseNdep[:] = 0.
valid_mask = (
(epnfa_1 != _IC_NODATA) &
(epnfa_2 != _IC_NODATA) &
(prcann != _TARGET_NODATA))
baseNdep[valid_mask] = (
epnfa_1[valid_mask] +
(epnfa_2[valid_mask] * numpy.minimum(prcann[valid_mask], 80.)))
baseNdep[baseNdep < 0] = 0.
return baseNdep
def calc_pltlig(fligni_1_lyr, fligni_2_lyr, prcann):
"""Calculate the fraction of residue that is lignin. Cmplig.f
This fraction is used to calculate the fraction of residue (i.e.,
incoming litter from fall of standing dead or incoming soil from death
of roots) that is partitioned to metabolic vs structural pools. It is
calculated once per year from annual precipitation and fixed
parameters.
Parameters:
fligni_1_lyr (numpy.ndarray): parameter, intercept for regression
predicting lignin content fraction from rainfall
fligni_2_lyr (numpy.ndarray): parameter, slope for regression
predicting lignin content fraction from rainfall
prcann (numpy.ndarray): derived, annual precipitation
Returns:
pltlig_lyr, fraction of residue that is lignin
"""
valid_mask = (
(fligni_1_lyr != _IC_NODATA) &
(fligni_2_lyr != _IC_NODATA) &
(prcann != _TARGET_NODATA))
pltlig = numpy.empty(fligni_1_lyr.shape, dtype=numpy.float32)
pltlig[:] = _TARGET_NODATA
pltlig[valid_mask] = (
fligni_1_lyr[valid_mask] + fligni_2_lyr[valid_mask] *
prcann[valid_mask])
pltlig[valid_mask] = numpy.clip(pltlig[valid_mask], 0.02, 0.5)
return pltlig
offset = -12
annual_precip_rasters = []
while len(annual_precip_rasters) < 12:
offset += 1
if offset == 12:
raise ValueError("Insufficient precipitation rasters were found")
precip_month = month_index + offset
try:
annual_precip_rasters.append(
aligned_inputs['precip_%d' % precip_month])
except KeyError:
continue
precip_nodata = set([])
for precip_raster in annual_precip_rasters:
precip_nodata.update(
set([pygeoprocessing.get_raster_info(precip_raster)['nodata'][0]]))
if len(precip_nodata) > 1:
raise ValueError("Precipitation rasters include >1 nodata value")
precip_nodata = list(precip_nodata)[0]
raster_list_sum(
annual_precip_rasters, precip_nodata, year_reg['annual_precip_path'],
_TARGET_NODATA)
# intermediate parameter rasters for this operation
temp_dir = tempfile.mkdtemp(dir=PROCESSING_DIR)
param_val_dict = {}
    for val in ['epnfa_1', 'epnfa_2']:
target_path = os.path.join(temp_dir, '{}.tif'.format(val))
param_val_dict[val] = target_path
site_to_val = dict(
[(site_code, float(table[val])) for
(site_code, table) in site_param_table.items()])
pygeoprocessing.reclassify_raster(
(aligned_inputs['site_index'], 1), site_to_val, target_path,
gdal.GDT_Float32, _IC_NODATA)
for val in ['fligni_1_1', 'fligni_2_1', 'fligni_1_2', 'fligni_2_2']:
for pft_i in pft_id_set:
target_path = os.path.join(
temp_dir, '{}_{}.tif'.format(val, pft_i))
param_val_dict['{}_{}'.format(val, pft_i)] = target_path
fill_val = veg_trait_table[pft_i][val]
pygeoprocessing.new_raster_from_base(
aligned_inputs['site_index'], target_path, gdal.GDT_Float32,
[_IC_NODATA], fill_value_list=[fill_val])
# calculate base N deposition
pygeoprocessing.raster_calculator(
[(path, 1) for path in [
param_val_dict['epnfa_1'], param_val_dict['epnfa_2'],
year_reg['annual_precip_path']]],
calc_base_N_dep, year_reg['baseNdep_path'], gdal.GDT_Float32,
_TARGET_NODATA)
for pft_i in pft_id_set:
# fraction of surface residue that is lignin
pygeoprocessing.raster_calculator(
[(path, 1) for path in [
param_val_dict['fligni_1_1_{}'.format(pft_i)],
param_val_dict['fligni_2_1_{}'.format(pft_i)],
year_reg['annual_precip_path']]],
calc_pltlig, year_reg['pltlig_above_{}'.format(pft_i)],
gdal.GDT_Float32, _TARGET_NODATA)
# fraction of soil residue that is lignin
pygeoprocessing.raster_calculator(
[(path, 1) for path in [
param_val_dict['fligni_1_2_{}'.format(pft_i)],
param_val_dict['fligni_2_2_{}'.format(pft_i)],
year_reg['annual_precip_path']]],
calc_pltlig, year_reg['pltlig_below_{}'.format(pft_i)],
gdal.GDT_Float32, _TARGET_NODATA)
# clean up temporary files
shutil.rmtree(temp_dir)
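# Illustrative sketch (hypothetical parameter values): calc_pltlig above is a
# clipped linear function of annual precipitation; with fligni_1=0.02 and
# fligni_2=0.0012,
#   >>> round(min(max(0.02 + 0.0012 * 60., 0.02), 0.5), 3)
#   0.092
# i.e. roughly 9% of residue is lignin at 60 cm of annual precipitation.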
def calc_latitude(template_raster, latitude_raster_path):
"""Calculate latitude at the center of each pixel in a template raster."""
pygeoprocessing.new_raster_from_base(
template_raster, latitude_raster_path, gdal.GDT_Float32,
[_IC_NODATA])
latitude_raster = gdal.OpenEx(
latitude_raster_path, gdal.OF_RASTER | gdal.GA_Update)
target_band = latitude_raster.GetRasterBand(1)
base_raster_info = pygeoprocessing.get_raster_info(template_raster)
geotransform = base_raster_info['geotransform']
for offset_map, raster_block in pygeoprocessing.iterblocks(
(template_raster, 1)):
n_y_block = raster_block.shape[0]
n_x_block = raster_block.shape[1]
# offset by .5 so we're in the center of the pixel
xoff = offset_map['xoff'] + 0.5
yoff = offset_map['yoff'] + 0.5
# calculate the projected x and y coordinate bounds for the block
x_range = numpy.linspace(
geotransform[0] + geotransform[1] * xoff,
geotransform[0] + geotransform[1] * (xoff + n_x_block - 1),
n_x_block)
y_range = numpy.linspace(
geotransform[3] + geotransform[5] * yoff,
geotransform[3] + geotransform[5] * (yoff + n_y_block - 1),
n_y_block)
# these indexes correspond to projected coordinates
# y_vector is what we want, an array of latitude coordinates
x_vector, y_vector = numpy.meshgrid(x_range, y_range)
target_band.WriteArray(
y_vector, xoff=offset_map['xoff'], yoff=offset_map['yoff'])
    # Making sure the band and dataset are flushed and not in memory
    target_band.FlushCache()
target_band = None
gdal.Dataset.__swig_destroy__(latitude_raster)
latitude_raster = None
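# Note: for a north-up raster in geographic coordinates, the latitude written
# above for a given row is simply
#   lat = geotransform[3] + geotransform[5] * (row + 0.5)
# e.g. a hypothetical geotransform (-120.0, 0.05, 0.0, 45.0, 0.0, -0.05) puts
# the center of row 0 at latitude 45.0 - 0.05 * 0.5 = 44.975 degrees.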
def _calc_daylength(template_raster, month, daylength_path):
"""Calculate estimated hours of daylength. Daylen.c.
Parameters:
template_raster (string): path to a raster in geographic coordinates
that is aligned with model inputs
        month (int): current month of the year, such that month=1 indicates
            January
        daylength_path (string): path to result raster, estimated hours of
            daylength
Side effects:
modifies or creates the raster indicated by `daylength_path`
Returns:
None
"""
def daylength(month):
def _daylength(latitude):
"""Estimate hours of daylength for a given month and latitude."""
# Julian day at beginning of each month
jday_list = [
1, 32, 61, 92, 122, 153, 183, 214, 245, 275, 306, 337]
jday = jday_list[month - 1]
# Convert latitude from degrees to radians
rlatitude = latitude * (numpy.pi / 180.0)
declin = 0.4014 * numpy.sin(6.283185 * (jday - 77.0) / 365)
temp = 1.0 - (-numpy.tan(rlatitude) * numpy.tan(declin))**2
temp[temp < 0] = 0
par1 = numpy.sqrt(temp)
par2 = -numpy.tan(rlatitude) * numpy.tan(declin)
ahou = numpy.arctan2(par1, par2)
hours_of_daylength = (ahou / numpy.pi) * 24
return hours_of_daylength
return _daylength
# calculate an intermediate input, latitude at each pixel center
temp_dir = tempfile.mkdtemp(dir=PROCESSING_DIR)
latitude_raster_path = os.path.join(temp_dir, 'latitude.tif')
calc_latitude(template_raster, latitude_raster_path)
pygeoprocessing.raster_calculator(
[(latitude_raster_path, 1)], daylength(month), daylength_path,
gdal.GDT_Float32, _TARGET_NODATA)
# clean up temporary files
shutil.rmtree(temp_dir)
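# Sanity check of the daylength formula above: at the equator rlatitude = 0,
# so par2 = 0, par1 = 1 and ahou = arctan2(1, 0) = pi/2, giving
# (pi/2 / pi) * 24 = 12 hours of daylength in every month:
#   >>> import numpy
#   >>> round(float(numpy.arctan2(1., 0.) / numpy.pi * 24), 6)
#   12.0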
def _shortwave_radiation(template_raster, month, shwave_path):
"""Calculate shortwave radiation outside the atmosphere.
Shortwave radiation outside the atmosphere is calculated according to
Penman (1948), "Natural evaporation from open water, bare soil and grass",
Proc. Roy. Soc. London. The latitude of each pixel is required to
calculate radiation and is calculated as an intermediate step from the
input `template_raster`. shwave.f
Parameters:
template_raster (string): path to a raster in geographic coordinates
that is aligned with model inputs
        month (int): current month of the year, such that month=1 indicates
            January
shwave_path (string): path to shortwave radiation raster
Side effects:
Modifies the raster indicated by `shwave_path`
Returns:
None
"""
def shwave(month):
def _shwave(latitude):
"""Calculate shortwave radiation outside the atmosphere.
Parameters:
latitude (float): latitude of current site in degrees
month (int): current month of the year, such that month=1
indicates January
Returns:
shwave, short wave solar radiation outside the atmosphere
"""
# Julian date in middle of each month of the year
jday_list = [
16, 46, 75, 106, 136, 167, 197, 228, 259, 289, 320, 350]
jday = jday_list[month - 1]
transcof = 0.8
# Convert latitude from degrees to radians
rlatitude = latitude * (numpy.pi / 180.0)
# short wave solar radiation on a clear day
declin = 0.401426 * numpy.sin(6.283185 * (jday - 77.0) / 365.0)
temp = 1.0 - (-numpy.tan(rlatitude) * numpy.tan(declin))**2
temp[temp < 0.] = 0.
par1 = numpy.sqrt(temp)
par2 = (-numpy.tan(rlatitude) * numpy.tan(declin))
ahou = numpy.arctan2(par1, par2)
ahou[ahou < 0.] = 0.
solrad = (
917.0 * transcof * (
ahou * numpy.sin(rlatitude) * numpy.sin(declin) +
numpy.cos(rlatitude) *
numpy.cos(declin) * numpy.sin(ahou)))
# short wave radiation outside the atmosphere
shwave = solrad / transcof
return shwave
return _shwave
# calculate an intermediate input, latitude at each pixel center
temp_dir = tempfile.mkdtemp(dir=PROCESSING_DIR)
latitude_raster_path = os.path.join(temp_dir, 'latitude.tif')
calc_latitude(template_raster, latitude_raster_path)
pygeoprocessing.raster_calculator(
[(latitude_raster_path, 1)],
shwave(month), shwave_path,
gdal.GDT_Float32, _TARGET_NODATA)
# clean up temporary files
shutil.rmtree(temp_dir)
def _reference_evapotranspiration(
max_temp_path, min_temp_path, shwave_path, fwloss_4_path,
pevap_path):
"""Calculate reference evapotranspiration.
Reference evapotranspiration from the FAO Penman-Monteith equation in
"Guidelines for computing crop water requirements", FAO Irrigation and
drainage paper 56 (http://www.fao.org/docrep/X0490E/x0490e08.htm),
modified by the parameter fwloss(4).
Parameters:
max_temp_path (string): path to maximum monthly temperature
min_temp_path (string): path to minimum monthly temperature
shwave_path (string): path to shortwave radiation outside the
atmosphere
fwloss_4_path (string): path to parameter, scaling factor for
reference evapotranspiration
pevap_path (string): path to result, reference evapotranspiration
raster
Side effects:
modifies or creates the raster indicated by `pevap_path`
Returns:
None
"""
def _calc_pevap(max_temp, min_temp, shwave, fwloss_4):
"""Calculate reference evapotranspiration.
Pevap.f
Parameters:
max_temp (numpy.ndarray): input, maximum monthly temperature
min_temp (numpy.ndarray): input, minimum monthly temperature
shwave (numpy.ndarray): derived, shortwave radiation outside the
atmosphere
fwloss_4 (numpy.ndarray): parameter, scaling factor for reference
evapotranspiration
Returns:
pevap, reference evapotranspiration
"""
const1 = 0.0023
const2 = 17.8
langleys2watts = 54.0
valid_mask = (
(~numpy.isclose(max_temp, maxtmp_nodata)) &
(~numpy.isclose(min_temp, mintmp_nodata)) &
(shwave != _TARGET_NODATA) &
(fwloss_4 != _IC_NODATA))
trange = numpy.empty(fwloss_4.shape, dtype=numpy.float32)
trange[:] = _TARGET_NODATA
trange[valid_mask] = max_temp[valid_mask] - min_temp[valid_mask]
tmean = numpy.empty(fwloss_4.shape, dtype=numpy.float32)
tmean[:] = _IC_NODATA
tmean[valid_mask] = (max_temp[valid_mask] + min_temp[valid_mask]) / 2.0
# daily reference evapotranspiration
daypet = numpy.empty(fwloss_4.shape, dtype=numpy.float32)
daypet[:] = _TARGET_NODATA
daypet[valid_mask] = (
const1 * (tmean[valid_mask] + const2) *
numpy.sqrt(trange[valid_mask]) *
(shwave[valid_mask] / langleys2watts))
# monthly reference evapotranspiration, from mm to cm,
# bounded to be at least 0.5
monpet = (daypet * 30.) / 10.
monpet[monpet <= 0.5] = 0.5
pevap = numpy.empty(fwloss_4.shape, dtype=numpy.float32)
pevap[:] = _TARGET_NODATA
pevap[valid_mask] = monpet[valid_mask] * fwloss_4[valid_mask]
return pevap
maxtmp_nodata = pygeoprocessing.get_raster_info(
max_temp_path)['nodata'][0]
mintmp_nodata = pygeoprocessing.get_raster_info(
min_temp_path)['nodata'][0]
pygeoprocessing.raster_calculator(
[(path, 1) for path in [
max_temp_path, min_temp_path, shwave_path, fwloss_4_path]],
_calc_pevap, pevap_path, gdal.GDT_Float32, _TARGET_NODATA)
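# Illustrative sketch (all values hypothetical): for a single pixel with
# max_temp=30, min_temp=20, shwave=800 langleys and fwloss_4=0.8, the steps in
# _calc_pevap above give roughly
#   daypet = 0.0023 * (25 + 17.8) * sqrt(10) * (800 / 54)  ~  4.6 mm/day
#   monpet = daypet * 30 / 10                              ~ 13.8 cm/month
#   pevap  = monpet * fwloss_4                             ~ 11.1 cm/month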
def _potential_production(
aligned_inputs, site_param_table, current_month, month_index,
pft_id_set, veg_trait_table, prev_sv_reg, pp_reg, month_reg):
"""Calculate above- and belowground potential production.
Potential production of each plant functional type is calculated
as total potential production given incoming solar radiation,
limited by temperature, soil moisture, and obstruction by biomass and
litter. Further modification of potential production according to
limitation by water and nutrient availability is calculated in the
root:shoot ratio submodel. Lines 57-148 Potcrp.f
Parameters:
aligned_inputs (dict): map of key, path pairs indicating paths
to aligned model inputs, including precipitation, temperature,
plant functional type composition, and site spatial index
site_param_table (dict): map of site spatial indices to dictionaries
containing site parameters
current_month (int): month of the year, such that current_month=1
indicates January
        month_index (int): month of the simulation, such that month_index=0
            indicates the first month of the simulation
pft_id_set (set): set of integers identifying plant functional types
veg_trait_table (dict): map of pft id to dictionaries containing
plant functional type parameters
prev_sv_reg (dict): map of key, path pairs giving paths to state
variables for the previous month
pp_reg (dict): map of key, path pairs giving paths to persistent
intermediate parameters that do not change over the course of
the simulation
month_reg (dict): map of key, path pairs giving paths to intermediate
calculated values that are shared between submodels
Side effects:
creates the raster indicated by `month_reg['h2ogef_1_<PFT>']` for each
plant functional type (PFT) where growth is scheduled to occur in
this month
creates the raster indicated by `month_reg['tgprod_pot_prod_<PFT>']`
for each plant functional type (PFT) where growth is scheduled to
occur in this month
Returns:
None
"""
# if growth does not occur this month for all PFTs,
# skip the rest of the function
do_PFT = []
for pft_i in pft_id_set:
if str(current_month) in veg_trait_table[pft_i]['growth_months']:
do_PFT.append(pft_i)
if not do_PFT:
return
def calc_ctemp(aglivc, pmxbio, maxtmp, pmxtmp, mintmp, pmntmp):
"""Calculate soil temperature relative to its effect on growth.
Soil temperature is calculated from monthly temperature inputs and
modified by total standing live biomass. Lines 69-84 Potcrp.f
Parameters:
aglivc (numpy.ndarray): derived, sum of aglivc (carbon in
aboveground live biomass) across plant functional types
pmxbio (numpy.ndarray): parameter, maximum biomass impact on
temperature
maxtmp (numpy.ndarray): input, average maximum monthly
temperature
pmxtmp (numpy.ndarray): parameter, scaling factor for effect of
biomass on monthly maximum temperature
mintmp (numpy.ndarray): input, average minimum monthly temperature
pmntmp (numpy.ndarray): parameter, scaling factor for effect of
biomass on monthly minimum temperature
Returns:
ctemp, effect of soil temperature on potential production
"""
bio = numpy.empty(aglivc.shape, dtype=numpy.float32)
bio[:] = _IC_NODATA
valid_mask = (
(aglivc >= 0.) &
(pmxbio != _IC_NODATA) &
(~numpy.isclose(maxtmp, maxtmp_nodata)) &
(pmxtmp != _IC_NODATA) &
(~numpy.isclose(mintmp, mintmp_nodata)) &
(pmntmp != _IC_NODATA))
bio[valid_mask] = aglivc[valid_mask] * 2.5
bio[bio > pmxbio] = pmxbio[bio > pmxbio]
bio[pmxbio < 0] = _IC_NODATA
# Maximum temperature
tmxs = numpy.empty(aglivc.shape, dtype=numpy.float32)
tmxs[:] = _IC_NODATA
tmxs[valid_mask] = (
maxtmp[valid_mask] + (
(25.4/(1. + 18. * numpy.exp(-0.20 * maxtmp[valid_mask]))) *
(numpy.exp(pmxtmp[valid_mask] * bio[valid_mask]) - 0.13)))
# Minimum temperature
tmns = numpy.empty(aglivc.shape, dtype=numpy.float32)
tmns[:] = _IC_NODATA
tmns[valid_mask] = (
mintmp[valid_mask] +
(pmntmp[valid_mask] * bio[valid_mask] - 1.78))
# Average temperature
ctemp = numpy.empty(aglivc.shape, dtype=numpy.float32)
ctemp[:] = _IC_NODATA
ctemp[valid_mask] = (tmxs[valid_mask] + tmns[valid_mask])/2.
return ctemp
def calc_potprd(mintmp, maxtmp, ctemp, ppdf_1, ppdf_2, ppdf_3, ppdf_4):
"""Calculate the limiting effect of temperature on growth.
Estimated soil temperature restricts potential production according to
a Poisson Density Function curve described by the plant functional
        type-specific parameters ppdf_1-4. Lines 73-84 Potcrp.f
Parameters:
mintmp (numpy.ndarray): input, average minimum monthly temperature
maxtmp (numpy.ndarray): input, average maximum monthly
temperature
ctemp (numpy.ndarray): derived, soil temperature as calculated from
monthly temperature and modified by standing live biomass
ppdf_1 (numpy.ndarray): parameter, optimum temperature for growth
ppdf_2 (numpy.ndarray): parameter, maximum temperature for growth
ppdf_3 (numpy.ndarray): parameter, left curve shape for Poisson
Density Function curve describing growth as function of
temperature
ppdf_4 (numpy.ndarray): parameter, right curve shape for Poisson
Density Function curve describing growth as function of
temperature
Returns:
potprd, scaling factor describing potential production limited
by temperature
"""
valid_mask = (
(~numpy.isclose(mintmp, mintmp_nodata)) &
(~numpy.isclose(maxtmp, maxtmp_nodata)) &
(ctemp != _IC_NODATA) &
(ppdf_1 != _IC_NODATA) &
(ppdf_2 != _IC_NODATA) &
(ppdf_3 != _IC_NODATA) &
(ppdf_4 != _IC_NODATA))
frac = numpy.empty(ctemp.shape, dtype=numpy.float32)
frac[:] = _TARGET_NODATA
frac[valid_mask] = (
(ppdf_2[valid_mask] - ctemp[valid_mask]) /
(ppdf_2[valid_mask] - ppdf_1[valid_mask]))
        avg_tmp = numpy.empty(ctemp.shape, dtype=numpy.float32)
        avg_tmp[:] = _TARGET_NODATA
        avg_tmp[valid_mask] = (mintmp[valid_mask] + maxtmp[valid_mask]) / 2.
grow_mask = (
(avg_tmp > 0) &
(frac > 0) &
valid_mask)
potprd = numpy.empty(ctemp.shape, dtype=numpy.float32)
potprd[:] = _TARGET_NODATA
potprd[valid_mask] = 0.
potprd[grow_mask] = (numpy.exp(
(ppdf_3[grow_mask]/ppdf_4[grow_mask]) *
(1. - numpy.power(frac[grow_mask], ppdf_4[grow_mask]))) *
numpy.power(frac[grow_mask], ppdf_3[grow_mask]))
return potprd
def calc_h2ogef_1(
pevap, avh2o_1, precip, wc, pprpts_1, pprpts_2, pprpts_3):
"""Calculate the limiting factor of water availability on growth.
Soil moisture restricts potential production according to the ratio
of available water to reference evapotranspiration. The shape of the
linear relationship of this ratio to potential production is
controlled by the site parameters pprpts_1, pprpts_2, and pprpts_3.
Lines 57-64 Potcrp.f
Parameters:
pevap (numpy.ndarray): derived, reference evapotranspiration
avh2o_1 (numpy.ndarray): state variable, water available to this
plant functional type for growth
precip (numpy.ndarray): input, precipitation for the current month
wc (numpy.ndarray): derived, water content in soil layer 1
pprpts_1 (numpy.ndarray): parameter, the minimum ratio of
available water to reference evapotranspiration that limits
production completely
pprpts_2 (numpy.ndarray): parameter, influences the slope of the
line predicting potential production from available water
pprpts_3 (numpy.ndarray): parameter, the ratio of available water
to reference evapotranspiration above which production is
not restricted
Returns:
h2ogef_1, scaling factor describing potential production limited
by soil moisture
"""
valid_mask = (
(pevap != _TARGET_NODATA) &
(~numpy.isclose(avh2o_1, _SV_NODATA)) &
(~numpy.isclose(precip, precip_nodata)) &
(wc != _TARGET_NODATA) &
(pprpts_1 != _IC_NODATA) &
(pprpts_2 != _IC_NODATA) &
(pprpts_3 != _IC_NODATA))
h2ogef_prior = numpy.empty(pevap.shape, dtype=numpy.float32)
h2ogef_prior[:] = _TARGET_NODATA
h2ogef_prior[valid_mask] = numpy.where(
pevap[valid_mask] >= 0.01,
(avh2o_1[valid_mask] + precip[valid_mask])/pevap[valid_mask],
0.01)
intcpt = (
pprpts_1[valid_mask] + (pprpts_2[valid_mask] * wc[valid_mask]))
slope = 1. / (pprpts_3[valid_mask] - intcpt)
h2ogef_1 = numpy.empty(pevap.shape, dtype=numpy.float32)
h2ogef_1[:] = _TARGET_NODATA
h2ogef_1[valid_mask] = (
1.0 + slope *
(h2ogef_prior[valid_mask] - pprpts_3[valid_mask]))
h2ogef_1[valid_mask] = numpy.clip(h2ogef_1[valid_mask], 0.01, 1.)
return h2ogef_1
def calc_biof(sum_stdedc, sum_aglivc, strucc_1, pmxbio, biok5):
"""Calculate the effect of obstruction on growth.
Live biomass, standing dead biomass, and litter reduce potential
production through obstruction. The shape of the relationship between
standing biomass and litter and potential production is controlled by
the site parameter pmxbio and the plant functional type parameter
biok5. Lines 91-120 Potcrp.f
Parameters:
sum_stdedc (numpy.ndarray): derived, total carbon in standing dead
biomass across plant functional types
sum_aglivc (numpy.ndarray): derived, total carbon in aboveground
live biomass across plant functional types
strucc_1 (numpy.ndarray): derived, carbon in surface litter
pmxbio (numpy.ndarray): parameter, maximum biomass impact on
potential production
biok5 (numpy.ndarray): parameter, level of standing dead biomass
and litter
Returns:
biof, scaling factor describing potential production limited
by obstruction
"""
valid_mask = (
(~numpy.isclose(strucc_1, _SV_NODATA)) &
(pmxbio != _IC_NODATA) &
(biok5 != _IC_NODATA))
bioc = numpy.empty(sum_stdedc.shape, dtype=numpy.float32)
bioc[:] = _IC_NODATA
bioc[valid_mask] = numpy.where(
((sum_stdedc[valid_mask] + 0.1*strucc_1[valid_mask]) <= 0.), 0.01,
(sum_stdedc[valid_mask] + 0.1*strucc_1[valid_mask]))
bioc[valid_mask] = numpy.where(
(bioc[valid_mask] > pmxbio[valid_mask]), pmxbio[valid_mask],
bioc[valid_mask])
bioprd = numpy.empty(sum_stdedc.shape, dtype=numpy.float32)
bioprd[:] = _IC_NODATA
bioprd[valid_mask] = 1. - (
bioc[valid_mask] / (biok5[valid_mask] + bioc[valid_mask]))
temp1 = 1. - bioprd
temp2 = temp1 * 0.75
temp3 = temp1 * 0.25
ratlc = numpy.empty(sum_stdedc.shape, dtype=numpy.float32)
ratlc[:] = _IC_NODATA
ratlc[valid_mask] = sum_aglivc[valid_mask] / bioc[valid_mask]
biof = numpy.empty(sum_stdedc.shape, dtype=numpy.float32)
biof[:] = _TARGET_NODATA
biof[valid_mask] = numpy.where(
ratlc[valid_mask] <= 1.,
(bioprd[valid_mask] + (temp2[valid_mask] * ratlc[valid_mask])),
numpy.where(
ratlc[valid_mask] <= 2.,
(bioprd[valid_mask] + temp2[valid_mask]) +
temp3[valid_mask] * (ratlc[valid_mask] - 1.),
1.))
return biof
def calc_tgprod_pot_prod(prdx_1, shwave, potprd, h2ogef_1, biof):
"""Calculate total potential production.
Total above- and belowground potential biomass production is calculated
as the total potential production given solar radiation and the
intrinsinc growth capacity of the plant functional type, modified by
limiting factors of temperature, soil moisture, and obstruction by
standing biomass and litter. Line 147 Potcrp.f
Parameters:
prdx_1 (numpy.ndarray): parameter, the intrinsic capacity of the
plant functional type for growth per unit of solar radiation
shwave (numpy.ndarray): derived, shortwave solar radiation outside
the atmosphere
            potprd (numpy.ndarray): derived, scaling factor describing the
                limiting effect of temperature
h2ogef_1 (numpy.ndarray): derived, scaling factor describing the
limiting effect of soil moisture
biof (numpy.ndarray): derived, scaling factor describing the
limiting effect of obstruction by standing biomass and litter
Returns:
tgprod_pot_prod, total above- and belowground potential biomass
production (g biomass)
"""
valid_mask = (
(prdx_1 != _IC_NODATA) &
(shwave != _TARGET_NODATA) &
(potprd != _TARGET_NODATA) &
(h2ogef_1 != _TARGET_NODATA) &
(biof != _TARGET_NODATA))
tgprod_pot_prod = numpy.empty(prdx_1.shape, dtype=numpy.float32)
tgprod_pot_prod[:] = _TARGET_NODATA
tgprod_pot_prod[valid_mask] = (
prdx_1[valid_mask] * shwave[valid_mask] * potprd[valid_mask] *
h2ogef_1[valid_mask] * biof[valid_mask])
return tgprod_pot_prod
# temporary intermediate rasters for calculating total potential production
temp_dir = tempfile.mkdtemp(dir=PROCESSING_DIR)
temp_val_dict = {}
# site-level temporary calculated values
for val in ['sum_aglivc', 'sum_stdedc', 'ctemp', 'shwave', 'pevap']:
temp_val_dict[val] = os.path.join(temp_dir, '{}.tif'.format(val))
# PFT-level temporary calculated values
for pft_i in pft_id_set:
for val in [
'aglivc_weighted', 'stdedc_weighted', 'potprd', 'biof']:
temp_val_dict['{}_{}'.format(val, pft_i)] = os.path.join(
temp_dir, '{}_{}.tif'.format(val, pft_i))
# temporary parameter rasters for calculating total potential production
param_val_dict = {}
# site-level parameters
for val in [
'pmxbio', 'pmxtmp', 'pmntmp', 'fwloss_4', 'pprpts_1',
'pprpts_2', 'pprpts_3']:
target_path = os.path.join(temp_dir, '{}.tif'.format(val))
param_val_dict[val] = target_path
site_to_val = dict(
[(site_code, float(table[val])) for
(site_code, table) in site_param_table.items()])
pygeoprocessing.reclassify_raster(
(aligned_inputs['site_index'], 1), site_to_val, target_path,
gdal.GDT_Float32, _IC_NODATA)
# PFT-level parameters
for val in [
'ppdf_1', 'ppdf_2', 'ppdf_3', 'ppdf_4', 'biok5', 'prdx_1']:
for pft_i in do_PFT:
target_path = os.path.join(
temp_dir, '{}_{}.tif'.format(val, pft_i))
param_val_dict['{}_{}'.format(val, pft_i)] = target_path
fill_val = veg_trait_table[pft_i][val]
pygeoprocessing.new_raster_from_base(
aligned_inputs['site_index'], target_path, gdal.GDT_Float32,
[_IC_NODATA], fill_value_list=[fill_val])
maxtmp_nodata = pygeoprocessing.get_raster_info(
aligned_inputs['max_temp_{}'.format(current_month)])['nodata'][0]
mintmp_nodata = pygeoprocessing.get_raster_info(
aligned_inputs['min_temp_{}'.format(current_month)])['nodata'][0]
precip_nodata = pygeoprocessing.get_raster_info(
aligned_inputs['precip_{}'.format(month_index)])['nodata'][0]
# calculate intermediate quantities that do not differ between PFTs:
# sum of aglivc (standing live biomass) and stdedc (standing dead biomass)
# across PFTs, weighted by % cover of each PFT
for sv in ['aglivc', 'stdedc']:
weighted_sum_path = temp_val_dict['sum_{}'.format(sv)]
weighted_state_variable_sum(
sv, prev_sv_reg, aligned_inputs, pft_id_set, weighted_sum_path)
# ctemp, soil temperature relative to impacts on growth
pygeoprocessing.raster_calculator(
[(path, 1) for path in [
temp_val_dict['sum_aglivc'],
param_val_dict['pmxbio'],
aligned_inputs['max_temp_{}'.format(current_month)],
param_val_dict['pmxtmp'],
aligned_inputs['min_temp_{}'.format(current_month)],
param_val_dict['pmntmp']]],
calc_ctemp, temp_val_dict['ctemp'], gdal.GDT_Float32, _IC_NODATA)
# shwave, shortwave radiation outside the atmosphere
_shortwave_radiation(
aligned_inputs['site_index'], current_month, temp_val_dict['shwave'])
# pet, reference evapotranspiration modified by fwloss parameter
_reference_evapotranspiration(
aligned_inputs['max_temp_{}'.format(current_month)],
aligned_inputs['min_temp_{}'.format(current_month)],
temp_val_dict['shwave'],
param_val_dict['fwloss_4'],
temp_val_dict['pevap'])
# calculate quantities that differ between PFTs
for pft_i in do_PFT:
# potprd, the limiting effect of temperature
pygeoprocessing.raster_calculator(
[(path, 1) for path in [
aligned_inputs['min_temp_{}'.format(current_month)],
aligned_inputs['max_temp_{}'.format(current_month)],
temp_val_dict['ctemp'],
param_val_dict['ppdf_1_{}'.format(pft_i)],
param_val_dict['ppdf_2_{}'.format(pft_i)],
param_val_dict['ppdf_3_{}'.format(pft_i)],
param_val_dict['ppdf_4_{}'.format(pft_i)]]],
calc_potprd, temp_val_dict['potprd_{}'.format(pft_i)],
gdal.GDT_Float32, _TARGET_NODATA)
# h2ogef_1, the limiting effect of soil water availability
pygeoprocessing.raster_calculator(
[(path, 1) for path in [
temp_val_dict['pevap'],
prev_sv_reg['avh2o_1_{}_path'.format(pft_i)],
aligned_inputs['precip_{}'.format(month_index)],
pp_reg['wc_path'],
param_val_dict['pprpts_1'],
param_val_dict['pprpts_2'],
param_val_dict['pprpts_3']]],
calc_h2ogef_1, month_reg['h2ogef_1_{}'.format(pft_i)],
gdal.GDT_Float32, _TARGET_NODATA)
# biof, the limiting effect of obstruction
pygeoprocessing.raster_calculator(
[(path, 1) for path in [
temp_val_dict['sum_stdedc'],
temp_val_dict['sum_aglivc'],
prev_sv_reg['strucc_1_path'],
param_val_dict['pmxbio'],
param_val_dict['biok5_{}'.format(pft_i)]]],
calc_biof, temp_val_dict['biof_{}'.format(pft_i)],
gdal.GDT_Float32, _TARGET_NODATA)
# total potential production
pygeoprocessing.raster_calculator(
[(path, 1) for path in [
param_val_dict['prdx_1_{}'.format(pft_i)],
temp_val_dict['shwave'],
temp_val_dict['potprd_{}'.format(pft_i)],
month_reg['h2ogef_1_{}'.format(pft_i)],
temp_val_dict['biof_{}'.format(pft_i)]]],
calc_tgprod_pot_prod,
month_reg['tgprod_pot_prod_{}'.format(pft_i)],
gdal.GDT_Float32, _TARGET_NODATA)
# clean up temporary files
shutil.rmtree(temp_dir)
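# Note: the final raster_calculator call above composes potential production
# multiplicatively, tgprod = prdx_1 * shwave * potprd * h2ogef_1 * biof. With
# hypothetical values prdx_1=0.3, shwave=800 langleys and the three scaling
# factors at 0.8, 0.6 and 0.9,
#   tgprod ~ 0.3 * 800 * 0.8 * 0.6 * 0.9 ~ 104 g biomass
# for the month, before any nutrient limitation is applied.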
def _calc_favail_P(sv_reg, param_val_dict):
"""Calculate the fraction of P in surface layer available to plants.
This must be performed after the sum of mineral N in the surface layer
is calculated because the fraction of labile P available to plants is
impacted by the amount of mineral N in the surface layer.
Parameters:
sv_reg (dict): map of key, path pairs giving paths to state variables
for the current month, including minerl_1_1, mineral N in the
surface layer
param_val_dict (dict): map of key, path pairs giving paths to
site-level parameters, including favail_4, favail_5, favail_6,
and favail_2
Side effects:
modifies or creates the raster indicated by
`param_val_dict['favail_2']`
Returns:
None
"""
def favail_P_op(minerl_1_1, favail_4, favail_5, favail_6):
"""Calculate the fraction of P in surface layer available to plants.
The fraction of labile P available to plants depends on mineral N in
the surface layer and site parameters favail_4, favail_5, favail_6.
Line 395 Simsom.f
Parameters:
minerl_1_1 (numpy.ndarray): state variable, mineral N in the
surface layer
favail_4 (numpy.ndarray): parameter, minimum fraction of P
available
favail_5 (numpy.ndarray): parameter, maximum fraction of P
available
favail_6 (numpy.ndarray): parameter, mineral N in surface layer
required to attain maximum fraction of P available
Returns:
favail_P, fraction of mineral P available to plants
"""
valid_mask = (
(~numpy.isclose(minerl_1_1, _SV_NODATA)) &
(favail_4 != _IC_NODATA) &
(favail_5 != _IC_NODATA) &
(favail_6 != _IC_NODATA))
interim = numpy.empty(minerl_1_1.shape, dtype=numpy.float32)
interim[:] = _IC_NODATA
interim[valid_mask] = (
favail_4[valid_mask] + minerl_1_1[valid_mask] *
(favail_5[valid_mask] - favail_4[valid_mask]) /
favail_6[valid_mask])
favail_P = numpy.empty(minerl_1_1.shape, dtype=numpy.float32)
favail_P[:] = _IC_NODATA
favail_P[valid_mask] = numpy.maximum(
favail_4[valid_mask], numpy.minimum(
interim[valid_mask], favail_5[valid_mask]))
return favail_P
pygeoprocessing.raster_calculator(
[(path, 1) for path in [
sv_reg['minerl_1_1_path'],
param_val_dict['favail_4'],
param_val_dict['favail_5'],
param_val_dict['favail_6']]],
favail_P_op, param_val_dict['favail_2'],
gdal.GDT_Float32, _IC_NODATA)
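# Illustrative sketch (hypothetical parameter values): favail_P_op above is a
# clamped linear ramp in surface mineral N. With favail_4=0.2, favail_5=0.5
# and favail_6=2.0,
#   >>> f4, f5, f6 = 0.2, 0.5, 2.0
#   >>> [round(max(f4, min(f4 + n * (f5 - f4) / f6, f5)), 2)
#   ...  for n in (0., 1., 4.)]
#   [0.2, 0.35, 0.5]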
def _calc_avail_mineral_nutrient(pft_param_dict, sv_reg, iel, target_path):
"""Calculate one mineral nutrient available to one plant functional type.
The mineral nutrient available to a plant functional type is calculated
from the mineral nutrient content of soil layers accessible by that
plant function type.
Parameters:
pft_param_dict (dict): map of key, value pairs giving the values of
parameters for this plant functional type
(i.e., veg_trait_table[pft_i] for this pft_i)
sv_reg (dict): map of key, path pairs giving paths to state
variables for the current month
iel (int): integer index for current nutrient (1=N, 2=P)
target_path (string): path to raster to contain available mineral
nutrient for this plant functional type and nutrient
Side effects:
modifies or creates the raster indicated by `target_path`
Returns:
None
"""
nlay = int(pft_param_dict['nlaypg'])
mineral_raster_list = [
sv_reg['minerl_{}_{}_path'.format(lyr, iel)] for lyr in range(
1, nlay + 1)]
raster_list_sum(
mineral_raster_list, _SV_NODATA, target_path, _TARGET_NODATA,
nodata_remove=True)
def _calc_available_nutrient(
pft_i, iel, pft_param_dict, sv_reg, site_param_table, site_index_path,
availm_path, favail_path, tgprod_path, eavail_path):
"""Calculate nutrient available to a plant functional type.
The nutrient available is the sum of mineral nutrient (N or P) in soil
layers accessible by the roots of the plant functional type, modified
by the fraction of nutrient available to plants and the current root
biomass.
Parameters:
pft_i (int): plant functional type index
iel (int): nutrient index (iel=1 indicates N, iel=2 indicates P)
pft_param_dict (dict): map of key, value pairs giving the values of
parameters for this plant functional type
(i.e., veg_trait_table[pft_i] for this pft_i)
sv_reg (dict): map of key, path pairs giving paths to state
variables for the current month
        site_param_table (dict): map of site spatial index to dictionaries
            that contain site-level parameters
        site_index_path (string): path to site spatial index raster
        availm_path (string): path to raster containing available mineral
            nutrient for the given plant functional type and nutrient
favail_path (string): path to raster containing the appropriate value
of the parameter favail. For nitrogen, this parameter is supplied
directly as user input, but for phosphorus, it must be calculated
from other parameters.
tgprod_path (string): path to raster containing total potential
production (g biomass)
eavail_path (string): path to location to store the result, nutrient
available to the plant functional type
Side effects:
modifies or creates the raster indicated by `eavail_path`
Returns:
None
"""
def calc_eavail(rictrl, bglivc, riint, availm, favail, crpstg):
"""Calculate available nutrient.
Parameters:
rictrl (numpy.ndarray): parameter, scaling factor used to
calculate the impact of root biomass on nutrient availability
bglivc (numpy.ndarray): state variable, carbon in belowground
live biomass
riint (numpy.ndarray): parameter, intercept used to calculate the
impact of root biomass on nutrient availability
availm (numpy.ndarray): derived, the sum of mineral nutrient in
soil layers accessible by this plant functional type
favail (numpy.ndarray): parameter, fraction of the nutrient
available each month to plants
crpstg (numpy.ndarray): state variable, nutrient in
retranslocation storage pool for the plant functional type
Returns:
eavail, the nutrient available to the plant functional type
"""
valid_mask = (
(rictrl != _IC_NODATA) &
(~numpy.isclose(bglivc, _SV_NODATA)) &
(riint != _IC_NODATA) &
(availm != _TARGET_NODATA) &
(favail != _IC_NODATA) &
(~numpy.isclose(crpstg, _SV_NODATA)))
rimpct = numpy.empty(rictrl.shape, dtype=numpy.float32)
rimpct[:] = _TARGET_NODATA
rimpct[valid_mask] = numpy.where(
((rictrl[valid_mask] * bglivc[valid_mask] * 2.5) > 33.),
1., 1. - riint[valid_mask] * numpy.exp(
-rictrl[valid_mask] * bglivc[valid_mask] * 2.5))
eavail = numpy.empty(rictrl.shape, dtype=numpy.float32)
eavail[:] = _TARGET_NODATA
eavail[valid_mask] = (
(availm[valid_mask] * favail[valid_mask] * rimpct[valid_mask]) +
crpstg[valid_mask])
return eavail
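    # Illustrative worked example for calc_eavail (hypothetical values, not
    # part of the model run): with rictrl=0.015, bglivc=100 g C/m2, riint=0.8,
    # availm=2.0, favail=0.9 and crpstg=0.1,
    #   rictrl * bglivc * 2.5 = 3.75 <= 33, so
    #   rimpct = 1 - 0.8 * exp(-3.75) = 0.981 (approx.)
    #   eavail = 2.0 * 0.9 * 0.981 + 0.1 = 1.87 (approx.)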
def add_symbiotic_fixed_N(eavail_prior, snfxmx, tgprod):
"""Add nitrogen fixed by the plant to nutrient available.
Some nitrogen may be fixed by the plant, and this must be added
to available mineral nitrogen. Nitrogen fixed by the plant is
calculated from total potential production and the maximum
rate of N fixation.
Parameters:
eavail_prior (numpy.ndarray): derived, mineral nitrogen available
to the plant functional type, calculated with calc_eavail()
snfxmx (numpy.ndarray): parameter, maximum rate of symbiotic
nitrogen fixation
tgprod (numpy.ndarray): derived, total above- and belowground
potential production (g biomass)
Returns:
eavail, total N available including N fixed by the plant
"""
valid_mask = (
(eavail_prior != _TARGET_NODATA) &
(snfxmx != _IC_NODATA) &
(tgprod != _TARGET_NODATA))
maxNfix = numpy.empty(eavail_prior.shape, dtype=numpy.float32)
maxNfix[:] = _TARGET_NODATA
maxNfix[valid_mask] = snfxmx[valid_mask] * (tgprod[valid_mask] / 2.5)
eavail = numpy.empty(eavail_prior.shape, dtype=numpy.float32)
eavail[:] = _TARGET_NODATA
eavail[valid_mask] = eavail_prior[valid_mask] + maxNfix[valid_mask]
return eavail
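    # Illustrative worked example for add_symbiotic_fixed_N (hypothetical
    # values): with snfxmx=0.01 and tgprod=300 g biomass,
    #   maxNfix = 0.01 * (300 / 2.5) = 1.2
    # so an eavail_prior of 1.87 becomes 1.87 + 1.2 = 3.07.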
# temporary intermediate rasters for calculating available nutrient
temp_dir = tempfile.mkdtemp(dir=PROCESSING_DIR)
param_val_dict = {}
for val in ['rictrl', 'riint']:
target_path = os.path.join(temp_dir, '{}.tif'.format(val))
param_val_dict[val] = target_path
site_to_val = dict(
[(site_code, float(table[val])) for
(site_code, table) in site_param_table.items()])
pygeoprocessing.reclassify_raster(
(site_index_path, 1), site_to_val, target_path,
gdal.GDT_Float32, _IC_NODATA)
for val in ['snfxmx_1']:
target_path = os.path.join(temp_dir, '{}.tif'.format(val))
param_val_dict[val] = target_path
fill_val = pft_param_dict[val]
pygeoprocessing.new_raster_from_base(
site_index_path, target_path, gdal.GDT_Float32,
[_IC_NODATA], fill_value_list=[fill_val])
pygeoprocessing.raster_calculator(
[(path, 1) for path in [
param_val_dict['rictrl'],
sv_reg['bglivc_{}_path'.format(pft_i)],
param_val_dict['riint'],
availm_path, favail_path,
sv_reg['crpstg_{}_{}_path'.format(iel, pft_i)]]],
calc_eavail, eavail_path,
gdal.GDT_Float32, _TARGET_NODATA)
if iel == 1:
eavail_prior_path = os.path.join(temp_dir, 'eavail_prior.tif')
shutil.copyfile(eavail_path, eavail_prior_path)
pygeoprocessing.raster_calculator(
[(path, 1) for path in [
eavail_prior_path,
param_val_dict['snfxmx_1'],
tgprod_path]],
add_symbiotic_fixed_N, eavail_path,
gdal.GDT_Float32, _TARGET_NODATA)
# clean up temporary files
shutil.rmtree(temp_dir)
def _calc_nutrient_demand(
biomass_production_path, fraction_allocated_to_roots_path,
cercrp_min_above_path, cercrp_min_below_path, demand_path):
"""Calculate the demand of one nutrient by a plant functional type.
Demand is calculated from total biomass production, the fraction of biomass
production allocated to roots, and the minimum carbon/nutrient ratios of
above- and belowground live biomass. Lines 88-92 CropDynC.f and line
65, Nutrlm.f
Parameters:
biomass_production_path (string): path to raster giving total
biomass production
fraction_allocated_to_roots_path (string): path to raster giving
            the fraction of total biomass production allocated to roots
cercrp_min_above_path (string): path to raster giving the minimum
ratio of carbon to nutrient in aboveground live biomass
cercrp_min_below_path (string): path to raster giving the minimum
            ratio of carbon to nutrient in belowground live biomass
        demand_path (string): path to raster to contain the result, demand
            for the nutrient
Side effects:
modifies or creates the raster indicated by `demand_path`
Returns:
None
"""
def nutrient_demand_op(
biomass_production, root_fraction, cercrp_min_above,
cercrp_min_below):
"""Calculate nutrient demand.
Parameters:
biomass_production (numpy.ndarray): derived, total biomass
production
root_fraction (numpy.ndarray): derived, fraction of biomass
allocated to roots
cercrp_min_above (numpy.ndarray): derived, minimum carbon to
nutrient ratio of new aboveground live material
cercrp_min_below (numpy.ndarray): derived, minimum carbon to
nutrient ratio of new belowground live material
Returns:
demand_e, nutrient demand
"""
valid_mask = (
(biomass_production != _TARGET_NODATA) &
(root_fraction != _TARGET_NODATA) &
(cercrp_min_above != _TARGET_NODATA) &
(cercrp_min_above > 0) &
(cercrp_min_below > 0) &
(cercrp_min_below != _TARGET_NODATA))
demand_above = numpy.empty(root_fraction.shape, dtype=numpy.float32)
demand_above[:] = _TARGET_NODATA
demand_above[valid_mask] = (
((biomass_production[valid_mask] *
(1. - root_fraction[valid_mask])) / 2.5) *
(1. / cercrp_min_above[valid_mask]))
demand_below = numpy.empty(root_fraction.shape, dtype=numpy.float32)
demand_below[:] = _TARGET_NODATA
demand_below[valid_mask] = (
((biomass_production[valid_mask] *
(root_fraction[valid_mask])) / 2.5) *
(1. / cercrp_min_below[valid_mask]))
demand_e = numpy.empty(root_fraction.shape, dtype=numpy.float32)
demand_e[:] = _TARGET_NODATA
demand_e[valid_mask] = (
demand_above[valid_mask] + demand_below[valid_mask])
return demand_e
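    # Illustrative worked example for nutrient_demand_op (hypothetical
    # values): with biomass_production=300 g, root_fraction=0.5,
    # cercrp_min_above=25 and cercrp_min_below=40,
    #   demand_above = (300 * 0.5 / 2.5) / 25 = 2.4
    #   demand_below = (300 * 0.5 / 2.5) / 40 = 1.5
    #   demand_e = 2.4 + 1.5 = 3.9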
pygeoprocessing.raster_calculator(
[(path, 1) for path in [
biomass_production_path, fraction_allocated_to_roots_path,
cercrp_min_above_path, cercrp_min_below_path]],
nutrient_demand_op, demand_path,
gdal.GDT_Float32, _TARGET_NODATA)
def calc_provisional_fracrc(
annual_precip, frtcindx, bgppa, bgppb, agppa, agppb,
cfrtcw_1, cfrtcw_2, cfrtcn_1, cfrtcn_2):
"""Calculate provisional fraction of carbon allocated to roots.
A temporary provisional fraction of carbon allocated to roots must be
calculated prior to calculating plant demand for N and P. The value
of this provisional fraction depends on whether the plant functional
type is modeled as a perennial plant or with the "Great Plains"
equation of Parton et al. 1987, "Analysis of factors controlling soil
organic matter levels in Great Plains grasslands", Soil Science
Society of America Journal. Lines 36-47 cropDynC.f
Parameters:
annual_precip (numpy.ndarray): derived, sum of monthly
precipitation over twelve months including the current month
frtcindx (numpy.ndarray): parameter, flag indicating whether
root:shoot allocation follows the Great Plains equation
(frtcindx=0) or as a perennial plant (frtcindx=1)
bgppa (numpy.ndarray): parameter, intercept in regression
estimating belowground production from annual precipitation
if frtcindx=0
bgppb (numpy.ndarray): parameter, slope in regression estimating
belowground production from annual precipitation if
frtcindx=0
agppa (numpy.ndarray): parameter, intercept in regression
estimating aboveground production from annual precipitation
if frtcindx=0
agppb (numpy.ndarray): parameter, slope in regression estimating
aboveground production from annual precipitation if
frtcindx=0
cfrtcw_1 (numpy.ndarray): parameter, maximum fraction of carbon
allocated to roots under maximum water stress if frtcindx=1
cfrtcw_2 (numpy.ndarray): parameter, minimum fraction of carbon
allocated to roots without water stress if frtcindx=1
cfrtcn_1 (numpy.ndarray): parameter, maximum fraction of carbon
allocated to roots under maximum nutrient stress if frtcindx=1
cfrtcn_2 (numpy.ndarray): parameter, minimum fraction of carbon
allocated to roots under no nutrient stress if frtcindx=1
Returns:
fracrc_p, provisional fraction of carbon allocated to roots
"""
valid_mask = (
(annual_precip != _TARGET_NODATA) &
(frtcindx != _IC_NODATA) &
(bgppa != _IC_NODATA))
rtsh = numpy.empty(annual_precip.shape, dtype=numpy.float32)
rtsh[:] = _TARGET_NODATA
rtsh[valid_mask] = (
(bgppa[valid_mask] +
annual_precip[valid_mask] * bgppb[valid_mask]) /
(agppa[valid_mask] + annual_precip[valid_mask] *
agppb[valid_mask]))
fracrc_p = numpy.empty(annual_precip.shape, dtype=numpy.float32)
fracrc_p[:] = _TARGET_NODATA
fracrc_p[valid_mask] = numpy.where(
frtcindx[valid_mask] == 0,
(1.0 / (1.0 / rtsh[valid_mask] + 1.0)),
((cfrtcw_1[valid_mask] + cfrtcw_2[valid_mask] +
cfrtcn_1[valid_mask] + cfrtcn_2[valid_mask]) / 4.0))
return fracrc_p
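# Illustrative worked example for calc_provisional_fracrc (hypothetical
# parameter values): with annual_precip=40 cm, bgppa=100, bgppb=7, agppa=50
# and agppb=10, the Great Plains branch (frtcindx=0) gives
#   rtsh = (100 + 40*7) / (50 + 40*10) = 380 / 450 = 0.84 (approx.)
#   fracrc_p = 1 / (1/0.84 + 1) = 0.46 (approx.)
# while the perennial branch (frtcindx=1) with cfrtcw_1=0.8, cfrtcw_2=0.4,
# cfrtcn_1=0.7 and cfrtcn_2=0.3 gives fracrc_p = (0.8+0.4+0.7+0.3)/4 = 0.55.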
def calc_ce_ratios(
pramn_1_path, pramn_2_path, aglivc_path, biomax_path,
pramx_1_path, pramx_2_path, prbmn_1_path, prbmn_2_path,
prbmx_1_path, prbmx_2_path, annual_precip_path, pft_i, iel,
month_reg):
"""Calculate minimum and maximum carbon to nutrient ratios.
Minimum and maximum C/E ratios are used to calculate demand for a
nutrient by a plant functional type. This function calculates the
ratios for above- and belowground plant portions, for one plant
functional type and one nutrient. Fltce.f
Parameters:
pramn_1_path (string): path to raster containing the parameter
pramn_<iel>_1, the minimum aboveground ratio with zero biomass
pramn_2_path (string): path to raster containing the parameter
pramn_<iel>_2, the minimum aboveground ratio with biomass greater
than or equal to biomax
aglivc_path (string): path to raster containing carbon in
aboveground live biomass
biomax_path (string): path to raster containing the parameter
biomax, the biomass above which the ratio equals pramn_2
or pramx_2
pramx_1_path (string): path to raster containing the parameter
pramx_<iel>_1, the maximum aboveground ratio with zero biomass
pramx_2_path (string): path to raster containing the parameter
pramx_<iel>_2, the maximum aboveground ratio with biomass greater
than or equal to biomax
prbmn_1_path (string): path to raster containing the parameter
prbmn_<iel>_1, intercept of regression to predict minimum
belowground ratio from annual precipitation
prbmn_2_path (string): path to raster containing the parameter
prbmn_<iel>_2, slope of regression to predict minimum belowground
ratio from annual precipitation
prbmx_1_path (string): path to raster containing the parameter
prbmx_<iel>_1, intercept of regression to predict maximum
belowground ratio from annual precipitation
prbmx_2_path (string): path to raster containing the parameter
prbmx_<iel>_2, slope of regression to predict maximum belowground
ratio from annual precipitation
annual_precip_path (string): path to annual precipitation raster
pft_i (int): plant functional type index
iel (int): nutrient index (iel=1 indicates N, iel=2 indicates P)
month_reg (dict): map of key, path pairs giving paths to
intermediate calculated values that are shared between
submodels
Side effects:
creates the rasters indicated by
`month_reg['cercrp_min_above_<iel>_<pft_i>']`,
`month_reg['cercrp_max_above_<iel>_<pft_i>']`,
`month_reg['cercrp_min_below_<iel>_<pft_i>']`,
`month_reg['cercrp_max_below_<iel>_<pft_i>']`,
Returns:
None
"""
def calc_above_ratio(pra_1, pra_2, aglivc, biomax):
"""Calculate carbon to nutrient ratio for aboveground material.
Parameters:
pra_1 (numpy.ndarray): parameter, minimum or maximum ratio
with zero biomass
pra_2 (numpy.ndarray): parameter, minimum or maximum ratio
with biomass greater than or equal to biomax
aglivc (numpy.ndarray): state variable, carbon in aboveground
live material
biomax (numpy:ndarray): parameter, biomass above which the
ratio equals pra_2
Returns:
cercrp_above, carbon to nutrient ratio for aboveground
material
"""
valid_mask = (
(pra_1 != _IC_NODATA) &
(pra_2 != _IC_NODATA) &
(~numpy.isclose(aglivc, _SV_NODATA)) &
(biomax != _IC_NODATA))
cercrp_above = numpy.empty(pra_1.shape, dtype=numpy.float32)
cercrp_above[:] = _TARGET_NODATA
cercrp_above[valid_mask] = numpy.minimum(
(pra_1[valid_mask] + (pra_2[valid_mask] - pra_1[valid_mask]) *
2.5 * aglivc[valid_mask] / biomax[valid_mask]),
pra_2[valid_mask])
return cercrp_above
def calc_below_ratio(prb_1, prb_2, annual_precip):
"""Calculate carbon to nutrient ratio for belowground material.
Parameters:
prb_1 (numpy.ndarray): parameter, intercept of regression
to predict ratio from annual precipitation
prb_2 (numpy.ndarray): parameter, slope of regression to
predict ratio from annual precipitation
annual_precip (numpy.ndarray): derived, precipitation in twelve
months including the current month
Returns:
cercrp_below, carbon to nutrient ratio for belowground
material
"""
valid_mask = (
(prb_1 != _IC_NODATA) &
(prb_2 != _IC_NODATA) &
(annual_precip != _TARGET_NODATA))
cercrp_below = numpy.empty(prb_1.shape, dtype=numpy.float32)
cercrp_below[:] = _TARGET_NODATA
cercrp_below[valid_mask] = (
prb_1[valid_mask] +
(prb_2[valid_mask] * annual_precip[valid_mask]))
return cercrp_below
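    # Illustrative worked examples (hypothetical values): for the aboveground
    # ratio, with pra_1=20, pra_2=30, aglivc=80 g C and biomax=400,
    #   cercrp_above = min(20 + (30 - 20) * 2.5 * 80 / 400, 30) = 25;
    # for the belowground ratio, with prb_1=30, prb_2=0.5 and
    # annual_precip=40 cm,
    #   cercrp_below = 30 + 0.5 * 40 = 50.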
pygeoprocessing.raster_calculator(
[(path, 1) for path in [
pramn_1_path, pramn_2_path, aglivc_path, biomax_path]],
calc_above_ratio,
month_reg['cercrp_min_above_{}_{}'.format(iel, pft_i)],
gdal.GDT_Float32, _TARGET_NODATA)
pygeoprocessing.raster_calculator(
[(path, 1) for path in [
pramx_1_path, pramx_2_path, aglivc_path, biomax_path]],
calc_above_ratio,
month_reg['cercrp_max_above_{}_{}'.format(iel, pft_i)],
gdal.GDT_Float32, _TARGET_NODATA)
pygeoprocessing.raster_calculator(
[(path, 1) for path in [
prbmn_1_path, prbmn_2_path, annual_precip_path]],
calc_below_ratio,
month_reg['cercrp_min_below_{}_{}'.format(iel, pft_i)],
gdal.GDT_Float32, _TARGET_NODATA)
pygeoprocessing.raster_calculator(
[(path, 1) for path in [
prbmx_1_path, prbmx_2_path, annual_precip_path]],
calc_below_ratio,
month_reg['cercrp_max_below_{}_{}'.format(iel, pft_i)],
gdal.GDT_Float32, _TARGET_NODATA)
def calc_revised_fracrc(
frtcindx_path, fracrc_p_path, totale_1_path, totale_2_path,
demand_1_path, demand_2_path, h2ogef_1_path, cfrtcw_1_path,
cfrtcw_2_path, cfrtcn_1_path, cfrtcn_2_path, fracrc_r_path):
"""
Calculate revised fraction of carbon allocated to roots.
The revised fraction of carbon allocated to roots includes the
impacts of water and nutrient limitation. The method of the
revised calculation depends on whether the plant functional
type is modeled as a perennial plant or with the "Great Plains"
equation of Parton et al. 1987, "Analysis of factors controlling soil
organic matter levels in Great Plains grasslands", Soil Science
Society of America Journal. Lines 96-104, cropDynC.f, froota.f
Parameters:
frtcindx_path (string): path to raster containing the parameter
frtcindx
fracrc_p_path (string): path to raster containing provisional
fraction of carbon allocated to roots
totale_1_path (string): path to raster containing total available
nitrogen
totale_2_path (string): path to raster containing total available
phosphorus
demand_1_path (string): path to raster containing nitrogen demand
demand_2_path (string): path to raster containing phosphorus demand
h2ogef_1_path (string): path to raster containing the limiting
effect of water availability on growth
cfrtcw_1_path (string): path to raster containing the parameter
cfrtcw_1
cfrtcw_2_path (string): path to raster containing the parameter
cfrtcw_2
cfrtcn_1_path (string): path to raster containing the parameter
cfrtcn_1
cfrtcn_2_path (string): path to raster containing the parameter
cfrtcn_2
fracrc_r_path (string): path to raster that should contain the
result, revised fraction of carbon allocated to roots
Side effects:
creates the raster indicated by `fracrc_r_path`
Returns:
None
"""
def calc_a2drat(totale, demand):
"""Calculate the ratio of available nutrient to nutrient demand.
The ratio of nutrient available to demand for the nutrient is
restricted to be between 0 and 1.
Parameters:
totale (numpy.ndarray): derived, nutrient available
demand (numpy.ndarray): derived, demand for the nutrient
Returns:
a2drat, the ratio of available nutrient to demand, restricted
to be between 0 and 1
"""
valid_mask = (
(totale != _TARGET_NODATA) &
(demand != _TARGET_NODATA))
a2drat = numpy.empty(totale.shape, dtype=numpy.float32)
a2drat[:] = _TARGET_NODATA
demand_mask = ((demand > 0) & valid_mask)
a2drat[valid_mask] = 1.
a2drat[demand_mask] = numpy.clip(
totale[demand_mask] / demand[demand_mask], 0., 1.)
return a2drat
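    # Illustrative worked example for calc_a2drat (hypothetical values): with
    # totale=1.87 and demand=3.9, a2drat = clip(1.87 / 3.9, 0, 1) = 0.48
    # (approx.); if demand were zero, a2drat would default to 1.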
def calc_perennial_fracrc(
h2ogef, cfrtcw_1, cfrtcw_2, a2drat_1, a2drat_2, cfrtcn_1,
cfrtcn_2):
"""Calculate fraction C allocated to roots for a perennial plant.
The fraction of carbon allocated to roots is determined by
water availability, described by h2ogef, and nutrient availability,
described by a2drat_1 for nitrogen and a2drat_2 for phosphorus.
Lines 114-125 froota.f
Parameters:
h2ogef (numpy.ndarray): derived, the limiting factor of water
availability on growth
cfrtcw_1 (numpy.ndarray): parameter, the maximum fraction of
carbon allocated to roots with maximum water stress
cfrtcw_2 (numpy.ndarray): parameter, the minimum fraction of
carbon allocated to roots with no water stress
a2drat_1 (numpy.ndarray): derived, the ratio of available
nitrogen to nitrogen demand, restricted to be between 0
and 1
a2drat_2 (numpy.ndarray): derived, the ratio of available
phosphorus to phosphorus demand, restricted to be between
0 and 1
cfrtcn_1 (numpy.ndarray): parameter, maximum fraction of
carbon allocated to roots with maximum nutrient stress
cfrtcn_2 (numpy.ndarray): parameter, minimum fraction of
carbon allocated to roots with no nutrient stress
Returns:
fracrc_perennial, revised fraction of C allocated to roots for
a perennial plant
"""
valid_mask = (
(h2ogef != _TARGET_NODATA) &
(cfrtcw_1 != _IC_NODATA) &
(cfrtcw_2 != _IC_NODATA) &
(a2drat_1 != _TARGET_NODATA) &
(a2drat_2 != _TARGET_NODATA) &
(cfrtcn_1 != _IC_NODATA) &
(cfrtcn_2 != _IC_NODATA))
h2oeff = numpy.empty(h2ogef.shape, dtype=numpy.float32)
h2oeff[:] = _TARGET_NODATA
h2oeff[valid_mask] = (
(cfrtcw_2[valid_mask] - cfrtcw_1[valid_mask]) *
(h2ogef[valid_mask] - 1.) + cfrtcw_2[valid_mask])
ntreff_1 = numpy.empty(h2ogef.shape, dtype=numpy.float32)
ntreff_1[:] = _TARGET_NODATA
ntreff_1[valid_mask] = (
(cfrtcn_2[valid_mask] - cfrtcn_1[valid_mask]) *
(a2drat_1[valid_mask] - 1.0) + cfrtcn_2[valid_mask])
ntreff_2 = numpy.empty(h2ogef.shape, dtype=numpy.float32)
ntreff_2[:] = _TARGET_NODATA
        ntreff_2[valid_mask] = (
(cfrtcn_2[valid_mask] - cfrtcn_1[valid_mask]) *
(a2drat_2[valid_mask] - 1.0) + cfrtcn_2[valid_mask])
ntreff = numpy.empty(h2ogef.shape, dtype=numpy.float32)
ntreff[:] = _TARGET_NODATA
ntreff[valid_mask] = numpy.maximum(
ntreff_1[valid_mask], ntreff_2[valid_mask])
fracrc_perennial = numpy.empty(
h2ogef.shape, dtype=numpy.float32)
fracrc_perennial[:] = _TARGET_NODATA
fracrc_perennial[valid_mask] = numpy.minimum(
numpy.maximum(h2oeff[valid_mask], ntreff[valid_mask]), 0.99)
return fracrc_perennial
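    # Illustrative worked example for calc_perennial_fracrc (hypothetical
    # values): with h2ogef=0.7, cfrtcw_1=0.8, cfrtcw_2=0.4, a2drat_1=0.48,
    # a2drat_2=1.0, cfrtcn_1=0.7 and cfrtcn_2=0.3,
    #   h2oeff = (0.4 - 0.8) * (0.7 - 1) + 0.4 = 0.52
    #   ntreff_1 = (0.3 - 0.7) * (0.48 - 1) + 0.3 = 0.51 (approx.)
    #   ntreff_2 = (0.3 - 0.7) * (1.0 - 1) + 0.3 = 0.30
    #   fracrc_perennial = min(max(0.52, 0.51), 0.99) = 0.52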
def revised_fracrc_op(frtcindx, fracrc_p, fracrc_perennial):
"""Calculate revised fraction of carbon allocated to roots.
The revised fraction of carbon allocated to roots is calculated
according to the parameter frtcindx. If frtcindx=0 (use the "Great
Plains equation"), the revised fraction is equal to the provisional
fraction. If frtcindx=1 (a perennial plant), the revised fraction
is calculated from water and nutrient stress.
Parameters:
frtcindx (numpy.ndarray): parameter, indicates whether revised
fraction of carbon allocated to roots should follow the
"Great Plains equation" or the algorithm for a perennial
plant
fracrc_p (numpy.ndarray): derived, provisional fraction of
carbon allocated to roots
fracrc_perennial (numpy.ndarray): derived, fraction of
carbon allocated to roots for a perennial plant
Returns:
fracrc_r, revised fraction of carbon allocated to roots
"""
valid_mask = (
(frtcindx != _IC_NODATA) &
(fracrc_p != _TARGET_NODATA) &
(fracrc_perennial != _TARGET_NODATA))
fracrc_r = numpy.empty(frtcindx.shape, dtype=numpy.float32)
fracrc_r[:] = _TARGET_NODATA
fracrc_r[valid_mask] = numpy.where(
frtcindx[valid_mask] == 0, fracrc_p[valid_mask],
fracrc_perennial[valid_mask])
return fracrc_r
# temporary intermediate rasters for calculating revised fracrc
temp_dir = tempfile.mkdtemp(dir=PROCESSING_DIR)
temp_val_dict = {}
for val in ['a2drat_1', 'a2drat_2', 'fracrc_perennial']:
temp_val_dict[val] = os.path.join(
temp_dir, '{}.tif'.format(val))
pygeoprocessing.raster_calculator(
[(path, 1) for path in [totale_1_path, demand_1_path]],
calc_a2drat, temp_val_dict['a2drat_1'], gdal.GDT_Float32,
_TARGET_NODATA)
pygeoprocessing.raster_calculator(
[(path, 1) for path in [totale_2_path, demand_2_path]],
calc_a2drat, temp_val_dict['a2drat_2'], gdal.GDT_Float32,
_TARGET_NODATA)
pygeoprocessing.raster_calculator(
[(path, 1) for path in [
h2ogef_1_path, cfrtcw_1_path, cfrtcw_2_path,
temp_val_dict['a2drat_1'], temp_val_dict['a2drat_2'],
cfrtcn_1_path, cfrtcn_2_path]],
calc_perennial_fracrc, temp_val_dict['fracrc_perennial'],
gdal.GDT_Float32, _TARGET_NODATA)
pygeoprocessing.raster_calculator(
[(path, 1) for path in [
frtcindx_path, fracrc_p_path,
temp_val_dict['fracrc_perennial']]],
revised_fracrc_op, fracrc_r_path,
gdal.GDT_Float32, _TARGET_NODATA)
# clean up temporary files
shutil.rmtree(temp_dir)
def grazing_effect_on_aboveground_production(tgprod, fracrc, flgrem, grzeff):
"""Adjust aboveground production with the impact of grazing.
Removal of biomass by herbivores directly impacts potential
aboveground production according to the amount of biomass removed
and the parameter grzeff, which acts as a switch to determine the
effect. If grzeff=0, 3, or 4, aboveground production is not
changed. If grzeff=1 or 6, production decreases linearly with
biomass removed; if grzeff=2 or 5, biomass removed has a quadratic
impact on production. Grazrst.f
Parameters:
tgprod (numpy.ndarray): derived, total potential biomass
production restricted by water and nutrient availability
fracrc (numpy.ndarray): derived, fraction of carbon allocated
to roots according to water and nutrient availability
flgrem (numpy.ndarray): derived, fraction of live biomass
removed by grazing in previous monthly step
grzeff (numpy.ndarray): parameter, the effect of defoliation on
production and root:shoot ratio
Returns:
agprod, aboveground production impacted by grazing
"""
valid_mask = (
(tgprod != _TARGET_NODATA) &
(fracrc != _TARGET_NODATA) &
(flgrem != _TARGET_NODATA) &
(grzeff != _IC_NODATA))
agprod_prior = numpy.empty(tgprod.shape, dtype=numpy.float32)
agprod_prior[:] = _TARGET_NODATA
agprod_prior[valid_mask] = (
tgprod[valid_mask] * (1. - fracrc[valid_mask]))
linear_effect = numpy.empty(tgprod.shape, dtype=numpy.float32)
linear_effect[:] = _TARGET_NODATA
linear_effect[valid_mask] = numpy.maximum(
(1. - (2.21*flgrem[valid_mask])) * agprod_prior[valid_mask],
0.02)
quadratic_effect = numpy.empty(tgprod.shape, dtype=numpy.float32)
quadratic_effect[:] = _TARGET_NODATA
quadratic_effect[valid_mask] = (
(1. + 2.6*flgrem[valid_mask] -
(5.83*(numpy.power(flgrem[valid_mask], 2)))) *
agprod_prior[valid_mask])
quadratic_effect[valid_mask] = numpy.maximum(
quadratic_effect[valid_mask], 0.02)
no_effect_mask = (valid_mask & numpy.isin(grzeff, [0, 3, 4]))
linear_mask = (valid_mask & numpy.isin(grzeff, [1, 6]))
quadratic_mask = (valid_mask & numpy.isin(grzeff, [2, 5]))
agprod = numpy.empty(tgprod.shape, dtype=numpy.float32)
agprod[:] = _TARGET_NODATA
agprod[no_effect_mask] = agprod_prior[no_effect_mask]
agprod[linear_mask] = linear_effect[linear_mask]
agprod[quadratic_mask] = quadratic_effect[quadratic_mask]
return agprod
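# Illustrative worked example (hypothetical values): with tgprod=300 g,
# fracrc=0.5 and flgrem=0.1, agprod_prior = 300 * (1 - 0.5) = 150, and
#   grzeff in (0, 3, 4): agprod = 150 (no effect)
#   grzeff in (1, 6):    agprod = (1 - 2.21*0.1) * 150 = 116.9 (approx.)
#   grzeff in (2, 5):    agprod = (1 + 2.6*0.1 - 5.83*0.01) * 150
#                               = 180.3 (approx.)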
def grazing_effect_on_root_shoot(fracrc, flgrem, grzeff, gremb):
"""Adjust root:shoot ratio according to the impact of grazing.
Removal of biomass by herbivores directly impacts the root:shoot
ratio of production according to the amount of biomass removed and
the parameter grzeff, which acts as a switch to determine the
effect. If grzeff=0 or 1, the root:shoot ratio is not changed.
If grzeff=2 or 3, biomass removed has a quadratic impact on the
root:shoot ratio. If grzeff=4, 5, or 6, biomass removed has a
linear effect on the root:shoot ratio. The parameter gremb
multiplies the linear impact of grazing when grzeff=4, 5 or 6.
Grzrst.f
Parameters:
fracrc (numpy.ndarray): derived, fraction of carbon allocated
to roots according to water and nutrient availability
flgrem (numpy.ndarray): derived, fraction of live biomass
removed by grazing in previous monthly step
grzeff (numpy.ndarray): parameter, the effect of defoliation on
production and root:shoot ratio
        gremb (numpy.ndarray): parameter, grazing effect multiplier
Returns:
rtsh, root:shoot ratio impacted by grazing
"""
valid_mask = (
(fracrc != _TARGET_NODATA) &
(flgrem != _TARGET_NODATA) &
(grzeff != _IC_NODATA) &
(gremb != _IC_NODATA))
rtsh_prior = numpy.empty(fracrc.shape, dtype=numpy.float32)
rtsh_prior[:] = _TARGET_NODATA
rtsh_prior[valid_mask] = (
fracrc[valid_mask] / (1. - fracrc[valid_mask]))
quadratic_effect = numpy.empty(fracrc.shape, dtype=numpy.float32)
quadratic_effect[:] = _TARGET_NODATA
quadratic_effect[valid_mask] = numpy.maximum(
rtsh_prior[valid_mask] + 3.05 * flgrem[valid_mask] -
11.78 * numpy.power(flgrem[valid_mask], 2),
0.01)
linear_effect = numpy.empty(fracrc.shape, dtype=numpy.float32)
linear_effect[:] = _TARGET_NODATA
linear_effect[valid_mask] = numpy.maximum(
1. - (flgrem[valid_mask] * gremb[valid_mask]),
0.01)
no_effect_mask = (valid_mask & numpy.isin(grzeff, [0, 1]))
quadratic_mask = (valid_mask & numpy.isin(grzeff, [2, 3]))
linear_mask = (valid_mask & numpy.isin(grzeff, [4, 5, 6]))
rtsh = numpy.empty(fracrc.shape, dtype=numpy.float32)
rtsh[:] = _TARGET_NODATA
rtsh[no_effect_mask] = rtsh_prior[no_effect_mask]
rtsh[quadratic_mask] = quadratic_effect[quadratic_mask]
rtsh[linear_mask] = linear_effect[linear_mask]
return rtsh
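# Illustrative worked example (hypothetical values): with fracrc=0.5,
# flgrem=0.1 and gremb=2, rtsh_prior = 0.5 / (1 - 0.5) = 1.0, and
#   grzeff in (0, 1):    rtsh = 1.0 (no effect)
#   grzeff in (2, 3):    rtsh = 1.0 + 3.05*0.1 - 11.78*0.01 = 1.19 (approx.)
#   grzeff in (4, 5, 6): rtsh = max(1 - 0.1*2, 0.01) = 0.8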
def calc_tgprod_final(rtsh, agprod):
"""Calculate final total potential production.
Final total potential production is calculated from aboveground
production impacted by grazing and the final root:shoot ratio
impacted by grazing.
Parameters:
rtsh (numpy.ndarray): derived, final root:shoot ratio impacted
by grazing
agprod (numpy.ndarray): derived, final aboveground potential
production impacted by grazing
Returns:
tgprod, final total potential production
"""
valid_mask = (
(rtsh != _TARGET_NODATA) &
(agprod != _TARGET_NODATA))
tgprod = numpy.empty(rtsh.shape, dtype=numpy.float32)
tgprod[:] = _TARGET_NODATA
tgprod[valid_mask] = (
agprod[valid_mask] + (rtsh[valid_mask] * agprod[valid_mask]))
return tgprod
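# Illustrative worked example (hypothetical values): with agprod=150 g and
# rtsh=1.0, tgprod = 150 + 1.0 * 150 = 300 g, i.e. tgprod = agprod * (1 + rtsh).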
def calc_final_tgprod_rtsh(
tgprod_pot_prod_path, fracrc_path, flgrem_path, grzeff_path,
gremb_path, tgprod_path, rtsh_path):
"""Calculate final potential production and root:shoot ratio.
Final potential production and root:shoot ratio include the impact of
grazing. First calculate final aboveground production including the
impact of grazing; then calculate rtsh, the final root:shoot ratio
including the impact of grazing; then calculate tgprod, final total
potential production, from final aboveground production and final
root:shoot ratio. Grazrst.f
Parameters:
tgprod_pot_prod_path (string): path to raster containing total
potential biomass production restricted by water and nutrient
availability, prior to effects of grazing
fracrc_path (string): path to raster containing the fraction of
carbon production allocated to roots according to restriction
by water and nutrient availability, prior to effects of
grazing
flgrem_path (string): path to raster containing the fraction of
live aboveground biomass removed by herbivores according to
diet selection in the previous step
grzeff_path (string): path to raster containing the parameter
            grzeff, the effect of defoliation on production and root:shoot
ratio
gremb_path (string): path to raster containing the parameter
gremb, the grazing effect multiplier
tgprod_path (string): path to raster containing final total
potential production (g biomass)
rtsh_path (string): path to raster containing final root:shoot
ratio of potential production
Side effects:
creates the raster indicated by tgprod_path
creates the raster indicated by rtsh_path
Returns:
None
"""
# temporary intermediate rasters for grazing effect
temp_dir = tempfile.mkdtemp(dir=PROCESSING_DIR)
agprod_path = os.path.join(temp_dir, 'agprod.tif')
# grazing effect on aboveground production
pygeoprocessing.raster_calculator(
[(path, 1) for path in [
tgprod_pot_prod_path, fracrc_path, flgrem_path,
grzeff_path]],
grazing_effect_on_aboveground_production,
agprod_path, gdal.GDT_Float32, _TARGET_NODATA)
# grazing effect on final root:shoot ratio
pygeoprocessing.raster_calculator(
[(path, 1) for path in [
fracrc_path, flgrem_path, grzeff_path, gremb_path]],
grazing_effect_on_root_shoot, rtsh_path,
gdal.GDT_Float32, _TARGET_NODATA)
# final total potential production
pygeoprocessing.raster_calculator(
[(path, 1) for path in [rtsh_path, agprod_path]],
calc_tgprod_final, tgprod_path,
gdal.GDT_Float32, _TARGET_NODATA)
# clean up temporary files
shutil.rmtree(temp_dir)
def _root_shoot_ratio(
aligned_inputs, site_param_table, current_month, pft_id_set,
veg_trait_table, prev_sv_reg, year_reg, month_reg):
"""Calculate final potential production and root:shoot ratio.
Final potential biomass production and root:shoot ratio is calculated
according to nutrient availability and demand for the nutrient, and the
impact of defoliation by herbivores. CropDynC.f
Parameters:
aligned_inputs (dict): map of key, path pairs indicating paths
to aligned model inputs, including the site spatial index raster
site_param_table (dict): map of site spatial index to dictionaries
that contain site-level parameters
current_month (int): month of the year, such that current_month=1
indicates January
pft_id_set (set): set of integers identifying plant functional types
veg_trait_table (dict): map of pft id to dictionaries containing
plant functional type parameters
prev_sv_reg (dict): map of key, path pairs giving paths to state
variables for the previous month
year_reg (dict): map of key, path pairs giving paths to rasters that
are modified once per year, including annual precipitation
month_reg (dict): map of key, path pairs giving paths to intermediate
calculated values that are shared between submodels
Side effects:
creates the raster indicated by
`month_reg['tgprod_<PFT>']`, total potential production (g biomass)
for each plant functional type (PFT)
creates the raster indicated by `month_reg['rtsh_<PFT>']` for each
plant functional type (PFT)
Returns:
None
"""
# if growth does not occur this month for all PFTs,
# skip the rest of the function
do_PFT = []
for pft_i in pft_id_set:
# growth occurs in growth months and when senescence not scheduled
do_growth = (
current_month != veg_trait_table[pft_i]['senescence_month'] and
str(current_month) in veg_trait_table[pft_i]['growth_months'])
if do_growth:
do_PFT.append(pft_i)
if not do_PFT:
return
# temporary intermediate rasters for root:shoot submodel
temp_dir = tempfile.mkdtemp(dir=PROCESSING_DIR)
temp_val_dict = {}
for pft_i in do_PFT:
for val in ['fracrc_p', 'fracrc', 'availm']:
temp_val_dict['{}_{}'.format(val, pft_i)] = os.path.join(
temp_dir, '{}_{}.tif'.format(val, pft_i))
for iel in [1, 2]:
for val in ['eavail', 'demand']:
temp_val_dict[
'{}_{}_{}'.format(val, iel, pft_i)] = os.path.join(
temp_dir, '{}_{}_{}.tif'.format(val, iel, pft_i))
# temporary parameter rasters for root:shoot submodel
param_val_dict = {}
# site-level parameters
for val in [
'bgppa', 'bgppb', 'agppa', 'agppb', 'favail_1', 'favail_4',
'favail_5', 'favail_6']:
target_path = os.path.join(temp_dir, '{}.tif'.format(val))
param_val_dict[val] = target_path
site_to_val = dict(
[(site_code, float(table[val])) for
(site_code, table) in site_param_table.items()])
pygeoprocessing.reclassify_raster(
(aligned_inputs['site_index'], 1), site_to_val, target_path,
gdal.GDT_Float32, _IC_NODATA)
# PFT-level parameters
for pft_i in do_PFT:
for val in [
                'frtcindx', 'cfrtcw_1', 'cfrtcw_2', 'cfrtcn_1', 'cfrtcn_2',
                'biomax', 'grzeff', 'gremb']:
target_path = os.path.join(
temp_dir, '{}_{}.tif'.format(val, pft_i))
param_val_dict['{}_{}'.format(val, pft_i)] = target_path
fill_val = veg_trait_table[pft_i][val]
pygeoprocessing.new_raster_from_base(
aligned_inputs['site_index'], target_path, gdal.GDT_Float32,
[_IC_NODATA], fill_value_list=[fill_val])
for val in [
'pramn_1_1', 'pramn_1_2', 'pramx_1_1', 'pramx_1_2',
'prbmn_1_1', 'prbmn_1_2', 'prbmx_1_1', 'prbmx_1_2',
'pramn_2_1', 'pramn_2_2', 'pramx_2_1', 'pramx_2_2',
'prbmn_2_1', 'prbmn_2_2', 'prbmx_2_1', 'prbmx_2_2']:
target_path = os.path.join(
temp_dir, '{}_{}.tif'.format(val, pft_i))
param_val_dict[
'{}_{}'.format(val, pft_i)] = target_path
fill_val = veg_trait_table[pft_i][val]
pygeoprocessing.new_raster_from_base(
aligned_inputs['site_index'], target_path,
gdal.GDT_Float32, [_IC_NODATA], fill_value_list=[fill_val])
# the parameter favail_2 must be calculated from current mineral N in
# surface layer
param_val_dict['favail_2'] = os.path.join(temp_dir, 'favail_2.tif')
_calc_favail_P(prev_sv_reg, param_val_dict)
for pft_i in do_PFT:
# fracrc_p, provisional fraction of C allocated to roots
pygeoprocessing.raster_calculator(
[(path, 1) for path in [
year_reg['annual_precip_path'],
param_val_dict['frtcindx_{}'.format(pft_i)],
param_val_dict['bgppa'],
param_val_dict['bgppb'],
param_val_dict['agppa'],
param_val_dict['agppb'],
param_val_dict['cfrtcw_1_{}'.format(pft_i)],
param_val_dict['cfrtcw_2_{}'.format(pft_i)],
param_val_dict['cfrtcn_1_{}'.format(pft_i)],
param_val_dict['cfrtcn_2_{}'.format(pft_i)]]],
calc_provisional_fracrc,
temp_val_dict['fracrc_p_{}'.format(pft_i)],
gdal.GDT_Float32, _TARGET_NODATA)
for iel in [1, 2]:
# persistent ratios used here and in plant growth submodel
calc_ce_ratios(
param_val_dict['pramn_{}_1_{}'.format(iel, pft_i)],
param_val_dict['pramn_{}_2_{}'.format(iel, pft_i)],
prev_sv_reg['aglivc_{}_path'.format(pft_i)],
param_val_dict['biomax_{}'.format(pft_i)],
param_val_dict['pramx_{}_1_{}'.format(iel, pft_i)],
param_val_dict['pramx_{}_2_{}'.format(iel, pft_i)],
param_val_dict['prbmn_{}_1_{}'.format(iel, pft_i)],
param_val_dict['prbmn_{}_2_{}'.format(iel, pft_i)],
param_val_dict['prbmx_{}_1_{}'.format(iel, pft_i)],
param_val_dict['prbmx_{}_2_{}'.format(iel, pft_i)],
year_reg['annual_precip_path'], pft_i, iel, month_reg)
# sum of mineral nutrient in accessible soil layers
_calc_avail_mineral_nutrient(
veg_trait_table[pft_i], prev_sv_reg, iel,
temp_val_dict['availm_{}'.format(pft_i)])
# eavail_iel, available nutrient
_calc_available_nutrient(
pft_i, iel, veg_trait_table[pft_i], prev_sv_reg,
site_param_table, aligned_inputs['site_index'],
temp_val_dict['availm_{}'.format(pft_i)],
param_val_dict['favail_{}'.format(iel)],
month_reg['tgprod_pot_prod_{}'.format(pft_i)],
temp_val_dict['eavail_{}_{}'.format(iel, pft_i)])
# demand_iel, demand for the nutrient
_calc_nutrient_demand(
month_reg['tgprod_pot_prod_{}'.format(pft_i)],
temp_val_dict['fracrc_p_{}'.format(pft_i)],
month_reg['cercrp_min_above_{}_{}'.format(iel, pft_i)],
month_reg['cercrp_min_below_{}_{}'.format(iel, pft_i)],
temp_val_dict['demand_{}_{}'.format(iel, pft_i)])
# revised fraction of carbon allocated to roots
calc_revised_fracrc(
param_val_dict['frtcindx_{}'.format(pft_i)],
temp_val_dict['fracrc_p_{}'.format(pft_i)],
temp_val_dict['eavail_1_{}'.format(pft_i)],
temp_val_dict['eavail_2_{}'.format(pft_i)],
temp_val_dict['demand_1_{}'.format(pft_i)],
temp_val_dict['demand_2_{}'.format(pft_i)],
month_reg['h2ogef_1_{}'.format(pft_i)],
param_val_dict['cfrtcw_1_{}'.format(pft_i)],
param_val_dict['cfrtcw_2_{}'.format(pft_i)],
param_val_dict['cfrtcn_1_{}'.format(pft_i)],
param_val_dict['cfrtcn_2_{}'.format(pft_i)],
temp_val_dict['fracrc_{}'.format(pft_i)])
# final potential production and root:shoot ratio accounting for
# impacts of grazing
calc_final_tgprod_rtsh(
month_reg['tgprod_pot_prod_{}'.format(pft_i)],
temp_val_dict['fracrc_{}'.format(pft_i)],
month_reg['flgrem_{}'.format(pft_i)],
param_val_dict['grzeff_{}'.format(pft_i)],
param_val_dict['gremb_{}'.format(pft_i)],
month_reg['tgprod_{}'.format(pft_i)],
month_reg['rtsh_{}'.format(pft_i)])
# clean up temporary files
shutil.rmtree(temp_dir)
def _snow(
site_index_path, site_param_table, precip_path, tave_path,
max_temp_path, min_temp_path, prev_snow_path, prev_snlq_path,
current_month, snowmelt_path, snow_path, snlq_path,
inputs_after_snow_path, pet_rem_path):
"""Account for precipitation as snow and snowmelt from snowpack.
Determine whether precipitation falls as snow. Track the fate of
new and existing snowpack including evaporation and melting. Track the
    remaining snowpack and liquid in snow and potential
evapotranspiration remaining after evaporation of snow. Snowcent.f
Parameters:
site_index_path (string): path to site spatial index raster
site_param_table (dict): map of site spatial index to dictionaries
that contain site-level parameters
precip_path (string): path to raster containing precipitation for the
current month
tave_path (string): path to raster containing average temperature for
the current month
max_temp_path (string): path to raster containing maximum temperature
for the current month
min_temp_path (string): path to raster containing minimum temperature
for the current month
prev_snow_path (string): path to raster containing current snowpack
prev_snlq_path (string): path to raster containing current liquid in
snow
        current_month (int): current month of the year, such that
            current_month=1 indicates January
        snowmelt_path (string): path to raster to contain snowmelt
snow_path (string): path to raster to contain modified snowpack
snlq_path (string): path to raster to contain modified liquid in snow
        inputs_after_snow_path (string): path to raster to contain water inputs
            to the system after accounting for snow
        pet_rem_path (string): path to raster to contain potential
            evapotranspiration remaining after any evaporation of snow
Side effects:
creates the raster indicated by `snowmelt_path`
creates the raster indicated by `snow_path`
creates the raster indicated by `snlq_path`
creates the raster indicated by `inputs_after_snow_path`
creates the raster indicated by `pet_rem_path`
Returns:
None
"""
def calc_snow_moisture(return_type):
"""Calculate change in snow, pet, snow liquid, and moisture inputs.
Record changes in snowpack, liquid in snow, potential
evapotranspiration energy, and liquid draining into soil from snow.
Parameters:
            return_type (string): flag indicating whether snowmelt, modified
                snowpack, modified liquid in snow, modified potential
                evapotranspiration, or soil moisture inputs after snow should
                be returned
Returns:
the function `_calc_snow_moisture`
"""
def _calc_snow_moisture(
tave, precip, snow, snlq, pet, tmelt_1, tmelt_2, shwave):
"""Calculate the fate of moisture from snow.
Calculate new snowfall or rain on snow. Calculate direct
evaporation of snow and consumption of potential
evapotranspiration energy. Calculate snowmelt and liquid draining
from snow into the soil.
Parameters:
tave (numpy.ndarray): derived, average temperature
precip (numpy.ndarray): input, precipitation for this month
snow (numpy.ndarray): derived, existing snowpack prior to new
snowfall
snlq (numpy.ndarray): derived, existing liquid in snowpack
pet (numpy.ndarray): derived, potential evapotranspiration
tmelt_1 (numpy.ndarray): parameter, minimum temperature above
which snow will melt
tmelt_2 (numpy.ndarray): parameter, ratio between degrees above
the minimum temperature and cm of snow that will melt
shwave (numpy.ndarray): derived, shortwave radiation outside
the atmosphere
Returns:
snowmelt if return_type is 'snowmelt'
snow_revised if return_type is 'snow'
snlq_revised if return_type is 'snlq'
pet_revised if return_type is 'pet'
inputs_after_snow if return_type is 'inputs_after_snow'
"""
valid_mask = (
(tave != _IC_NODATA) &
(~numpy.isclose(precip, precip_nodata)) &
(~numpy.isclose(snow, _SV_NODATA)) &
(~numpy.isclose(snlq, _SV_NODATA)) &
(pet != _TARGET_NODATA) &
(tmelt_1 != _IC_NODATA) &
(tmelt_2 != _IC_NODATA) &
(shwave != _TARGET_NODATA))
inputs_after_snow = numpy.empty(precip.shape, dtype=numpy.float32)
inputs_after_snow[:] = _TARGET_NODATA
inputs_after_snow[valid_mask] = precip[valid_mask]
snowfall_mask = (valid_mask & (tave <= 0))
snow[snowfall_mask] = (snow[snowfall_mask] + precip[snowfall_mask])
inputs_after_snow[snowfall_mask] = 0.
rain_on_snow_mask = (
(valid_mask) &
(tave > 0) &
(snow > 0))
snlq[rain_on_snow_mask] = (
snlq[rain_on_snow_mask] + precip[rain_on_snow_mask])
inputs_after_snow[rain_on_snow_mask] = 0.
snowtot = numpy.zeros(snow.shape, dtype=numpy.float32)
snowtot[valid_mask] = numpy.maximum(
snow[valid_mask] + snlq[valid_mask], 0)
evap_mask = (valid_mask & (snowtot > 0.))
evsnow = numpy.zeros(snow.shape, dtype=numpy.float32)
evsnow[evap_mask] = numpy.minimum(
snowtot[evap_mask], pet[evap_mask] * 0.87)
snow_revised = numpy.empty(snow.shape, dtype=numpy.float32)
snow_revised[:] = _TARGET_NODATA
snow_revised[valid_mask] = snow[valid_mask]
snow_revised[evap_mask] = numpy.maximum(
snow[evap_mask] - evsnow[evap_mask] *
(snow[evap_mask] / snowtot[evap_mask]), 0.)
snlq_revised = numpy.zeros(snow.shape, dtype=numpy.float32)
snlq_revised[valid_mask] = snlq[valid_mask]
snlq_revised[evap_mask] = numpy.maximum(
snlq[evap_mask] - evsnow[evap_mask] *
(snlq[evap_mask] / snowtot[evap_mask]), 0.)
pet_revised = numpy.empty(snow.shape, dtype=numpy.float32)
pet_revised[:] = _TARGET_NODATA
pet_revised[valid_mask] = pet[valid_mask]
pet_revised[evap_mask] = numpy.maximum(
(pet[evap_mask] - evsnow[evap_mask] / 0.87), 0.)
melt_mask = (valid_mask & (tave >= tmelt_1))
snowmelt = numpy.zeros(snow.shape, dtype=numpy.float32)
snowmelt[melt_mask] = numpy.clip(
tmelt_2[melt_mask] * (tave[melt_mask] - tmelt_1[melt_mask]) *
shwave[melt_mask], 0., snow_revised[melt_mask])
snow_revised[melt_mask] = (
snow_revised[melt_mask] - snowmelt[melt_mask])
snlq_revised[melt_mask] = (
snlq_revised[melt_mask] + snowmelt[melt_mask])
drain_mask = (melt_mask & (snlq_revised > 0.5 * snow_revised))
inputs_after_snow[drain_mask] = (
snlq_revised[drain_mask] - 0.5 * snow_revised[drain_mask])
snlq_revised[drain_mask] = (
snlq_revised[drain_mask] - inputs_after_snow[drain_mask])
if return_type == 'snowmelt':
return snowmelt
elif return_type == 'snow':
return snow_revised
elif return_type == 'snlq':
return snlq_revised
elif return_type == 'pet':
return pet_revised
else:
return inputs_after_snow
return _calc_snow_moisture
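    # Illustrative worked example of the snowmelt step (hypothetical values):
    # with tmelt_1=0.0, tmelt_2=0.002, tave=4.0, shwave=500 and 3.0 cm of
    # remaining snow, snowmelt = clip(0.002 * (4.0 - 0.0) * 500, 0, 3.0)
    # = 3.0 cm, i.e. melt is capped at the snow that is actually present.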
temp_dir = tempfile.mkdtemp(dir=PROCESSING_DIR)
temp_val_dict = {}
for val in ['shwave', 'pet']:
temp_val_dict[val] = os.path.join(temp_dir, '{}.tif'.format(val))
param_val_dict = {}
for val in ['tmelt_1', 'tmelt_2', 'fwloss_4']:
target_path = os.path.join(temp_dir, '{}.tif'.format(val))
param_val_dict[val] = target_path
site_to_val = dict(
[(site_code, float(table[val])) for (
site_code, table) in site_param_table.items()])
pygeoprocessing.reclassify_raster(
(site_index_path, 1), site_to_val, target_path, gdal.GDT_Float32,
_IC_NODATA)
max_temp_nodata = pygeoprocessing.get_raster_info(
max_temp_path)['nodata'][0]
min_temp_nodata = pygeoprocessing.get_raster_info(
min_temp_path)['nodata'][0]
precip_nodata = pygeoprocessing.get_raster_info(
precip_path)['nodata'][0]
# solar radiation outside the atmosphere
_shortwave_radiation(precip_path, current_month, temp_val_dict['shwave'])
# pet, reference evapotranspiration modified by fwloss parameter
_reference_evapotranspiration(
max_temp_path, min_temp_path, temp_val_dict['shwave'],
param_val_dict['fwloss_4'], temp_val_dict['pet'])
# calculate snowmelt
pygeoprocessing.raster_calculator(
[(path, 1) for path in [
tave_path, precip_path, prev_snow_path,
prev_snlq_path, temp_val_dict['pet'],
param_val_dict['tmelt_1'], param_val_dict['tmelt_2'],
temp_val_dict['shwave']]],
calc_snow_moisture('snowmelt'), snowmelt_path,
gdal.GDT_Float32, _TARGET_NODATA)
# calculate change in snow
pygeoprocessing.raster_calculator(
[(path, 1) for path in [
tave_path, precip_path, prev_snow_path,
prev_snlq_path, temp_val_dict['pet'],
param_val_dict['tmelt_1'], param_val_dict['tmelt_2'],
temp_val_dict['shwave']]],
calc_snow_moisture("snow"), snow_path,
gdal.GDT_Float32, _TARGET_NODATA)
# calculate change in liquid in snow
pygeoprocessing.raster_calculator(
[(path, 1) for path in [
tave_path, precip_path, prev_snow_path,
prev_snlq_path, temp_val_dict['pet'],
param_val_dict['tmelt_1'], param_val_dict['tmelt_2'],
temp_val_dict['shwave']]],
calc_snow_moisture("snlq"), snlq_path,
gdal.GDT_Float32, _TARGET_NODATA)
# calculate change in potential evapotranspiration energy
pygeoprocessing.raster_calculator(
[(path, 1) for path in [
tave_path, precip_path, prev_snow_path,
prev_snlq_path, temp_val_dict['pet'],
param_val_dict['tmelt_1'], param_val_dict['tmelt_2'],
temp_val_dict['shwave']]],
calc_snow_moisture("pet"), pet_rem_path,
gdal.GDT_Float32, _TARGET_NODATA)
# calculate soil moisture inputs draining from snow after snowmelt
pygeoprocessing.raster_calculator(
[(path, 1) for path in [
tave_path, precip_path, prev_snow_path,
prev_snlq_path, temp_val_dict['pet'],
param_val_dict['tmelt_1'], param_val_dict['tmelt_2'],
temp_val_dict['shwave']]],
calc_snow_moisture("inputs_after_snow"), inputs_after_snow_path,
gdal.GDT_Float32, _TARGET_NODATA)
# clean up temporary files
shutil.rmtree(temp_dir)
def _calc_aboveground_live_biomass(sum_aglivc, sum_tgprod):
"""Calculate aboveground live biomass for purposes of soil water.
Live biomass impacts loss of moisture inputs through canopy
interception and evapotranspiration. Because soil moisture is computed
after potential production, but before actual growth of plants, some of
the predicted growth in biomass (i.e., tgprod) is added here to
existing standing live biomass (i.e., aglivc * 2.5; line 80,
potprod.f, in Century).
Parameters:
sum_aglivc (numpy.ndarray): the sum of aglivc across plant
functional types (pft), weighted by % cover of the pft
sum_tgprod (numpy.ndarray): sum of tgprod, potential production
limited by soil water, nutrient availability, and grazing,
across pfts weighted by % cover of the pft
Returns:
aliv, aboveground live biomass for soil water submodel
"""
valid_mask = (
(sum_aglivc != _TARGET_NODATA) &
(sum_tgprod != _TARGET_NODATA))
aliv = numpy.empty(sum_aglivc.shape, dtype=numpy.float32)
aliv[:] = _TARGET_NODATA
aliv[valid_mask] = (
sum_aglivc[valid_mask] * 2.5 + (0.25 * sum_tgprod[valid_mask]))
return aliv
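# Illustrative worked example (hypothetical values): with sum_aglivc=80 g C
# and sum_tgprod=300 g biomass, aliv = 80 * 2.5 + 0.25 * 300 = 275 g biomass.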
def _calc_standing_biomass(aliv, sum_stdedc):
"""Calculate total aboveground standing biomass for soil water.
Total standing biomass impacts loss of moisture inputs by increasing
total canopy interception and decreasing bare soil evaporation. It is
the sum of live and dead standing biomass across plant functional
types, bounded to be <= 800 g/m2.
Parameters:
aliv (numpy.ndarray): aboveground live biomass, calculated from
aglivc and tgprod across plant functional types
sum_stdedc (numpy.ndarray): aboveground standing dead C summed
across plant functional types
Returns:
sd, total aboveground standing biomass for soil water.
"""
valid_mask = (
(aliv != _TARGET_NODATA) &
(sum_stdedc != _TARGET_NODATA))
sd = numpy.empty(aliv.shape, dtype=numpy.float32)
sd[:] = _TARGET_NODATA
sd[valid_mask] = numpy.minimum(
aliv[valid_mask] + (sum_stdedc[valid_mask] * 2.5), 800.)
return sd
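# Illustrative worked example (hypothetical values): with aliv=275 g biomass
# and sum_stdedc=40 g C, sd = min(275 + 40 * 2.5, 800) = 375 g biomass.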
def subtract_surface_losses(return_type):
"""Calculate surface losses to runoff and surface evaporation.
Calculate the loss of surface moisture to runoff, canopy interception,
and bare soil evaporation.
Parameters:
return_type (string): flag indicating whether soil moisture inputs
after surface losses or total surface evaporation should be
returned
Returns:
the function `_subtract_surface_losses`
"""
def _subtract_surface_losses(
inputs_after_snow, fracro, precro, snow, alit, sd, fwloss_1,
fwloss_2, pet_rem):
"""Subtract moisture losses to runoff, interception, and evaporation.
Of the surface water inputs from precipitation and snowmelt, some water
is lost to runoff (line 113, H2olos.f). After runoff, some water is
lost to canopy interception and bare soil evaporation, if there is no
snow cover. Loss to canopy interception and bare soil evaporation is
a function of live, standing dead, and surface litter biomass. The
total loss of moisture to interception and bare soil evaporation is
bounded to be less than or equal to 40% of reference
evapotranspiration.
Parameters:
inputs_after_snow (numpy.ndarray): derived, surface water inputs
from precipitation and snowmelt, prior to runoff
fracro (numpy.ndarray): parameter, fraction of surface water
above precro that is lost to runoff
precro (numpy.ndarray): parameter, amount of surface water that
must be available for runoff to occur
snow (numpy.ndarray): derived, current snowpack
alit (numpy.ndarray): derived, biomass in surface litter
sd (numpy.ndarray): derived, total standing biomass
fwloss_1 (numpy.ndarray): parameter, scaling factor for
interception and evaporation of precip by vegetation
fwloss_2 (numpy.ndarray): parameter, scaling factor for bare soil
evaporation of precip
pet_rem (numpy.ndarray): derived, potential evaporation remaining
after evaporation of snow
Returns:
inputs_after_surface, surface water inputs to soil after runoff
and surface evaporation are subtracted, if return_type is
'inputs_after_surface'
absevap, bare soil evaporation, if return_type is 'absevap'
evap_losses, total surface evaporation, if return_type is
'evap_losses'
"""
valid_mask = (
(inputs_after_snow != _TARGET_NODATA) &
(fracro != _IC_NODATA) &
(precro != _IC_NODATA) &
(snow != _TARGET_NODATA) &
(alit != _TARGET_NODATA) &
(sd != _TARGET_NODATA) &
(fwloss_1 != _IC_NODATA) &
(fwloss_2 != _IC_NODATA) &
(pet_rem != _TARGET_NODATA))
runoff = numpy.empty(inputs_after_snow.shape, dtype=numpy.float32)
runoff[:] = _TARGET_NODATA
runoff[valid_mask] = numpy.maximum(
fracro[valid_mask] *
(inputs_after_snow[valid_mask] - precro[valid_mask]), 0.)
inputs_after_runoff = numpy.empty(
inputs_after_snow.shape, dtype=numpy.float32)
inputs_after_runoff[:] = _TARGET_NODATA
inputs_after_runoff[valid_mask] = (
inputs_after_snow[valid_mask] - runoff[valid_mask])
evap_mask = (valid_mask & (snow <= 0))
# loss to interception
aint = numpy.zeros(inputs_after_snow.shape, dtype=numpy.float32)
aint[evap_mask] = (
(0.0003 * alit[evap_mask] + 0.0006 * sd[evap_mask]) *
fwloss_1[evap_mask])
# loss to bare soil evaporation
absevap = numpy.empty(inputs_after_snow.shape, dtype=numpy.float32)
absevap[:] = _TARGET_NODATA
absevap[valid_mask] = 0.
absevap[evap_mask] = (
0.5 *
numpy.exp((-0.002 * alit[evap_mask]) - (0.004 * sd[evap_mask])) *
fwloss_2[evap_mask])
# total losses to interception and evaporation
evap_losses = numpy.empty(inputs_after_snow.shape, dtype=numpy.float32)
evap_losses[:] = _TARGET_NODATA
evap_losses[valid_mask] = 0.
evap_losses[evap_mask] = (
numpy.minimum(((absevap[evap_mask] + aint[evap_mask]) *
inputs_after_runoff[evap_mask]), (0.4 * pet_rem[evap_mask])))
# remaining inputs after evaporation
inputs_after_surface = numpy.empty(
inputs_after_snow.shape, dtype=numpy.float32)
inputs_after_surface[:] = _TARGET_NODATA
inputs_after_surface[valid_mask] = inputs_after_runoff[valid_mask]
inputs_after_surface[evap_mask] = (
inputs_after_runoff[evap_mask] - evap_losses[evap_mask])
if return_type == 'inputs_after_surface':
return inputs_after_surface
elif return_type == 'absevap':
return absevap
elif return_type == 'evap_losses':
return evap_losses
return _subtract_surface_losses
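# Illustrative worked example of the surface loss calculation (hypothetical
# values): with inputs_after_snow=10 cm, fracro=0.15, precro=8, snow=0,
# alit=100 g, sd=375 g, fwloss_1=0.8, fwloss_2=0.8 and pet_rem=10 cm,
#   runoff = max(0.15 * (10 - 8), 0) = 0.3, leaving 9.7 cm
#   aint = (0.0003*100 + 0.0006*375) * 0.8 = 0.204
#   absevap = 0.5 * exp(-0.002*100 - 0.004*375) * 0.8 = 0.073 (approx.)
#   evap_losses = min((0.073 + 0.204) * 9.7, 0.4 * 10) = 2.69 (approx.)
#   inputs_after_surface = 9.7 - 2.69 = 7.0 cm (approx.)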
def calc_potential_transpiration(return_type):
"""Calculate potential transpiration and evaporation from soil layer 1.
Calculate potential transpiration (trap), potential evaporation from
soil layer 1 (pevp), and initial transpiration water loss (tran).
Remove the initial transpiration water loss from soil moisture inputs
at this step.
Parameters:
return_type (string): flag indicating whether potential transpiration,
potential evaporation from soil layer 1, or modified moisture
inputs should be returned
Returns:
the function `_calc_potential_transpiration`
"""
def _calc_potential_transpiration(
pet_rem, evap_losses, tave, aliv, current_moisture_inputs):
"""Calculate potential water losses to transpiration.
Calculate potential transpiration (trap), the total potential
transpiration from all soil layers by plants. Calculate potential
evaporation from soil layer 1 (pevp); this amount is calculated prior
to transpiration but actually removed after water loss to transpiration
from all soil layers has been accounted. Calculate actual transpiration
(tran). Remove actual transpiration water losses from moisture inputs
before distributing water to soil layers. This is necessary for a
monthly time step to give plants in wet climates adequate access to
water for transpiration.
Parameters:
pet_rem (numpy.ndarray): derived, potential evapotranspiration
remaining after evaporation of snow
evap_losses (numpy.ndarray): derived, total surface evaporation
tave (numpy.ndarray): derived, average temperature
aliv (numpy.ndarray): aboveground live biomass, calculated from
aglivc and tgprod across plant functional types
current_moisture_inputs (numpy.ndarray): derived, moisture inputs
after surface losses
Returns:
trap if return_type is 'trap'
pevp if return_type is 'pevp'
modified_moisture_inputs if return_type is
'modified_moisture_inputs'
"""
valid_mask = (
(pet_rem != _TARGET_NODATA) &
(evap_losses != _TARGET_NODATA) &
(tave != _IC_NODATA) &
(aliv != _TARGET_NODATA) &
(current_moisture_inputs != _TARGET_NODATA))
trap = numpy.empty(pet_rem.shape, dtype=numpy.float32)
trap[:] = _TARGET_NODATA
trap[valid_mask] = pet_rem[valid_mask] - evap_losses[valid_mask]
no_transpiration_mask = (valid_mask & (tave < 2))
trap[no_transpiration_mask] = 0.
transpiration_mask = (valid_mask & (tave >= 2))
trap[transpiration_mask] = numpy.maximum(
numpy.minimum(
trap[transpiration_mask], pet_rem[transpiration_mask] *
0.65 * (1 - numpy.exp(-0.02 * aliv[transpiration_mask]))), 0.)
trap[valid_mask] = numpy.maximum(trap[valid_mask], 0.01)
pevp = numpy.empty(pet_rem.shape, dtype=numpy.float32)
pevp[:] = _TARGET_NODATA
pevp[valid_mask] = numpy.maximum(
pet_rem[valid_mask] - trap[valid_mask] - evap_losses[valid_mask],
0.)
tran = numpy.empty(pet_rem.shape, dtype=numpy.float32)
tran[:] = _TARGET_NODATA
tran[valid_mask] = numpy.minimum(
trap[valid_mask] - 0.01, current_moisture_inputs[valid_mask])
trap[valid_mask] = trap[valid_mask] - tran[valid_mask]
modified_moisture_inputs = numpy.empty(
pet_rem.shape, dtype=numpy.float32)
modified_moisture_inputs[:] = _TARGET_NODATA
modified_moisture_inputs[valid_mask] = (
current_moisture_inputs[valid_mask] - tran[valid_mask])
if return_type == 'trap':
return trap
elif return_type == 'pevp':
return pevp
elif return_type == 'modified_moisture_inputs':
return modified_moisture_inputs
return _calc_potential_transpiration
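# Illustrative worked example of the transpiration partition (hypothetical
# values): with pet_rem=10 cm, evap_losses=2.69, tave=15, aliv=275 g and
# current_moisture_inputs=7.0 cm,
#   trap = min(10 - 2.69, 10 * 0.65 * (1 - exp(-0.02*275))) = 6.47 (approx.)
#   pevp = max(10 - 6.47 - 2.69, 0) = 0.84 (approx.)
#   tran = min(6.47 - 0.01, 7.0) = 6.46, so trap becomes 0.01 and
#   modified_moisture_inputs = 7.0 - 6.46 = 0.54 cm (approx.)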
def distribute_water_to_soil_layer(return_type):
"""Distribute moisture inputs to one soil layer prior to transpiration.
Soil moisture inputs after runoff, evaporation, and initial
transpiration are distributed to soil layers sequentially according to the
field capacity of the layer. If moisture inputs exceed the field capacity
of the layer, the remainder of moisture inputs move down to the next
adjacent soil layer.
    Parameters:
        return_type (string): flag indicating whether revised soil moisture
            in the layer or moisture flowing into the next layer should be
            returned
    Returns:
the function `_distribute_water`
"""
def _distribute_water(adep, afiel, asmos, current_moisture_inputs):
"""Revise soil moisture in this soil layer prior to transpiration.
Moisture inputs coming into this soil layer are compared to the field
capacity of the layer. If the field capacity is exceeded, the excess
moisture moves from this layer to the next adjacent layer.
Parameters:
adep (numpy.ndarray): parameter, depth of this soil layer in cm
afiel (numpy.ndarray): derived, field capacity of this layer
asmos (numpy.ndarray): state variable, current soil moisture
content of this soil layer
current_moisture_inputs (numpy.ndarray): derived, moisture inputs
added to this soil layer
Returns:
asmos_revised, revised soil moisture in this layer, if return_type
is 'asmos_revised'
amov, moisture flowing from this layer into the next, if
return_type is 'amov'
"""
valid_mask = (
(adep != _IC_NODATA) &
(afiel != _TARGET_NODATA) &
(~numpy.isclose(asmos, _SV_NODATA)) &
(current_moisture_inputs != _TARGET_NODATA))
afl = numpy.empty(adep.shape, dtype=numpy.float32)
afl[:] = _TARGET_NODATA
afl[valid_mask] = adep[valid_mask] * afiel[valid_mask]
asmos_interm = numpy.empty(adep.shape, dtype=numpy.float32)
asmos_interm[:] = _TARGET_NODATA
asmos_interm[valid_mask] = (
asmos[valid_mask] + current_moisture_inputs[valid_mask])
amov = numpy.empty(adep.shape, dtype=numpy.float32)
amov[:] = _TARGET_NODATA
exceeded_mask = (valid_mask & (asmos_interm > afl))
amov[exceeded_mask] = asmos_interm[exceeded_mask]
asmos_revised = numpy.empty(adep.shape, dtype=numpy.float32)
asmos_revised[:] = _TARGET_NODATA
asmos_revised[valid_mask] = asmos_interm[valid_mask]
asmos_revised[exceeded_mask] = afl[exceeded_mask]
notexceeded_mask = (valid_mask & (asmos_interm <= afl))
amov[notexceeded_mask] = 0.
if return_type == 'asmos_revised':
return asmos_revised
elif return_type == 'amov':
return amov
return _distribute_water
def calc_available_water_for_transpiration(asmos, awilt, adep):
"""Calculate water available for transpiration in one soil layer.
The water available for transpiration is the water in the soil layer in
excess of the water held at the wilting point of the layer (wilting point
multiplied by layer depth), bounded below by zero.
Parameters:
asmos (numpy.ndarray): derived, interim moisture in the soil layer
awilt (numpy.ndarray): derived, wilting point of the soil layer
adep (numpy.ndarray): parameter, depth of the soil layer in cm
Returns:
avw, available water for transpiration
"""
valid_mask = (
(asmos != _TARGET_NODATA) &
(awilt != _TARGET_NODATA) &
(adep != _IC_NODATA))
avw = numpy.empty(asmos.shape, dtype=numpy.float32)
avw[:] = _TARGET_NODATA
avw[valid_mask] = numpy.maximum(
asmos[valid_mask] - awilt[valid_mask] * adep[valid_mask], 0.)
return avw
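# Illustrative sketch (not called by the model): a single-pixel check of the
# available-water calculation above. The numeric values are hypothetical.
def _example_calc_available_water_for_transpiration():
    """Water above the wilting point of a 15 cm layer holding 3 cm of water."""
    asmos = numpy.array([3.], dtype=numpy.float32)   # soil moisture, cm
    awilt = numpy.array([0.1], dtype=numpy.float32)  # wilting point, cm/cm
    adep = numpy.array([15.], dtype=numpy.float32)   # layer depth, cm
    # expected: max(3 - 0.1 * 15, 0) = 1.5 cm available for transpiration
    return calc_available_water_for_transpiration(asmos, awilt, adep)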
def revise_potential_transpiration(trap, tot):
"""Revise potential transpiration according to water available.
Total potential transpiration, trap, is revised to be less than or equal
to total water available for transpiration, tot. Total water available
for transpiration is the sum of available water per soil layer.
Line 241, H2olos.f
Parameters:
trap (numpy.ndarray): derived, potential transpiration water losses
tot (numpy.ndarray): derived, total soil water available for
transpiration
Returns:
trap_revised, revised total potential transpiration
"""
valid_mask = (
(trap != _TARGET_NODATA) &
(tot != _TARGET_NODATA))
trap_revised = numpy.empty(trap.shape, dtype=numpy.float32)
trap_revised[:] = _TARGET_NODATA
trap_revised[valid_mask] = numpy.minimum(trap[valid_mask], tot[valid_mask])
return trap_revised
def remove_transpiration(return_type):
"""Remove water from a soil layer via transpiration by plants.
Transpiration from one soil layer is apportioned from total potential
transpiration, trap, according to the available water for transpiration in
this soil layer. Lines 218-294, H2olos.f
Parameters:
return_type (string): flag indicating whether avinj (water in this soil
layer available to plants for growth) or asmos (total water in this
soil layer) should be returned
Returns:
the function `_remove_transpiration`
"""
def _remove_transpiration(asmos, awilt, adep, trap, awwt, tot2):
"""Remove water from a soil layer via transpiration by plants.
Parameters:
asmos (numpy.ndarray): derived, interim moisture in this soil layer
after additions from current month precipitation
awilt (numpy.ndarray): derived, wilting point of this soil layer
adep (numpy.ndarray): parameter, depth of this soil layer in cm
trap (numpy.ndarray): derived, total potential transpiration
across all soil layers accessible by plant roots
awwt (numpy.ndarray): derived, water available for transpiration
in this soil layer weighted by transpiration depth distribution
parameter
tot2 (numpy.ndarray): derived, the sum of weighted water available
for transpiration across soil layers
Returns:
avinj, water available to plants for growth in this layer after
losses to transpiration, if return type is 'avinj'
asmos_revised, total water in this layer after losses to
transpiration, if return type is 'asmos'
"""
valid_mask = (
(asmos != _TARGET_NODATA) &
(awilt != _TARGET_NODATA) &
(adep != _IC_NODATA) &
(trap != _TARGET_NODATA) &
(awwt != _TARGET_NODATA) &
(tot2 != _TARGET_NODATA))
avinj = numpy.empty(asmos.shape, dtype=numpy.float32)
avinj[:] = _TARGET_NODATA
avinj[valid_mask] = numpy.maximum(
asmos[valid_mask] - awilt[valid_mask] * adep[valid_mask], 0.)
transpire_mask = (valid_mask & (tot2 > 0))
transpiration_loss = numpy.zeros(asmos.shape, dtype=numpy.float32)
transpiration_loss[transpire_mask] = numpy.minimum(
(trap[transpire_mask] *
awwt[transpire_mask]) / tot2[transpire_mask],
avinj[transpire_mask])
avinj[valid_mask] = avinj[valid_mask] - transpiration_loss[valid_mask]
asmos_revised = numpy.empty(asmos.shape, dtype=numpy.float32)
asmos_revised[:] = _TARGET_NODATA
asmos_revised[valid_mask] = (
asmos[valid_mask] - transpiration_loss[valid_mask])
if return_type == 'avinj':
return avinj
elif return_type == 'asmos':
return asmos_revised
return _remove_transpiration
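# Illustrative sketch (not called by the model): apportions a hypothetical
# 1 cm of total transpiration demand to one layer that holds half of the
# weighted water available for transpiration (awwt / tot2 = 0.5).
def _example_remove_transpiration():
    """Expect 0.5 cm removed: avinj 1.5 -> 1.0 cm, asmos 3.0 -> 2.5 cm."""
    asmos = numpy.array([3.], dtype=numpy.float32)
    awilt = numpy.array([0.1], dtype=numpy.float32)
    adep = numpy.array([15.], dtype=numpy.float32)
    trap = numpy.array([1.], dtype=numpy.float32)
    awwt = numpy.array([0.9], dtype=numpy.float32)
    tot2 = numpy.array([1.8], dtype=numpy.float32)
    avinj = remove_transpiration('avinj')(
        asmos, awilt, adep, trap, awwt, tot2)
    asmos_rev = remove_transpiration('asmos')(
        asmos, awilt, adep, trap, awwt, tot2)
    return avinj, asmos_rev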
def calc_relative_water_content_lyr_1(asmos_1, adep_1, awilt_1, afiel_1):
"""Calculate the relative water content of soil layer 1.
The relative water content of soil layer 1, prior to any evaporation
losses, is used to estimate the water available for evaporation from that
layer. Line 280, H2olos.f
Parameters:
asmos_1 (numpy.ndarray): derived, interim moisture in soil layer 1
after losses to transpiration
adep_1 (numpy.ndarray): parameter, depth of soil layer 1 in cm
awilt_1 (numpy.ndarray): derived, wilting point of soil layer 1
afiel_1 (numpy.ndarray): derived, field capacity of soil layer 1
Returns:
rwcf_1, relative water content of soil layer 1
"""
valid_mask = (
(asmos_1 != _TARGET_NODATA) &
(adep_1 != _IC_NODATA) &
(awilt_1 != _TARGET_NODATA) &
(afiel_1 != _TARGET_NODATA))
rwcf_1 = numpy.empty(asmos_1.shape, dtype=numpy.float32)
rwcf_1[:] = _TARGET_NODATA
rwcf_1[valid_mask] = (
(asmos_1[valid_mask] / adep_1[valid_mask] - awilt_1[valid_mask]) /
(afiel_1[valid_mask] - awilt_1[valid_mask]))
return rwcf_1
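# Illustrative sketch (not called by the model): relative water content of a
# hypothetical 15 cm surface layer holding 3 cm of water.
def _example_calc_relative_water_content_lyr_1():
    """Expect (3/15 - 0.1) / (0.3 - 0.1) = 0.5."""
    asmos_1 = numpy.array([3.], dtype=numpy.float32)
    adep_1 = numpy.array([15.], dtype=numpy.float32)
    awilt_1 = numpy.array([0.1], dtype=numpy.float32)
    afiel_1 = numpy.array([0.3], dtype=numpy.float32)
    return calc_relative_water_content_lyr_1(
        asmos_1, adep_1, awilt_1, afiel_1)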
def calc_evaporation_loss(rwcf_1, pevp, absevap, asmos_1, awilt_1, adep_1):
"""Calculate evaporation from soil layer 1.
Some moisture is lost from soil layer 1 (i.e., the top soil layer) to
evaporation, separate from surface evaporation and transpiration by plants.
This amount is calculated from potential soil evaporation, which was
calculated from potential evapotranspiration prior to allocation of water
to soil layers. It is restricted to be less than or equal to water
available in this soil layer.
Parameters:
rwcf_1 (numpy.ndarray): derived, relative water content of soil layer 1
pevp (numpy.ndarray): derived, potential evaporation from soil layer 1
absevap (numpy.ndarray): derived, bare soil evaporation
asmos_1 (numpy.ndarray): derived, interim moisture in soil layer 1
awilt_1 (numpy.ndarray): derived, wilting point of soil layer 1
adep_1 (numpy.ndarray): parameter, depth of soil layer 1 in cm
Returns:
evlos, moisture evaporated from soil layer 1
"""
valid_mask = (
(rwcf_1 != _TARGET_NODATA) &
(pevp != _TARGET_NODATA) &
(absevap != _TARGET_NODATA) &
(asmos_1 != _TARGET_NODATA) &
(awilt_1 != _TARGET_NODATA) &
(adep_1 != _IC_NODATA))
evmt = numpy.empty(rwcf_1.shape, dtype=numpy.float32)
evmt[:] = _TARGET_NODATA
evmt[valid_mask] = numpy.maximum(
(rwcf_1[valid_mask] - 0.25) / (1 - 0.25), 0.01)
evlos = numpy.empty(rwcf_1.shape, dtype=numpy.float32)
evlos[:] = _TARGET_NODATA
evlos[valid_mask] = numpy.minimum(
evmt[valid_mask] * pevp[valid_mask] * absevap[valid_mask] * 0.1,
numpy.maximum(
asmos_1[valid_mask] - awilt_1[valid_mask] *
adep_1[valid_mask], 0.))
return evlos
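# Illustrative sketch (not called by the model): evaporation from soil layer 1
# for hypothetical inputs; the loss is capped by the water held above the
# wilting point of the layer.
def _example_calc_evaporation_loss():
    """Expect min(((0.5 - 0.25) / 0.75) * 2 * 1.5 * 0.1, 1.5) = 0.1 cm."""
    rwcf_1 = numpy.array([0.5], dtype=numpy.float32)
    pevp = numpy.array([2.], dtype=numpy.float32)
    absevap = numpy.array([1.5], dtype=numpy.float32)
    asmos_1 = numpy.array([3.], dtype=numpy.float32)
    awilt_1 = numpy.array([0.1], dtype=numpy.float32)
    adep_1 = numpy.array([15.], dtype=numpy.float32)
    return calc_evaporation_loss(
        rwcf_1, pevp, absevap, asmos_1, awilt_1, adep_1)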
def _soil_water(
aligned_inputs, site_param_table, veg_trait_table, current_month,
month_index, prev_sv_reg, pp_reg, pft_id_set, month_reg, sv_reg):
"""Allocate precipitation to runoff, transpiration, and soil moisture.
Simulate snowfall and account for evaporation and melting of the snow pack.
Allocate the flow of precipitation through interception by plants,
runoff and infiltration into the soil, percolation through the soil, and
transpiration by plants. Update soil moisture in each soil layer.
Estimate avh2o_1 for each PFT (water available to the PFT for growth),
avh2o_3 (water in first two soil layers), and amov_<lyr> (saturated flow
of water between soil layers, used in decomposition and mineral leaching).
Parameters:
aligned_inputs (dict): map of key, path pairs indicating paths
to aligned model inputs, including precipitation, temperature,
plant functional type composition, and site spatial index
site_param_table (dict): map of site spatial indices to dictionaries
containing site parameters
veg_trait_table (dict): map of pft id to dictionaries containing
plant functional type parameters, including nlaypg, number of soil
layers access by plant roots
current_month (int): month of the year, such that current_month=1
indicates January
month_index (int): month of the simulation, such that month_index=1
indicates month 1 of the simulation
prev_sv_reg (dict): map of key, path pairs giving paths to state
variables for the previous month
pp_reg (dict): map of key, path pairs giving persistent parameters
including field capacity of each soil layer
pft_id_set (set): set of integers identifying plant functional types
month_reg (dict): map of key, path pairs giving paths to intermediate
calculated values that are shared between submodels
sv_reg (dict): map of key, path pairs giving paths to state variables
for the current month
Side effects:
creates the raster indicated by `sv_reg['snow_path']`, current snowpack
creates the raster indicated by `sv_reg['snlq_path']`, current liquid
in snow
creates the raster indicated by
`sv_reg['asmos_<lyr>_path']`, soil moisture content, for each soil
layer accessible by roots of any plant functional type
creates the rasters indicated by `month_reg['amov_<lyr>']` for each
soil layer, saturated flow of water from that soil layer
creates the raster indicated by `sv_reg['avh2o_1_<PFT>_path']`, soil
moisture available for growth, for each plant functional type (PFT)
creates the raster indicated by `sv_reg['avh2o_3_path']`, available
water in the top two soil layers
Returns:
None
"""
def calc_avg_temp(max_temp, min_temp):
"""Calculate average temperature from maximum and minimum temp."""
valid_mask = (
(~numpy.isclose(max_temp, max_temp_nodata)) &
(~numpy.isclose(min_temp, min_temp_nodata)))
tave = numpy.empty(max_temp.shape, dtype=numpy.float32)
tave[:] = _IC_NODATA
tave[valid_mask] = (max_temp[valid_mask] + min_temp[valid_mask]) / 2.
return tave
def calc_surface_litter_biomass(strucc_1, metabc_1):
"""Calculate biomass in surface litter."""
valid_mask = (
(~numpy.isclose(strucc_1, _SV_NODATA)) &
(~numpy.isclose(metabc_1, _SV_NODATA)))
alit = numpy.empty(strucc_1.shape, dtype=numpy.float32)
alit[:] = _TARGET_NODATA
alit[valid_mask] = (strucc_1[valid_mask] + metabc_1[valid_mask]) * 2.5
alit = numpy.minimum(alit, 400)
return alit
max_temp_nodata = pygeoprocessing.get_raster_info(
aligned_inputs['max_temp_{}'.format(current_month)])['nodata'][0]
min_temp_nodata = pygeoprocessing.get_raster_info(
aligned_inputs['min_temp_{}'.format(current_month)])['nodata'][0]
# get max number of soil layers accessible by plants
nlaypg_max = int(max(val['nlaypg'] for val in veg_trait_table.values()))
# max number of soil layers simulated, beyond those accessible by plants
nlayer_max = int(max(val['nlayer'] for val in site_param_table.values()))
# temporary intermediate rasters for soil water submodel
temp_dir = tempfile.mkdtemp(dir=PROCESSING_DIR)
temp_val_dict = {}
for val in [
'tave', 'current_moisture_inputs', 'modified_moisture_inputs',
'pet_rem', 'alit', 'sum_aglivc', 'sum_stdedc', 'sum_tgprod',
'aliv', 'sd', 'absevap', 'evap_losses', 'trap', 'trap_revised',
'pevp', 'tot', 'tot2', 'rwcf_1', 'evlos', 'avinj_interim_1']:
temp_val_dict[val] = os.path.join(temp_dir, '{}.tif'.format(val))
# temporary intermediate values for each layer accessible by plants
for val in ['avw', 'awwt', 'avinj']:
for lyr in range(1, nlaypg_max + 1):
val_lyr = '{}_{}'.format(val, lyr)
temp_val_dict[val_lyr] = os.path.join(
temp_dir, '{}.tif'.format(val_lyr))
# temporary intermediate value for each layer total
for lyr in range(1, nlayer_max + 1):
val_lyr = 'asmos_interim_{}'.format(lyr)
temp_val_dict[val_lyr] = os.path.join(
temp_dir, '{}.tif'.format(val_lyr))
# PFT-level temporary calculated values
for pft_i in pft_id_set:
for val in ['tgprod_weighted', 'sum_avinj']:
temp_val_dict['{}_{}'.format(val, pft_i)] = os.path.join(
temp_dir, '{}_{}.tif'.format(val, pft_i))
param_val_dict = {}
for val in ['fracro', 'precro', 'fwloss_1', 'fwloss_2']:
target_path = os.path.join(temp_dir, '{}.tif'.format(val))
param_val_dict[val] = target_path
site_to_val = dict(
[(site_code, float(table[val])) for
(site_code, table) in site_param_table.items()])
pygeoprocessing.reclassify_raster(
(aligned_inputs['site_index'], 1), site_to_val, target_path,
gdal.GDT_Float32, _IC_NODATA)
for lyr in range(1, nlaypg_max + 1):
val_lyr = 'awtl_{}'.format(lyr)
target_path = os.path.join(temp_dir, '{}.tif'.format(val_lyr))
param_val_dict[val_lyr] = target_path
site_to_val = dict(
[(site_code, float(table[val_lyr])) for
(site_code, table) in site_param_table.items()])
pygeoprocessing.reclassify_raster(
(aligned_inputs['site_index'], 1), site_to_val, target_path,
gdal.GDT_Float32, _IC_NODATA)
for lyr in range(1, nlayer_max + 1):
val_lyr = 'adep_{}'.format(lyr)
target_path = os.path.join(temp_dir, '{}.tif'.format(val_lyr))
param_val_dict[val_lyr] = target_path
site_to_val = dict(
[(site_code, float(table[val_lyr])) for
(site_code, table) in site_param_table.items()])
pygeoprocessing.reclassify_raster(
(aligned_inputs['site_index'], 1), site_to_val, target_path,
gdal.GDT_Float32, _IC_NODATA)
# calculate canopy and litter cover that influence moisture inputs
# calculate biomass in surface litter
pygeoprocessing.raster_calculator(
[(path, 1) for path in [
prev_sv_reg['strucc_1_path'], prev_sv_reg['metabc_1_path']]],
calc_surface_litter_biomass, temp_val_dict['alit'],
gdal.GDT_Float32, _TARGET_NODATA)
# calculate the sum of aglivc (standing live biomass) and stdedc
# (standing dead biomass) across PFTs, weighted by % cover of each PFT
for sv in ['aglivc', 'stdedc']:
weighted_sum_path = temp_val_dict['sum_{}'.format(sv)]
weighted_state_variable_sum(
sv, prev_sv_reg, aligned_inputs, pft_id_set, weighted_sum_path)
# calculate the weighted sum of tgprod, potential production, across PFTs
weighted_path_list = []
for pft_i in pft_id_set:
do_growth = (
current_month != veg_trait_table[pft_i]['senescence_month'] and
str(current_month) in veg_trait_table[pft_i]['growth_months'])
if do_growth:
target_path = temp_val_dict['tgprod_weighted_{}'.format(pft_i)]
pft_nodata = pygeoprocessing.get_raster_info(
aligned_inputs['pft_{}'.format(pft_i)])['nodata'][0]
raster_multiplication(
month_reg['tgprod_{}'.format(pft_i)], _TARGET_NODATA,
aligned_inputs['pft_{}'.format(pft_i)], pft_nodata,
target_path, _TARGET_NODATA)
weighted_path_list.append(target_path)
if weighted_path_list:
raster_list_sum(
weighted_path_list, _TARGET_NODATA,
temp_val_dict['sum_tgprod'], _TARGET_NODATA, nodata_remove=True)
else: # no potential production occurs this month, so tgprod = 0
pygeoprocessing.new_raster_from_base(
temp_val_dict['sum_aglivc'], temp_val_dict['sum_tgprod'],
gdal.GDT_Float32, [_TARGET_NODATA], fill_value_list=[0.])
# calculate average temperature
pygeoprocessing.raster_calculator(
[(path, 1) for path in [
aligned_inputs['max_temp_{}'.format(current_month)],
aligned_inputs['min_temp_{}'.format(current_month)]]],
calc_avg_temp, temp_val_dict['tave'], gdal.GDT_Float32, _IC_NODATA)
# calculate aboveground live biomass
pygeoprocessing.raster_calculator(
[(path, 1) for path in [
temp_val_dict['sum_aglivc'], temp_val_dict['sum_tgprod']]],
_calc_aboveground_live_biomass, temp_val_dict['aliv'],
gdal.GDT_Float32, _TARGET_NODATA)
# calculate total standing biomass
pygeoprocessing.raster_calculator(
[(path, 1) for path in [
temp_val_dict['aliv'], temp_val_dict['sum_stdedc']]],
_calc_standing_biomass, temp_val_dict['sd'],
gdal.GDT_Float32, _TARGET_NODATA)
# modify standing snow, liquid in snow, return moisture inputs after snow
_snow(
aligned_inputs['site_index'], site_param_table,
aligned_inputs['precip_{}'.format(month_index)],
temp_val_dict['tave'],
aligned_inputs['max_temp_{}'.format(current_month)],
aligned_inputs['min_temp_{}'.format(current_month)],
prev_sv_reg['snow_path'], prev_sv_reg['snlq_path'],
current_month, month_reg['snowmelt'], sv_reg['snow_path'],
sv_reg['snlq_path'], temp_val_dict['modified_moisture_inputs'],
temp_val_dict['pet_rem'])
# remove runoff and surface evaporation from moisture inputs
shutil.copyfile(
temp_val_dict['modified_moisture_inputs'],
temp_val_dict['current_moisture_inputs'])
pygeoprocessing.raster_calculator(
[(path, 1) for path in [
temp_val_dict['current_moisture_inputs'],
param_val_dict['fracro'], param_val_dict['precro'],
sv_reg['snow_path'], temp_val_dict['alit'],
temp_val_dict['sd'], param_val_dict['fwloss_1'],
param_val_dict['fwloss_2'], temp_val_dict['pet_rem']]],
subtract_surface_losses('inputs_after_surface'),
temp_val_dict['modified_moisture_inputs'],
gdal.GDT_Float32, _TARGET_NODATA)
# calculate bare soil evaporation
pygeoprocessing.raster_calculator(
[(path, 1) for path in [
temp_val_dict['current_moisture_inputs'],
param_val_dict['fracro'], param_val_dict['precro'],
sv_reg['snow_path'], temp_val_dict['alit'],
temp_val_dict['sd'], param_val_dict['fwloss_1'],
param_val_dict['fwloss_2'], temp_val_dict['pet_rem']]],
subtract_surface_losses('absevap'),
temp_val_dict['absevap'],
gdal.GDT_Float32, _TARGET_NODATA)
# calculate total losses to surface evaporation
pygeoprocessing.raster_calculator(
[(path, 1) for path in [
temp_val_dict['current_moisture_inputs'],
param_val_dict['fracro'], param_val_dict['precro'],
sv_reg['snow_path'], temp_val_dict['alit'],
temp_val_dict['sd'], param_val_dict['fwloss_1'],
param_val_dict['fwloss_2'], temp_val_dict['pet_rem']]],
subtract_surface_losses('evap_losses'),
temp_val_dict['evap_losses'],
gdal.GDT_Float32, _TARGET_NODATA)
# remove losses due to initial transpiration from water inputs
shutil.copyfile(
temp_val_dict['modified_moisture_inputs'],
temp_val_dict['current_moisture_inputs'])
pygeoprocessing.raster_calculator(
[(path, 1) for path in [
temp_val_dict['pet_rem'], temp_val_dict['evap_losses'],
temp_val_dict['tave'], temp_val_dict['aliv'],
temp_val_dict['current_moisture_inputs']]],
calc_potential_transpiration('modified_moisture_inputs'),
temp_val_dict['modified_moisture_inputs'],
gdal.GDT_Float32, _TARGET_NODATA)
# calculate potential transpiration
pygeoprocessing.raster_calculator(
[(path, 1) for path in [
temp_val_dict['pet_rem'], temp_val_dict['evap_losses'],
temp_val_dict['tave'], temp_val_dict['aliv'],
temp_val_dict['current_moisture_inputs']]],
calc_potential_transpiration('trap'), temp_val_dict['trap'],
gdal.GDT_Float32, _TARGET_NODATA)
# calculate potential evaporation from top soil layer
pygeoprocessing.raster_calculator(
[(path, 1) for path in [
temp_val_dict['pet_rem'], temp_val_dict['evap_losses'],
temp_val_dict['tave'], temp_val_dict['aliv'],
temp_val_dict['current_moisture_inputs']]],
calc_potential_transpiration('pevp'), temp_val_dict['pevp'],
gdal.GDT_Float32, _TARGET_NODATA)
# distribute water to each layer
for lyr in range(1, nlayer_max + 1):
shutil.copyfile(
temp_val_dict['modified_moisture_inputs'],
temp_val_dict['current_moisture_inputs'])
# revise moisture content of this soil layer
pygeoprocessing.raster_calculator(
[(path, 1) for path in [
param_val_dict['adep_{}'.format(lyr)],
pp_reg['afiel_{}_path'.format(lyr)],
prev_sv_reg['asmos_{}_path'.format(lyr)],
temp_val_dict['current_moisture_inputs']]],
distribute_water_to_soil_layer('asmos_revised'),
temp_val_dict['asmos_interim_{}'.format(lyr)],
gdal.GDT_Float32, _TARGET_NODATA)
# calculate soil moisture moving to next layer
pygeoprocessing.raster_calculator(
[(path, 1) for path in [
param_val_dict['adep_{}'.format(lyr)],
pp_reg['afiel_{}_path'.format(lyr)],
prev_sv_reg['asmos_{}_path'.format(lyr)],
temp_val_dict['current_moisture_inputs']]],
distribute_water_to_soil_layer('amov'),
temp_val_dict['modified_moisture_inputs'],
gdal.GDT_Float32, _TARGET_NODATA)
# amov, water moving to next layer, persists between submodels
shutil.copyfile(
temp_val_dict['modified_moisture_inputs'],
month_reg['amov_{}'.format(lyr)])
# calculate available water for transpiration
avw_list = []
for lyr in range(1, nlaypg_max + 1):
pygeoprocessing.raster_calculator(
[(path, 1) for path in [
temp_val_dict['asmos_interim_{}'.format(lyr)],
pp_reg['awilt_{}_path'.format(lyr)],
param_val_dict['adep_{}'.format(lyr)]]],
calc_available_water_for_transpiration,
temp_val_dict['avw_{}'.format(lyr)], gdal.GDT_Float32,
_TARGET_NODATA)
avw_list.append(temp_val_dict['avw_{}'.format(lyr)])
# total water available for transpiration
raster_list_sum(
avw_list, _TARGET_NODATA, temp_val_dict['tot'], _TARGET_NODATA)
# calculate water available for transpiration weighted by transpiration
# depth for that soil layer
awwt_list = []
for lyr in range(1, nlaypg_max + 1):
raster_multiplication(
temp_val_dict['avw_{}'.format(lyr)], _TARGET_NODATA,
param_val_dict['awtl_{}'.format(lyr)], _IC_NODATA,
temp_val_dict['awwt_{}'.format(lyr)], _TARGET_NODATA)
awwt_list.append(temp_val_dict['awwt_{}'.format(lyr)])
# total weighted available water for transpiration
raster_list_sum(
awwt_list, _TARGET_NODATA, temp_val_dict['tot2'], _TARGET_NODATA)
# revise total potential transpiration
pygeoprocessing.raster_calculator(
[(path, 1) for path in [temp_val_dict['trap'], temp_val_dict['tot']]],
revise_potential_transpiration, temp_val_dict['trap_revised'],
gdal.GDT_Float32, _TARGET_NODATA)
# remove water via transpiration
for lyr in range(1, nlaypg_max + 1):
pygeoprocessing.raster_calculator(
[(path, 1) for path in [
temp_val_dict['asmos_interim_{}'.format(lyr)],
pp_reg['awilt_{}_path'.format(lyr)],
param_val_dict['adep_{}'.format(lyr)],
temp_val_dict['trap_revised'],
temp_val_dict['awwt_{}'.format(lyr)], temp_val_dict['tot2']]],
remove_transpiration('avinj'),
temp_val_dict['avinj_{}'.format(lyr)], gdal.GDT_Float32,
_TARGET_NODATA)
pygeoprocessing.raster_calculator(
[(path, 1) for path in [
temp_val_dict['asmos_interim_{}'.format(lyr)],
pp_reg['awilt_{}_path'.format(lyr)],
param_val_dict['adep_{}'.format(lyr)],
temp_val_dict['trap_revised'],
temp_val_dict['awwt_{}'.format(lyr)], temp_val_dict['tot2']]],
remove_transpiration('asmos'), sv_reg['asmos_{}_path'.format(lyr)],
gdal.GDT_Float32, _TARGET_NODATA)
# no transpiration is removed from layers not accessible by plants
for lyr in range(nlaypg_max + 1, nlayer_max + 1):
shutil.copyfile(
temp_val_dict['asmos_interim_{}'.format(lyr)],
sv_reg['asmos_{}_path'.format(lyr)])
# relative water content of soil layer 1
pygeoprocessing.raster_calculator(
[(path, 1) for path in [
sv_reg['asmos_1_path'], param_val_dict['adep_1'],
pp_reg['awilt_1_path'], pp_reg['afiel_1_path']]],
calc_relative_water_content_lyr_1, temp_val_dict['rwcf_1'],
gdal.GDT_Float32, _TARGET_NODATA)
# evaporation from soil layer 1
pygeoprocessing.raster_calculator(
[(path, 1) for path in [
temp_val_dict['rwcf_1'], temp_val_dict['pevp'],
temp_val_dict['absevap'], sv_reg['asmos_1_path'],
pp_reg['awilt_1_path'], param_val_dict['adep_1']]],
calc_evaporation_loss, temp_val_dict['evlos'],
gdal.GDT_Float32, _TARGET_NODATA)
# remove evaporation from total moisture in soil layer 1
shutil.copyfile(sv_reg['asmos_1_path'], temp_val_dict['asmos_interim_1'])
raster_difference(
temp_val_dict['asmos_interim_1'], _TARGET_NODATA,
temp_val_dict['evlos'], _TARGET_NODATA, sv_reg['asmos_1_path'],
_TARGET_NODATA)
# remove evaporation from moisture available to plants in soil layer 1
shutil.copyfile(temp_val_dict['avinj_1'], temp_val_dict['avinj_interim_1'])
raster_difference(
temp_val_dict['avinj_interim_1'], _TARGET_NODATA,
temp_val_dict['evlos'], _TARGET_NODATA, temp_val_dict['avinj_1'],
_TARGET_NODATA)
# calculate avh2o_1, soil water available for growth, for each PFT
for pft_i in pft_id_set:
pft_nodata = pygeoprocessing.get_raster_info(
aligned_inputs['pft_{}'.format(pft_i)])['nodata'][0]
soil_layers_accessible = [
temp_val_dict['avinj_{}'.format(lyr)] for lyr in
range(1, int(veg_trait_table[pft_i]['nlaypg']) + 1)]
raster_list_sum(
soil_layers_accessible, _TARGET_NODATA,
temp_val_dict['sum_avinj_{}'.format(pft_i)],
_TARGET_NODATA, nodata_remove=True)
raster_multiplication(
temp_val_dict['sum_avinj_{}'.format(pft_i)], _TARGET_NODATA,
aligned_inputs['pft_{}'.format(pft_i)], pft_nodata,
sv_reg['avh2o_1_{}_path'.format(pft_i)], _SV_NODATA)
# calculate avh2o_3, moisture in top two soil layers
soil_layers_to_sum = [
temp_val_dict['avinj_{}'.format(lyr)] for lyr in [1, 2]]
raster_list_sum(
soil_layers_to_sum, _TARGET_NODATA, sv_reg['avh2o_3_path'],
_SV_NODATA, nodata_remove=False)
# set correct nodata value for all revised asmos rasters
for lyr in range(1, nlayer_max + 1):
reclassify_nodata(sv_reg['asmos_{}_path'.format(lyr)], _SV_NODATA)
# clean up temporary files
shutil.rmtree(temp_dir)
def calc_anerb(rprpet, pevap, drain, aneref_1, aneref_2, aneref_3):
"""Calculate the effect of soil anaerobic conditions on decomposition.
The impact of soil anaerobic conditions on decomposition is calculated from
soil moisture and reference evapotranspiration. Anerob.f.
Parameters:
rprpet (numpy.ndarray): derived, ratio of precipitation or snowmelt to
reference evapotranspiration
pevap (numpy.ndarray): derived, reference evapotranspiration
drain (numpy.ndarray): parameter, the fraction of excess water lost by
drainage. Indicates whether a soil is sensitive for anaerobiosis
(drain = 0) or not (drain = 1)
aneref_1 (numpy.ndarray): parameter, value of rprpet below which there
is no negative impact of soil anaerobic conditions on decomposition
aneref_2 (numpy.ndarray): parameter, value of rprpet above which there
is maximum negative impact of soil anaerobic conditions on
decomposition
aneref_3 (numpy.ndarray): parameter, minimum value of the impact of
soil anaerobic conditions on decomposition
Returns:
anerb, the effect of soil anaerobic conditions on decomposition
"""
valid_mask = (
(rprpet != _TARGET_NODATA) &
(pevap != _TARGET_NODATA) &
(drain != _IC_NODATA) &
(aneref_1 != _IC_NODATA) &
(aneref_2 != _IC_NODATA) &
(aneref_3 != _IC_NODATA))
xh2o = numpy.empty(rprpet.shape, dtype=numpy.float32)
xh2o[:] = _TARGET_NODATA
xh2o[valid_mask] = (
(rprpet[valid_mask] - aneref_1[valid_mask]) * pevap[valid_mask] *
(1. - drain[valid_mask]))
anerb = numpy.empty(rprpet.shape, dtype=numpy.float32)
anerb[:] = _TARGET_NODATA
anerb[valid_mask] = 1.
high_rprpet_mask = (valid_mask & (rprpet > aneref_1) & (xh2o > 0))
anerb[high_rprpet_mask] = numpy.maximum(
1. + (1. - aneref_3[high_rprpet_mask]) /
(aneref_1[high_rprpet_mask] - aneref_2[high_rprpet_mask]) *
(aneref_1[high_rprpet_mask] +
(xh2o[high_rprpet_mask] / pevap[high_rprpet_mask]) -
aneref_1[high_rprpet_mask]),
aneref_3[high_rprpet_mask])
return anerb
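# Illustrative sketch (not called by the model): anaerobic effect for a
# hypothetical poorly drained pixel (drain = 0). The aneref values below
# resemble, but are not taken from, Century defaults.
def _example_calc_anerb():
    """Expect max(1 + 0.7 / (1.5 - 3.0) * 0.5, 0.3), about 0.767."""
    rprpet = numpy.array([2.], dtype=numpy.float32)
    pevap = numpy.array([10.], dtype=numpy.float32)
    drain = numpy.array([0.], dtype=numpy.float32)
    aneref_1 = numpy.array([1.5], dtype=numpy.float32)
    aneref_2 = numpy.array([3.], dtype=numpy.float32)
    aneref_3 = numpy.array([0.3], dtype=numpy.float32)
    return calc_anerb(rprpet, pevap, drain, aneref_1, aneref_2, aneref_3)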
def esched(return_type):
"""Calculate flow of an element accompanying decomposition of C.
Calculate the movement of one element (N or P) as C decomposes from one
state variable (the donating stock, or box A) to another state variable
(the receiving stock, or box B). Esched.f
Parameters:
return_type (string): flag indicating whether to return material
leaving box A, material arriving in box B, or material flowing
into or out of the mineral pool
Returns:
the function `_esched`
"""
def _esched(cflow, tca, rcetob, anps, labile):
"""Calculate the flow of one element (iel) to accompany decomp of C.
This is a transcription of Esched.f: "Schedule N, P, or S flow and
associated mineralization or immobilization flow for decomposition
from Box A to Box B."
If there is enough of iel (N or P) in the donating stock to satisfy
the required ratio, that material flows from the donating stock to
the receiving stock and whatever iel is leftover goes to mineral
pool. If there is not enough iel to satisfy the required ratio, iel
is drawn from the mineral pool to satisfy the ratio; if there is
not enough iel in the mineral pool, the material does not leave the
donating stock.
Parameters:
cflow (numpy.ndarray): derived, total C that is decomposing from
box A to box B
tca (numpy.ndarray): state variable, C in donating stock, i.e.
box A
rcetob (numpy.ndarray): derived, required ratio of C/iel in the
receiving stock
anps (numpy.ndarray): state variable, iel (N or P) in the donating
stock
labile (numpy.ndarray): state variable, mineral iel (N or P)
Returns:
material_leaving_a, the amount of material leaving box A, if
return_type is 'material_leaving_a'
material_arriving_b, the amount of material arriving in box B,
if return_type is 'material_arriving_b'
mnrflo, flow to or from mineral pool, if return_type is
'mineral_flow'
"""
valid_mask = (
(cflow != _IC_NODATA) &
(~numpy.isclose(tca, _SV_NODATA)) &
(tca > 0) &
(rcetob != _TARGET_NODATA) &
(~numpy.isclose(anps, _SV_NODATA)) &
(~numpy.isclose(labile, _SV_NODATA)))
outofa = numpy.empty(cflow.shape, dtype=numpy.float32)
outofa[:] = _IC_NODATA
outofa[valid_mask] = (
anps[valid_mask] * (cflow[valid_mask] / tca[valid_mask]))
immobil_ratio = numpy.zeros(cflow.shape)
nonzero_mask = ((outofa > 0) & valid_mask)
immobil_ratio[nonzero_mask] = (
cflow[nonzero_mask] / outofa[nonzero_mask])
immflo = numpy.zeros(cflow.shape)
immflo[valid_mask] = (
cflow[valid_mask] / rcetob[valid_mask] - outofa[valid_mask])
labile_supply = numpy.zeros(cflow.shape)
labile_supply[valid_mask] = labile[valid_mask] - immflo[valid_mask]
atob = numpy.zeros(cflow.shape)
atob[valid_mask] = cflow[valid_mask] / rcetob[valid_mask]
# immobilization
immobilization_mask = (
(immobil_ratio > rcetob) &
(labile_supply > 0) &
valid_mask)
# mineralization
mineralization_mask = (
(immobil_ratio <= rcetob) &
valid_mask)
# no movement
no_movt_mask = (
(immobil_ratio > rcetob) &
(labile_supply <= 0) &
valid_mask)
material_leaving_a = numpy.empty(cflow.shape, dtype=numpy.float32)
material_leaving_a[:] = _IC_NODATA
material_arriving_b = numpy.empty(cflow.shape, dtype=numpy.float32)
material_arriving_b[:] = _IC_NODATA
mnrflo = numpy.empty(cflow.shape, dtype=numpy.float32)
mnrflo[:] = _IC_NODATA
material_leaving_a[immobilization_mask] = (
outofa[immobilization_mask])
material_arriving_b[immobilization_mask] = (
outofa[immobilization_mask] + immflo[immobilization_mask])
mnrflo[immobilization_mask] = -immflo[immobilization_mask]
material_leaving_a[mineralization_mask] = outofa[mineralization_mask]
material_arriving_b[mineralization_mask] = atob[mineralization_mask]
mnrflo[mineralization_mask] = (
outofa[mineralization_mask] - atob[mineralization_mask])
material_leaving_a[no_movt_mask] = 0.
material_arriving_b[no_movt_mask] = 0.
mnrflo[no_movt_mask] = 0.
if return_type == 'material_leaving_a':
return material_leaving_a
elif return_type == 'material_arriving_b':
return material_arriving_b
elif return_type == 'mineral_flow':
return mnrflo
return _esched
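# Illustrative sketch (not called by the model): element flow accompanying
# decomposition when the donating stock is richer in N than the receiving
# stock requires (C/N of 10 decomposing into a pool that requires C/N of 20),
# so the surplus N is mineralized. All values are hypothetical.
def _example_esched_mineralization():
    """Expect 0.1 leaving box A, 0.05 arriving in box B, 0.05 mineralized."""
    cflow = numpy.array([1.], dtype=numpy.float32)    # C flowing from A to B
    tca = numpy.array([10.], dtype=numpy.float32)     # C in box A
    rcetob = numpy.array([20.], dtype=numpy.float32)  # required C/N in box B
    anps = numpy.array([1.], dtype=numpy.float32)     # N in box A
    labile = numpy.array([0.5], dtype=numpy.float32)  # mineral N
    leaving_a = esched('material_leaving_a')(cflow, tca, rcetob, anps, labile)
    arriving_b = esched('material_arriving_b')(
        cflow, tca, rcetob, anps, labile)
    mineral_flow = esched('mineral_flow')(cflow, tca, rcetob, anps, labile)
    return leaving_a, arriving_b, mineral_flow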
def fsfunc(minerl_1_2, sorpmx, pslsrb):
"""Calculate the fraction of mineral P that is in solution.
The fraction of P in solution is influenced by two soil properties:
the maximum sorption potential of the soil and sorption affinity.
Parameters:
minerl_1_2 (numpy.ndarray): state variable, mineral P in top layer
sorpmx (numpy.ndarray): parameter, maximum P sorption potential
pslsrb (numpy.ndarray): parameter, slope term which controls the
fraction of mineral P that is labile
Returns:
fsol, fraction of P in solution
"""
valid_mask = (
(~numpy.isclose(minerl_1_2, _SV_NODATA)) &
(minerl_1_2 > 0) &
(sorpmx != _IC_NODATA) &
(pslsrb != _IC_NODATA))
c_ar = numpy.zeros(minerl_1_2.shape, dtype=numpy.float32)
c_ar[valid_mask] = (
sorpmx[valid_mask] * (2.0 - pslsrb[valid_mask]) / 2.)
b_ar = numpy.zeros(minerl_1_2.shape, dtype=numpy.float32)
b_ar[valid_mask] = (
sorpmx[valid_mask] - minerl_1_2[valid_mask] + c_ar[valid_mask])
sq_ar = numpy.zeros(minerl_1_2.shape, dtype=numpy.float32)
sq_ar[valid_mask] = (
b_ar[valid_mask] * b_ar[valid_mask] + 4. * c_ar[valid_mask] *
minerl_1_2[valid_mask])
sqrt_ar = numpy.zeros(minerl_1_2.shape, dtype=numpy.float32)
sqrt_ar[valid_mask] = numpy.sqrt(sq_ar[valid_mask])
labile = numpy.zeros(minerl_1_2.shape, dtype=numpy.float32)
labile[valid_mask] = (-b_ar[valid_mask] + sqrt_ar[valid_mask]) / 2.
fsol = numpy.empty(minerl_1_2.shape, dtype=numpy.float32)
fsol[:] = _TARGET_NODATA
fsol[valid_mask] = labile[valid_mask] / minerl_1_2[valid_mask]
return fsol
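# Illustrative sketch (not called by the model): fraction of mineral P in
# solution for hypothetical sorption parameters (sorpmx = 2, pslsrb = 1) and
# 5 units of mineral P in the top layer.
def _example_fsfunc():
    """Expect roughly 0.69 of mineral P in solution for these inputs."""
    minerl_1_2 = numpy.array([5.], dtype=numpy.float32)
    sorpmx = numpy.array([2.], dtype=numpy.float32)
    pslsrb = numpy.array([1.], dtype=numpy.float32)
    return fsfunc(minerl_1_2, sorpmx, pslsrb)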
def calc_surface_som2_ratio(
som1c_1, som1e_1_iel, rad1p_1_iel, rad1p_2_iel, rad1p_3_iel,
pcemic1_2_iel):
"""Calculate the required C/iel ratio for material entering surface SOM2.
The C/iel ratio of material decomposing from surface SOM1 into surface SOM2
fluctuates with each decomposition time step according to the current C/iel
content of SOM1.
Parameters:
som1c_1 (numpy.ndarray): state variable, C in surface SOM1
som1e_1_iel (numpy.ndarray): state variable, iel in surface SOM1
rad1p_1_iel (numpy.ndarray): parameter, intercept term
rad1p_2_iel (numpy.ndarray): parameter, slope term
rad1p_3_iel (numpy.ndarray): parameter, minimum allowable C/iel for
addition term
pcemic1_2_iel (numpy.ndarray): parameter, minimum C/iel ratio
Returns:
rceto2_surface, required C/iel ratio of material entering surface SOM2
"""
valid_mask = (
(~numpy.isclose(som1c_1, _SV_NODATA)) &
(~numpy.isclose(som1e_1_iel, _SV_NODATA)) &
(som1e_1_iel > 0) &
(rad1p_1_iel != _IC_NODATA) &
(rad1p_2_iel != _IC_NODATA) &
(pcemic1_2_iel != _IC_NODATA) &
(rad1p_3_iel != _IC_NODATA))
radds1 = numpy.empty(som1c_1.shape, dtype=numpy.float32)
radds1[:] = _TARGET_NODATA
radds1[valid_mask] = (
rad1p_1_iel[valid_mask] + rad1p_2_iel[valid_mask] *
((som1c_1[valid_mask] / som1e_1_iel[valid_mask]) -
pcemic1_2_iel[valid_mask]))
rceto2_surface = numpy.empty(som1c_1.shape, dtype=numpy.float32)
rceto2_surface[:] = _TARGET_NODATA
rceto2_surface[valid_mask] = numpy.maximum(
(som1c_1[valid_mask] / som1e_1_iel[valid_mask] + radds1[valid_mask]),
rad1p_3_iel[valid_mask])
return rceto2_surface
def calc_tcflow_strucc_1(
aminrl_1, aminrl_2, strucc_1, struce_1_1, struce_1_2, rnewas_1_1,
rnewas_2_1, strmax_1, defac, dec1_1, pligst_1, strlig_1, pheff_struc):
"""Calculate total flow out of surface structural C.
The total potential flow of C out of surface structural material is
calculated according to its lignin content, the decomposition factor, and
soil pH. The actual flow is limited by the availability of N and P. N and P
may be supplied by the mineral source, or by the element (N or P) in the
decomposing stock.
Parameters:
aminrl_1 (numpy.ndarray): derived, average surface mineral N
aminrl_2 (numpy.ndarray): derived, average surface mineral P
strucc_1 (numpy.ndarray): state variable, surface structural C
struce_1_1 (numpy.ndarray): state variable, surface structural N
struce_1_2 (numpy.ndarray): state variable, surface structural P
rnewas_1_1 (numpy.ndarray): derived, required C/N ratio for
aboveground material decomposing to SOM1
rnewas_2_1 (numpy.ndarray): derived, required C/P ratio for
aboveground material decomposing to SOM1
strmax_1 (numpy.ndarray): parameter, maximum decomposition amount
defac (numpy.ndarray): derived, decomposition factor
dec1_1 (numpy.ndarray): parameter, maximum decomposition rate
pligst_1 (numpy.ndarray): parameter, effect of lignin content on
decomposition rate
strlig_1 (numpy.ndarray): state variable, lignin content of decomposing
material
pheff_struc (numpy.ndarray): derived, effect of soil pH on
decomposition rate
Returns:
tcflow_strucc_1, total flow of C out of surface structural
material
"""
valid_mask = (
(~numpy.isclose(aminrl_1, _SV_NODATA)) &
(~numpy.isclose(aminrl_2, _SV_NODATA)) &
(~numpy.isclose(strucc_1, _SV_NODATA)) &
(~numpy.isclose(struce_1_1, _SV_NODATA)) &
(~numpy.isclose(struce_1_2, _SV_NODATA)) &
(rnewas_1_1 != _TARGET_NODATA) &
(rnewas_2_1 != _TARGET_NODATA) &
(strmax_1 != _IC_NODATA) &
(defac != _TARGET_NODATA) &
(dec1_1 != _IC_NODATA) &
(pligst_1 != _IC_NODATA) &
(~numpy.isclose(strlig_1, _SV_NODATA)) &
(pheff_struc != _TARGET_NODATA))
potential_flow = numpy.zeros(aminrl_1.shape, dtype=numpy.float32)
potential_flow[valid_mask] = (
numpy.minimum(strucc_1[valid_mask], strmax_1[valid_mask]) *
defac[valid_mask] * dec1_1[valid_mask] *
numpy.exp(-pligst_1[valid_mask] * strlig_1[valid_mask]) * 0.020833 *
pheff_struc[valid_mask])
decompose_mask = (
((aminrl_1 > 0.0000001) | ((strucc_1 / struce_1_1) <= rnewas_1_1)) &
((aminrl_2 > 0.0000001) | ((strucc_1 / struce_1_2) <= rnewas_2_1)) &
valid_mask)
tcflow_strucc_1 = numpy.empty(aminrl_1.shape, dtype=numpy.float32)
tcflow_strucc_1[:] = _IC_NODATA
tcflow_strucc_1[valid_mask] = 0.
tcflow_strucc_1[decompose_mask] = potential_flow[decompose_mask]
return tcflow_strucc_1
def calc_tcflow_strucc_2(
aminrl_1, aminrl_2, strucc_2, struce_2_1, struce_2_2, rnewbs_1_1,
rnewbs_2_1, strmax_2, defac, dec1_2, pligst_2, strlig_2, pheff_struc,
anerb):
"""Calculate total flow out of soil structural C.
The total potential flow of C out of soil structural material is
calculated according to its lignin content, the decomposition factor, and
soil pH. The actual flow is limited by the availability of N and P. N and P
may be supplied by the mineral source, or by the element (N or P) in the
decomposing stock.
Parameters:
aminrl_1 (numpy.ndarray): derived, average soil mineral N
aminrl_2 (numpy.ndarray): derived, average soil mineral P
strucc_2 (numpy.ndarray): state variable, soil structural C
struce_2_1 (numpy.ndarray): state variable, soil structural N
struce_2_2 (numpy.ndarray): state variable, soil structural P
rnewbs_1_1 (numpy.ndarray): derived, required C/N ratio for
belowground material decomposing to SOM1
rnewbs_2_1 (numpy.ndarray): derived, required C/P ratio for
belowground material decomposing to SOM1
strmax_2 (numpy.ndarray): parameter, maximum decomposition amount
defac (numpy.ndarray): derived, decomposition factor
dec1_2 (numpy.ndarray): parameter, maximum decomposition rate
pligst_2 (numpy.ndarray): parameter, effect of lignin content on
decomposition rate
strlig_2 (numpy.ndarray): state variable, lignin content of decomposing
material
pheff_struc (numpy.ndarray): derived, effect of soil pH on
decomposition rate
anerb (numpy.ndarray): derived, effect of soil anaerobic conditions on
decomposition rate
Returns:
tcflow_strucc_2, total flow of C out of soil structural
material
"""
valid_mask = (
(~numpy.isclose(aminrl_1, _SV_NODATA)) &
(~numpy.isclose(aminrl_2, _SV_NODATA)) &
(~numpy.isclose(strucc_2, _SV_NODATA)) &
(~numpy.isclose(struce_2_1, _SV_NODATA)) &
(~numpy.isclose(struce_2_2, _SV_NODATA)) &
(rnewbs_1_1 != _TARGET_NODATA) &
(rnewbs_2_1 != _TARGET_NODATA) &
(strmax_2 != _IC_NODATA) &
(defac != _TARGET_NODATA) &
(dec1_2 != _IC_NODATA) &
(pligst_2 != _IC_NODATA) &
(~numpy.isclose(strlig_2, _SV_NODATA)) &
(pheff_struc != _TARGET_NODATA) &
(anerb != _TARGET_NODATA))
potential_flow = numpy.zeros(aminrl_1.shape, dtype=numpy.float32)
potential_flow[valid_mask] = (
numpy.minimum(strucc_2[valid_mask], strmax_2[valid_mask]) *
defac[valid_mask] * dec1_2[valid_mask] *
numpy.exp(-pligst_2[valid_mask] * strlig_2[valid_mask]) * 0.020833 *
pheff_struc[valid_mask] * anerb[valid_mask])
decompose_mask = (
((aminrl_1 > 0.0000001) | ((strucc_2 / struce_2_1) <= rnewbs_1_1)) &
((aminrl_2 > 0.0000001) | ((strucc_2 / struce_2_2) <= rnewbs_2_1)) &
valid_mask)
tcflow_strucc_2 = numpy.empty(aminrl_1.shape, dtype=numpy.float32)
tcflow_strucc_2[:] = _IC_NODATA
tcflow_strucc_2[valid_mask] = 0.
tcflow_strucc_2[decompose_mask] = potential_flow[decompose_mask]
return tcflow_strucc_2
def calc_tcflow_surface(
aminrl_1, aminrl_2, cstatv, estatv_1, estatv_2, rcetob_1, rcetob_2,
defac, dec_param, pheff):
"""Calculate total flow of C out of a surface pool.
The total potential flow of C out of a surface pool is calculated according
to the decomposition factor and soil pH. The actual flow is limited by the
availability of N and P. N and P may be supplied by the mineral source, or
by the element (N or P) in the decomposing stock.
Parameters:
aminrl_1 (numpy.ndarray): derived, average surface mineral N
aminrl_2 (numpy.ndarray): derived, average surface mineral P
cstatv (numpy.ndarray): state variable, C in decomposing pool
estatv_1 (numpy.ndarray): state variable, N in decomposing pool
estatv_2 (numpy.ndarray): state variable, P in decomposing pool
rcetob_1 (numpy.ndarray): derived, required C/N ratio for
material entering the receiving pool
rcetob_2 (numpy.ndarray): derived, required C/P ratio for
material entering the receiving pool
defac (numpy.ndarray): derived, decomposition factor
dec_param (numpy.ndarray): parameter, maximum decomposition rate
pheff (numpy.ndarray): derived, effect of soil pH on
decomposition rate
Returns:
tcflow, total flow of C out of the decomposing pool
"""
valid_mask = (
(~numpy.isclose(aminrl_1, _SV_NODATA)) &
(~numpy.isclose(aminrl_2, _SV_NODATA)) &
(~numpy.isclose(cstatv, _SV_NODATA)) &
(~numpy.isclose(estatv_1, _SV_NODATA)) &
(~numpy.isclose(estatv_2, _SV_NODATA)) &
(rcetob_1 != _TARGET_NODATA) &
(rcetob_2 != _TARGET_NODATA) &
(defac != _TARGET_NODATA) &
(dec_param != _IC_NODATA) &
(pheff != _TARGET_NODATA))
potential_flow = numpy.zeros(aminrl_1.shape, dtype=numpy.float32)
potential_flow[valid_mask] = (
numpy.minimum(
cstatv[valid_mask] * defac[valid_mask] * dec_param[valid_mask] *
0.020833 * pheff[valid_mask], cstatv[valid_mask]))
decompose_mask = (
((aminrl_1 > 0.0000001) | ((cstatv / estatv_1) <= rcetob_1)) &
((aminrl_2 > 0.0000001) | ((cstatv / estatv_2) <= rcetob_2)) &
valid_mask)
tcflow = numpy.empty(aminrl_1.shape, dtype=numpy.float32)
tcflow[:] = _IC_NODATA
tcflow[valid_mask] = 0.
tcflow[decompose_mask] = potential_flow[decompose_mask]
return tcflow
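# Illustrative sketch (not called by the model): demonstrates how decomposition
# of a surface pool is gated by mineral N availability. With no mineral N and
# a donating C/N ratio (20) above the required ratio (16), no C flows; with
# some mineral N present, the potential flow is released. Values hypothetical.
def _example_calc_tcflow_surface():
    """Expect 0 flow in the N-limited case, about 3.25 with mineral N."""
    cstatv = numpy.array([100.], dtype=numpy.float32)    # C in decomposing pool
    estatv_1 = numpy.array([5.], dtype=numpy.float32)    # N: C/N = 20
    estatv_2 = numpy.array([1.], dtype=numpy.float32)    # P: C/P = 100
    rcetob_1 = numpy.array([16.], dtype=numpy.float32)   # required C/N
    rcetob_2 = numpy.array([150.], dtype=numpy.float32)  # required C/P
    defac = numpy.array([0.5], dtype=numpy.float32)
    dec_param = numpy.array([3.9], dtype=numpy.float32)
    pheff = numpy.array([0.8], dtype=numpy.float32)
    aminrl_2 = numpy.array([0.2], dtype=numpy.float32)
    no_mineral_n = calc_tcflow_surface(
        numpy.array([0.], dtype=numpy.float32), aminrl_2, cstatv, estatv_1,
        estatv_2, rcetob_1, rcetob_2, defac, dec_param, pheff)
    with_mineral_n = calc_tcflow_surface(
        numpy.array([0.5], dtype=numpy.float32), aminrl_2, cstatv, estatv_1,
        estatv_2, rcetob_1, rcetob_2, defac, dec_param, pheff)
    return no_mineral_n, with_mineral_n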
def calc_tcflow_soil(
aminrl_1, aminrl_2, cstatv, estatv_1, estatv_2, rcetob_1,
rcetob_2, defac, dec_param, pheff, anerb):
"""Calculate total flow out of soil metabolic C.
The total potential flow of C out of soil metabolic material is
calculated according to the decomposition factor, soil pH, and soil
anaerobic conditions. The actual flow is limited by the availability of N
and P. N and P may be supplied by the mineral source, or by the element
(N or P) in the decomposing stock.
Parameters:
aminrl_1 (numpy.ndarray): derived, average soil mineral N
aminrl_2 (numpy.ndarray): derived, average soil mineral P
cstatv (numpy.ndarray): state variable, C in decomposing stock
estatv_1 (numpy.ndarray): state variable, N in decomposing stock
estatv_2 (numpy.ndarray): state variable, P in decomposing stock
rcetob_1 (numpy.ndarray): derived, required C/N ratio for
material entering receiving stock
rcetob_2 (numpy.ndarray): derived, required C/P ratio for
material entering receiving stock
defac (numpy.ndarray): derived, decomposition factor
dec_param (numpy.ndarray): parameter, maximum decomposition rate
pheff (numpy.ndarray): derived, effect of soil pH on
decomposition rate
anerb (numpy.ndarray): derived, effect of soil anaerobic
conditions on decomposition rate
Returns:
tcflow_soil, total flow of C out of the decomposing soil pool
"""
valid_mask = (
(~numpy.isclose(aminrl_1, _SV_NODATA)) &
(~numpy.isclose(aminrl_2, _SV_NODATA)) &
(~numpy.isclose(cstatv, _SV_NODATA)) &
(~numpy.isclose(estatv_1, _SV_NODATA)) &
(~numpy.isclose(estatv_2, _SV_NODATA)) &
(rcetob_1 != _TARGET_NODATA) &
(rcetob_2 != _TARGET_NODATA) &
(defac != _TARGET_NODATA) &
(dec_param != _IC_NODATA) &
(pheff != _TARGET_NODATA) &
(anerb != _TARGET_NODATA))
potential_flow = numpy.zeros(aminrl_1.shape, dtype=numpy.float32)
potential_flow[valid_mask] = (
numpy.minimum(
cstatv[valid_mask] * defac[valid_mask] * dec_param[valid_mask] *
0.020833 * pheff[valid_mask] * anerb[valid_mask],
cstatv[valid_mask]))
decompose_mask = (
((aminrl_1 > 0.0000001) | ((cstatv / estatv_1) <= rcetob_1)) &
((aminrl_2 > 0.0000001) | ((cstatv / estatv_2) <= rcetob_2)) &
valid_mask)
tcflow_soil = numpy.empty(aminrl_1.shape, dtype=numpy.float32)
tcflow_soil[:] = _IC_NODATA
tcflow_soil[valid_mask] = 0.
tcflow_soil[decompose_mask] = potential_flow[decompose_mask]
return tcflow_soil
def calc_tcflow_som1c_2(
aminrl_1, aminrl_2, som1c_2, som1e_2_1, som1e_2_2, rceto2_1,
rceto2_2, defac, dec3_2, eftext, anerb, pheff_metab):
"""Calculate total flow out of soil SOM1.
The total potential flow of C out of soil SOM1 is calculated
according to the effect of soil texture, anaerobic conditions,
and soil pH. The actual flow is limited by the availability of N
and P. N and P may be supplied by the mineral source, or by the
element (N or P) in the decomposing stock.
Parameters:
aminrl_1 (numpy.ndarray): derived, average soil mineral N
aminrl_2 (numpy.ndarray): derived, average soil mineral P
som1c_2 (numpy.ndarray): state variable, C in soil SOM1
som1e_2_1 (numpy.ndarray): state variable, N in soil SOM1
som1e_2_2 (numpy.ndarray): state variable, P in soil SOM1
rceto2_1 (numpy.ndarray): derived, required C/N ratio for
material decomposing to soil SOM2
rceto2_2 (numpy.ndarray): derived, required C/P ratio for
material decomposing to soil SOM2
defac (numpy.ndarray): derived, decomposition factor
dec3_2 (numpy.ndarray): parameter, maximum decomposition rate
eftext (numpy.ndarray): derived, effect of soil texture on
decomposition rate
anerb (numpy.ndarray): derived, effect of soil anaerobic conditions
on decomposition rate
pheff_metab (numpy.ndarray): derived, effect of soil pH on
decomposition rate
Returns:
tcflow_som1c_2, total flow of C out of soil SOM1
"""
valid_mask = (
(~numpy.isclose(aminrl_1, _SV_NODATA)) &
(~numpy.isclose(aminrl_2, _SV_NODATA)) &
(~numpy.isclose(som1c_2, _SV_NODATA)) &
(~numpy.isclose(som1e_2_1, _SV_NODATA)) &
(~numpy.isclose(som1e_2_2, _SV_NODATA)) &
(rceto2_1 != _TARGET_NODATA) &
(rceto2_2 != _TARGET_NODATA) &
(defac != _TARGET_NODATA) &
(dec3_2 != _IC_NODATA) &
(eftext != _TARGET_NODATA) &
(anerb != _TARGET_NODATA) &
(pheff_metab != _TARGET_NODATA))
potential_flow = numpy.zeros(aminrl_1.shape, dtype=numpy.float32)
potential_flow[valid_mask] = (
som1c_2[valid_mask] * defac[valid_mask] * dec3_2[valid_mask] *
eftext[valid_mask] * anerb[valid_mask] * 0.020833 *
pheff_metab[valid_mask])
decompose_mask = (
((aminrl_1 > 0.0000001) | ((som1c_2 / som1e_2_1) <= rceto2_1)) &
((aminrl_2 > 0.0000001) | ((som1c_2 / som1e_2_2) <= rceto2_2)) &
valid_mask)
tcflow_som1c_2 = numpy.empty(aminrl_1.shape, dtype=numpy.float32)
tcflow_som1c_2[:] = _IC_NODATA
tcflow_som1c_2[valid_mask] = 0.
tcflow_som1c_2[decompose_mask] = potential_flow[decompose_mask]
return tcflow_som1c_2
def calc_som3_flow(tcflow, fps, animpt, anerb):
"""Calculate the C that flows from soil SOM1 or SOM2 to SOM3.
The fraction of total flow leaving SOM1 or SOM2 that goes to SOM3 is
dependent on soil clay content and soil anaerobic conditions.
Parameters:
tcflow (numpy.ndarray): derived, total C leaving soil SOM1 or SOM2
fps (numpy.ndarray): derived, effect of soil clay content on
decomposition to SOM3
animpt (numpy.ndarray): parameter, slope of relationship between
anaerobic conditions and decomposition flow to SOM3
anerb (numpy.ndarray): derived, impact of soil anaerobic conditions
on decomposition
Returns:
tosom3, C flowing to SOM3
"""
valid_mask = (
(tcflow != _IC_NODATA) &
(fps != _IC_NODATA) &
(animpt != _IC_NODATA) &
(anerb != _TARGET_NODATA))
tosom3 = numpy.empty(tcflow.shape, dtype=numpy.float32)
tosom3[:] = _IC_NODATA
tosom3[valid_mask] = (
tcflow[valid_mask] * fps[valid_mask] *
(1. + animpt[valid_mask] * (1. - anerb[valid_mask])))
return tosom3
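# Illustrative sketch (not called by the model): C routed to SOM3 for a
# hypothetical flow of 2 units of C with clay effect fps = 0.2, animpt = 5,
# and a mild anaerobic effect (anerb = 0.8).
def _example_calc_som3_flow():
    """Expect 2 * 0.2 * (1 + 5 * (1 - 0.8)) = 0.8."""
    tcflow = numpy.array([2.], dtype=numpy.float32)
    fps = numpy.array([0.2], dtype=numpy.float32)
    animpt = numpy.array([5.], dtype=numpy.float32)
    anerb = numpy.array([0.8], dtype=numpy.float32)
    return calc_som3_flow(tcflow, fps, animpt, anerb)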
def calc_som2_flow(som2c_1, cmix, defac):
"""Calculate the C that flows from surface SOM2 to soil SOM2.
Some C flows from surface SOM2 to soil SOM2 via mixing. This flow is
controlled by the parameter cmix.
Parameters:
som2c_1 (numpy.ndarray): state variable, C in surface SOM2
cmix (numpy.ndarray): parameter, amount of C flowing via mixing
defac (numpy.ndarray): derived, decomposition factor
Returns:
tcflow, C flowing to soil SOM2 via mixing
"""
valid_mask = (
(~numpy.isclose(som2c_1, _SV_NODATA)) &
(cmix != _IC_NODATA) &
(defac != _TARGET_NODATA))
tcflow = numpy.empty(som2c_1.shape, dtype=numpy.float32)
tcflow[:] = _IC_NODATA
tcflow[valid_mask] = (
som2c_1[valid_mask] * cmix[valid_mask] * defac[valid_mask] *
0.020833)
return tcflow
def calc_respiration_mineral_flow(cflow, frac_co2, estatv, cstatv):
"""Calculate mineral flow of one element associated with respiration.
As material decomposes from one stock to another, some CO2 is lost
to microbial respiration and some nutrient (N or P) moves to the
mineral pool. Respir.f
Parameters:
cflow (numpy.ndarray): derived, C decomposing from one stock
to another
frac_co2 (numpy.ndarray): parameter, fraction of decomposing
C lost as CO2
estatv (numpy.ndarray): state variable, iel (N or P) in the
decomposing stock
cstatv (numpy.ndarray): state variable, C in the decomposing
stock
Returns:
mineral_flow, flow of iel (N or P) accompanying respiration
"""
valid_mask = (
(cflow != _IC_NODATA) &
(frac_co2 != _IC_NODATA) &
(~numpy.isclose(estatv, _SV_NODATA)) &
(~numpy.isclose(cstatv, _SV_NODATA)))
co2_loss = numpy.zeros(cflow.shape, dtype=numpy.float32)
co2_loss[valid_mask] = cflow[valid_mask] * frac_co2[valid_mask]
mineral_flow = numpy.empty(cflow.shape, dtype=numpy.float32)
mineral_flow[:] = _IC_NODATA
mineral_flow[valid_mask] = 0.
flow_mask = ((cstatv > 0) & valid_mask)
mineral_flow[flow_mask] = (
co2_loss[flow_mask] * estatv[flow_mask] / cstatv[flow_mask])
return mineral_flow
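# Illustrative sketch (not called by the model): N released to the mineral
# pool with respiration when 1 unit of C decomposes from a stock with
# C/N = 20 and 30 percent of the decomposing C is lost as CO2. Values are
# hypothetical.
def _example_calc_respiration_mineral_flow():
    """Expect (1 * 0.3) * (2 / 40) = 0.015."""
    cflow = numpy.array([1.], dtype=numpy.float32)
    frac_co2 = numpy.array([0.3], dtype=numpy.float32)
    estatv = numpy.array([2.], dtype=numpy.float32)
    cstatv = numpy.array([40.], dtype=numpy.float32)
    return calc_respiration_mineral_flow(cflow, frac_co2, estatv, cstatv)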
def update_gross_mineralization(gross_mineralization, mineral_flow):
"""Update gross N mineralization with current mineral flow.
Gross mineralization of N during decomposition is used to calculate
volatilization loss of N after decomposition. It is updated with N
mineral flow if mineral flow is positive.
Parameters:
gross_mineralization (numpy.ndarray): gross N mineralization during
decomposition
mineral_flow (numpy.ndarray): N mineral flow
Returns:
gromin_updated, updated gross mineralization
"""
valid_mask = (
(gross_mineralization != _TARGET_NODATA) &
(mineral_flow != _IC_NODATA))
gromin_updated = numpy.empty(
gross_mineralization.shape, dtype=numpy.float32)
gromin_updated[:] = _TARGET_NODATA
gromin_updated[valid_mask] = gross_mineralization[valid_mask]
update_mask = ((mineral_flow > 0) & valid_mask)
gromin_updated[update_mask] = (
gross_mineralization[update_mask] + mineral_flow[update_mask])
return gromin_updated
def calc_net_cflow(cflow, frac_co2):
"""Calculate net flow of C after loss to CO2.
As material decomposes from one stock to another, some C is lost to
CO2 through microbial respiration. Calculate the net flow of C after
subtracting losses to CO2.
Parameters:
cflow (numpy.ndarray): derived, C decomposing from one stock
to another
frac_co2 (numpy.ndarray): parameter, fraction of decomposing
C lost as CO2
Returns:
net_cflow, amount of decomposing C that flows after accounting
for CO2 losses
"""
valid_mask = (
(cflow != _IC_NODATA) &
(frac_co2 != _IC_NODATA))
co2_loss = numpy.zeros(cflow.shape, dtype=numpy.float32)
co2_loss[valid_mask] = cflow[valid_mask] * frac_co2[valid_mask]
net_cflow = numpy.empty(cflow.shape, dtype=numpy.float32)
net_cflow[:] = _IC_NODATA
net_cflow[valid_mask] = cflow[valid_mask] - co2_loss[valid_mask]
return net_cflow
def calc_net_cflow_tosom2(tcflow, frac_co2, tosom3, cleach):
"""Calculate net flow of C from soil SOM1 to soil SOM2.
The C flowing from soil SOM1 to SOM2 is the remainder of total flow
from SOM1, after accounting for losses to CO2 through respiration,
decomposition to SOM3, and leaching.
Parameters:
tcflow (numpy.ndarray): derived, total C decomposing from soil
SOM1
frac_co2 (numpy.ndarray): parameter, fraction of decomposing
C lost as CO2
tosom3 (numpy.ndarray): derived, C flowing from SOM1 to SOM3
cleach (numpy.ndarray): derived, leached organic C
Returns:
net_tosom2, amount of C that flows from soil SOM1 to soil SOM2
"""
valid_mask = (
(tcflow != _IC_NODATA) &
(frac_co2 != _IC_NODATA) &
(tosom3 != _IC_NODATA) &
(cleach != _TARGET_NODATA))
net_tosom2 = numpy.empty(tcflow.shape, dtype=numpy.float32)
net_tosom2[:] = _IC_NODATA
net_tosom2[valid_mask] = (
tcflow[valid_mask] - (tcflow[valid_mask] * frac_co2[valid_mask]) -
tosom3[valid_mask] - cleach[valid_mask])
return net_tosom2
def calc_net_cflow_tosom1(tcflow, frac_co2, tosom3):
"""Calculate net flow of C from soil SOM2 to soil SOM1.
The C flowing from soil SOM2 to SOM1 is the remainder of total flow
from SOM2, after accounting for losses to CO2 through respiration
and decomposition to SOM3.
Parameters:
tcflow (numpy.ndarray): derived, total C decomposing from soil
SOM2
frac_co2 (numpy.ndarray): parameter, fraction of decomposing
C lost as CO2
tosom3 (numpy.ndarray): derived, C flowing from SOM2 to SOM3
Returns:
net_tosom1, amount of C that flows from soil SOM2 to soil SOM1
"""
valid_mask = (
(tcflow != _IC_NODATA) &
(frac_co2 != _IC_NODATA) &
(tosom3 != _IC_NODATA))
net_tosom1 = numpy.empty(tcflow.shape, dtype=numpy.float32)
net_tosom1[:] = _IC_NODATA
net_tosom1[valid_mask] = (
tcflow[valid_mask] - (tcflow[valid_mask] * frac_co2[valid_mask]) -
tosom3[valid_mask])
return net_tosom1
def respiration(
tcflow_path, frac_co2_path, cstatv_path, estatv_path,
delta_estatv_path, delta_minerl_1_iel_path, gromin_1_path=None):
"""Calculate and apply flow of N or P during respiration.
Microbial respiration accompanies decomposition of most stocks.
Calculate the flow of one element (N or P) to the mineral pool, which
accompanies this respiration.
Parameters:
tcflow_path (string): path to raster containing flow of C that
is accompanied by respiration
frac_co2_path (string): path to raster containing fraction of
C lost to co2
cstatv_path (string): path to raster containing C state variable
of decomposing pool
estatv_path (string): path to raster containing iel (N or P) state
variable of decomposing pool
delta_estatv_path (string): path to raster containing change
in the iel state variable of decomposing pool
delta_minerl_1_iel_path (string): path to raster containing
change in surface mineral iel
gromin_1_path (string): path to raster containing gross
mineralization of N
Side effects:
modifies or creates the raster indicated by `delta_estatv_path`
modifies or creates the raster indicated by `delta_minerl_1_iel_path`
modifies or creates the raster indicated by `gromin_1_path`, if
supplied
Returns:
None
"""
with tempfile.NamedTemporaryFile(
prefix='operand_temp', dir=PROCESSING_DIR) as operand_temp_file:
operand_temp_path = operand_temp_file.name
with tempfile.NamedTemporaryFile(
prefix='d_statv_temp', dir=PROCESSING_DIR) as d_statv_temp_file:
d_statv_temp_path = d_statv_temp_file.name
pygeoprocessing.raster_calculator(
[(path, 1) for path in [
tcflow_path, frac_co2_path, estatv_path,
cstatv_path]],
calc_respiration_mineral_flow, operand_temp_path, gdal.GDT_Float32,
_IC_NODATA)
# mineral flow is removed from the decomposing iel state variable
shutil.copyfile(delta_estatv_path, d_statv_temp_path)
raster_difference(
d_statv_temp_path, _IC_NODATA, operand_temp_path, _IC_NODATA,
delta_estatv_path, _IC_NODATA)
# mineral flow is added to surface mineral iel
shutil.copyfile(delta_minerl_1_iel_path, d_statv_temp_path)
raster_sum(
d_statv_temp_path, _IC_NODATA, operand_temp_path, _IC_NODATA,
delta_minerl_1_iel_path, _IC_NODATA)
if gromin_1_path:
shutil.copyfile(gromin_1_path, d_statv_temp_path)
pygeoprocessing.raster_calculator(
[(path, 1) for path in [
d_statv_temp_path,
operand_temp_path]],
update_gross_mineralization, gromin_1_path,
gdal.GDT_Float32, _TARGET_NODATA)
# clean up
os.remove(operand_temp_path)
os.remove(d_statv_temp_path)
def nutrient_flow(
cflow_path, cstatv_donating_path, estatv_donating_path, rcetob_path,
minerl_1_path, d_estatv_donating_path, d_estatv_receiving_path,
d_minerl_path, gromin_path=None):
"""Calculate and apply the flow of one nutrient accompanying C.
As C decomposes from one compartment to another, nutrients (N and P)
also flow from the donating compartment to the receiving compartment.
Some N or P may also flow to or from the mineral pool. Calculate and
apply the flow of iel (N or P) accompanying the given flow of C.
Parameters:
cflow_path (string): path to raster containing the flow of C
from the donating to the receiving pool
cstatv_donating_path (string): path to raster containing the C
state variable in the donating pool
estatv_donating_path (string): path to raster containing the iel
(N or P) in the donating pool
rcetob_path (string): path to raster containing required C/iel
ratio in the receiving pool
minerl_1_path (string): path to raster containing surface mineral iel
d_estatv_donating_path (string): path to raster containing change
in iel in the donating pool
d_estatv_receiving_path (string): path to raster containing change
in iel in the receiving pool
d_minerl_path (string): path to raster containing change in surface
mineral iel
gromin_path (string): path to raster containing gross mineralization
of N
Side effects:
modifies or creates the raster indicated by `d_estatv_donating_path`
modifies or creates the raster indicated by `d_estatv_receiving_path`
modifies or creates the raster indicated by `d_minerl_path`
modifies or creates the raster indicated by `gromin_path`, if supplied
Returns:
None
"""
with tempfile.NamedTemporaryFile(
prefix='operand_temp', dir=PROCESSING_DIR) as operand_temp_file:
operand_temp_path = operand_temp_file.name
with tempfile.NamedTemporaryFile(
prefix='d_statv_temp', dir=PROCESSING_DIR) as d_statv_temp_file:
d_statv_temp_path = d_statv_temp_file.name
pygeoprocessing.raster_calculator(
[(path, 1) for path in [
cflow_path, cstatv_donating_path, rcetob_path,
estatv_donating_path, minerl_1_path]],
esched('material_leaving_a'), operand_temp_path, gdal.GDT_Float32,
_IC_NODATA)
shutil.copyfile(d_estatv_donating_path, d_statv_temp_path)
raster_difference(
d_statv_temp_path, _IC_NODATA, operand_temp_path, _IC_NODATA,
d_estatv_donating_path, _IC_NODATA)
pygeoprocessing.raster_calculator(
[(path, 1) for path in [
cflow_path, cstatv_donating_path, rcetob_path,
estatv_donating_path, minerl_1_path]],
esched('material_arriving_b'), operand_temp_path, gdal.GDT_Float32,
_IC_NODATA)
shutil.copyfile(d_estatv_receiving_path, d_statv_temp_path)
raster_sum(
d_statv_temp_path, _IC_NODATA, operand_temp_path, _IC_NODATA,
d_estatv_receiving_path, _IC_NODATA)
pygeoprocessing.raster_calculator(
[(path, 1) for path in [
cflow_path, cstatv_donating_path, rcetob_path,
estatv_donating_path, minerl_1_path]],
esched('mineral_flow'), operand_temp_path, gdal.GDT_Float32,
_IC_NODATA)
shutil.copyfile(d_minerl_path, d_statv_temp_path)
raster_sum(
d_statv_temp_path, _IC_NODATA, operand_temp_path, _IC_NODATA,
d_minerl_path, _IC_NODATA)
if gromin_path:
shutil.copyfile(gromin_path, d_statv_temp_path)
pygeoprocessing.raster_calculator(
[(path, 1) for path in [
d_statv_temp_path, operand_temp_path]],
update_gross_mineralization, gromin_path,
gdal.GDT_Float32, _TARGET_NODATA)
# clean up
os.remove(operand_temp_path)
os.remove(d_statv_temp_path)
def calc_c_leach(amov_2, tcflow, omlech_3, orglch):
"""Calculate the amount of C leaching from soil SOM1 to stream flow.
Some C leaches from soil SOM1 if the water flow out of soil layer 2
is above a critical level.
Parameters:
amov_2 (numpy.ndarray): derived, moisture flowing out of soil layer
2
tcflow (numpy.ndarray): derived, total flow of C out of soil SOM1
omlech_3 (numpy.ndarray): parameter, threshold value for amov_2
orglch (numpy.ndarray): derived, effect of sand content on leaching
rate
Returns:
cleach, C leaching from soil SOM1 to stream flow
"""
valid_mask = (
(amov_2 != _TARGET_NODATA) &
(tcflow != _IC_NODATA) &
(omlech_3 != _IC_NODATA) &
(orglch != _IC_NODATA))
cleach = numpy.empty(amov_2.shape, dtype=numpy.float32)
cleach[:] = _TARGET_NODATA
cleach[valid_mask] = 0
linten = numpy.zeros(amov_2.shape)
linten[valid_mask] = numpy.minimum(
(1. - (omlech_3[valid_mask] - amov_2[valid_mask]) /
omlech_3[valid_mask]), 1.)
leach_mask = ((amov_2 > 0) & valid_mask)
cleach[leach_mask] = (
tcflow[leach_mask] * orglch[leach_mask] * linten[leach_mask])
return cleach
def remove_leached_iel(
som1c_2_path, som1e_2_iel_path, cleach_path, d_som1e_2_iel_path,
iel):
"""Remove N or P leached from soil SOM1.
As soil SOM1 decomposes into SOM3, some of N and P is lost from SOM1
through leaching. The amount lost is calculated from the amount of C
leaching from the soil and the proportion of iel (N or P) in soil SOM1.
Parameters:
som1c_2_path (string): path to raster containing C in soil SOM1
som1e_2_iel_path (string): path to raster containing iel in soil
SOM1
cleach_path (string): path to raster containing C leaching from
SOM1
d_som1e_2_iel_path (string): path to raster giving change in
som1e_2_iel
        iel (int): index indicating N (iel == 1) or P (iel == 2)
Side effects:
modifies the raster indicated by `d_som1e_2_iel_path`
Returns:
None
"""
def calc_leached_N(som1c_2, som1e_2_1, cleach):
"""Calculate the N leaching from soil SOM1."""
valid_mask = (
(~numpy.isclose(som1c_2, _SV_NODATA)) &
(~numpy.isclose(som1e_2_1, _SV_NODATA)) &
(som1c_2 > 0) &
(som1e_2_1 > 0) &
(cleach != _TARGET_NODATA))
rceof1_1 = numpy.zeros(som1c_2.shape)
rceof1_1[valid_mask] = som1c_2[valid_mask] / som1e_2_1[valid_mask] * 2.
orgflow = numpy.empty(som1c_2.shape, dtype=numpy.float32)
orgflow[:] = _IC_NODATA
orgflow[valid_mask] = cleach[valid_mask] / rceof1_1[valid_mask]
return orgflow
def calc_leached_P(som1c_2, som1e_2_2, cleach):
"""Calculate the P leaching from soil SOM1."""
valid_mask = (
(~numpy.isclose(som1c_2, _SV_NODATA)) &
(~numpy.isclose(som1e_2_2, _SV_NODATA)) &
(som1c_2 > 0) &
(som1e_2_2 > 0) &
(cleach != _TARGET_NODATA))
rceof1_2 = numpy.zeros(som1c_2.shape)
rceof1_2[valid_mask] = (
som1c_2[valid_mask] / som1e_2_2[valid_mask] * 35.)
orgflow = numpy.empty(som1c_2.shape, dtype=numpy.float32)
orgflow[:] = _IC_NODATA
orgflow[valid_mask] = cleach[valid_mask] / rceof1_2[valid_mask]
return orgflow
with tempfile.NamedTemporaryFile(
prefix='operand_temp', dir=PROCESSING_DIR) as operand_temp_file:
operand_temp_path = operand_temp_file.name
with tempfile.NamedTemporaryFile(
prefix='d_statv_temp', dir=PROCESSING_DIR) as d_statv_temp_file:
d_statv_temp_path = d_statv_temp_file.name
if iel == 1:
pygeoprocessing.raster_calculator(
[(path, 1) for path in [
som1c_2_path, som1e_2_iel_path, cleach_path]],
calc_leached_N, operand_temp_path,
gdal.GDT_Float32, _TARGET_NODATA)
else:
pygeoprocessing.raster_calculator(
[(path, 1) for path in [
som1c_2_path, som1e_2_iel_path, cleach_path]],
calc_leached_P, operand_temp_path,
gdal.GDT_Float32, _TARGET_NODATA)
# remove leached iel from SOM1
shutil.copyfile(d_som1e_2_iel_path, d_statv_temp_path)
raster_difference(
d_statv_temp_path, _IC_NODATA,
operand_temp_path, _IC_NODATA,
d_som1e_2_iel_path, _IC_NODATA)
# clean up
os.remove(operand_temp_path)
os.remove(d_statv_temp_path)
def calc_pflow(pstatv, rate_param, defac):
"""Calculate the flow of mineral P flowing from one pool to another.
Mineral P contains multiple pools, including parent material, labile P,
sorbed and strongly sorbed P, and occluded P. Calculate the flow from one
mineral P pool to another.
Parameters:
pstatv (numpy.ndarray): state variable, P in donating mineral pool
rate_param (numpy.ndarray): parameter, base rate of flow
defac (numpy.ndarray): derived, decomposition rate
Returns:
pflow, mineral P flowing from donating to receiving pool
"""
valid_mask = (
(~numpy.isclose(pstatv, _SV_NODATA)) &
(rate_param != _IC_NODATA) &
(defac != _TARGET_NODATA))
pflow = numpy.empty(pstatv.shape, dtype=numpy.float64)
pflow[:] = _IC_NODATA
pflow[valid_mask] = (
pstatv[valid_mask] * rate_param[valid_mask] * defac[valid_mask] *
0.020833)
return pflow
def calc_pflow_to_secndy(minerl_lyr_2, pmnsec_2, fsol, defac):
"""Calculate the flow of mineral to secondary P in one soil layer.
P flows from the mineral pool of each soil layer into secondary P (strongly
sorbed P) according to the amount in the mineral pool and the amount of P
in solution.
Parameters:
minerl_lyr_2 (numpy.ndarray): state variable, mineral P in soil layer
lyr
pmnsec_2 (numpy.ndarray): parameter, base flow rate
fsol (numpy.ndarray): derived, fraction of P in solution
defac (numpy.ndarray): derived, decomposition factor
Returns:
fmnsec, flow of mineral P to secondary in one soil layer
"""
valid_mask = (
(~numpy.isclose(minerl_lyr_2, _SV_NODATA)) &
(pmnsec_2 != _IC_NODATA) &
(fsol != _TARGET_NODATA) &
(defac != _TARGET_NODATA))
fmnsec = numpy.empty(minerl_lyr_2.shape, dtype=numpy.float64)
fmnsec[:] = _IC_NODATA
fmnsec[valid_mask] = (
pmnsec_2[valid_mask] * minerl_lyr_2[valid_mask] *
(1. - fsol[valid_mask]) * defac[valid_mask] * 0.020833)
return fmnsec
def update_aminrl(
minerl_1_1_path, minerl_1_2_path, fsol_path, aminrl_1_path,
aminrl_2_path):
"""Update aminrl_1 and aminrl_2, average mineral N and P in surface soil.
Aminrl_1, average mineral N, and aminrl_2, average mineral P, represent
labile N or P available for decomposition. They are kept as a running
average of the minerl_1_1 (for N) or minerl_1_2 (for P) state variable
across decomposition time steps.
Parameters:
minerl_1_1_path (string): path to raster giving current mineral N
in soil layer 1
        minerl_1_2_path (string): path to raster giving current mineral P
            in soil layer 1
fsol_path (string): path to raster giving fraction of mineral P in
solution
aminrl_1_path (string): path to raster containing average mineral N
aminrl_2_path (string): path to raster containing average mineral P
Side effects:
modifies or creates the raster indicated by `aminrl_1_path`
        modifies or creates the raster indicated by `aminrl_2_path`
Returns:
None
"""
def update_aminrl_1(aminrl_1_prev, minerl_1_1):
"""Update average mineral N."""
valid_mask = (
(~numpy.isclose(aminrl_1_prev, _SV_NODATA)) &
(~numpy.isclose(minerl_1_1, _SV_NODATA)))
aminrl_1 = numpy.empty(aminrl_1_prev.shape, dtype=numpy.float32)
aminrl_1[:] = _SV_NODATA
aminrl_1[valid_mask] = (
aminrl_1_prev[valid_mask] + minerl_1_1[valid_mask] / 2.)
return aminrl_1
def update_aminrl_2(aminrl_2_prev, minerl_1_2, fsol):
"""Update average mineral P.
Average mineral P is calculated from the fraction of mineral P in
soil layer 1 that is in solution.
Parameters:
aminrl_2_prev (numpy.ndarray): derived, previous average surface
mineral P
minerl_1_2 (numpy.ndarray): state variable, current mineral P in
soil layer 1
fsol (numpy.ndarray): derived, fraction of labile P in solution
Returns:
aminrl_2, updated average mineral P
"""
valid_mask = (
(~numpy.isclose(aminrl_2_prev, _SV_NODATA)) &
(~numpy.isclose(minerl_1_2, _SV_NODATA)) &
(fsol != _TARGET_NODATA))
aminrl_2 = numpy.empty(aminrl_2_prev.shape, dtype=numpy.float32)
aminrl_2[:] = _SV_NODATA
aminrl_2[valid_mask] = (
aminrl_2_prev[valid_mask] +
(minerl_1_2[valid_mask] * fsol[valid_mask]) / 2.)
return aminrl_2
with tempfile.NamedTemporaryFile(
prefix='aminrl_prev', dir=PROCESSING_DIR) as aminrl_prev_file:
aminrl_prev_path = aminrl_prev_file.name
shutil.copyfile(aminrl_1_path, aminrl_prev_path)
pygeoprocessing.raster_calculator(
[(path, 1) for path in [aminrl_prev_path, minerl_1_1_path]],
update_aminrl_1, aminrl_1_path, gdal.GDT_Float32, _SV_NODATA)
shutil.copyfile(aminrl_2_path, aminrl_prev_path)
pygeoprocessing.raster_calculator(
[(path, 1) for path in [
aminrl_prev_path, minerl_1_2_path, fsol_path]],
update_aminrl_2, aminrl_2_path, gdal.GDT_Float32, _SV_NODATA)
# clean up
os.remove(aminrl_prev_path)
def sum_biomass(
weighted_live_c, weighted_dead_c, strucc_1, metabc_1, elitst):
"""Calculate total biomass for the purposes of soil shading.
Total aboveground biomass for the purposes of soil shading is the sum
    of live biomass, standing dead biomass, and litter. The impact of
    litter is modified by the parameter elitst.
Parameters:
weighted_live_c (numpy.ndarray): derived, sum of the state variable
aglivc across plant functional types
weighted_dead_c (numpy.ndarray): derived, sum of the state variable
stdedc across plant functional types
strucc_1 (numpy.ndarray): state variable, surface structural c
metabc_1 (numpy.ndarray): state variable, surface metabolic c
elitst (numpy.ndarray): parameter, effect of litter on soil
temperature relative to live and standing dead biomass
Returns:
biomass, total biomass for purposes of soil shading
"""
valid_mask = (
(weighted_live_c != _TARGET_NODATA) &
(weighted_dead_c != _TARGET_NODATA) &
(~numpy.isclose(strucc_1, _SV_NODATA)) &
(~numpy.isclose(metabc_1, _SV_NODATA)) &
(elitst != _IC_NODATA))
biomass = numpy.empty(weighted_live_c.shape, dtype=numpy.float32)
biomass[:] = _TARGET_NODATA
biomass[valid_mask] = (
(weighted_live_c[valid_mask] + weighted_dead_c[valid_mask]) * 2.5 +
(strucc_1[valid_mask] + metabc_1[valid_mask]) * 2.5 *
elitst[valid_mask])
return biomass
def _decomposition(
aligned_inputs, current_month, month_index, pft_id_set,
site_param_table, year_reg, month_reg, prev_sv_reg, pp_reg, sv_reg):
"""Update soil C, N and P after decomposition.
C, N and P move from one surface or soil stock to another depending on the
availability of N and P in the decomposing stock. This function covers
lines 118-323 in Simsom.f, including decomp.f, litdec.f, somdec.f, and
pschem.f.
Parameters:
aligned_inputs (dict): map of key, path pairs indicating paths
to aligned model inputs, including precipitation, temperature, and
site spatial index
current_month (int): month of the year, such that current_month=1
indicates January
month_index (int): month of the simulation, such that month_index=13
indicates month 13 of the simulation
pft_id_set (set): set of integers identifying plant functional types
site_param_table (dict): map of site spatial indices to dictionaries
containing site parameters
year_reg (dict): map of key, path pairs giving paths to annual
precipitation and annual N deposition rasters
month_reg (dict): map of key, path pairs giving paths to intermediate
calculated values that are shared between submodels
prev_sv_reg (dict): map of key, path pairs giving paths to state
variables for the previous month
pp_reg (dict): map of key, path pairs giving persistent parameters
including required ratios for decomposition, the effect of soil
texture on decomposition rate, and the effect of soil texture on
the rate of organic leaching
sv_reg (dict): map of key, path pairs giving paths to state variables
for the current month
Side effects:
creates all rasters in sv_reg pertaining to structural, metabolic,
som1, som2, and som3 C, N, and P; mineral N and P; and parent,
secondary, and occluded mineral P
Returns:
None
"""
def calc_N_fixation(precip, annual_precip, baseNdep, epnfs_2):
"""Calculate monthly atmospheric N fixation.
        Atmospheric N fixation for the month is calculated from annual N
        deposition, which is calculated once per year, scaled by the ratio
        of monthly precipitation to annual precipitation. Total N fixed in
        this month is
scheduled to be added to the surface mineral N pool. Lines 193-205,
Simsom.f
Parameters:
precip (numpy.ndarray): input, monthly precipitation
annual_precip (numpy.ndarray): derived, annual precipitation
baseNdep (numpy.ndarray): derived, annual atmospheric N deposition
epnfs_2 (numpy.ndarray): parameter, intercept of regression
predicting N deposition from annual precipitation
Returns:
wdfxm, atmospheric N deposition for the current month
"""
valid_mask = (
(~numpy.isclose(precip, precip_nodata)) &
(annual_precip != _TARGET_NODATA) &
(annual_precip > 0) &
(baseNdep != _TARGET_NODATA) &
(epnfs_2 != _IC_NODATA))
wdfxm = numpy.zeros(precip.shape, dtype=numpy.float32)
wdfxm[valid_mask] = (
baseNdep[valid_mask] *
(precip[valid_mask] / annual_precip[valid_mask]) +
epnfs_2[valid_mask] *
numpy.minimum(annual_precip[valid_mask], 100.) *
(precip[valid_mask] / annual_precip[valid_mask]))
return wdfxm
def calc_rprpet(pevap, snowmelt, avh2o_3, precip):
"""Calculate the ratio of precipitation to ref evapotranspiration.
The ratio of precipitation or snowmelt to reference evapotranspiration
influences agdefac and bgdefac, the above- and belowground
decomposition factors.
Parameters:
pevap (numpy.ndarray): derived, reference evapotranspiration
            snowmelt (numpy.ndarray): derived, snowmelt occurring this month
avh2o_3 (numpy.ndarray): derived, moisture in top two soil layers
precip (numpy.ndarray): input, precipitation for this month
Returns:
rprpet, the ratio of precipitation or snowmelt to reference
evapotranspiration
"""
valid_mask = (
(pevap != _TARGET_NODATA) &
(snowmelt != _TARGET_NODATA) &
(avh2o_3 != _TARGET_NODATA) &
(~numpy.isclose(precip, precip_nodata)))
rprpet = numpy.empty(pevap.shape, dtype=numpy.float32)
rprpet[:] = _TARGET_NODATA
snowmelt_mask = (valid_mask & (snowmelt > 0) & (pevap > 0))
rprpet[snowmelt_mask] = snowmelt[snowmelt_mask] / pevap[snowmelt_mask]
no_melt_mask = (valid_mask & (snowmelt <= 0))
rprpet[no_melt_mask] = (
(avh2o_3[no_melt_mask] + precip[no_melt_mask]) /
pevap[no_melt_mask])
return rprpet
def calc_bgwfunc(rprpet):
"""Calculate the impact of belowground water content on decomposition.
Bgwfunc reflects the effect of soil moisture on decomposition and is
also used to calculate shoot senescence due to water stress. It is
calculated from the ratio of soil water in the top two soil layers to
reference evapotranspiration.
Parameters:
rprpet (numpy.ndarray): derived, ratio of precipitation or snowmelt
to reference evapotranspiration
Returns:
bgwfunc, the effect of soil moisture on decomposition
"""
valid_mask = (rprpet != _TARGET_NODATA)
bgwfunc = numpy.empty(rprpet.shape, dtype=numpy.float32)
bgwfunc[:] = _TARGET_NODATA
bgwfunc[valid_mask] = (
1. / (1. + 30 * numpy.exp(-8.5 * rprpet[valid_mask])))
bgwfunc[(valid_mask & (rprpet > 9))] = 1
return bgwfunc
def calc_stemp(
biomass, snow, max_temp, min_temp, daylength, pmntmp, pmxtmp):
"""Calculate mean soil surface temperature for decomposition.
Soil surface temperature is modified from monthly temperature inputs
by estimated impacts of shading by aboveground biomass and litter, and
estimated daylength. Surftemp.f
Parameters:
biomass (numpy.ndarray): derived, sum of aboveground biomass and
surface litter across plant functional types
snow (numpy.ndarray): state variable, current snowpack
max_temp (numpy.ndarray): input, maximum temperature this month
min_temp (numpy.ndarray): input, minimum temperature this month
daylength (numpy.ndarray): derived, estimated hours of daylight
pmntmp (numpy.ndarray): parameter, effect of biomass on minimum
surface temperature
pmxtmp (numpy.ndarray): parameter, effect of biomass on maximum
surface temperature
Returns:
stemp, mean soil surface temperature for decomposition
"""
valid_mask = (
(biomass != _TARGET_NODATA) &
(snow != _SV_NODATA) &
(~numpy.isclose(max_temp, max_temp_nodata)) &
(~numpy.isclose(min_temp, min_temp_nodata)) &
(daylength != _TARGET_NODATA) &
(pmntmp != _IC_NODATA) &
(pmxtmp != _IC_NODATA))
tmxs = numpy.empty(biomass.shape, dtype=numpy.float32)
tmxs[valid_mask] = (
max_temp[valid_mask] + (25.4 / (1. + 18. * numpy.exp(
-0.2 * max_temp[valid_mask]))) *
(numpy.exp(pmxtmp[valid_mask] * biomass[valid_mask]) - 0.13))
tmns = numpy.empty(biomass.shape, dtype=numpy.float32)
tmns[valid_mask] = (
min_temp[valid_mask] + pmntmp[valid_mask] * biomass[valid_mask]
- 1.78)
shortday_mask = ((daylength < 12.) & valid_mask)
snow_mask = ((snow > 0) & valid_mask)
tmns_mlt = numpy.empty(biomass.shape, dtype=numpy.float32)
tmns_mlt[valid_mask] = (
((12. - daylength[valid_mask]) * 1.2 + 12.) / 24.)
tmns_mlt[shortday_mask] = (
((12 - daylength[shortday_mask]) * 3. + 12.) / 24.)
tmns_mlt[valid_mask] = numpy.clip(tmns_mlt[valid_mask], 0.05, 0.95)
stemp = numpy.empty(biomass.shape, dtype=numpy.float32)
stemp[:] = _TARGET_NODATA
stemp[valid_mask] = (
(1 - tmns_mlt[valid_mask]) * tmxs[valid_mask] +
tmns_mlt[valid_mask] * tmns[valid_mask])
stemp[snow_mask] = 0.
return stemp
def calc_defac(bgwfunc, stemp, teff_1, teff_2, teff_3, teff_4):
"""Calculate decomposition factor.
The decomposition factor influences the rate of surface and soil
decomposition and reflects the influence of soil temperature and
moisture. Lines 151-200, Cycle.f.
Parameters:
bgwfunc (numpy.ndarray): derived, effect of soil moisture on
decomposition
stemp (numpy.ndarray): derived, average soil surface temperature
teff_1 (numpy.ndarray): parameter, x location of inflection point
for calculating the effect of soil temperature on decomposition
factor
teff_2 (numpy.ndarray): parameter, y location of inflection point
for calculating the effect of soil temperature on decomposition
factor
teff_3 (numpy.ndarray): parameter, step size for calculating the
effect of soil temperature on decomposition factor
teff_4 (numpy.ndarray): parameter, slope of the line at the
inflection point, for calculating the effect of soil
temperature on decomposition factor
Returns:
defac, aboveground and belowground decomposition factor
"""
valid_mask = (
(bgwfunc != _TARGET_NODATA) &
(teff_1 != _IC_NODATA) &
(teff_2 != _IC_NODATA) &
(teff_3 != _IC_NODATA) &
(teff_4 != _IC_NODATA))
tfunc = numpy.empty(bgwfunc.shape, dtype=numpy.float32)
tfunc[:] = _TARGET_NODATA
tfunc[valid_mask] = numpy.maximum(
0.01,
(teff_2[valid_mask] + (teff_3[valid_mask] / numpy.pi) *
numpy.arctan(numpy.pi * teff_4[valid_mask] *
(stemp[valid_mask] - teff_1[valid_mask]))) /
(teff_2[valid_mask] + (teff_3[valid_mask] / numpy.pi) *
numpy.arctan(numpy.pi * teff_4[valid_mask] *
(30.0 - teff_1[valid_mask]))))
defac = numpy.empty(bgwfunc.shape, dtype=numpy.float32)
defac[:] = _TARGET_NODATA
defac[valid_mask] = numpy.maximum(
0., tfunc[valid_mask] * bgwfunc[valid_mask])
return defac
def calc_pheff_struc(pH):
"""Calculate the effect of soil pH on decomp of structural material.
The effect of soil pH on decomposition rate is a multiplier ranging
from 0 to 1. The effect on decomposition of structural material
differs from the effect on decomposition of metabolic material in
the values of two constants.
Parameters:
pH (numpy.ndarray): input, soil pH
Returns:
pheff_struc, the effect of soil pH on decomposition rate of
structural material
"""
valid_mask = (~numpy.isclose(pH, pH_nodata))
pheff_struc = numpy.empty(pH.shape, dtype=numpy.float32)
pheff_struc[valid_mask] = numpy.clip(
(0.5 + (1.1 / numpy.pi) *
numpy.arctan(numpy.pi * 0.7 * (pH[valid_mask] - 4.))), 0, 1)
return pheff_struc
def calc_pheff_metab(pH):
"""Calculate the effect of soil pH on decomp of metabolic material.
The effect of soil pH on decomposition rate is a multiplier ranging
from 0 to 1. The effect on decomposition of structural material
differs from the effect on decomposition of metabolic material in
the values of two constants.
Parameters:
pH (numpy.ndarray): input, soil pH
Returns:
pheff_metab, the effect of soil pH on decomposition rate of
metabolic material
"""
valid_mask = (~numpy.isclose(pH, pH_nodata))
pheff_metab = numpy.empty(pH.shape, dtype=numpy.float32)
pheff_metab[valid_mask] = numpy.clip(
(0.5 + (1.14 / numpy.pi) *
numpy.arctan(numpy.pi * 0.7 * (pH[valid_mask] - 4.8))), 0, 1)
return pheff_metab
def calc_pheff_som3(pH):
"""Calculate the effect of soil pH on decomposition of SOM3.
The effect of soil pH on decomposition rate is a multiplier ranging
        from 0 to 1. The effect on decomposition of SOM3 differs from the
        effect of pH on decomposition of other pools in the values of two
        constants.
Parameters:
pH (numpy.ndarray): input, soil pH
Returns:
pheff_som3, the effect of soil pH on decomposition rate of
SOM3
"""
valid_mask = (~numpy.isclose(pH, pH_nodata))
        pheff_som3 = numpy.empty(pH.shape, dtype=numpy.float32)
        pheff_som3[valid_mask] = numpy.clip(
            (0.5 + (1.1 / numpy.pi) *
                numpy.arctan(numpy.pi * 0.7 * (pH[valid_mask] - 3.))), 0, 1)
        return pheff_som3
precip_nodata = pygeoprocessing.get_raster_info(
aligned_inputs['precip_{}'.format(month_index)])['nodata'][0]
min_temp_nodata = pygeoprocessing.get_raster_info(
aligned_inputs['min_temp_{}'.format(current_month)])['nodata'][0]
max_temp_nodata = pygeoprocessing.get_raster_info(
aligned_inputs['max_temp_{}'.format(current_month)])['nodata'][0]
pH_nodata = pygeoprocessing.get_raster_info(
aligned_inputs['ph_path'])['nodata'][0]
temp_dir = tempfile.mkdtemp(dir=PROCESSING_DIR)
temp_val_dict = {}
for val in [
'd_statv_temp', 'operand_temp', 'shwave', 'pevap', 'rprpet',
'daylength', 'sum_aglivc', 'sum_stdedc', 'biomass', 'stemp',
'defac', 'anerb', 'gromin_1', 'pheff_struc', 'pheff_metab',
'aminrl_1', 'aminrl_2', 'fsol', 'tcflow', 'tosom2',
'net_tosom2', 'tosom1', 'net_tosom1', 'tosom3', 'cleach',
'pheff_som3', 'pflow']:
temp_val_dict[val] = os.path.join(temp_dir, '{}.tif'.format(val))
for iel in [1, 2]:
for val in ['rceto1', 'rceto2', 'rceto3']:
temp_val_dict['{}_{}'.format(val, iel)] = os.path.join(
temp_dir, '{}.tif'.format('{}_{}'.format(val, iel)))
param_val_dict = {}
for val in [
'fwloss_4', 'elitst', 'pmntmp', 'pmxtmp', 'teff_1', 'teff_2',
'teff_3', 'teff_4', 'drain', 'aneref_1', 'aneref_2', 'aneref_3',
'sorpmx', 'pslsrb', 'strmax_1', 'dec1_1', 'pligst_1', 'strmax_2',
'dec1_2', 'pligst_2', 'rsplig', 'ps1co2_1', 'ps1co2_2', 'dec2_1',
'pcemic1_1_1', 'pcemic1_2_1', 'pcemic1_3_1', 'pcemic1_1_2',
'pcemic1_2_2', 'pcemic1_3_2', 'varat1_1_1', 'varat1_2_1',
'varat1_3_1', 'varat1_1_2', 'varat1_2_2', 'varat1_3_2', 'dec2_2',
'pmco2_1', 'pmco2_2', 'rad1p_1_1', 'rad1p_2_1', 'rad1p_3_1',
'rad1p_1_2', 'rad1p_2_2', 'rad1p_3_2', 'dec3_1', 'p1co2a_1',
'varat22_1_1', 'varat22_2_1', 'varat22_3_1', 'varat22_1_2',
'varat22_2_2', 'varat22_3_2', 'dec3_2', 'animpt', 'varat3_1_1',
'varat3_2_1', 'varat3_3_1', 'varat3_1_2', 'varat3_2_2',
'varat3_3_2', 'omlech_3', 'dec5_2', 'p2co2_2', 'dec5_1', 'p2co2_1',
'dec4', 'p3co2', 'cmix', 'pparmn_2', 'psecmn_2', 'nlayer',
'pmnsec_2', 'psecoc1', 'psecoc2', 'epnfs_2']:
target_path = os.path.join(temp_dir, '{}.tif'.format(val))
param_val_dict[val] = target_path
site_to_val = dict(
[(site_code, float(table[val])) for
(site_code, table) in site_param_table.items()])
pygeoprocessing.reclassify_raster(
(aligned_inputs['site_index'], 1), site_to_val, target_path,
gdal.GDT_Float32, _IC_NODATA)
# shwave, shortwave radiation outside the atmosphere
_shortwave_radiation(
aligned_inputs['site_index'], current_month, temp_val_dict['shwave'])
# pet, reference evapotranspiration modified by fwloss parameter
_reference_evapotranspiration(
aligned_inputs['max_temp_{}'.format(current_month)],
aligned_inputs['min_temp_{}'.format(current_month)],
temp_val_dict['shwave'], param_val_dict['fwloss_4'],
temp_val_dict['pevap'])
# rprpet, ratio of precipitation to reference evapotranspiration
pygeoprocessing.raster_calculator(
[(path, 1) for path in [
temp_val_dict['pevap'], month_reg['snowmelt'],
sv_reg['avh2o_3_path'],
aligned_inputs['precip_{}'.format(month_index)]]],
calc_rprpet, temp_val_dict['rprpet'], gdal.GDT_Float32,
_TARGET_NODATA)
# bgwfunc, effect of soil moisture on decomposition
pygeoprocessing.raster_calculator(
[(temp_val_dict['rprpet'], 1)],
calc_bgwfunc, month_reg['bgwfunc'], gdal.GDT_Float32,
_TARGET_NODATA)
# estimated daylength
_calc_daylength(
aligned_inputs['site_index'], current_month,
temp_val_dict['daylength'])
# total biomass for purposes of soil shading
for sv in ['aglivc', 'stdedc']:
weighted_sum_path = temp_val_dict['sum_{}'.format(sv)]
weighted_state_variable_sum(
sv, prev_sv_reg, aligned_inputs, pft_id_set, weighted_sum_path)
pygeoprocessing.raster_calculator(
[(path, 1) for path in [
temp_val_dict['sum_aglivc'], temp_val_dict['sum_stdedc'],
prev_sv_reg['strucc_1_path'], prev_sv_reg['metabc_1_path'],
param_val_dict['elitst']]],
sum_biomass, temp_val_dict['biomass'], gdal.GDT_Float32,
_TARGET_NODATA)
# stemp, soil surface temperature for the purposes of decomposition
pygeoprocessing.raster_calculator(
[(path, 1) for path in [
temp_val_dict['biomass'], sv_reg['snow_path'],
aligned_inputs['max_temp_{}'.format(current_month)],
aligned_inputs['min_temp_{}'.format(current_month)],
temp_val_dict['daylength'], param_val_dict['pmntmp'],
param_val_dict['pmxtmp']]],
calc_stemp, temp_val_dict['stemp'], gdal.GDT_Float32,
_TARGET_NODATA)
# defac, decomposition factor calculated from soil temp and moisture
pygeoprocessing.raster_calculator(
[(path, 1) for path in [
month_reg['bgwfunc'], temp_val_dict['stemp'],
param_val_dict['teff_1'], param_val_dict['teff_2'],
param_val_dict['teff_3'], param_val_dict['teff_4']]],
calc_defac, temp_val_dict['defac'], gdal.GDT_Float32,
_TARGET_NODATA)
# anerb, impact of soil anaerobic conditions on decomposition
pygeoprocessing.raster_calculator(
[(path, 1) for path in [
temp_val_dict['rprpet'], temp_val_dict['pevap'],
param_val_dict['drain'], param_val_dict['aneref_1'],
param_val_dict['aneref_2'], param_val_dict['aneref_3']]],
calc_anerb, temp_val_dict['anerb'], gdal.GDT_Float32,
_TARGET_NODATA)
# initialize gromin_1, gross mineralization of N
pygeoprocessing.new_raster_from_base(
aligned_inputs['site_index'], temp_val_dict['gromin_1'],
gdal.GDT_Float32, [_TARGET_NODATA], fill_value_list=[0])
# pH effect on decomposition for structural material
pygeoprocessing.raster_calculator(
[(aligned_inputs['ph_path'], 1)],
calc_pheff_struc, temp_val_dict['pheff_struc'], gdal.GDT_Float32,
_TARGET_NODATA)
# pH effect on decomposition for metabolic material
pygeoprocessing.raster_calculator(
[(aligned_inputs['ph_path'], 1)],
calc_pheff_metab, temp_val_dict['pheff_metab'], gdal.GDT_Float32,
_TARGET_NODATA)
# initialize aminrl_1 and aminrl_2
shutil.copyfile(prev_sv_reg['minerl_1_1_path'], temp_val_dict['aminrl_1'])
pygeoprocessing.raster_calculator(
[(path, 1) for path in [
prev_sv_reg['minerl_1_2_path'], param_val_dict['sorpmx'],
param_val_dict['pslsrb']]],
fsfunc, temp_val_dict['fsol'], gdal.GDT_Float32, _TARGET_NODATA)
raster_multiplication(
prev_sv_reg['minerl_1_2_path'], _SV_NODATA,
temp_val_dict['fsol'], _TARGET_NODATA,
temp_val_dict['aminrl_2'], _SV_NODATA)
# initialize current month state variables and delta state variable dict
nlayer_max = int(max(
val['nlayer'] for val in site_param_table.values()))
delta_sv_dict = {
'minerl_1_1': os.path.join(temp_dir, 'minerl_1_1.tif'),
'parent_2': os.path.join(temp_dir, 'parent_2.tif'),
'secndy_2': os.path.join(temp_dir, 'secndy_2.tif'),
'occlud': os.path.join(temp_dir, 'occlud.tif'),
}
for lyr in range(1, nlayer_max + 1):
state_var = 'minerl_{}_2'.format(lyr)
shutil.copyfile(
prev_sv_reg['{}_path'.format(state_var)],
sv_reg['{}_path'.format(state_var)])
delta_sv_dict[state_var] = os.path.join(
temp_dir, '{}.tif'.format(state_var))
# initialize mineral N in current sv_reg
for lyr in range(1, nlayer_max + 1):
state_var = 'minerl_{}_1'.format(lyr)
shutil.copyfile(
prev_sv_reg['{}_path'.format(state_var)],
sv_reg['{}_path'.format(state_var)])
for compartment in ['strlig']:
for lyr in [1, 2]:
state_var = '{}_{}'.format(compartment, lyr)
shutil.copyfile(
prev_sv_reg['{}_path'.format(state_var)],
sv_reg['{}_path'.format(state_var)])
for compartment in ['som3']:
state_var = '{}c'.format(compartment)
delta_sv_dict[state_var] = os.path.join(
temp_dir, '{}.tif'.format(state_var))
shutil.copyfile(
prev_sv_reg['{}_path'.format(state_var)],
sv_reg['{}_path'.format(state_var)])
for iel in [1, 2]:
state_var = '{}e_{}'.format(compartment, iel)
delta_sv_dict[state_var] = os.path.join(
temp_dir, '{}.tif'.format(state_var))
shutil.copyfile(
prev_sv_reg['{}_path'.format(state_var)],
sv_reg['{}_path'.format(state_var)])
for compartment in ['struc', 'metab', 'som1', 'som2']:
for lyr in [1, 2]:
state_var = '{}c_{}'.format(compartment, lyr)
delta_sv_dict[state_var] = os.path.join(
temp_dir, '{}.tif'.format(state_var))
shutil.copyfile(
prev_sv_reg['{}_path'.format(state_var)],
sv_reg['{}_path'.format(state_var)])
for iel in [1, 2]:
state_var = '{}e_{}_{}'.format(compartment, lyr, iel)
delta_sv_dict[state_var] = os.path.join(
temp_dir, '{}.tif'.format(state_var))
shutil.copyfile(
prev_sv_reg['{}_path'.format(state_var)],
sv_reg['{}_path'.format(state_var)])
for state_var in ['parent_2', 'secndy_2', 'occlud']:
shutil.copyfile(
prev_sv_reg['{}_path'.format(state_var)],
sv_reg['{}_path'.format(state_var)])
for dtm in range(4):
# initialize change (delta, d) in state variables for this decomp step
for state_var in delta_sv_dict.keys():
pygeoprocessing.new_raster_from_base(
aligned_inputs['site_index'], delta_sv_dict[state_var],
gdal.GDT_Float32, [_IC_NODATA], fill_value_list=[0])
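        # N fixation is scheduled only once per month, in the first of the
        # four decomposition sub-steps.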
if dtm == 0:
# schedule flow of N from atmospheric fixation to surface mineral
pygeoprocessing.raster_calculator(
[(path, 1) for path in [
aligned_inputs['precip_{}'.format(month_index)],
year_reg['annual_precip_path'], year_reg['baseNdep_path'],
param_val_dict['epnfs_2']]],
calc_N_fixation, delta_sv_dict['minerl_1_1'],
gdal.GDT_Float32, _IC_NODATA)
# decomposition of structural material in surface and soil
for lyr in [1, 2]:
if lyr == 1:
pygeoprocessing.raster_calculator(
[(path, 1) for path in [
temp_val_dict['aminrl_1'], temp_val_dict['aminrl_2'],
sv_reg['strucc_1_path'], sv_reg['struce_1_1_path'],
sv_reg['struce_1_2_path'], pp_reg['rnewas_1_1_path'],
pp_reg['rnewas_2_1_path'], param_val_dict['strmax_1'],
temp_val_dict['defac'], param_val_dict['dec1_1'],
param_val_dict['pligst_1'], sv_reg['strlig_1_path'],
temp_val_dict['pheff_struc']]],
calc_tcflow_strucc_1, temp_val_dict['tcflow'],
gdal.GDT_Float32, _IC_NODATA)
else:
pygeoprocessing.raster_calculator(
[(path, 1) for path in [
temp_val_dict['aminrl_1'], temp_val_dict['aminrl_2'],
sv_reg['strucc_2_path'], sv_reg['struce_2_1_path'],
sv_reg['struce_2_2_path'], pp_reg['rnewbs_1_1_path'],
pp_reg['rnewbs_2_1_path'], param_val_dict['strmax_2'],
temp_val_dict['defac'], param_val_dict['dec1_2'],
param_val_dict['pligst_2'], sv_reg['strlig_2_path'],
temp_val_dict['pheff_struc'], temp_val_dict['anerb']]],
calc_tcflow_strucc_2, temp_val_dict['tcflow'],
gdal.GDT_Float32, _IC_NODATA)
shutil.copyfile(
delta_sv_dict['strucc_{}'.format(lyr)],
temp_val_dict['d_statv_temp'])
raster_difference(
temp_val_dict['d_statv_temp'], _IC_NODATA,
temp_val_dict['tcflow'], _IC_NODATA,
delta_sv_dict['strucc_{}'.format(lyr)], _IC_NODATA)
# structural material decomposes first to SOM2
raster_multiplication(
temp_val_dict['tcflow'], _IC_NODATA,
sv_reg['strlig_{}_path'.format(lyr)], _SV_NODATA,
temp_val_dict['tosom2'], _IC_NODATA)
# microbial respiration with decomposition to SOM2
respiration(
temp_val_dict['tosom2'], param_val_dict['rsplig'],
sv_reg['strucc_{}_path'.format(lyr)],
sv_reg['struce_{}_1_path'.format(lyr)],
delta_sv_dict['struce_{}_1'.format(lyr)],
delta_sv_dict['minerl_1_1'],
gromin_1_path=temp_val_dict['gromin_1'])
respiration(
temp_val_dict['tosom2'], param_val_dict['rsplig'],
sv_reg['strucc_{}_path'.format(lyr)],
sv_reg['struce_{}_2_path'.format(lyr)],
delta_sv_dict['struce_{}_2'.format(lyr)],
delta_sv_dict['minerl_1_2'])
pygeoprocessing.raster_calculator(
[(path, 1) for path in [
temp_val_dict['tosom2'], param_val_dict['rsplig']]],
calc_net_cflow, temp_val_dict['net_tosom2'], gdal.GDT_Float32,
_IC_NODATA)
shutil.copyfile(
delta_sv_dict['som2c_{}'.format(lyr)],
temp_val_dict['d_statv_temp'])
raster_sum(
temp_val_dict['d_statv_temp'], _IC_NODATA,
temp_val_dict['net_tosom2'], _IC_NODATA,
delta_sv_dict['som2c_{}'.format(lyr)], _IC_NODATA)
if lyr == 1:
rcetob = 'rnewas'
else:
rcetob = 'rnewbs'
# N and P flows from STRUC to SOM2
nutrient_flow(
temp_val_dict['net_tosom2'],
sv_reg['strucc_{}_path'.format(lyr)],
sv_reg['struce_{}_1_path'.format(lyr)],
pp_reg['{}_1_2_path'.format(rcetob)],
sv_reg['minerl_1_1_path'],
delta_sv_dict['struce_{}_1'.format(lyr)],
delta_sv_dict['som2e_{}_1'.format(lyr)],
delta_sv_dict['minerl_1_1'],
gromin_path=temp_val_dict['gromin_1'])
nutrient_flow(
temp_val_dict['net_tosom2'],
sv_reg['strucc_{}_path'.format(lyr)],
sv_reg['struce_{}_2_path'.format(lyr)],
pp_reg['{}_2_2_path'.format(rcetob)],
sv_reg['minerl_1_2_path'],
delta_sv_dict['struce_{}_2'.format(lyr)],
delta_sv_dict['som2e_{}_2'.format(lyr)],
delta_sv_dict['minerl_1_2'])
# structural material decomposes next to SOM1
raster_difference(
temp_val_dict['tcflow'], _IC_NODATA, temp_val_dict['tosom2'],
_IC_NODATA, temp_val_dict['tosom1'], _IC_NODATA)
# microbial respiration with decomposition to SOM1
respiration(
temp_val_dict['tosom1'],
param_val_dict['ps1co2_{}'.format(lyr)],
sv_reg['strucc_{}_path'.format(lyr)],
sv_reg['struce_{}_1_path'.format(lyr)],
delta_sv_dict['struce_{}_1'.format(lyr)],
delta_sv_dict['minerl_1_1'],
gromin_1_path=temp_val_dict['gromin_1'])
respiration(
temp_val_dict['tosom1'],
param_val_dict['ps1co2_{}'.format(lyr)],
sv_reg['strucc_{}_path'.format(lyr)],
sv_reg['struce_{}_2_path'.format(lyr)],
delta_sv_dict['struce_{}_2'.format(lyr)],
delta_sv_dict['minerl_1_2'])
pygeoprocessing.raster_calculator(
[(path, 1) for path in [
temp_val_dict['tosom1'],
param_val_dict['ps1co2_{}'.format(lyr)]]],
calc_net_cflow, temp_val_dict['net_tosom1'], gdal.GDT_Float32,
_IC_NODATA)
shutil.copyfile(
delta_sv_dict['som1c_{}'.format(lyr)],
temp_val_dict['d_statv_temp'])
raster_sum(
temp_val_dict['d_statv_temp'], _IC_NODATA,
temp_val_dict['net_tosom1'], _IC_NODATA,
delta_sv_dict['som1c_{}'.format(lyr)], _IC_NODATA)
if lyr == 1:
rcetob = 'rnewas'
else:
rcetob = 'rnewbs'
# N and P flows from STRUC to SOM1
nutrient_flow(
temp_val_dict['net_tosom1'],
sv_reg['strucc_{}_path'.format(lyr)],
sv_reg['struce_{}_1_path'.format(lyr)],
pp_reg['{}_1_1_path'.format(rcetob)],
sv_reg['minerl_1_1_path'],
delta_sv_dict['struce_{}_1'.format(lyr)],
delta_sv_dict['som1e_{}_1'.format(lyr)],
delta_sv_dict['minerl_1_1'],
gromin_path=temp_val_dict['gromin_1'])
nutrient_flow(
temp_val_dict['net_tosom1'],
sv_reg['strucc_{}_path'.format(lyr)],
sv_reg['struce_{}_2_path'.format(lyr)],
pp_reg['{}_2_1_path'.format(rcetob)],
sv_reg['minerl_1_2_path'],
delta_sv_dict['struce_{}_2'.format(lyr)],
delta_sv_dict['som1e_{}_2'.format(lyr)],
delta_sv_dict['minerl_1_2'])
# decomposition of metabolic material in surface and soil to SOM1
for lyr in [1, 2]:
if lyr == 1:
for iel in [1, 2]:
# required ratio for surface metabolic decomposing to SOM1
pygeoprocessing.raster_calculator(
[(path, 1) for path in [
sv_reg['metabe_1_{}_path'.format(iel)],
sv_reg['metabc_1_path'],
param_val_dict['pcemic1_1_{}'.format(iel)],
param_val_dict['pcemic1_2_{}'.format(iel)],
param_val_dict['pcemic1_3_{}'.format(iel)]]],
_aboveground_ratio,
temp_val_dict['rceto1_{}'.format(iel)],
gdal.GDT_Float32, _TARGET_NODATA)
pygeoprocessing.raster_calculator(
[(path, 1) for path in [
temp_val_dict['aminrl_1'], temp_val_dict['aminrl_2'],
sv_reg['metabc_1_path'], sv_reg['metabe_1_1_path'],
sv_reg['metabe_1_2_path'], temp_val_dict['rceto1_1'],
temp_val_dict['rceto1_2'], temp_val_dict['defac'],
param_val_dict['dec2_1'],
temp_val_dict['pheff_metab']]],
calc_tcflow_surface, temp_val_dict['tcflow'],
gdal.GDT_Float32, _IC_NODATA)
else:
for iel in [1, 2]:
# required ratio for soil metabolic decomposing to SOM1
pygeoprocessing.raster_calculator(
[(path, 1) for path in [
temp_val_dict['aminrl_{}'.format(iel)],
param_val_dict['varat1_1_{}'.format(iel)],
param_val_dict['varat1_2_{}'.format(iel)],
param_val_dict['varat1_3_{}'.format(iel)]]],
_belowground_ratio,
temp_val_dict['rceto1_{}'.format(iel)],
gdal.GDT_Float32, _TARGET_NODATA)
pygeoprocessing.raster_calculator(
[(path, 1) for path in [
temp_val_dict['aminrl_1'], temp_val_dict['aminrl_2'],
sv_reg['metabc_2_path'], sv_reg['metabe_2_1_path'],
sv_reg['metabe_2_2_path'], temp_val_dict['rceto1_1'],
temp_val_dict['rceto1_2'], temp_val_dict['defac'],
param_val_dict['dec2_2'], temp_val_dict['pheff_metab'],
temp_val_dict['anerb']]],
calc_tcflow_soil, temp_val_dict['tcflow'],
gdal.GDT_Float32, _IC_NODATA)
shutil.copyfile(
delta_sv_dict['metabc_{}'.format(lyr)],
temp_val_dict['d_statv_temp'])
raster_difference(
temp_val_dict['d_statv_temp'], _IC_NODATA,
temp_val_dict['tcflow'], _IC_NODATA,
delta_sv_dict['metabc_{}'.format(lyr)], _IC_NODATA)
# microbial respiration with decomposition to SOM1
respiration(
temp_val_dict['tcflow'],
param_val_dict['pmco2_{}'.format(lyr)],
sv_reg['metabc_{}_path'.format(lyr)],
sv_reg['metabe_{}_1_path'.format(lyr)],
delta_sv_dict['metabe_{}_1'.format(lyr)],
delta_sv_dict['minerl_1_1'],
gromin_1_path=temp_val_dict['gromin_1'])
respiration(
temp_val_dict['tcflow'],
param_val_dict['pmco2_{}'.format(lyr)],
sv_reg['metabc_{}_path'.format(lyr)],
sv_reg['metabe_{}_2_path'.format(lyr)],
delta_sv_dict['metabe_{}_2'.format(lyr)],
delta_sv_dict['minerl_1_2'])
pygeoprocessing.raster_calculator(
[(path, 1) for path in [
temp_val_dict['tcflow'],
param_val_dict['pmco2_{}'.format(lyr)]]],
calc_net_cflow, temp_val_dict['net_tosom1'], gdal.GDT_Float32,
_IC_NODATA)
shutil.copyfile(
delta_sv_dict['som1c_{}'.format(lyr)],
temp_val_dict['d_statv_temp'])
raster_sum(
temp_val_dict['d_statv_temp'], _IC_NODATA,
temp_val_dict['net_tosom1'], _IC_NODATA,
delta_sv_dict['som1c_{}'.format(lyr)], _IC_NODATA)
nutrient_flow(
temp_val_dict['net_tosom1'],
sv_reg['metabc_{}_path'.format(lyr)],
sv_reg['metabe_{}_1_path'.format(lyr)],
temp_val_dict['rceto1_1'], sv_reg['minerl_1_1_path'],
delta_sv_dict['metabe_{}_1'.format(lyr)],
delta_sv_dict['som1e_{}_1'.format(lyr)],
delta_sv_dict['minerl_1_1'],
gromin_path=temp_val_dict['gromin_1'])
nutrient_flow(
temp_val_dict['net_tosom1'],
sv_reg['metabc_{}_path'.format(lyr)],
sv_reg['metabe_{}_2_path'.format(lyr)],
temp_val_dict['rceto1_2'], sv_reg['minerl_1_2_path'],
delta_sv_dict['metabe_{}_2'.format(lyr)],
delta_sv_dict['som1e_{}_2'.format(lyr)],
delta_sv_dict['minerl_1_2'])
# decomposition of surface SOM1 to surface SOM2: line 63 Somdec.f
for iel in [1, 2]:
pygeoprocessing.raster_calculator(
[(path, 1) for path in [
sv_reg['som1c_1_path'],
sv_reg['som1e_1_{}_path'.format(iel)],
param_val_dict['rad1p_1_{}'.format(iel)],
param_val_dict['rad1p_2_{}'.format(iel)],
param_val_dict['rad1p_3_{}'.format(iel)],
param_val_dict['pcemic1_2_{}'.format(iel)]]],
calc_surface_som2_ratio,
temp_val_dict['rceto2_{}'.format(iel)],
gdal.GDT_Float32, _TARGET_NODATA)
pygeoprocessing.raster_calculator(
[(path, 1) for path in [
temp_val_dict['aminrl_1'], temp_val_dict['aminrl_2'],
sv_reg['som1c_1_path'], sv_reg['som1e_1_1_path'],
sv_reg['som1e_1_2_path'], temp_val_dict['rceto2_1'],
temp_val_dict['rceto2_2'], temp_val_dict['defac'],
param_val_dict['dec3_1'],
temp_val_dict['pheff_struc']]],
calc_tcflow_surface, temp_val_dict['tcflow'],
gdal.GDT_Float32, _IC_NODATA)
shutil.copyfile(
delta_sv_dict['som1c_1'], temp_val_dict['d_statv_temp'])
raster_difference(
temp_val_dict['d_statv_temp'], _IC_NODATA,
temp_val_dict['tcflow'], _IC_NODATA,
delta_sv_dict['som1c_1'], _IC_NODATA)
# microbial respiration with decomposition to SOM2
respiration(
temp_val_dict['tcflow'], param_val_dict['p1co2a_1'],
sv_reg['som1c_1_path'], sv_reg['som1e_1_1_path'],
delta_sv_dict['som1e_1_1'], delta_sv_dict['minerl_1_1'],
gromin_1_path=temp_val_dict['gromin_1'])
respiration(
temp_val_dict['tcflow'], param_val_dict['p1co2a_1'],
sv_reg['som1c_1_path'], sv_reg['som1e_1_2_path'],
delta_sv_dict['som1e_1_2'], delta_sv_dict['minerl_1_2'])
pygeoprocessing.raster_calculator(
[(path, 1) for path in [
temp_val_dict['tcflow'], param_val_dict['p1co2a_1']]],
calc_net_cflow, temp_val_dict['net_tosom2'], gdal.GDT_Float32,
_IC_NODATA)
shutil.copyfile(
delta_sv_dict['som2c_1'], temp_val_dict['d_statv_temp'])
raster_sum(
temp_val_dict['d_statv_temp'], _IC_NODATA,
temp_val_dict['net_tosom2'], _IC_NODATA,
delta_sv_dict['som2c_1'], _IC_NODATA)
# N and P flows from som1e_1 to som2e_1, line 123 Somdec.f
nutrient_flow(
temp_val_dict['net_tosom2'], sv_reg['som1c_1_path'],
sv_reg['som1e_1_1_path'], temp_val_dict['rceto2_1'],
sv_reg['minerl_1_1_path'], delta_sv_dict['som1e_1_1'],
delta_sv_dict['som2e_1_1'], delta_sv_dict['minerl_1_1'],
gromin_path=temp_val_dict['gromin_1'])
nutrient_flow(
temp_val_dict['net_tosom2'], sv_reg['som1c_1_path'],
sv_reg['som1e_1_2_path'], temp_val_dict['rceto2_2'],
sv_reg['minerl_1_2_path'], delta_sv_dict['som1e_1_2'],
delta_sv_dict['som2e_1_2'], delta_sv_dict['minerl_1_2'])
# soil SOM1 decomposes to soil SOM3 and SOM2, line 137 Somdec.f
for iel in [1, 2]:
# required ratio for soil SOM1 decomposing to SOM2
pygeoprocessing.raster_calculator(
[(path, 1) for path in [
temp_val_dict['aminrl_{}'.format(iel)],
param_val_dict['varat22_1_{}'.format(iel)],
param_val_dict['varat22_2_{}'.format(iel)],
param_val_dict['varat22_3_{}'.format(iel)]]],
_belowground_ratio,
temp_val_dict['rceto2_{}'.format(iel)],
gdal.GDT_Float32, _TARGET_NODATA)
pygeoprocessing.raster_calculator(
[(path, 1) for path in [
temp_val_dict['aminrl_1'], temp_val_dict['aminrl_2'],
sv_reg['som1c_2_path'], sv_reg['som1e_2_1_path'],
sv_reg['som1e_2_2_path'], temp_val_dict['rceto2_1'],
temp_val_dict['rceto2_2'], temp_val_dict['defac'],
param_val_dict['dec3_2'], pp_reg['eftext_path'],
temp_val_dict['anerb'], temp_val_dict['pheff_metab']]],
calc_tcflow_som1c_2, temp_val_dict['tcflow'],
gdal.GDT_Float32, _IC_NODATA)
shutil.copyfile(
delta_sv_dict['som1c_2'], temp_val_dict['d_statv_temp'])
raster_difference(
temp_val_dict['d_statv_temp'], _IC_NODATA,
temp_val_dict['tcflow'], _IC_NODATA,
delta_sv_dict['som1c_2'], _IC_NODATA)
# microbial respiration with decomposition to SOM3, line 179
respiration(
temp_val_dict['tcflow'], pp_reg['p1co2_2_path'],
sv_reg['som1c_2_path'], sv_reg['som1e_2_1_path'],
delta_sv_dict['som1e_2_1'], delta_sv_dict['minerl_1_1'],
gromin_1_path=temp_val_dict['gromin_1'])
respiration(
temp_val_dict['tcflow'], pp_reg['p1co2_2_path'],
sv_reg['som1c_2_path'], sv_reg['som1e_2_2_path'],
delta_sv_dict['som1e_2_2'], delta_sv_dict['minerl_1_2'])
pygeoprocessing.raster_calculator(
[(path, 1) for path in [
temp_val_dict['tcflow'], pp_reg['fps1s3_path'],
param_val_dict['animpt'], temp_val_dict['anerb']]],
calc_som3_flow, temp_val_dict['tosom3'], gdal.GDT_Float32,
_IC_NODATA)
shutil.copyfile(delta_sv_dict['som3c'], temp_val_dict['d_statv_temp'])
raster_sum(
temp_val_dict['d_statv_temp'], _IC_NODATA,
temp_val_dict['tosom3'], _IC_NODATA,
delta_sv_dict['som3c'], _IC_NODATA)
for iel in [1, 2]:
# required ratio for soil SOM1 decomposing to SOM3, line 198
pygeoprocessing.raster_calculator(
[(path, 1) for path in [
temp_val_dict['aminrl_{}'.format(iel)],
param_val_dict['varat3_1_{}'.format(iel)],
param_val_dict['varat3_2_{}'.format(iel)],
param_val_dict['varat3_3_{}'.format(iel)]]],
_belowground_ratio,
temp_val_dict['rceto3_{}'.format(iel)],
gdal.GDT_Float32, _TARGET_NODATA)
nutrient_flow(
temp_val_dict['tosom3'], sv_reg['som1c_2_path'],
sv_reg['som1e_2_1_path'], temp_val_dict['rceto3_1'],
sv_reg['minerl_1_1_path'], delta_sv_dict['som1e_2_1'],
delta_sv_dict['som3e_1'], delta_sv_dict['minerl_1_1'],
gromin_path=temp_val_dict['gromin_1'])
nutrient_flow(
temp_val_dict['tosom3'], sv_reg['som1c_2_path'],
sv_reg['som1e_2_2_path'], temp_val_dict['rceto3_2'],
sv_reg['minerl_1_2_path'], delta_sv_dict['som1e_2_2'],
delta_sv_dict['som3e_2'], delta_sv_dict['minerl_1_2'])
# organic leaching: line 204 Somdec.f
pygeoprocessing.raster_calculator(
[(path, 1) for path in [
month_reg['amov_2'], temp_val_dict['tcflow'],
param_val_dict['omlech_3'], pp_reg['orglch_path']]],
calc_c_leach, temp_val_dict['cleach'], gdal.GDT_Float32,
_TARGET_NODATA)
for iel in [1, 2]:
remove_leached_iel(
sv_reg['som1c_2_path'], sv_reg['som1e_2_{}_path'.format(iel)],
temp_val_dict['cleach'],
delta_sv_dict['som1e_2_{}'.format(iel)], iel)
# rest of flow from soil SOM1 goes to SOM2
pygeoprocessing.raster_calculator(
[(path, 1) for path in [
temp_val_dict['tcflow'], pp_reg['p1co2_2_path'],
temp_val_dict['tosom3'], temp_val_dict['cleach']]],
calc_net_cflow_tosom2, temp_val_dict['net_tosom2'],
gdal.GDT_Float32, _IC_NODATA)
shutil.copyfile(
delta_sv_dict['som2c_2'], temp_val_dict['d_statv_temp'])
raster_sum(
temp_val_dict['d_statv_temp'], _IC_NODATA,
temp_val_dict['net_tosom2'], _IC_NODATA,
delta_sv_dict['som2c_2'], _IC_NODATA)
# N and P flows from soil SOM1 to soil SOM2, line 257
nutrient_flow(
temp_val_dict['net_tosom2'],
sv_reg['som1c_2_path'], sv_reg['som1e_2_1_path'],
temp_val_dict['rceto2_1'], sv_reg['minerl_1_1_path'],
delta_sv_dict['som1e_2_1'], delta_sv_dict['som2e_2_1'],
delta_sv_dict['minerl_1_1'],
gromin_path=temp_val_dict['gromin_1'])
nutrient_flow(
temp_val_dict['net_tosom2'],
sv_reg['som1c_2_path'], sv_reg['som1e_2_2_path'],
temp_val_dict['rceto2_2'], sv_reg['minerl_1_2_path'],
delta_sv_dict['som1e_2_2'], delta_sv_dict['som2e_2_2'],
delta_sv_dict['minerl_1_2'])
# soil SOM2 decomposing to soil SOM1 and SOM3, line 269
pygeoprocessing.raster_calculator(
[(path, 1) for path in [
temp_val_dict['aminrl_1'], temp_val_dict['aminrl_2'],
sv_reg['som2c_2_path'], sv_reg['som2e_2_1_path'],
sv_reg['som2e_2_2_path'], temp_val_dict['rceto1_1'],
temp_val_dict['rceto1_2'], temp_val_dict['defac'],
param_val_dict['dec5_2'], temp_val_dict['pheff_metab'],
temp_val_dict['anerb']]],
calc_tcflow_soil, temp_val_dict['tcflow'],
gdal.GDT_Float32, _IC_NODATA)
shutil.copyfile(
delta_sv_dict['som2c_2'], temp_val_dict['d_statv_temp'])
raster_difference(
temp_val_dict['d_statv_temp'], _IC_NODATA,
temp_val_dict['tcflow'], _IC_NODATA,
delta_sv_dict['som2c_2'], _IC_NODATA)
respiration(
temp_val_dict['tcflow'], param_val_dict['pmco2_2'],
sv_reg['som2c_2_path'], sv_reg['som2e_2_1_path'],
delta_sv_dict['som2e_2_1'], delta_sv_dict['minerl_1_1'],
gromin_1_path=temp_val_dict['gromin_1'])
respiration(
temp_val_dict['tcflow'], param_val_dict['pmco2_2'],
sv_reg['som2c_2_path'], sv_reg['som2e_2_2_path'],
delta_sv_dict['som2e_2_2'], delta_sv_dict['minerl_1_2'])
# soil SOM2 flows first to SOM3
pygeoprocessing.raster_calculator(
[(path, 1) for path in [
temp_val_dict['tcflow'], pp_reg['fps2s3_path'],
param_val_dict['animpt'], temp_val_dict['anerb']]],
calc_som3_flow, temp_val_dict['tosom3'], gdal.GDT_Float32,
_IC_NODATA)
shutil.copyfile(delta_sv_dict['som3c'], temp_val_dict['d_statv_temp'])
raster_sum(
temp_val_dict['d_statv_temp'], _IC_NODATA,
temp_val_dict['tosom3'], _IC_NODATA,
delta_sv_dict['som3c'], _IC_NODATA)
nutrient_flow(
temp_val_dict['tosom3'], sv_reg['som2c_2_path'],
sv_reg['som2e_2_1_path'], temp_val_dict['rceto3_1'],
sv_reg['minerl_1_1_path'], delta_sv_dict['som2e_2_1'],
delta_sv_dict['som3e_1'], delta_sv_dict['minerl_1_1'],
gromin_path=temp_val_dict['gromin_1'])
nutrient_flow(
temp_val_dict['tosom3'], sv_reg['som2c_2_path'],
sv_reg['som2e_2_2_path'], temp_val_dict['rceto3_2'],
sv_reg['minerl_1_2_path'], delta_sv_dict['som2e_2_2'],
delta_sv_dict['som3e_2'], delta_sv_dict['minerl_1_2'])
# rest of flow from soil SOM2 goes to soil SOM1
pygeoprocessing.raster_calculator(
[(path, 1) for path in [
temp_val_dict['tcflow'], param_val_dict['p2co2_2'],
temp_val_dict['tosom3']]],
calc_net_cflow_tosom1, temp_val_dict['net_tosom1'],
gdal.GDT_Float32, _IC_NODATA)
shutil.copyfile(
delta_sv_dict['som1c_2'], temp_val_dict['d_statv_temp'])
raster_sum(
temp_val_dict['d_statv_temp'], _IC_NODATA,
temp_val_dict['net_tosom1'], _IC_NODATA,
delta_sv_dict['som1c_2'], _IC_NODATA)
nutrient_flow(
temp_val_dict['net_tosom1'], sv_reg['som2c_2_path'],
sv_reg['som2e_2_1_path'], temp_val_dict['rceto1_1'],
sv_reg['minerl_1_1_path'], delta_sv_dict['som2e_2_1'],
delta_sv_dict['som1e_2_1'], delta_sv_dict['minerl_1_1'],
gromin_path=temp_val_dict['gromin_1'])
nutrient_flow(
temp_val_dict['net_tosom1'], sv_reg['som2c_2_path'],
sv_reg['som2e_2_2_path'], temp_val_dict['rceto1_2'],
sv_reg['minerl_1_2_path'], delta_sv_dict['som2e_2_2'],
delta_sv_dict['som1e_2_2'], delta_sv_dict['minerl_1_2'])
# surface SOM2 decomposes to surface SOM1
pygeoprocessing.raster_calculator(
[(path, 1) for path in [
temp_val_dict['aminrl_1'], temp_val_dict['aminrl_2'],
sv_reg['som2c_1_path'], sv_reg['som2e_1_1_path'],
sv_reg['som2e_1_2_path'], temp_val_dict['rceto1_1'],
temp_val_dict['rceto1_2'], temp_val_dict['defac'],
param_val_dict['dec5_1'], temp_val_dict['pheff_struc']]],
calc_tcflow_surface, temp_val_dict['tcflow'],
gdal.GDT_Float32, _IC_NODATA)
shutil.copyfile(
delta_sv_dict['som2c_1'], temp_val_dict['d_statv_temp'])
raster_difference(
temp_val_dict['d_statv_temp'], _IC_NODATA,
temp_val_dict['tcflow'], _IC_NODATA,
delta_sv_dict['som2c_1'], _IC_NODATA)
respiration(
temp_val_dict['tcflow'], param_val_dict['p2co2_1'],
sv_reg['som2c_1_path'], sv_reg['som2e_1_1_path'],
delta_sv_dict['som2e_1_1'], delta_sv_dict['minerl_1_1'],
gromin_1_path=temp_val_dict['gromin_1'])
respiration(
temp_val_dict['tcflow'], param_val_dict['p2co2_1'],
sv_reg['som2c_1_path'], sv_reg['som2e_1_2_path'],
delta_sv_dict['som2e_1_2'], delta_sv_dict['minerl_1_2'])
pygeoprocessing.raster_calculator(
[(path, 1) for path in [
temp_val_dict['tcflow'], param_val_dict['p2co2_1']]],
calc_net_cflow, temp_val_dict['tosom1'], gdal.GDT_Float32,
_IC_NODATA)
shutil.copyfile(
delta_sv_dict['som1c_1'], temp_val_dict['d_statv_temp'])
raster_sum(
temp_val_dict['d_statv_temp'], _IC_NODATA,
temp_val_dict['tosom1'], _IC_NODATA,
delta_sv_dict['som1c_1'], _IC_NODATA)
nutrient_flow(
temp_val_dict['tosom1'], sv_reg['som2c_1_path'],
sv_reg['som2e_1_1_path'], temp_val_dict['rceto1_1'],
sv_reg['minerl_1_1_path'], delta_sv_dict['som2e_1_1'],
delta_sv_dict['som1e_1_1'], delta_sv_dict['minerl_1_1'],
gromin_path=temp_val_dict['gromin_1'])
nutrient_flow(
temp_val_dict['tosom1'], sv_reg['som2c_1_path'],
sv_reg['som2e_1_2_path'], temp_val_dict['rceto1_2'],
sv_reg['minerl_1_2_path'], delta_sv_dict['som2e_1_2'],
delta_sv_dict['som1e_1_2'], delta_sv_dict['minerl_1_2'])
# SOM3 decomposing to soil SOM1
# pH effect on decomposition of SOM3
pygeoprocessing.raster_calculator(
[(aligned_inputs['ph_path'], 1)],
calc_pheff_som3, temp_val_dict['pheff_som3'], gdal.GDT_Float32,
_TARGET_NODATA)
pygeoprocessing.raster_calculator(
[(path, 1) for path in [
temp_val_dict['aminrl_1'], temp_val_dict['aminrl_2'],
sv_reg['som3c_path'], sv_reg['som3e_1_path'],
sv_reg['som3e_2_path'], temp_val_dict['rceto1_1'],
temp_val_dict['rceto1_2'], temp_val_dict['defac'],
param_val_dict['dec4'], temp_val_dict['pheff_som3'],
temp_val_dict['anerb']]],
calc_tcflow_soil, temp_val_dict['tcflow'],
gdal.GDT_Float32, _IC_NODATA)
shutil.copyfile(
delta_sv_dict['som3c'], temp_val_dict['d_statv_temp'])
raster_difference(
temp_val_dict['d_statv_temp'], _IC_NODATA,
temp_val_dict['tcflow'], _IC_NODATA,
delta_sv_dict['som3c'], _IC_NODATA)
respiration(
temp_val_dict['tcflow'], param_val_dict['p3co2'],
sv_reg['som3c_path'], sv_reg['som3e_1_path'],
delta_sv_dict['som3e_1'], delta_sv_dict['minerl_1_1'],
gromin_1_path=temp_val_dict['gromin_1'])
respiration(
temp_val_dict['tcflow'], param_val_dict['p3co2'],
sv_reg['som3c_path'], sv_reg['som3e_2_path'],
delta_sv_dict['som3e_2'], delta_sv_dict['minerl_1_2'])
pygeoprocessing.raster_calculator(
[(path, 1) for path in [
temp_val_dict['tcflow'], param_val_dict['p3co2']]],
calc_net_cflow, temp_val_dict['tosom1'], gdal.GDT_Float32,
_IC_NODATA)
shutil.copyfile(
delta_sv_dict['som1c_2'], temp_val_dict['d_statv_temp'])
raster_sum(
temp_val_dict['d_statv_temp'], _IC_NODATA,
temp_val_dict['tosom1'], _IC_NODATA,
delta_sv_dict['som1c_2'], _IC_NODATA)
nutrient_flow(
temp_val_dict['tosom1'], sv_reg['som3c_path'],
sv_reg['som3e_1_path'], temp_val_dict['rceto1_1'],
sv_reg['minerl_1_1_path'], delta_sv_dict['som3e_1'],
delta_sv_dict['som1e_2_1'], delta_sv_dict['minerl_1_1'],
gromin_path=temp_val_dict['gromin_1'])
nutrient_flow(
temp_val_dict['tosom1'], sv_reg['som3c_path'],
sv_reg['som3e_2_path'], temp_val_dict['rceto1_2'],
sv_reg['minerl_1_2_path'], delta_sv_dict['som3e_2'],
delta_sv_dict['som1e_2_2'], delta_sv_dict['minerl_1_2'])
# Surface SOM2 flows to soil SOM2 via mixing
pygeoprocessing.raster_calculator(
[(path, 1) for path in [
sv_reg['som2c_1_path'], param_val_dict['cmix'],
temp_val_dict['defac']]],
calc_som2_flow, temp_val_dict['tcflow'],
gdal.GDT_Float32, _IC_NODATA)
shutil.copyfile(
delta_sv_dict['som2c_1'], temp_val_dict['d_statv_temp'])
raster_difference(
temp_val_dict['d_statv_temp'], _IC_NODATA,
temp_val_dict['tcflow'], _IC_NODATA,
delta_sv_dict['som2c_1'], _IC_NODATA)
shutil.copyfile(
delta_sv_dict['som2c_2'], temp_val_dict['d_statv_temp'])
raster_sum(
temp_val_dict['d_statv_temp'], _IC_NODATA,
temp_val_dict['tcflow'], _IC_NODATA,
delta_sv_dict['som2c_2'], _IC_NODATA)
# ratios for N and P entering soil som2 via mixing
raster_division(
sv_reg['som2c_1_path'], _SV_NODATA,
sv_reg['som2e_1_1_path'], _IC_NODATA,
temp_val_dict['rceto2_1'], _IC_NODATA)
raster_division(
sv_reg['som2c_1_path'], _SV_NODATA,
sv_reg['som2e_1_2_path'], _IC_NODATA,
temp_val_dict['rceto2_2'], _IC_NODATA)
nutrient_flow(
temp_val_dict['tcflow'], sv_reg['som2c_1_path'],
sv_reg['som2e_1_1_path'], temp_val_dict['rceto2_1'],
sv_reg['minerl_1_1_path'], delta_sv_dict['som2e_1_1'],
delta_sv_dict['som2e_2_1'], delta_sv_dict['minerl_1_1'],
gromin_path=temp_val_dict['gromin_1'])
nutrient_flow(
temp_val_dict['tcflow'], sv_reg['som2c_1_path'],
sv_reg['som2e_1_2_path'], temp_val_dict['rceto2_2'],
sv_reg['minerl_1_2_path'], delta_sv_dict['som2e_1_2'],
delta_sv_dict['som2e_2_2'], delta_sv_dict['minerl_1_2'])
# P flow from parent to mineral: Pschem.f
pygeoprocessing.raster_calculator(
[(path, 1) for path in [
sv_reg['parent_2_path'], param_val_dict['pparmn_2'],
temp_val_dict['defac']]],
calc_pflow, temp_val_dict['pflow'], gdal.GDT_Float32,
_IC_NODATA)
shutil.copyfile(
delta_sv_dict['parent_2'], temp_val_dict['d_statv_temp'])
raster_difference(
temp_val_dict['d_statv_temp'], _IC_NODATA,
temp_val_dict['pflow'], _IC_NODATA,
delta_sv_dict['parent_2'], _IC_NODATA)
shutil.copyfile(
delta_sv_dict['minerl_1_2'], temp_val_dict['d_statv_temp'])
raster_sum(
temp_val_dict['d_statv_temp'], _IC_NODATA,
temp_val_dict['pflow'], _IC_NODATA,
delta_sv_dict['minerl_1_2'], _IC_NODATA)
# P flow from secondary to mineral
pygeoprocessing.raster_calculator(
[(path, 1) for path in [
sv_reg['secndy_2_path'], param_val_dict['psecmn_2'],
temp_val_dict['defac']]],
calc_pflow, temp_val_dict['pflow'], gdal.GDT_Float64,
_IC_NODATA)
shutil.copyfile(
delta_sv_dict['secndy_2'], temp_val_dict['d_statv_temp'])
raster_difference(
temp_val_dict['d_statv_temp'], _IC_NODATA,
temp_val_dict['pflow'], _IC_NODATA,
delta_sv_dict['secndy_2'], _IC_NODATA)
shutil.copyfile(
delta_sv_dict['minerl_1_2'], temp_val_dict['d_statv_temp'])
raster_sum(
temp_val_dict['d_statv_temp'], _IC_NODATA,
temp_val_dict['pflow'], _IC_NODATA,
delta_sv_dict['minerl_1_2'], _IC_NODATA)
# P flow from mineral to secondary
for lyr in range(1, nlayer_max + 1):
pygeoprocessing.raster_calculator(
[(path, 1) for path in [
sv_reg['minerl_{}_2_path'.format(lyr)],
param_val_dict['pmnsec_2'], temp_val_dict['fsol'],
temp_val_dict['defac']]],
calc_pflow_to_secndy, temp_val_dict['pflow'], gdal.GDT_Float64,
_IC_NODATA)
shutil.copyfile(
delta_sv_dict['minerl_{}_2'.format(lyr)],
temp_val_dict['d_statv_temp'])
raster_difference(
temp_val_dict['d_statv_temp'], _IC_NODATA,
temp_val_dict['pflow'], _IC_NODATA,
delta_sv_dict['minerl_{}_2'.format(lyr)], _IC_NODATA)
shutil.copyfile(
delta_sv_dict['secndy_2'], temp_val_dict['d_statv_temp'])
raster_sum(
temp_val_dict['d_statv_temp'], _IC_NODATA,
temp_val_dict['pflow'], _IC_NODATA,
delta_sv_dict['secndy_2'], _IC_NODATA)
# P flow from secondary to occluded
pygeoprocessing.raster_calculator(
[(path, 1) for path in [
sv_reg['secndy_2_path'], param_val_dict['psecoc1'],
temp_val_dict['defac']]],
calc_pflow, temp_val_dict['pflow'], gdal.GDT_Float64,
_IC_NODATA)
shutil.copyfile(
delta_sv_dict['secndy_2'], temp_val_dict['d_statv_temp'])
raster_difference(
temp_val_dict['d_statv_temp'], _IC_NODATA,
temp_val_dict['pflow'], _IC_NODATA,
delta_sv_dict['secndy_2'], _IC_NODATA)
shutil.copyfile(
delta_sv_dict['occlud'], temp_val_dict['d_statv_temp'])
raster_sum(
temp_val_dict['d_statv_temp'], _IC_NODATA,
temp_val_dict['pflow'], _IC_NODATA,
delta_sv_dict['occlud'], _IC_NODATA)
# P flow from occluded to secondary
pygeoprocessing.raster_calculator(
[(path, 1) for path in [
sv_reg['occlud_path'], param_val_dict['psecoc2'],
temp_val_dict['defac']]],
calc_pflow, temp_val_dict['pflow'], gdal.GDT_Float64,
_IC_NODATA)
shutil.copyfile(
delta_sv_dict['occlud'], temp_val_dict['d_statv_temp'])
raster_difference(
temp_val_dict['d_statv_temp'], _IC_NODATA,
temp_val_dict['pflow'], _IC_NODATA,
delta_sv_dict['occlud'], _IC_NODATA)
shutil.copyfile(
delta_sv_dict['secndy_2'], temp_val_dict['d_statv_temp'])
raster_sum(
temp_val_dict['d_statv_temp'], _IC_NODATA,
temp_val_dict['pflow'], _IC_NODATA,
delta_sv_dict['secndy_2'], _IC_NODATA)
# accumulate flows
compartment = 'som3'
state_var = '{}c'.format(compartment)
shutil.copyfile(
sv_reg['{}_path'.format(state_var)],
temp_val_dict['operand_temp'])
raster_sum(
delta_sv_dict[state_var], _IC_NODATA,
temp_val_dict['operand_temp'], _SV_NODATA,
sv_reg['{}_path'.format(state_var)], _SV_NODATA)
for iel in [1, 2]:
state_var = '{}e_{}'.format(compartment, iel)
shutil.copyfile(
sv_reg['{}_path'.format(state_var)],
temp_val_dict['operand_temp'])
raster_sum(
delta_sv_dict[state_var], _IC_NODATA,
temp_val_dict['operand_temp'], _SV_NODATA,
sv_reg['{}_path'.format(state_var)], _SV_NODATA)
for compartment in ['struc', 'metab', 'som1', 'som2']:
for lyr in [1, 2]:
state_var = '{}c_{}'.format(compartment, lyr)
shutil.copyfile(
sv_reg['{}_path'.format(state_var)],
temp_val_dict['operand_temp'])
raster_sum(
delta_sv_dict[state_var], _IC_NODATA,
temp_val_dict['operand_temp'], _SV_NODATA,
sv_reg['{}_path'.format(state_var)], _SV_NODATA)
for iel in [1, 2]:
state_var = '{}e_{}_{}'.format(compartment, lyr, iel)
shutil.copyfile(
sv_reg['{}_path'.format(state_var)],
temp_val_dict['operand_temp'])
raster_sum(
delta_sv_dict[state_var], _IC_NODATA,
temp_val_dict['operand_temp'], _SV_NODATA,
sv_reg['{}_path'.format(state_var)], _SV_NODATA)
for iel in [1, 2]:
state_var = 'minerl_1_{}'.format(iel)
shutil.copyfile(
sv_reg['{}_path'.format(state_var)],
temp_val_dict['operand_temp'])
raster_sum(
delta_sv_dict[state_var], _IC_NODATA,
temp_val_dict['operand_temp'], _SV_NODATA,
sv_reg['{}_path'.format(state_var)], _SV_NODATA)
for state_var in ['parent_2', 'secndy_2', 'occlud']:
shutil.copyfile(
sv_reg['{}_path'.format(state_var)],
temp_val_dict['operand_temp'])
raster_sum(
delta_sv_dict[state_var], _IC_NODATA,
temp_val_dict['operand_temp'], _SV_NODATA,
sv_reg['{}_path'.format(state_var)], _SV_NODATA)
# update aminrl: Simsom.f line 301
pygeoprocessing.raster_calculator(
[(path, 1) for path in [
sv_reg['minerl_1_2_path'], param_val_dict['sorpmx'],
param_val_dict['pslsrb']]],
fsfunc, temp_val_dict['fsol'], gdal.GDT_Float32, _TARGET_NODATA)
update_aminrl(
sv_reg['minerl_1_1_path'], sv_reg['minerl_1_2_path'],
temp_val_dict['fsol'], temp_val_dict['aminrl_1'],
temp_val_dict['aminrl_2'])
# volatilization loss of N: line 323 Simsom.f
raster_multiplication(
temp_val_dict['gromin_1'], _TARGET_NODATA,
pp_reg['vlossg_path'], _IC_NODATA,
temp_val_dict['operand_temp'], _TARGET_NODATA)
shutil.copyfile(
sv_reg['minerl_1_1_path'], temp_val_dict['d_statv_temp'])
raster_difference(
temp_val_dict['d_statv_temp'], _SV_NODATA,
temp_val_dict['operand_temp'], _TARGET_NODATA,
sv_reg['minerl_1_1_path'], _SV_NODATA)
# clean up temporary files
shutil.rmtree(temp_dir)
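# Note on the flow bookkeeping above: each flow is first written to its own
# temporary raster, the current delta (or state-variable) raster is copied to
# 'd_statv_temp', and only then is raster_sum / raster_difference called with
# the copy as an input and the original path as the target. The copy step is
# presumably needed because pygeoprocessing.raster_calculator cannot use the
# same raster as both input and output. A per-pixel sketch of the arithmetic,
# ignoring nodata handling (values invented for illustration):
#   >>> import numpy
#   >>> delta = numpy.array([1.0, 2.0]); flow = numpy.array([0.5, 0.25])
#   >>> delta + flow   # raster_sum: flow enters the receiving pool
#   array([1.5 , 2.25])
#   >>> delta - flow   # raster_difference: flow leaves the donating pool
#   array([0.5 , 1.75])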
def partit(
cpart_path, epart_1_path, epart_2_path, frlign_path,
site_index_path, site_param_table, lyr, sv_reg):
"""Partition incoming material into structural and metabolic pools.
When organic material is added to the soil, for example as dead
biomass falls and becomes litter, or when organic material is added
from animal waste, it is partitioned into structural (STRUCC_lyr) and
metabolic (METABC_lyr) material according to the ratio of lignin to N in
the residue. As residue is partitioned, some N and P may be directly
absorbed from surface mineral N or P into the residue.
Parameters:
cpart_path (string): path to raster containing C in incoming material
that is to be partitioned
epart_1_path (string): path to raster containing N in incoming
material
epart_2_path (string): path to raster containing P in incoming
material
frlign_path (string): path to raster containing fraction of incoming
material that is lignin
site_index_path (string): path to site spatial index raster
site_param_table (dict): map of site spatial index to dictionaries
that contain site-level parameters
lyr (int): layer which is receiving the incoming material (i.e.,
1=surface layer, 2=soil layer)
sv_reg (dict): map of key, path pairs giving paths to current state
variables
Side effects:
modifies the rasters indicated by the following paths:
sv_reg['minerl_1_1_path']
sv_reg['minerl_1_2_path']
sv_reg['metabc_{}_path'.format(lyr)]
sv_reg['strucc_{}_path'.format(lyr)]
sv_reg['metabe_{}_1_path'.format(lyr)]
sv_reg['metabe_{}_2_path'.format(lyr)]
sv_reg['struce_{}_1_path'.format(lyr)]
sv_reg['struce_{}_2_path'.format(lyr)]
sv_reg['strlig_{}_path'.format(lyr)]
Returns:
None
"""
def calc_dirabs(
cpart, epart_iel, minerl_1_iel, damr_lyr_iel, pabres, damrmn_iel):
"""Calculate direct absorption of mineral N or P.
When organic material is added to the soil, some mineral N or P may
be directly absorbed from the surface mineral layer into the incoming
material. The amount transferred depends on the N or P in the incoming
material and the required C/N or C/P ratio of receiving material.
Parameters:
cpart (numpy.ndarray): derived, C in incoming material
epart_iel (numpy.ndarray): derived, <iel> in incoming material
minerl_1_iel (numpy.ndarray): state variable, surface mineral <iel>
damr_lyr_iel (numpy.ndarray): parameter, fraction of iel in lyr
absorbed by residue
pabres (numpy.ndarray): parameter, amount of residue which will
give maximum direct absorption of iel
damrmn_iel (numpy.ndarray): parameter, minimum C/iel ratio allowed
in residue after direct absorption
Returns:
dirabs_iel, <iel> (N or P) absorbed from the surface mineral pool
"""
valid_mask = (
(cpart != _TARGET_NODATA) &
(epart_iel != _TARGET_NODATA) &
(~numpy.isclose(minerl_1_iel, _SV_NODATA)) &
(damr_lyr_iel != _IC_NODATA) &
(pabres != _IC_NODATA) &
(damrmn_iel != _IC_NODATA))
dirabs_iel = numpy.empty(cpart.shape, dtype=numpy.float32)
dirabs_iel[:] = _TARGET_NODATA
dirabs_iel[valid_mask] = 0.
minerl_mask = ((minerl_1_iel >= 0) & valid_mask)
dirabs_iel[minerl_mask] = (
damr_lyr_iel[minerl_mask] * minerl_1_iel[minerl_mask] *
numpy.maximum(cpart[minerl_mask] / pabres[minerl_mask], 1.))
# rcetot: C/E ratio of incoming material
rcetot = numpy.empty(cpart.shape, dtype=numpy.float32)
rcetot[:] = _IC_NODATA
e_sufficient_mask = (((epart_iel + dirabs_iel) > 0) & valid_mask)
rcetot[valid_mask] = 0
rcetot[e_sufficient_mask] = (
cpart[e_sufficient_mask] / (
epart_iel[e_sufficient_mask] + dirabs_iel[e_sufficient_mask]))
dirabs_mod_mask = ((rcetot < damrmn_iel) & valid_mask)
dirabs_iel[dirabs_mod_mask] = numpy.maximum(
cpart[dirabs_mod_mask] / damrmn_iel[dirabs_mod_mask] -
epart_iel[dirabs_mod_mask], 0.)
return dirabs_iel
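# Illustrative scalar walk-through of calc_dirabs (values invented): with
# cpart=100, epart_iel=2, minerl_1_iel=5, damr_lyr_iel=0.02, pabres=100 and
# damrmn_iel=15,
#   dirabs = 0.02 * 5 * max(100/100, 1) = 0.1
#   rcetot = 100 / (2 + 0.1) ~ 47.6
# rcetot exceeds damrmn_iel, so no downward adjustment of dirabs is needed;
# 0.1 units of mineral <iel> are absorbed directly into the residue.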
def calc_d_metabc_lyr(cpart, epart_1, dirabs_1, frlign, spl_1, spl_2):
"""Calculate the change in metabolic C after addition of new material.
Parameters:
cpart (numpy.ndarray): C in incoming material
epart_1 (numpy.ndarray): N in incoming material
dirabs_1 (numpy.ndarray): derived, direct absorption of mineral N
into incoming material
frlign (numpy.ndarray): fraction of incoming material which is
lignin
spl_1 (numpy.ndarray): parameter, intercept of regression
predicting fraction of residue going to metabolic
spl_2 (numpy.ndarray): parameter, slope of regression predicting
fraction of residue going to metabolic
Returns:
d_metabc_lyr, change in metabc_lyr
"""
valid_mask = (
(cpart != _TARGET_NODATA) &
(epart_1 != _TARGET_NODATA) &
(dirabs_1 != _TARGET_NODATA) &
(frlign != _TARGET_NODATA) &
(spl_1 != _IC_NODATA) &
(spl_2 != _IC_NODATA))
movt_mask = ((cpart > 0) & valid_mask)
# rlnres: ratio of lignin to N in the incoming material
rlnres = numpy.empty(cpart.shape, dtype=numpy.float32)
rlnres[:] = _TARGET_NODATA
rlnres[valid_mask] = 0.
rlnres[movt_mask] = (
frlign[movt_mask] / (
(epart_1[movt_mask] + dirabs_1[movt_mask]) /
(cpart[movt_mask] * 2.5)))
# frmet: fraction of cpart that goes to metabolic
frmet = numpy.empty(cpart.shape, dtype=numpy.float32)
frmet[:] = _TARGET_NODATA
frmet[valid_mask] = (
spl_1[valid_mask] - spl_2[valid_mask] * rlnres[valid_mask])
lign_exceeded_mask = ((frlign > (1. - frmet)) & valid_mask)
frmet[lign_exceeded_mask] = 1. - frlign[lign_exceeded_mask]
d_metabc_lyr = numpy.empty(cpart.shape, dtype=numpy.float32)
d_metabc_lyr[:] = _TARGET_NODATA
d_metabc_lyr[valid_mask] = cpart[valid_mask] * frmet[valid_mask]
return d_metabc_lyr
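# Illustrative scalar walk-through of calc_d_metabc_lyr (values invented):
# with cpart=100, epart_1=2, dirabs_1=0.1 and frlign=0.2,
#   rlnres = 0.2 / ((2 + 0.1) / (100 * 2.5)) ~ 23.8
# and with spline parameters spl_1=0.85, spl_2=0.013 (illustrative only),
#   frmet = 0.85 - 0.013 * 23.8 ~ 0.54
# so about 54 units of C go to the metabolic pool and the remaining ~46 go to
# structural; frlign (0.2) < 1 - frmet, so the lignin cap does not apply here.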
def calc_d_strucc_lyr(cpart, d_metabc_lyr):
"""Calculate change in structural C after addition of new material.
Parameters:
cpart (numpy.ndarray): derived, C in incoming material
d_metabc_lyr (numpy.ndarray) derived, change in metabc_lyr
Returns:
d_strucc_lyr, change in strucc_lyr
"""
valid_mask = (
(cpart != _TARGET_NODATA) &
(d_metabc_lyr != _TARGET_NODATA))
d_strucc_lyr = numpy.empty(cpart.shape, dtype=numpy.float32)
d_strucc_lyr[:] = _TARGET_NODATA
d_strucc_lyr[valid_mask] = cpart[valid_mask] - d_metabc_lyr[valid_mask]
return d_strucc_lyr
def calc_d_struce_lyr_iel(d_strucc_lyr, rcestr_iel):
"""Calculate the change in N or P in structural material in layer lyr.
Parameters:
d_strucc_lyr (numpy.ndarray): change in strucc_lyr with addition of
incoming material
rcestr_iel (numpy.ndarray): parameter, C/<iel> ratio for structural
material
Returns:
d_struce_lyr_iel, change in structural N or P in layer lyr
"""
valid_mask = (
(d_strucc_lyr != _TARGET_NODATA) &
(rcestr_iel != _IC_NODATA))
d_struce_lyr_iel = numpy.empty(d_strucc_lyr.shape, dtype=numpy.float32)
d_struce_lyr_iel[:] = _TARGET_NODATA
d_struce_lyr_iel[valid_mask] = (
d_strucc_lyr[valid_mask] / rcestr_iel[valid_mask])
return d_struce_lyr_iel
def calc_d_metabe_lyr_iel(cpart, epart_iel, dirabs_iel, d_struce_lyr_iel):
"""Calculate the change in N or P in metabolic material in layer lyr.
Parameters:
cpart (numpy.ndarray): C in incoming material
epart_iel (numpy.ndarray): <iel> in incoming material
dirabs_iel (numpy.ndarray): <iel> absorbed from the surface mineral
pool
d_struce_lyr_iel (numpy.ndarray): change in structural N or P in
layer lyr
Returns:
d_metabe_lyr_iel, change in metabolic N or P in layer lyr
"""
valid_mask = (
(cpart != _TARGET_NODATA) &
(epart_iel != _TARGET_NODATA) &
(dirabs_iel != _TARGET_NODATA) &
(d_struce_lyr_iel != _TARGET_NODATA))
d_metabe_lyr_iel = numpy.empty(cpart.shape, dtype=numpy.float32)
d_metabe_lyr_iel[:] = _TARGET_NODATA
d_metabe_lyr_iel[valid_mask] = (
epart_iel[valid_mask] + dirabs_iel[valid_mask] -
d_struce_lyr_iel[valid_mask])
return d_metabe_lyr_iel
def calc_d_strlig_lyr(frlign, d_strucc_lyr, cpart, strlig_lyr, strucc_lyr):
"""Calculate change in fraction of lignin in structural material.
Parameters:
frlign (numpy.ndarray): fraction of incoming material which is
lignin
d_strucc_lyr (numpy.ndarray): change in strucc_lyr with addition of
incoming material
cpart (numpy.ndarray): C in incoming material
strlig_lyr (numpy.ndarray): state variable, lignin in structural
material in receiving layer
strucc_lyr (numpy.ndarray): state variable, C in structural
material in layer lyr
Returns:
d_strlig_lyr, change in fraction of lignin in structural material
in layer lyr
"""
valid_mask = (
(frlign != _TARGET_NODATA) &
(d_strucc_lyr != _TARGET_NODATA) &
(cpart != _TARGET_NODATA) &
(~numpy.isclose(strlig_lyr, _SV_NODATA)) &
(~numpy.isclose(strucc_lyr, _SV_NODATA)))
movt_mask = ((cpart > 0) & valid_mask)
fligst = numpy.empty(frlign.shape, dtype=numpy.float32)
fligst[:] = _TARGET_NODATA
fligst[valid_mask] = 1.
fligst[movt_mask] = numpy.minimum(
frlign[movt_mask] / (
d_strucc_lyr[movt_mask] / cpart[movt_mask]), 1.)
strlig_lyr_mod = numpy.empty(frlign.shape, dtype=numpy.float32)
strlig_lyr_mod[:] = _TARGET_NODATA
strlig_lyr_mod[valid_mask] = (
((strlig_lyr[valid_mask] * strucc_lyr[valid_mask]) +
(fligst[valid_mask] * d_strucc_lyr[valid_mask])) /
(strucc_lyr[valid_mask] + d_strucc_lyr[valid_mask]))
d_strlig_lyr = numpy.empty(frlign.shape, dtype=numpy.float32)
d_strlig_lyr[:] = _IC_NODATA
d_strlig_lyr[valid_mask] = (
strlig_lyr_mod[valid_mask] - strlig_lyr[valid_mask])
return d_strlig_lyr
temp_dir = tempfile.mkdtemp(dir=PROCESSING_DIR)
temp_val_dict = {}
for val in [
'dirabs_1', 'dirabs_2', 'd_metabc_lyr', 'd_strucc_lyr',
'd_struce_lyr_iel', 'd_statv_temp', 'operand_temp']:
temp_val_dict[val] = os.path.join(temp_dir, '{}.tif'.format(val))
param_val_dict = {}
for val in [
'damr_{}_1'.format(lyr), 'damr_{}_2'.format(lyr), 'pabres',
'damrmn_1', 'damrmn_2', 'spl_1', 'spl_2', 'rcestr_1',
'rcestr_2']:
target_path = os.path.join(temp_dir, '{}.tif'.format(val))
param_val_dict[val] = target_path
site_to_val = dict(
[(site_code, float(table[val])) for
(site_code, table) in site_param_table.items()])
pygeoprocessing.reclassify_raster(
(site_index_path, 1), site_to_val, target_path,
gdal.GDT_Float32, _IC_NODATA)
# direct absorption of N and P from surface mineral layer
for iel in [1, 2]:
if iel == 1:
epart_path = epart_1_path
else:
epart_path = epart_2_path
pygeoprocessing.raster_calculator(
[(path, 1) for path in [
cpart_path, epart_path,
sv_reg['minerl_1_{}_path'.format(iel)],
param_val_dict['damr_{}_{}'.format(lyr, iel)],
param_val_dict['pabres'],
param_val_dict['damrmn_{}'.format(iel)]]],
calc_dirabs, temp_val_dict['dirabs_{}'.format(iel)],
gdal.GDT_Float32, _TARGET_NODATA)
# remove direct absorption from surface mineral layer
shutil.copyfile(
sv_reg['minerl_1_{}_path'.format(iel)],
temp_val_dict['d_statv_temp'])
raster_difference(
temp_val_dict['d_statv_temp'], _SV_NODATA,
temp_val_dict['dirabs_{}'.format(iel)], _TARGET_NODATA,
sv_reg['minerl_1_{}_path'.format(iel)], _SV_NODATA)
# partition C into structural and metabolic
pygeoprocessing.raster_calculator(
[(path, 1) for path in [
cpart_path, epart_1_path, temp_val_dict['dirabs_1'],
frlign_path, param_val_dict['spl_1'],
param_val_dict['spl_2']]],
calc_d_metabc_lyr, temp_val_dict['d_metabc_lyr'], gdal.GDT_Float32,
_TARGET_NODATA)
pygeoprocessing.raster_calculator(
[(path, 1) for path in [
cpart_path, temp_val_dict['d_metabc_lyr']]],
calc_d_strucc_lyr, temp_val_dict['d_strucc_lyr'], gdal.GDT_Float32,
_TARGET_NODATA)
shutil.copyfile(
sv_reg['metabc_{}_path'.format(lyr)], temp_val_dict['d_statv_temp'])
raster_sum(
temp_val_dict['d_statv_temp'], _SV_NODATA,
temp_val_dict['d_metabc_lyr'], _TARGET_NODATA,
sv_reg['metabc_{}_path'.format(lyr)], _SV_NODATA)
shutil.copyfile(
sv_reg['strucc_{}_path'.format(lyr)], temp_val_dict['d_statv_temp'])
raster_sum(
temp_val_dict['d_statv_temp'], _SV_NODATA,
temp_val_dict['d_strucc_lyr'], _TARGET_NODATA,
sv_reg['strucc_{}_path'.format(lyr)], _SV_NODATA)
# partition N and P into structural and metabolic
for iel in [1, 2]:
if iel == 1:
epart_path = epart_1_path
else:
epart_path = epart_2_path
pygeoprocessing.raster_calculator(
[(path, 1) for path in [
temp_val_dict['d_strucc_lyr'],
param_val_dict['rcestr_{}'.format(iel)]]],
calc_d_struce_lyr_iel, temp_val_dict['d_struce_lyr_iel'],
gdal.GDT_Float32, _TARGET_NODATA)
shutil.copyfile(
sv_reg['struce_{}_{}_path'.format(lyr, iel)],
temp_val_dict['d_statv_temp'])
raster_sum(
temp_val_dict['d_statv_temp'], _SV_NODATA,
temp_val_dict['d_struce_lyr_iel'], _TARGET_NODATA,
sv_reg['struce_{}_{}_path'.format(lyr, iel)], _SV_NODATA)
pygeoprocessing.raster_calculator(
[(path, 1) for path in [
cpart_path, epart_path,
temp_val_dict['dirabs_{}'.format(iel)],
temp_val_dict['d_struce_lyr_iel']]],
calc_d_metabe_lyr_iel, temp_val_dict['operand_temp'],
gdal.GDT_Float32, _TARGET_NODATA)
shutil.copyfile(
sv_reg['metabe_{}_{}_path'.format(lyr, iel)],
temp_val_dict['d_statv_temp'])
raster_sum(
temp_val_dict['d_statv_temp'], _SV_NODATA,
temp_val_dict['operand_temp'], _TARGET_NODATA,
sv_reg['metabe_{}_{}_path'.format(lyr, iel)], _SV_NODATA)
# adjust fraction of lignin in receiving structural pool
pygeoprocessing.raster_calculator(
[(path, 1) for path in [
frlign_path, temp_val_dict['d_strucc_lyr'], cpart_path,
sv_reg['strlig_{}_path'.format(lyr)],
sv_reg['strucc_{}_path'.format(lyr)]]],
calc_d_strlig_lyr, temp_val_dict['operand_temp'], gdal.GDT_Float32,
_IC_NODATA)
shutil.copyfile(
sv_reg['strlig_{}_path'.format(lyr)],
temp_val_dict['d_statv_temp'])
raster_sum(
temp_val_dict['d_statv_temp'], _SV_NODATA,
temp_val_dict['operand_temp'], _IC_NODATA,
sv_reg['strlig_{}_path'.format(lyr)], _SV_NODATA)
# clean up temporary files
shutil.rmtree(temp_dir)
def calc_fall_standing_dead(stdedc, fallrt):
"""Calculate delta C with fall of standing dead.
Material falls from standing dead biomass into surface litter
according to a constant monthly fall rate.
Parameters:
stdedc (numpy.ndarray): state variable, C in standing dead material
fallrt (numpy.ndarray): parameter, fraction of standing dead material
that falls each month
Returns:
delta_c_standing_dead, change in C in standing dead
"""
valid_mask = (
(~numpy.isclose(stdedc, _SV_NODATA)) &
(fallrt != _IC_NODATA))
delta_c_standing_dead = numpy.empty(stdedc.shape, dtype=numpy.float32)
delta_c_standing_dead[:] = _TARGET_NODATA
delta_c_standing_dead[valid_mask] = stdedc[valid_mask] * fallrt[valid_mask]
return delta_c_standing_dead
def calc_root_death(
average_temperature, rtdtmp, rdr, avh2o_1, deck5, bglivc):
"""Calculate delta C with death of roots.
Material flows from roots into soil organic matter pools due to root death.
Root death rate is limited by average temperature and influenced by
available soil moisture. Change in C is calculated by multiplying the root
death rate by bglivc, C in live roots.
Parameters:
average_temperature (numpy.ndarray): derived, average temperature for
the current month
rtdtmp (numpy.ndarray): parameter, temperature below which root death
does not occur
rdr (numpy.ndarray): parameter, maximum root death rate at very dry
soil conditions
avh2o_1 (numpy.ndarray): state variable, water available to the current
plant functional type for growth
deck5 (numpy.ndarray): parameter, level of available soil water at
which root death rate is half maximum
bglivc (numpy.ndarray): state variable, C in belowground live roots
Returns:
delta_c_root_death, change in C during root death
"""
valid_mask = (
(average_temperature != _IC_NODATA) &
(rdr != _IC_NODATA) &
(~numpy.isclose(avh2o_1, _SV_NODATA)) &
(deck5 != _IC_NODATA) &
(~numpy.isclose(bglivc, _SV_NODATA)))
root_death_rate = numpy.empty(bglivc.shape, dtype=numpy.float32)
root_death_rate[:] = _TARGET_NODATA
root_death_rate[valid_mask] = 0.
temp_sufficient_mask = ((average_temperature >= rtdtmp) & valid_mask)
root_death_rate[temp_sufficient_mask] = numpy.minimum(
rdr[temp_sufficient_mask] *
(1.0 - avh2o_1[temp_sufficient_mask] / (
deck5[temp_sufficient_mask] + avh2o_1[temp_sufficient_mask])),
0.95)
delta_c_root_death = numpy.empty(bglivc.shape, dtype=numpy.float32)
delta_c_root_death[:] = _TARGET_NODATA
delta_c_root_death[valid_mask] = (
root_death_rate[valid_mask] * bglivc[valid_mask])
return delta_c_root_death
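# Illustrative scalar walk-through of calc_root_death (values invented): if
# average_temperature >= rtdtmp, then with rdr=0.05, avh2o_1=2 and deck5=5,
#   root_death_rate = 0.05 * (1 - 2 / (5 + 2)) ~ 0.036  (capped at 0.95)
# and delta_c_root_death = 0.036 * bglivc; below rtdtmp the rate is zero.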
def calc_delta_iel(c_state_variable, iel_state_variable, delta_c):
"""Calculate the change in N or P accompanying change in C.
As C flows out of standing dead biomass or roots, the amount of iel
(N or P) flowing out of the same pool is calculated from the change in C
according to the ratio of C to iel in the pool.
Parameters:
c_state_variable (numpy.ndarray): state variable, C in the pool that is
losing material
iel_state_variable (numpy.ndarray): state variable, N or P in the pool
that is losing material
delta_c (numpy.ndarray): derived, change in C. Change in N or P is
proportional to this amount.
Returns:
delta_iel, change in N or P accompanying the change in C
"""
valid_mask = (
(~numpy.isclose(c_state_variable, _SV_NODATA)) &
(~numpy.isclose(iel_state_variable, _SV_NODATA)) &
(c_state_variable > 0) &
(delta_c != _TARGET_NODATA))
delta_iel = numpy.empty(c_state_variable.shape, dtype=numpy.float32)
delta_iel[:] = _TARGET_NODATA
delta_iel[valid_mask] = (
(iel_state_variable[valid_mask] / c_state_variable[valid_mask]) *
delta_c[valid_mask])
return delta_iel
def _death_and_partition(
state_variable, aligned_inputs, site_param_table, current_month,
year_reg, pft_id_set, veg_trait_table, prev_sv_reg, sv_reg):
"""Track movement of C, N and P from a pft-level state variable into soil.
Calculate C, N and P leaving the specified state variable and entering
surface or soil organic matter pools. Subtract the change in C, N and P
from the state variable tracked for each pft, sum up the amounts across
pfts, and distribute the sum of the material flowing from the state
variable to surface or soil structural and metabolic pools.
Parameters:
state_variable (string): string identifying the state variable that is
flowing into organic matter. Must be one of "stded" (for fall
of standing dead) or "bgliv" (for death of roots). If the state
variable is stded, material flowing from stded enters surface
structural and metabolic pools. If the state variable is bgliv,
material flowing from bgliv enters soil structural and metabolic
pools.
aligned_inputs (dict): map of key, path pairs indicating paths
to aligned model inputs, including temperature,
plant functional type composition, and site spatial index
site_param_table (dict): map of site spatial indices to dictionaries
containing site parameters
current_month (int): month of the year, such that current_month=1
indicates January
pft_id_set (set): set of integers identifying plant functional types
veg_trait_table (dict): map of pft id to dictionaries containing
plant functional type parameters
prev_sv_reg (dict): map of key, path pairs giving paths to state
variables for the previous month
sv_reg (dict): map of key, path pairs giving paths to state variables
for the current month
Side effects:
creates the rasters indicated by
sv_reg['<state_variable>c_<pft>_path'] for each pft
sv_reg['<state_variable>e_1_<pft>_path'] for each pft
sv_reg['<state_variable>e_2_<pft>_path'] for each pft
modifies the rasters indicated by
sv_reg['minerl_1_1_path']
sv_reg['minerl_1_2_path']
sv_reg['metabc_<lyr>_path']
sv_reg['strucc_<lyr>_path']
sv_reg['metabe_<lyr>_1_path']
sv_reg['metabe_<lyr>_2_path']
sv_reg['struce_<lyr>_1_path']
sv_reg['struce_<lyr>_2_path']
sv_reg['strlig_<lyr>_path']
where lyr=1 if `state_variable` == 'stded'
lyr=2 if `state_variable` == 'bgliv'
Returns:
None
"""
def calc_avg_temp(max_temp, min_temp):
"""Calculate average temperature from maximum and minimum temp."""
valid_mask = (
(~numpy.isclose(max_temp, max_temp_nodata)) &
(~numpy.isclose(min_temp, min_temp_nodata)))
tave = numpy.empty(max_temp.shape, dtype=numpy.float32)
tave[:] = _IC_NODATA
tave[valid_mask] = (max_temp[valid_mask] + min_temp[valid_mask]) / 2.
return tave
temp_dir = tempfile.mkdtemp(dir=PROCESSING_DIR)
temp_val_dict = {}
for val in [
'tave', 'delta_c', 'delta_iel', 'delta_sv_weighted',
'operand_temp', 'sum_weighted_delta_C', 'sum_weighted_delta_N',
'sum_weighted_delta_P', 'weighted_lignin', 'sum_lignin',
'fraction_lignin']:
temp_val_dict[val] = os.path.join(temp_dir, '{}.tif'.format(val))
param_val_dict = {}
# site-level parameters
val = 'deck5'
target_path = os.path.join(temp_dir, '{}.tif'.format(val))
param_val_dict[val] = target_path
site_to_val = dict(
[(site_code, float(table[val])) for
(site_code, table) in site_param_table.items()])
pygeoprocessing.reclassify_raster(
(aligned_inputs['site_index'], 1), site_to_val, target_path,
gdal.GDT_Float32, _IC_NODATA)
# pft-level parameters
for val in ['fallrt', 'rtdtmp', 'rdr']:
target_path = os.path.join(temp_dir, '{}.tif'.format(val))
param_val_dict[val] = target_path
# sum of material across pfts to be partitioned to organic matter
pygeoprocessing.new_raster_from_base(
aligned_inputs['site_index'], temp_val_dict['sum_weighted_delta_C'],
gdal.GDT_Float32, [_TARGET_NODATA], fill_value_list=[0])
pygeoprocessing.new_raster_from_base(
aligned_inputs['site_index'], temp_val_dict['sum_weighted_delta_N'],
gdal.GDT_Float32, [_TARGET_NODATA], fill_value_list=[0])
pygeoprocessing.new_raster_from_base(
aligned_inputs['site_index'], temp_val_dict['sum_weighted_delta_P'],
gdal.GDT_Float32, [_TARGET_NODATA], fill_value_list=[0])
pygeoprocessing.new_raster_from_base(
aligned_inputs['site_index'], temp_val_dict['sum_lignin'],
gdal.GDT_Float32, [_TARGET_NODATA], fill_value_list=[0])
max_temp_nodata = pygeoprocessing.get_raster_info(
aligned_inputs['max_temp_{}'.format(current_month)])['nodata'][0]
min_temp_nodata = pygeoprocessing.get_raster_info(
aligned_inputs['min_temp_{}'.format(current_month)])['nodata'][0]
pygeoprocessing.raster_calculator(
[(path, 1) for path in [
aligned_inputs['max_temp_{}'.format(current_month)],
aligned_inputs['min_temp_{}'.format(current_month)]]],
calc_avg_temp, temp_val_dict['tave'], gdal.GDT_Float32, _IC_NODATA)
for pft_i in pft_id_set:
pft_nodata = pygeoprocessing.get_raster_info(
aligned_inputs['pft_{}'.format(pft_i)])['nodata'][0]
# calculate change in C leaving the given state variable
if state_variable == 'stded':
fill_val = veg_trait_table[pft_i]['fallrt']
pygeoprocessing.new_raster_from_base(
aligned_inputs['site_index'], param_val_dict['fallrt'],
gdal.GDT_Float32, [_IC_NODATA], fill_value_list=[fill_val])
pygeoprocessing.raster_calculator(
[(path, 1) for path in [
prev_sv_reg['stdedc_{}_path'.format(pft_i)],
param_val_dict['fallrt']]],
calc_fall_standing_dead, temp_val_dict['delta_c'],
gdal.GDT_Float32, _TARGET_NODATA)
else:
for val in ['rtdtmp', 'rdr']:
fill_val = veg_trait_table[pft_i][val]
pygeoprocessing.new_raster_from_base(
aligned_inputs['site_index'], param_val_dict[val],
gdal.GDT_Float32, [_IC_NODATA], fill_value_list=[fill_val])
pygeoprocessing.raster_calculator(
[(path, 1) for path in [
temp_val_dict['tave'],
param_val_dict['rtdtmp'],
param_val_dict['rdr'],
sv_reg['avh2o_1_{}_path'.format(pft_i)],
param_val_dict['deck5'],
prev_sv_reg['bglivc_{}_path'.format(pft_i)]]],
calc_root_death, temp_val_dict['delta_c'],
gdal.GDT_Float32, _TARGET_NODATA)
# subtract delta_c from the pft-level state variable
raster_difference(
prev_sv_reg['{}c_{}_path'.format(state_variable, pft_i)],
_SV_NODATA, temp_val_dict['delta_c'], _TARGET_NODATA,
sv_reg['{}c_{}_path'.format(state_variable, pft_i)], _SV_NODATA)
# calculate delta C weighted by % cover of this pft
raster_multiplication(
temp_val_dict['delta_c'], _TARGET_NODATA,
aligned_inputs['pft_{}'.format(pft_i)], pft_nodata,
temp_val_dict['delta_sv_weighted'], _TARGET_NODATA)
shutil.copyfile(
temp_val_dict['sum_weighted_delta_C'],
temp_val_dict['operand_temp'])
raster_sum(
temp_val_dict['delta_sv_weighted'], _TARGET_NODATA,
temp_val_dict['operand_temp'], _TARGET_NODATA,
temp_val_dict['sum_weighted_delta_C'], _TARGET_NODATA)
# calculate weighted fraction of flowing C which is lignin
if state_variable == 'stded':
frlign_path = year_reg['pltlig_above_{}'.format(pft_i)]
else:
frlign_path = year_reg['pltlig_below_{}'.format(pft_i)]
raster_multiplication(
temp_val_dict['delta_sv_weighted'], _TARGET_NODATA,
frlign_path, _TARGET_NODATA,
temp_val_dict['weighted_lignin'], _TARGET_NODATA)
shutil.copyfile(
temp_val_dict['sum_lignin'], temp_val_dict['operand_temp'])
raster_sum(
temp_val_dict['weighted_lignin'], _TARGET_NODATA,
temp_val_dict['operand_temp'], _TARGET_NODATA,
temp_val_dict['sum_lignin'], _TARGET_NODATA)
for iel in [1, 2]:
# calculate N or P flowing out of the pft-level state variable
pygeoprocessing.raster_calculator(
[(path, 1) for path in [
prev_sv_reg['{}c_{}_path'.format(state_variable, pft_i)],
prev_sv_reg['{}e_{}_{}_path'.format(
state_variable, iel, pft_i)],
temp_val_dict['delta_c']]],
calc_delta_iel, temp_val_dict['delta_iel'],
gdal.GDT_Float32, _TARGET_NODATA)
# subtract delta_iel from the pft-level state variable
raster_difference(
prev_sv_reg['{}e_{}_{}_path'.format(
state_variable, iel, pft_i)], _SV_NODATA,
temp_val_dict['delta_iel'], _TARGET_NODATA,
sv_reg['{}e_{}_{}_path'.format(state_variable, iel, pft_i)],
_SV_NODATA)
# calculate delta iel weighted by % cover of this pft
raster_multiplication(
temp_val_dict['delta_iel'], _TARGET_NODATA,
aligned_inputs['pft_{}'.format(pft_i)], pft_nodata,
temp_val_dict['delta_sv_weighted'], _TARGET_NODATA)
if iel == 1:
shutil.copyfile(
temp_val_dict['sum_weighted_delta_N'],
temp_val_dict['operand_temp'])
raster_sum(
temp_val_dict['delta_sv_weighted'], _TARGET_NODATA,
temp_val_dict['operand_temp'], _TARGET_NODATA,
temp_val_dict['sum_weighted_delta_N'], _TARGET_NODATA)
else:
shutil.copyfile(
temp_val_dict['sum_weighted_delta_P'],
temp_val_dict['operand_temp'])
raster_sum(
temp_val_dict['delta_sv_weighted'], _TARGET_NODATA,
temp_val_dict['operand_temp'], _TARGET_NODATA,
temp_val_dict['sum_weighted_delta_P'], _TARGET_NODATA)
# partition sum of C, N and P into structural and metabolic pools
if state_variable == 'stded':
lyr = 1
else:
lyr = 2
raster_division(
temp_val_dict['sum_lignin'], _TARGET_NODATA,
temp_val_dict['sum_weighted_delta_C'], _TARGET_NODATA,
temp_val_dict['fraction_lignin'], _TARGET_NODATA)
partit(
temp_val_dict['sum_weighted_delta_C'],
temp_val_dict['sum_weighted_delta_N'],
temp_val_dict['sum_weighted_delta_P'],
temp_val_dict['fraction_lignin'],
aligned_inputs['site_index'], site_param_table, lyr, sv_reg)
# clean up temporary files
shutil.rmtree(temp_dir)
def calc_senescence_water_shading(
aglivc, bgwfunc, fsdeth_1, fsdeth_3, fsdeth_4):
"""Calculate shoot death due to water stress and shading.
In months where senescence is not scheduled to occur, some shoot death
may still occur due to water stress and shading.
Parameters:
aglivc (numpy.ndarray): state variable, carbon in aboveground live
biomass
bgwfunc (numpy.ndarray): derived, effect of soil moisture on
decomposition and shoot senescence
fsdeth_1 (numpy.ndarray): parameter, maximum shoot death rate at very
dry soil conditions
fsdeth_3 (numpy.ndarray): parameter, additional fraction of shoots
which die when aglivc is greater than fsdeth_4
fsdeth_4 (numpy.ndarray): parameter, threshold value for aglivc
above which shading increases senescence
Returns:
fdeth, fraction of aboveground live biomass that is converted to
standing dead
"""
valid_mask = (
(~numpy.isclose(aglivc, _SV_NODATA)) &
(bgwfunc != _TARGET_NODATA) &
(fsdeth_1 != _IC_NODATA) &
(fsdeth_3 != _IC_NODATA) &
(fsdeth_4 != _IC_NODATA))
fdeth = numpy.empty(aglivc.shape, dtype=numpy.float32)
fdeth[:] = _TARGET_NODATA
fdeth[valid_mask] = fsdeth_1[valid_mask] * (1. - bgwfunc[valid_mask])
shading_mask = ((aglivc > fsdeth_4) & valid_mask)
fdeth[shading_mask] = fdeth[shading_mask] + fsdeth_3[shading_mask]
fdeth[valid_mask] = numpy.minimum(fdeth[valid_mask], 1.)
return fdeth
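# Illustrative scalar walk-through of calc_senescence_water_shading (values
# invented): with fsdeth_1=0.2 and bgwfunc=0.4, water stress alone gives
# fdeth = 0.2 * (1 - 0.4) = 0.12; if aglivc > fsdeth_4, fsdeth_3 (say 0.2)
# is added for shading, giving 0.32, and the result is capped at 1.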
def _shoot_senescence(
pft_id_set, veg_trait_table, prev_sv_reg, month_reg, current_month,
sv_reg):
"""Senescence of live material to standing dead.
Live aboveground biomass is converted to standing dead according to
senescence, which is specified for each pft to occur in one or more months
of the year. In other months, some senescence may occur because of water
stress or shading. During senescence, C, N and P move from agliv to stded
state variables.
Parameters:
pft_id_set (set): set of integers identifying plant functional types
veg_trait_table (dict): map of pft id to dictionaries containing
plant functional type parameters
prev_sv_reg (dict): map of key, path pairs giving paths to state
variables for the previous month
month_reg (dict): map of key, path pairs giving paths to intermediate
calculated values that are shared between submodels
current_month (int): month of the year, such that current_month=1
indicates January
sv_reg (dict): map of key, path pairs giving paths to state variables
for the current month
Side effects:
creates the rasters indicated by
sv_reg['aglivc_<pft>_path'] for each pft
sv_reg['aglive_1_<pft>_path'] for each pft
sv_reg['aglive_2_<pft>_path'] for each pft
sv_reg['crpstg_1_<pft>_path'] for each pft
sv_reg['crpstg_2_<pft>_path'] for each pft
modifies the rasters indicated by
sv_reg['stdedc_<pft>_path'] for each pft
sv_reg['stdede_1_<pft>_path'] for each pft
sv_reg['stdede_2_<pft>_path'] for each pft
Returns:
None
"""
temp_dir = tempfile.mkdtemp(dir=PROCESSING_DIR)
temp_val_dict = {}
for val in [
'operand_temp', 'fdeth', 'delta_c', 'delta_iel', 'vol_loss',
'to_storage', 'to_stdede']:
temp_val_dict[val] = os.path.join(temp_dir, '{}.tif'.format(val))
param_val_dict = {}
for val in [
'fsdeth_1', 'fsdeth_2', 'fsdeth_3', 'fsdeth_4', 'vlossp',
'crprtf_1', 'crprtf_2']:
for pft_i in pft_id_set:
target_path = os.path.join(
temp_dir, '{}_{}.tif'.format(val, pft_i))
param_val_dict['{}_{}'.format(val, pft_i)] = target_path
fill_val = veg_trait_table[pft_i][val]
pygeoprocessing.new_raster_from_base(
prev_sv_reg['aglivc_{}_path'.format(pft_i)], target_path,
gdal.GDT_Float32, [_IC_NODATA], fill_value_list=[fill_val])
for pft_i in pft_id_set:
if current_month == veg_trait_table[pft_i]['senescence_month']:
temp_val_dict['fdeth'] = param_val_dict[
'fsdeth_2_{}'.format(pft_i)]
else:
pygeoprocessing.raster_calculator(
[(path, 1) for path in [
prev_sv_reg['aglivc_{}_path'.format(pft_i)],
month_reg['bgwfunc'],
param_val_dict['fsdeth_1_{}'.format(pft_i)],
param_val_dict['fsdeth_3_{}'.format(pft_i)],
param_val_dict['fsdeth_4_{}'.format(pft_i)]]],
calc_senescence_water_shading, temp_val_dict['fdeth'],
gdal.GDT_Float32, _TARGET_NODATA)
# change in C flowing from aboveground live biomass to standing dead
raster_multiplication(
temp_val_dict['fdeth'], _TARGET_NODATA,
prev_sv_reg['aglivc_{}_path'.format(pft_i)], _SV_NODATA,
temp_val_dict['delta_c'], _TARGET_NODATA)
raster_difference(
prev_sv_reg['aglivc_{}_path'.format(pft_i)], _SV_NODATA,
temp_val_dict['delta_c'], _TARGET_NODATA,
sv_reg['aglivc_{}_path'.format(pft_i)], _SV_NODATA)
shutil.copyfile(
sv_reg['stdedc_{}_path'.format(pft_i)],
temp_val_dict['operand_temp'])
raster_sum(
temp_val_dict['operand_temp'], _SV_NODATA,
temp_val_dict['delta_c'], _TARGET_NODATA,
sv_reg['stdedc_{}_path'.format(pft_i)], _SV_NODATA)
for iel in [1, 2]:
# change in N or P flowing from aboveground live biomass to dead
raster_multiplication(
temp_val_dict['fdeth'], _TARGET_NODATA,
prev_sv_reg['aglive_{}_{}_path'.format(iel, pft_i)],
_SV_NODATA, temp_val_dict['delta_iel'], _TARGET_NODATA)
raster_difference(
prev_sv_reg['aglive_{}_{}_path'.format(iel, pft_i)],
_SV_NODATA, temp_val_dict['delta_iel'], _TARGET_NODATA,
sv_reg['aglive_{}_{}_path'.format(iel, pft_i)], _SV_NODATA)
if iel == 1:
# volatilization loss of N
raster_multiplication(
temp_val_dict['delta_iel'], _TARGET_NODATA,
param_val_dict['vlossp_{}'.format(pft_i)], _IC_NODATA,
temp_val_dict['vol_loss'], _TARGET_NODATA)
shutil.copyfile(
temp_val_dict['delta_iel'], temp_val_dict['operand_temp'])
raster_difference(
temp_val_dict['operand_temp'], _TARGET_NODATA,
temp_val_dict['vol_loss'], _TARGET_NODATA,
temp_val_dict['delta_iel'], _TARGET_NODATA)
# a fraction of N and P goes to crop storage
raster_multiplication(
temp_val_dict['delta_iel'], _TARGET_NODATA,
param_val_dict['crprtf_{}_{}'.format(iel, pft_i)], _IC_NODATA,
temp_val_dict['to_storage'], _TARGET_NODATA)
raster_sum(
prev_sv_reg['crpstg_{}_{}_path'.format(iel, pft_i)],
_SV_NODATA, temp_val_dict['to_storage'], _TARGET_NODATA,
sv_reg['crpstg_{}_{}_path'.format(iel, pft_i)], _SV_NODATA)
# the rest goes to standing dead biomass
raster_difference(
temp_val_dict['delta_iel'], _TARGET_NODATA,
temp_val_dict['to_storage'], _TARGET_NODATA,
temp_val_dict['to_stdede'], _TARGET_NODATA)
shutil.copyfile(
sv_reg['stdede_{}_{}_path'.format(iel, pft_i)],
temp_val_dict['operand_temp'])
raster_sum(
temp_val_dict['operand_temp'], _SV_NODATA,
temp_val_dict['to_stdede'], _TARGET_NODATA,
sv_reg['stdede_{}_{}_path'.format(iel, pft_i)], _SV_NODATA)
# clean up temporary files
shutil.rmtree(temp_dir)
def convert_biomass_to_C(biomass_path, c_path):
"""Convert from grams of biomass to grams of carbon.
The root:shoot submodel calculates potential growth in units of grams of
biomass, but the growth submodel calculates actual growth from that
potential growth in units of grams of carbon. Convert biomass to carbon
using a conversion factor of 2.5.
Parameters:
biomass_path (string): path to raster containing grams of
biomass
c_path (string): path to raster that should contain the equivalent
grams of carbon
Side effects:
modifies or creates the raster indicated by `c_path`
Returns:
None
"""
def convert_op(biomass):
"""Convert grams of biomass to grams of carbon."""
valid_mask = (biomass != _TARGET_NODATA)
carbon = numpy.empty(biomass.shape, dtype=numpy.float32)
carbon[:] = _TARGET_NODATA
carbon[valid_mask] = biomass[valid_mask] / 2.5
return carbon
pygeoprocessing.raster_calculator(
[(biomass_path, 1)], convert_op, c_path, gdal.GDT_Float32,
_TARGET_NODATA)
def restrict_potential_growth(potenc, availm_1, availm_2, snfxmx_1):
"""Restrict potential growth according to mineral nutrients.
Limit potential growth by the availability of mineral N and P. Growth only
occurs if there is some availability of both mineral elements. Line 63
Restrp.f
Parameters:
potenc (numpy.ndarray): potential C production (g C)
availm_1 (numpy.ndarray): derived, total mineral N available to this
pft
availm_2 (numpy.ndarray): derived, total mineral P available to this
pft
snfxmx_1 (numpy.ndarray): parameter, maximum symbiotic N fixation rate
Returns:
potenc_lim_minerl, potential C production limited by availability of
mineral nutrients
"""
valid_mask = (
(potenc != _TARGET_NODATA) &
(availm_1 != _TARGET_NODATA) &
(availm_2 != _TARGET_NODATA) &
(snfxmx_1 != _IC_NODATA))
potenc_lim_minerl = numpy.empty(potenc.shape, dtype=numpy.float32)
potenc_lim_minerl[:] = _TARGET_NODATA
potenc_lim_minerl[valid_mask] = 0
growth_mask = (
((availm_1 > 0) | (snfxmx_1 > 0)) &
(availm_2 > 0) &
valid_mask)
potenc_lim_minerl[growth_mask] = potenc[growth_mask]
return potenc_lim_minerl
def c_uptake_aboveground(cprodl, rtsh):
"""Calculate uptake of C from atmosphere to aboveground live biomass.
Given total C predicted to flow into new growth and the root:shoot ratio
of new growth, calculate the flow of C from the atmosphere into aboveground
live biomass. Lines 137-146 Growth.f
Parameters:
cprodl (numpy.ndarray): derived, c production limited by nutrient
availability
rtsh (numpy.ndarray): derived, root/shoot ratio of new production
Returns:
delta_aglivc, change in C in aboveground live biomass
"""
valid_mask = (
(cprodl != _TARGET_NODATA) &
(rtsh != _TARGET_NODATA))
delta_aglivc = numpy.empty(cprodl.shape, dtype=numpy.float32)
delta_aglivc[:] = _TARGET_NODATA
delta_aglivc[valid_mask] = (
cprodl[valid_mask] * (1. - rtsh[valid_mask] / (rtsh[valid_mask] + 1.)))
return delta_aglivc
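# Illustrative note: the expression above is the shoot share of new growth,
# cprodl * (1 - rtsh / (rtsh + 1)) = cprodl / (rtsh + 1), and the complement
# rtsh / (rtsh + 1) goes belowground (see c_uptake_belowground). For example
# (values invented), cprodl=100 and rtsh=0.5 give ~66.7 units of C to shoots
# and ~33.3 units to roots.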
def c_uptake_belowground(bglivc, cprodl, rtsh):
"""Do uptake of C from atmosphere to belowground live biomass.
Given total C predicted to flow into new growth and the root:shoot ratio
of new growth, perform the flow of C from the atmosphere into belowground
live biomass. Lines 148-156 Growth.f
Parameters:
bglivc (numpy.ndarray): state variable, existing C in belowground live
biomass
cprodl (numpy.ndarray): derived, c production limited by nutrient
availability
rtsh (numpy.ndarray): derived, root/shoot ratio of new production
Returns:
modified_bglivc, modified C in belowground live biomass
"""
valid_mask = (
(~numpy.isclose(bglivc, _SV_NODATA)) &
(cprodl != _TARGET_NODATA) &
(rtsh != _TARGET_NODATA))
c_prod_belowground = numpy.empty(bglivc.shape, dtype=numpy.float32)
c_prod_belowground[:] = _TARGET_NODATA
c_prod_belowground[valid_mask] = (
cprodl[valid_mask] * (rtsh[valid_mask] / (rtsh[valid_mask] + 1.)))
modified_bglivc = numpy.empty(bglivc.shape, dtype=numpy.float32)
modified_bglivc[:] = _SV_NODATA
modified_bglivc[valid_mask] = (
bglivc[valid_mask] + c_prod_belowground[valid_mask])
return modified_bglivc
def calc_uptake_source(return_type):
"""Calculate uptake of nutrient from available sources."""
def _uptake(
eavail_iel, eup_above_iel, eup_below_iel, plantNfix, storage_iel,
iel):
"""Calculate N or P taken up from one source.
Given the N or P predicted to flow into new above- and belowground
production, calculate how much of that nutrient will be taken from the
crop storage pool and how much will be taken from soil. For N, some of
the necessary uptake may also come from symbiotic N fixation.
Parameters:
eavail_iel (numpy.ndarray): derived, total iel available to this
plant functional type
eup_above_iel (numpy.ndarray): derived, iel in new aboveground
production
eup_below_iel (numpy.ndarray): derived, iel in new belowground
production
plantNfix (numpy.ndarray): derived, symbiotic N fixed by this plant
functional type
storage_iel (numpy.ndarray): state variable, iel in crop storage
pool
iel (integer): index identifying N or P
Returns:
uptake_storage, uptake from crop storage pool, if return_type is
'uptake_storage'
uptake_soil, uptake from mineral content of soil layers accessible
by the plant functional type, if return_type is 'uptake_soil'
uptake_Nfix, uptake from symbiotically fixed nitrogen, if
return_type is 'uptake_Nfix'
"""
valid_mask = (
(eup_above_iel != _TARGET_NODATA) &
(eup_below_iel != _TARGET_NODATA) &
(plantNfix != _TARGET_NODATA) &
(~numpy.isclose(storage_iel, _SV_NODATA)))
eprodl_iel = numpy.empty(eup_above_iel.shape, dtype=numpy.float32)
eprodl_iel[:] = _TARGET_NODATA
eprodl_iel[valid_mask] = (
eup_above_iel[valid_mask] + eup_below_iel[valid_mask])
uptake_storage = | numpy.empty(eup_above_iel.shape, dtype=numpy.float32) | numpy.empty |
"""
Analyze spike shapes - pulled out of IVCurve 2/6/2016 pbm.
Allows routine to be used to analyze spike trains independent of acq4's data models.
Create instance, then call setup to define the "Clamps" object and the spike threshold.
The Clamps object must have the following variables defined:
commandLevels (current injection levels, list)
time_base (np.array of times corresponding to traces)
data_mode (string, indicating current or voltage clamp)
tstart (time for start of looking at spikes; ms)
tend (time to stop looking at spikes; ms)
trace (the data trace itself, numpy array records x points)
sample_interval (time between samples, sec)
values (command waveforms; why it is called this in acq4 is a mystery)
Note that most of the results from this module are accessed either
as class variables, or through the class variable analysis_summary,
a dictionary with key analysis results.
IVCurve uses the analysis_summary to post results to an sql database.
<NAME>, Ph.D. 2016-2019
for Acq4 (and beyond)
"""
from collections import OrderedDict
import os
import os.path
from pathlib import Path
import inspect
import sys
import itertools
import functools
import numpy as np
import scipy
from . import Utility # pbm's utilities...
from . import Fitting # pbm's fitting stuff...
import pprint
import time
this_source_file = 'ephysanalysis.SpikeAnalysisrc'
class SpikeAnalysis():
def __init__(self):
self.threshold = 0.
self.Clamps = None
self.analysis_summary = {}
self.verbose = False
self.FIGrowth = 1 # use function FIGrowth1 (can use simpler version FIGrowth 2 also)
self.analysis_summary['FI_Growth'] = [] # permit analysis of multiple growth functions.
self.detector = 'argrelmax'
def setup(self, clamps=None, threshold=None, refractory:float=0.0007, peakwidth:float=0.001,
verify=False, interpolate=True, verbose=False, mode='peak', min_halfwidth=0.010,
data_time_units:str = 's', data_volt_units:str='V'):
"""
configure the inputs to the SpikeAnalysis class
Parameters
----------
clamps : class (default: None)
PatchEphys clamp data holding/accessing all ephys data for this analysis
threshold : float (default: None)
Voltage threshold for spike detection
refractory : float (default 0.0007)
Minimum time between detected spikes, in seconds (or units of the clamp
time base)
peakwidth : float (default: 0.001)
When using "peak" as method in findspikes, this is the peak width maximum in sec
min_halfwidth : float (default: 0.010)
minimum spike half width in seconds. Default value is deliberately large...
verify : boolean (default: False)
interpolate : boolean (default: True)
Use interpolation to get spike threshold time and half-widths
mode : string (default: 'peak')
if using detector "peak", this is mode passed to findspikes
verbose : boolean (default: False)
Set true to get lots of print out while running - used
mostly for debugging.
"""
if clamps is None or threshold is None:
raise ValueError("Spike Analysis requires defined clamps and threshold")
self.Clamps = clamps
assert data_time_units in ['s', 'ms']
assert data_volt_units in ['V', 'mV']
self.time_units = data_time_units
self.volt_units = data_volt_units # needed by spike detector for data conversion
self.threshold = threshold
self.refractory = refractory
self.interpolate = interpolate # use interpolation on spike thresholds...
self.peakwidth = peakwidth
self.min_halfwidth = min_halfwidth
self.verify = verify
self.verbose = verbose
self.mode = mode
self.ar_window = 0.1 # sec; spikes within this window after tstart are used for the adaptation ratio
self.ar_lastspike = 0.075 # sec; last spike must fall after this time (re tstart) for AR to be measured
self.min_peaktotrough = 0.010 # change in V on falling phase to be considered a spike
self.max_spike_look = 0.010 # sec (10 msec) over which to measure spike widths
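# Minimal usage sketch (not from the original file; 'my_clamps' is a
# placeholder for any object satisfying the Clamps interface described in
# the module docstring, and the threshold value is illustrative):
#   sa = SpikeAnalysis()
#   sa.setup(clamps=my_clamps, threshold=0.0)
#   sa.set_detector('argrelmax')
#   sa.analyzeSpikes() # must run before analyzeSpikeShape
#   sa.analyzeSpikeShape()
#   fi = sa.analysis_summary['FI_Curve'] # [command levels, spike counts]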
def set_detector(self, detector:str='argrelmax'):
assert detector in ['argrelmax', 'threshold', 'Kalluri']
self.detector = detector
def analyzeSpikes(self, reset=True):
"""
analyzeSpikes: Using the threshold set in the control panel, count the
number of spikes in the stimulation window (self.Clamps.tstart, self.Clamps.tend)
Updates the spike plot(s).
The following class variables are modified upon successful analysis and return:
self.spikecount: a 1-D numpy array of spike counts, aligned with the
current (command)
self.adapt_ratio: the adaptation ratio of the spike train
self.fsl: a numpy array of first spike latency for each command level
self.fisi: a numpy array of first interspike intervals for each
command level
self.nospk: the indices of command levels where no spike was detected
self.spk: the indices of command levels were at least one spike
was detected
self.analysis_summary : Dictionary of results.
Parameters
----------
None
Returns
-------
Nothing, but see the list of class variables that are modified
"""
if reset:
self.analysis_summary['FI_Growth'] = [] # permit analysis of multiple growth functions.
twin = self.Clamps.tend - self.Clamps.tstart # measurement window in seconds
maxspkrate = 50 # max rate to count in adaptation is 50 spikes/second
minspk = 4
maxspk = int(maxspkrate*twin) # scale max count by the duration of the measurement window
#print('max spike rate: ', maxspk)
ntr = len(self.Clamps.traces)
self.spikecount = np.zeros(ntr)
self.fsl = np.zeros(ntr)
self.fisi = np.zeros(ntr)
ar = np.zeros(ntr)
self.allisi = []
self.spikes = [[] for i in range(ntr)]
self.spikeIndices = [[] for i in range(ntr)]
#print 'clamp start/end: ', self.Clamps.tstart, self.Clamps.tend
lastspikecount = 0
U = Utility.Utility()
for i in range(ntr): # this is where we should parallelize the analysis for spikes
spikes = U.findspikes(self.Clamps.time_base, np.array(self.Clamps.traces[i]),
self.threshold, t0=self.Clamps.tstart,
t1=self.Clamps.tend,
dt=self.Clamps.sample_interval,
mode=self.mode, # mode to use for finding spikes
interpolate=self.interpolate,
detector=self.detector,
mindip = 1e-2,
refract=self.refractory,
peakwidth=self.peakwidth,
data_time_units=self.time_units,
data_volt_units=self.volt_units,
verify=self.verify,
debug=False)
# print (ntr, i, self.Clamps.values[i], len(spikes))
if len(spikes) == 0:
# print ('no spikes found')
continue
spikes = np.array(spikes)
self.spikes[i] = spikes
# print 'found %d spikes in trace %d' % (len(spikes), i)
self.spikeIndices[i] = [np.argmin(np.fabs(self.Clamps.time_base-t)) for t in spikes]
self.spikecount[i] = len(spikes)
self.fsl[i] = (spikes[0] - self.Clamps.tstart)*1e3
if len(spikes) > 1:
self.fisi[i] = (spikes[1] - spikes[0])*1e3 # first ISI
self.allisi.append(np.diff(spikes)*1e3)
# for Adaptation ratio analysis: limit spike rate, and also only on monotonic increase in rate
# 8/2018:
# AR needs to be tethered to time into stimulus
# Here we return a standardized ar measured during the first 100 msec
# (standard ar)
if (minspk <= len(spikes)) and (self.spikecount[i] > lastspikecount):
spx = spikes[np.where(spikes-self.Clamps.tstart < self.ar_window)] # default is 100 msec
if len(spx) >= 4: # at least 4 spikes
if spx[-1] > self.ar_lastspike+self.Clamps.tstart: # default 75 msec
misi = np.mean(np.diff(spx[-2:]))*1e3 # last ISIs in the interval
ar[i] = misi / self.fisi[i]
lastspikecount = self.spikecount[i] # update rate (sets max rate)
iAR = np.where(ar > 0) # valid AR and monotonically rising
self.adapt_ratio = np.nan
if len(ar[iAR]) > 0:
self.adapt_ratio = np.mean(ar[iAR]) # only where we made the measurement
self.ar = ar # stores all the ar values
self.analysis_summary['AdaptRatio'] = self.adapt_ratio # only the valid values
self.nospk = np.where(self.spikecount == 0)
self.spk = np.where(self.spikecount > 0)[0]
self.analysis_summary['FI_Curve'] = np.array([self.Clamps.values, self.spikecount])
self.analysis_summary['FiringRate'] = np.max(self.spikecount)/(self.Clamps.tend - self.Clamps.tstart)
self.spikes_counted = True
# self.update_SpikePlots()
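# Illustrative walk-through of the adaptation-ratio bookkeeping above (spike
# times invented): spikes at 10, 20, 35, 55 and 80 ms after tstart give
# fisi = 10 ms and a final ISI of 25 ms, so ar = 25/10 = 2.5 for that trace.
# The trace qualifies because it has at least 4 spikes inside ar_window
# (100 ms) and the last of those falls after ar_lastspike (75 ms).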
def analyzeSpikes_brief(self, mode='baseline'):
"""
analyzeSpikes_brief: Using the threshold set in the control panel, count the
number of spikes in a window and fill out an analysis summary dict with
the spike latencies in that window (from 0 time)
Parameters
----------
mode: str (default : baseline)
baseline: from 0 to self.Clamps.tstart
poststimulus : from self.Clamps.tend to end of trace
evoked : from self.Clamps.tstart to self.Clamps.tend
Returns:
-------
Nothing, but see the list of class variables that are modified
The class variable modified is
self.analysis_summary : Dictionary of spike times. The key is
'baseline_spikes', 'evoked_spikes', or 'poststimulus_spikes'
according to the mode in the call
"""
if mode == 'baseline':
twin = [0., self.Clamps.tstart]
elif mode == 'evoked':
twin = [self.Clamps.tstart,self.Clamps.tend]
elif mode == 'poststimulus':
twin = [self.Clamps.tend, np.max(self.Clamps.time_base)]
else:
raise ValueError(f'{this_source_file:s}:: analyzeSpikes_brief requires mode to be "baseline", "evoked", or "poststimulus"')
ntr = len(self.Clamps.traces)
allspikes = [[] for i in range(ntr)]
spikeIndices = [[] for i in range(ntr)]
U = Utility.Utility()
for i in range(ntr):
spikes = U.findspikes(self.Clamps.time_base, np.array(self.Clamps.traces[i]),
self.threshold, t0=twin[0],
t1=twin[1],
dt=self.Clamps.sample_interval,
mode=self.mode, # mode to use for finding spikes
interpolate=self.interpolate,
detector=self.detector,
refract=self.refractory,
peakwidth=self.peakwidth,
verify=self.verify,
debug=False)
if len(spikes) == 0:
#print 'no spikes found'
continue
allspikes[i] = spikes
self.analysis_summary[mode+'_spikes'] = allspikes
def _timeindex(self, t):
"""
Find the index into the time_base of the Clamps structure that
corresponds to the time closest to t
Parameters
----------
t : float (time, no default)
Returns
-------
index : int (index to the closest time)
"""
return np.argmin(np.fabs(self.Clamps.time_base-t))
def _initialize_summarymeasures(self):
self.analysis_summary['AP1_Latency'] = np.inf
self.analysis_summary['AP1_HalfWidth'] = np.inf
self.analysis_summary['AP1_HalfWidth_interpolated'] = np.inf
self.analysis_summary['AP2_Latency'] = np.inf
self.analysis_summary['AP2_HalfWidth'] = np.inf
self.analysis_summary['AP2_HalfWidth_interpolated'] = np.inf
self.analysis_summary['FiringRate_1p5T'] = np.inf
self.analysis_summary['AHP_Depth'] = np.inf # convert to mV
def analyzeSpikeShape(self, printSpikeInfo=False, begin_dV=12.0):
"""analyze the spike shape.
Does analysis of ONE protocol, all traces.
Based on the analysis from Druckmann et al. Cerebral Cortex, 2013
The results of the analysis are stored in the SpikeAnalysis object
as SpikeAnalysis.analysis_summary, a dictionary with specific keys.
Also available are the raw spike measures, in the 'spikes' dictionary
of the analysis_summary (spike shape dict, with keys by trace number,
each trace with a dict of values)
Every spike is measured, and a number of points on the waveform
are defined for each spike, including the peak, the half-width
on the rising phase, half-width on the falling phase, the
peak of the AHP, the peak-trough time (AP peak to AHP peak),
and a beginning, based on the slope (set in begin_dV)
Parameters
----------
printSpikeInfo : Boolean (default: False)
Flag; when set prints arrays, etc, for debugging purposes
begin_dV : float (default: 12 mV/ms)
Slope used to define onset of the spike. The default value
is from Druckmann et al; change this at your own peril!
Returns
-------
Nothing (but see doc notes above)
"""
self._initialize_summarymeasures()
self.madeplot = False
ntr = len(self.Clamps.traces)
# print 'analyzespikeshape, self.spk: ', self.spk
self.spikeShape = OrderedDict()
rmps = np.zeros(ntr)
self.iHold_i = np.zeros(ntr)
U = Utility.Utility()
for i in range(ntr):
# print('rec nspk: ', i, len(self.spikes[i]))
if len(self.spikes[i]) == 0:
continue
if printSpikeInfo:
print(f'{this_source_file:s}:: spikes: ', self.spikes[i])
print((np.array(self.Clamps.values)))
print((len(self.Clamps.traces)))
(rmps[i], r2) = U.measure('mean', self.Clamps.time_base, self.Clamps.traces[i],
0.0, self.Clamps.tstart)
(self.iHold_i[i], r2) = U.measure('mean', self.Clamps.time_base, self.Clamps.cmd_wave[i],
0.0, self.Clamps.tstart)
trspikes = OrderedDict()
for j in range(len(self.spikes[i])):
# print('i,j,etc: ', i, j, begin_dV)
thisspike = self.analyze_one_spike(i, j, begin_dV)
if thisspike is not None:
trspikes[j] = thisspike
self.spikeShape[i] = trspikes
self.iHold = np.mean(self.iHold_i)
self.analysis_summary['spikes'] = self.spikeShape # save in the summary dictionary too
self.analysis_summary['iHold'] = self.iHold
self.analysis_summary['pulseDuration'] = self.Clamps.tend - self.Clamps.tstart
if len(self.spikeShape.keys()) > 0: # only try to classify if there are spikes
self.getClassifyingInfo() # build analysis summary here as well.
if printSpikeInfo:
pp = pprint.PrettyPrinter(indent=4)
for m in sorted(self.spikeShape.keys()):
print(('----\nTrace: %d has %d APs' % (m, len(list(self.spikeShape[m].keys())))))
for n in sorted(self.spikeShape[m].keys()):
pp.pprint(self.spikeShape[m][n])
def analyze_one_spike(self, i, j, begin_dV):
thisspike = {'trace': i, 'AP_number': j, 'AP_beginIndex': None, 'AP_endIndex': None,
'AP_peakIndex': None, 'peak_T': None, 'peak_V': None, 'AP_Latency': None,
'AP_beginV': None, 'halfwidth': None, 'halfwidth_interpolated': None,
'trough_T': None, 'trough_V': None, 'peaktotrough': None,
'current': None, 'iHold': None,
'pulseDuration': None, 'tstart': self.Clamps.tstart} # initialize the structure
thisspike['current'] = self.Clamps.values[i] - self.iHold_i[i]
thisspike['iHold'] = self.iHold_i[i]
thisspike['pulseDuration'] = self.Clamps.tend - self.Clamps.tstart # in seconds
thisspike['AP_peakIndex'] = self.spikeIndices[i][j]
thisspike['peak_T'] = self.Clamps.time_base[thisspike['AP_peakIndex']]
thisspike['peak_V'] = self.Clamps.traces[i][thisspike['AP_peakIndex']] # max voltage of spike
thisspike['tstart'] = self.Clamps.tstart
# find the minimum going forward - that is AHP min
dt = (self.Clamps.time_base[1]-self.Clamps.time_base[0])
dv = np.diff(self.Clamps.traces[i])/dt
# find end of spike (either top of next spike, or end of trace)
k = self.spikeIndices[i][j] + 1 # point to next spike
if j < self.spikecount[i] - 1: #
kend = self.spikeIndices[i][j+1]
else:
kend = int(self.spikeIndices[i][j]+self.max_spike_look/dt)
if kend >= dv.shape[0]:
return(thisspike) # end of spike would be past end of trace
else:
if kend < k:
kend = k + 1
km = np.argmin(dv[k:kend]) + k
# Find trough after spike and calculate peak to trough
kmin = np.argmin(self.Clamps.traces[i][km:kend])+km
thisspike['AP_endIndex'] = kmin
thisspike['trough_T'] = self.Clamps.time_base[thisspike['AP_endIndex']]
thisspike['trough_V'] = self.Clamps.traces[i][kmin]
if thisspike['AP_endIndex'] is not None:
thisspike['peaktotrough'] = thisspike['trough_T'] - thisspike['peak_T']
# find points on spike waveform
# because index is to peak, we look for previous spike
k = self.spikeIndices[i][j]
# print('i, j, spikeindices: ', i, j, self.spikeIndices[i][j])
# print('k: dt: ', k, dt)
if j > 0:
kbegin = self.spikeIndices[i][j-1] # index to previous spike start
else:
kbegin = k - int(0.002/dt) # for first spike - 2 msec prior only
if k <= kbegin:
k = kbegin + 2
if k > len(dv): # end of block of data, so can not measure
return(thisspike)
# print('kbegin, k: ', kbegin, k)
try:
km = np.argmax(dv[kbegin:k]) + kbegin
except:
print(f'{this_source_file:s}:: kbegin, k: ', kbegin, k)
print(len(dv))
raise
if ((km - kbegin) < 1):
km = kbegin + int((k - kbegin)/2.) + 1
kthresh = np.argmin(np.fabs(dv[kbegin:km] - begin_dV)) + kbegin # point where slope is closest to begin
# print('kthresh, kbegin: ', kthresh, kbegin)
# save values in dict here
thisspike['AP_beginIndex'] = kthresh
thisspike['AP_Latency'] = self.Clamps.time_base[kthresh]
thisspike['AP_beginV'] = self.Clamps.traces[i][thisspike['AP_beginIndex']]
# if successful in defining spike start/end, calculate half widths in two ways:
# closest points in raw data, and by interpolation
if (
(thisspike['AP_beginIndex'] is not None) and
(thisspike['AP_beginIndex'] > 0) and
(thisspike['AP_endIndex'] is not None) and
(thisspike['AP_beginIndex'] < thisspike['AP_peakIndex']) and
(thisspike['AP_peakIndex'] < thisspike['AP_endIndex'])
):
halfv = 0.5*(thisspike['peak_V'] + thisspike['AP_beginV'])
tr = np.array(self.Clamps.traces[i])
xr = self.Clamps.time_base
kup = np.argmin(np.fabs(tr[thisspike['AP_beginIndex']:thisspike['AP_peakIndex']] - halfv))
kup += thisspike['AP_beginIndex']
kdown = np.argmin(np.fabs(tr[thisspike['AP_peakIndex']:thisspike['AP_endIndex']] - halfv))
kdown += thisspike['AP_peakIndex']
if kup is not None and kdown is not None:
thisspike['halfwidth'] = xr[kdown] - xr[kup]
thisspike['hw_up'] = xr[kup] - xr[thisspike['AP_peakIndex']]
thisspike['hw_down'] = xr[thisspike['AP_peakIndex']] - xr[kdown]
thisspike['hw_v'] = halfv
# interpolated spike hwup, down and width
pkt = xr[thisspike['AP_peakIndex']]
if tr[kup] <= halfv:
vi = tr[kup-1:kup+1]
xi = xr[kup-1:kup+1]
else:
vi = tr[kup:kup+2]
xi = xr[kup:kup+2]
m1 = (vi[1]-vi[0])/(xi[1]-xi[0])
b1 = vi[1] - m1*xi[1]
if m1 == 0.0 or np.std(tr) == 0.0:
# print('a: ', vi[1], vi[0], kup, tr[kup:kup+2], tr[kup-1:kup+1], tr[kup], halfv)
return(thisspike)
t_hwup = (halfv-b1)/m1
if tr[kdown] <= halfv:
vi = tr[kdown:kdown+2]
xi = xr[kdown:kdown+2]
u='a'
else:
vi = tr[kdown-1:kdown+1]
xi = xr[kdown-1:kdown+1]
u='b'
m2 = (vi[1]-vi[0])/(xi[1]-xi[0])
b2 = vi[1] - m2*xi[1]
if m2 == 0.0 or np.std(tr) == 0.0:
# print('b: ', vi[1], vi[0], kup , tr[kdown-1:kdown+1], tr[kdown:kdown+2], tr[kdown], halfv)
return(thisspike)
t_hwdown = (halfv-b2)/m2
thisspike['halfwidth'] = t_hwdown-t_hwup
# import matplotlib.pyplot as mpl
# fig, ax = mpl.subplots(1,1)
# ax.plot(xr[kup-10:kdown+10], tr[kup-10:kdown+10])
# ax.plot(t_hwdown, halfv, 'ro')
# ax.plot(t_hwup, halfv, 'bx')
# mpl.show()
if thisspike['halfwidth'] > self.min_halfwidth: # too broad to be acceptable
if self.verbose:
print(f'{this_source_file:s}::\n spikes > min half width', thisspike['halfwidth'])
print(' halfv: ', halfv, thisspike['peak_V'], thisspike['AP_beginV'])
thisspike['halfwidth'] = None
thisspike['halfwidth_interpolated'] = None
else:
thisspike['halfwidth_interpolated'] = t_hwdown - t_hwup
pkvI = tr[thisspike['AP_peakIndex']]
pkvM = np.max(tr[thisspike['AP_beginIndex']:thisspike['AP_endIndex']])
pkvMa = np.argmax(tr[thisspike['AP_beginIndex']:thisspike['AP_endIndex']])
if pkvI != pkvM:
pktrap = True
return(thisspike)
def getIVCurrentThresholds(self):
""" figure out "threshold" for spike, get 150% and 300% points.
Parameters
----------
None
Returns
-------
tuple: (int, int)
The tuple contains the index to command threshold for spikes, and 150% of that threshold
The indices are chosen to be as close as possible to the command step values
that are actually used (so, the threshold is absolute; the 150%
value will be the closest estimate given the step sizes used to
collect the data)
"""
icmd = [] # list of command currents that resulted in spikes.
for m in sorted(self.spikeShape.keys()):
n = len(list(self.spikeShape[m].keys())) # number of spikes in the trace
for n in list(self.spikeShape[m].keys()):
icmd.append(self.spikeShape[m][n]['current'])
icmd = np.array(icmd)
try:
iamin = np.argmin(icmd)
except:
print(f'{this_source_file:s}: Problem with command: ')
print('self.spikeShape.keys(): ', self.spikeShape.keys())
print(' m = ', m)
print(' n = ', n)
print(' current? ', self.spikeShape[m][n]['current'])
raise ValueError(f'{this_source_file:s}:getIVCurrentThresholds - icmd seems to be ? : ', icmd)
imin = np.min(icmd)
ia150 = np.argmin(np.abs(1.5*imin-icmd))
iacmdthr = np.argmin(np.abs(imin-self.Clamps.values))
ia150cmdthr = np.argmin(np.abs(icmd[ia150] - self.Clamps.values))
return (iacmdthr, ia150cmdthr) # return threshold indices into self.Clamps.values array at threshold and 150% point
def getClassifyingInfo(self):
"""
Adds the classifying information according to Druckmann et al., Cerebral Cortex, 2013
to the analysis summary
Parameters
----------
None
Returns
-------
Nothing
Modifies the class analysis_summary dictionary to contain a number of results
regarding the AP train, including the first and second spike latency,
the first and second spike halfwidths, the firing rate at 150% of threshold,
and the depth of the AHP
"""
(jthr, j150) = self.getIVCurrentThresholds() # get the indices for the traces we need to pull data from
jthr = int(jthr)
j150 = int(j150)
if j150 not in list(self.spikeShape.keys()):
return
if jthr == j150 and self.verbose:
#print '\n%s:' % self.filename
print('Threshold current T and 1.5T the same: using next up value for j150')
print('jthr, j150, len(spikeShape): ', jthr, j150, len(self.spikeShape))
print('1 ', self.spikeShape[jthr][0]['current']*1e12)
print('2 ', self.spikeShape[j150+1][0]['current']*1e12)
print(' >> Threshold current: %8.3f 1.5T current: %8.3f, next up: %8.3f' % (self.spikeShape[jthr][0]['current']*1e12,
self.spikeShape[j150][0]['current']*1e12, self.spikeShape[j150+1][0]['current']*1e12))
j150 = jthr + 1
spikesfound = False
if len(self.spikeShape[j150]) >= 1 and (0 in list(self.spikeShape[j150].keys())) and self.spikeShape[j150][0]['halfwidth'] is not None:
self.analysis_summary['AP1_Latency'] = (self.spikeShape[j150][0]['AP_Latency'] - self.spikeShape[j150][0]['tstart'])*1e3
self.analysis_summary['AP1_HalfWidth'] = self.spikeShape[j150][0]['halfwidth']*1e3
if self.spikeShape[j150][0]['halfwidth_interpolated'] is not None:
self.analysis_summary['AP1_HalfWidth_interpolated'] = self.spikeShape[j150][0]['halfwidth_interpolated']*1e3
else:
self.analysis_summary['AP1_HalfWidth_interpolated'] = np.nan
spikesfound = True
if len(self.spikeShape[j150]) >= 2 and (1 in list(self.spikeShape[j150].keys())) and self.spikeShape[j150][1]['halfwidth'] is not None:
self.analysis_summary['AP2_Latency'] = (self.spikeShape[j150][1]['AP_Latency'] - self.spikeShape[j150][1]['tstart'])*1e3
self.analysis_summary['AP2_HalfWidth'] = self.spikeShape[j150][1]['halfwidth']*1e3
if self.spikeShape[j150][1]['halfwidth_interpolated'] is not None:
self.analysis_summary['AP2_HalfWidth_interpolated'] = self.spikeShape[j150][1]['halfwidth_interpolated']*1e3
else:
self.analysis_summary['AP2_HalfWidth_interpolated'] = np.nan
if spikesfound:
rate = len(self.spikeShape[j150])/self.spikeShape[j150][0]['pulseDuration'] # spikes per second, normalized for pulse duration
AHPDepth = self.spikeShape[j150][0]['AP_beginV'] - self.spikeShape[j150][0]['trough_V'] # from first spike # first AHP depth
print(f"AHP: Begin = {self.spikeShape[j150][0]['AP_beginV']*1e3:.2f} mV")
print(f" Trough = {self.spikeShape[j150][0]['trough_V']*1e3:.2f} mV")
print(f" Depth = {AHPDepth*1e3:.2f} mV")
self.analysis_summary['FiringRate_1p5T'] = rate
self.analysis_summary['AHP_Depth'] = AHPDepth*1e3 # convert to mV
def fitOne(self, x=None, yd=None, info='', function=None, fixNonMonotonic=True, excludeNonMonotonic=False):
"""Fit the FI plot to an equation that is piecewise linear up to the threshold
called Ibreak, then (1-exp(F/Frate)) for higher currents
Parameters
----------
x : numpy array (no default)
The x data to fit (typically an array of current levels)
yd : numpy array (no default)
The y data to fit (typically an array of spike counts)
If x and yd are None, we extract them from the 'FI_Curve' for this cell.
info : string (default: '')
information to add to a fitted plot
fixNonMonotonic : Boolean (default: True)
If True, only use data up to the maximal firing rate,
discarding the remainder of the steps under the assumption
that the cell is entering depolarization block.
excludeNonMonotonic : Boolean (default: False)
if True, does not even try to fit, and returns None
Returns
-------
None if there is no fitting to be done (excluding non-monotonic or no spikes)
tuple of (fpar, xf, yf, names, error, f, func)
These are the fit parameters
"""
# print('fitone called')
if function is not None:
self.FIGrowth = function
if x is None: # use class data
x = self.analysis_summary['FI_Curve'][0]*1e9
yd = self.analysis_summary['FI_Curve'][1]/self.analysis_summary['pulseDuration'] # convert to rate in spikes/second
if self.FIGrowth == 'fitOneOriginal':
ymax = np.max(yd)
ymax_a = 0.8*ymax
if ymax <= 0.:
return(None)
nonmono = 0
if fixNonMonotonic: # clip at max firing rate
ydiff = np.gradient(yd, x)
# Copyright 2019 The Johns Hopkins University Applied Physics Laboratory
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#!/usr/bin/env python
import argparse
import configparser
import os
import sys
import tempfile
import math
import boto3
from intern.remote.boss import BossRemote
from intern.resource.boss.resource import *
import numpy as np
from requests import HTTPError
def _generate_config(token, args):
boss_host = os.getenv("BOSSDB_HOST", args.host)
print(boss_host)
cfg = configparser.ConfigParser()
cfg["Project Service"] = {}
cfg["Metadata Service"] = {}
cfg["Volume Service"] = {}
project = cfg["Project Service"]
project["protocol"] = "https"
project["host"] = boss_host
project["token"] = token
metadata = cfg["Metadata Service"]
metadata["protocol"] = "https"
metadata["host"] = boss_host
metadata["token"] = token
volume = cfg["Volume Service"]
volume["protocol"] = "https"
volume["host"] = boss_host
volume["token"] = token
return cfg
def boss_pull_cutout(args):
if args.config:
rmt = BossRemote(args.config)
else:
cfg = _generate_config(args.token, args)
with open("intern.cfg", "w") as f:
cfg.write(f)
rmt = BossRemote("intern.cfg")
COLL_NAME = args.coll
EXP_NAME = args.exp
CHAN_NAME = args.chan
# Create or get a channel to write to
chan_setup = ChannelResource(
CHAN_NAME, COLL_NAME, EXP_NAME, type=args.itype, datatype=args.dtype
)
try:
chan_actual = rmt.get_project(chan_setup)
except HTTPError:
chan_actual = rmt.create_project(chan_setup)
# get coordinate frame to determine padding bounds
cfr = CoordinateFrameResource(args.coord)
cfr_actual = rmt.get_project(cfr)
x_min_bound = cfr_actual.x_start
x_max_bound = cfr_actual.x_stop
y_min_bound = cfr_actual.y_start
y_max_bound = cfr_actual.y_stop
z_min_bound = cfr_actual.z_start
z_max_bound = cfr_actual.z_stop
print("Data model setup.")
xmin = np.max([x_min_bound, args.xmin - args.padding])
xmax = np.min([x_max_bound, args.xmax + args.padding])
x_rng = [xmin, xmax]
ymin = np.max([y_min_bound, args.ymin - args.padding])
ymax = np.min([y_max_bound, args.ymax + args.padding])
y_rng = [ymin, ymax]
zmin = np.max([z_min_bound, args.zmin - args.padding])
zmax = np.min([z_max_bound, args.zmax + args.padding])
z_rng = [zmin, zmax]
# Verify that the cutout uploaded correctly.
attempts = 0
while attempts < 3:
try:
cutout_data = rmt.get_cutout(chan_actual, args.res, x_rng, y_rng, z_rng)
break
except HTTPError as e:
if attempts < 3:
attempts += 1
print("Obtained HTTP error from server. Trial {}".format(attempts))
else:
print("Failed 3 times: {}".format(e))
# Data will be in Z,Y,X format
# Change to X,Y,Z for pipeline
cutout_data = np.transpose(cutout_data, (2, 1, 0))
def _upload(f):
print("Uploading to s3:/{}/{}".format(args.bucket, args.output))
s3 = boto3.resource("s3")
f.seek(0, 0)
s3.Object(args.bucket, args.output).put(Body=f)
# Clean up.
if args.bucket and args.s3_only:
with tempfile.TemporaryFile() as f:
np.save(f, cutout_data)
_upload(f)
else:
with open(args.output, "w+b") as f:
np.save(f, cutout_data)
if args.bucket:
_upload(f)
# here we push a subset of padded data back to BOSS
def boss_push_cutout(args):
if args.config:
rmt = BossRemote(args.config)
else:
cfg = _generate_config(args.token, args)
with open("intern.cfg", "w") as f:
cfg.write(f)
rmt = BossRemote("intern.cfg")
# data is desired range
if args.bucket:
s3 = boto3.resource("s3")
with tempfile.TemporaryFile() as f:
s3.Bucket(args.bucket).download_fileobj(args.input, f)
f.seek(0, 0)
data = np.load(f)
else:
data = np.load(args.input)
numpyType = np.uint8
if args.dtype == "uint32":
numpyType = np.uint32
elif args.dtype == "uint64":
numpyType = np.uint64
if data.dtype != args.dtype:
data = data.astype(numpyType)
sources = []
if args.source:
sources.append(args.source)
COLL_NAME = args.coll
EXP_NAME = args.exp
CHAN_NAME = args.chan
# Create or get a channel to write to
chan_setup = ChannelResource(
CHAN_NAME,
COLL_NAME,
EXP_NAME,
type=args.itype,
datatype=args.dtype,
sources=sources,
)
try:
chan_actual = rmt.get_project(chan_setup)
except HTTPError:
chan_actual = rmt.create_project(chan_setup)
# get coordinate frame to determine padding bounds
cfr = CoordinateFrameResource(args.coord)
cfr_actual = rmt.get_project(cfr)
x_min_bound = cfr_actual.x_start
x_max_bound = cfr_actual.x_stop
y_min_bound = cfr_actual.y_start
y_max_bound = cfr_actual.y_stop
z_min_bound = cfr_actual.z_start
z_max_bound = cfr_actual.z_stop
print("Data model setup.")
# Ranges use the Python convention where the number after the : is the stop
# value. Thus, x_rng specifies x values where: 0 <= x < 8.
data_shape = data.shape # with padding, will be bigger than needed
# find data cutoffs to get rid of padding
# if nmin = 0, this means that the data wasn't padded on there to begin with
xstart = args.padding if args.xmin != 0 else 0
ystart = args.padding if args.ymin != 0 else 0
zstart = args.padding if args.zmin != 0 else 0
xend = data_shape[0] - args.padding
yend = data_shape[1] - args.padding
zend = data_shape[2] - args.padding
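# Worked example of the trim above (illustrative, assumed numbers): for a 512-voxel
# wide block padded by padding=10 on both sides, data_shape[0] == 532, so xstart=10
# and xend=522, recovering the 512 interior voxels; a block starting at the coordinate
# frame origin (args.xmin == 0) was never padded on that side, so xstart stays 0.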
# xstart = np.min([args.padding,args.xmin-x_min_bound])
# xend = np.max([data.shape[0]-args.padding,data.shape[0]-(x_max_bound-args.xmax)])
# ystart = np.min([args.padding,args.ymin-y_min_bound])
# yend = np.max([data.shape[1]-args.padding,data.shape[1]-(y_max_bound-args.ymax)])
# zstart = np.min([args.padding,args.zmin-z_min_bound])
# zend = np.max([data.shape[2]-args.padding,data.shape[2]-(z_max_bound-args.zmax)])
# get range which will be uploaded
x_rng = [args.xmin, args.xmax]
y_rng = [args.ymin, args.ymax]
z_rng = [args.zmin, args.zmax]
# Pipeline Data will be in X,Y,Z format
# Change to Z,Y,X for upload
data = np.transpose(data, (2, 1, 0))
data = data[zstart:zend, ystart:yend, xstart:xend]
data = data.copy(order="C")
# Verify that the cutout uploaded correctly.
attempts = 0
while attempts < 3:
try:
rmt.create_cutout(chan_actual, args.res, x_rng, y_rng, z_rng, data)
break
except HTTPError as e:
if attempts < 3:
attempts += 1
print("These are the dimensions: ")
print(data.shape)
print("This is the data type:")
print(data.dtype)
print("Specified data type was:")
print(args.dtype)
print("Specified image type")
print(args.itype)
print("Obtained HTTP error from server. Trial {}".format(attempts))
print("The error: {}".format(e))
else:
raise Exception("Failed 3 times: {}".format(e))
# Clean up.
"""boss_merge_xbrain
Here we push a subset of padded data back to BOSS, merging with existing data in BOSS in padded region
Merging here is for XBrain only, will need to work more on EM
Here we are pushing a block of data into a channel which has other blocks of data, possibly already in it
The blocks are padded to detect objects at the edges
This requires merging of the blocks
Note that currently it is assumed that all cells are detected using a single templatesize value.
Overlapping cells should already be handled by cell_detect, where the dilation dictionary zeros out around a detected cell
Inputs:
xmin: integer index of start of data block, before padding (in coordinate frame of raw data channel)
xmax: integer index of end of data block, before padding (in coordinate frame of raw data channel)
ymin: integer index of start of data block, before padding (in coordinate frame of raw data channel)
ymax: integer index of end of data block, before padding (in coordinate frame of raw data channel)
zmin: integer index of start of data block, before padding (in coordinate frame of raw data channel)
zmax: integer index of end of data block, before padding (in coordinate frame of raw data channel)
Padding: integer amount of padding added to each side of block (if valid given the channel boundaries)
One sided: int, if 1, only merge on the max edge of block. This prevents duplicates in the padding zone. If zero, merge all edges, risking duplicates
templatesize: integer value giving initial diameter of cell detection templates. Padding should be at least templatesize+1
input: dense cell detection output from cell detection step
centroids: centroid location output from cell detection step (coordinates are referenced to the padded block)
Output:
output: centroid output, with any cells on border removed, and coordinates shifted into frame of raw data
Side effect:
Data is uploaded to the BOSS channel, covering [xmin:xmax,ymin:ymax,zmin:zmax]. Any padded regions around this block are also merged
"""
def boss_merge_xbrain(args):
# Verify that the cutout uploaded correctly.
def pull_margin_cutout(chan_actual, x_rng, y_rng, z_rng):
attempts = 0
while attempts < 3:
try:
cutout_data = rmt.get_cutout(chan_actual, 0, x_rng, y_rng, z_rng)
break
except HTTPError as e:
if attempts < 3:
attempts += 1
print("Obtained HTTP error from server. Trial {}".format(attempts))
else:
print("Failed 3 times: {}".format(e))
# Data will be in Z,Y,X format
# Change to X,Y,Z for pipeline
cutout_data = np.transpose(cutout_data, (2, 1, 0))
return cutout_data
templatesize = args.templatesize
if args.config:
rmt = BossRemote(args.config)
else:
cfg = _generate_config(args.token, args)
with open("intern.cfg", "w") as f:
cfg.write(f)
rmt = BossRemote("intern.cfg")
# data is desired range
if args.bucket:
s3 = boto3.resource("s3")
with tempfile.TemporaryFile() as f:
s3.Bucket(args.bucket).download_fileobj(args.input, f)
f.seek(0, 0)
data = np.load(f)
with tempfile.TemporaryFile() as f:
s3.Bucket(args.bucket).download_fileobj(args.centroids, f)
f.seek(0, 0)
centroids = np.load(f)
else:
data = np.load(args.input)
centroids = np.load(args.centroids)
COLL_NAME = args.coll
EXP_NAME = args.exp
CHAN_NAME = args.chan
# Create or get a channel to write to
chan_setup = ChannelResource(
CHAN_NAME, COLL_NAME, EXP_NAME, type=args.itype, datatype=args.dtype
)
try:
chan_actual = rmt.get_project(chan_setup)
except HTTPError:
chan_actual = rmt.create_project(chan_setup)
# get coordinate frame to determine padding bounds
cfr = CoordinateFrameResource(args.coord)
cfr_actual = rmt.get_project(cfr)
x_min_bound = cfr_actual.x_start
x_max_bound = cfr_actual.x_stop
y_min_bound = cfr_actual.y_start
y_max_bound = cfr_actual.y_stop
z_min_bound = cfr_actual.z_start
z_max_bound = cfr_actual.z_stop
# coordinates of data block in original coordinate frame, before padding
x_block = [args.xmin, args.xmax]
y_block = [args.ymin, args.ymax]
z_block = [args.zmin, args.zmax]
# Coordinates of data block with padding in original coordinate frame
x_block_pad = [
np.amax([args.xmin - args.padding, x_min_bound]),
np.amin([args.xmax + args.padding, x_max_bound]),
]
y_block_pad = [
np.amax([args.ymin - args.padding, y_min_bound]),
np.amin([args.ymax + args.padding, y_max_bound]),
]
z_block_pad = [
np.amax([args.zmin - args.padding, z_min_bound]),
np.amin([args.zmax + args.padding, z_max_bound]),
]
from unittest import TestCase
import rasterio
import numpy as np
import niche_vlaanderen
import pytest
from niche_vlaanderen.exception import NicheException
def raster_to_numpy(filename):
"""Read a GDAL grid as numpy array
Notes
------
No-data values are -99 for integer types and np.nan for real types.
"""
with rasterio.open(filename) as ds:
data = ds.read(1)
nodata = ds.nodatavals[0]
print(nodata)
# create a mask for no-data values, taking into account the data-types
if data.dtype == 'float32':
data[np.isclose(data, nodata)] = np.nan
else:
data[np.isclose(data, nodata)] = -99
return data
class testAcidity(TestCase):
def test_get_soil_mlw(self):
mlw = np.array([50, 66])
soil_code = np.array([14, 7])
a = niche_vlaanderen.Acidity()
result = a._calculate_soil_mlw(soil_code, mlw)
np.testing.assert_equal(np.array([1, 9]), result)
def test_get_soil_mlw_borders(self):
mlw = np.array([79, 80, 100, 110, 111])
soil_code = np.array([14, 14, 14, 14, 14])
a = niche_vlaanderen.Acidity()
result = a._calculate_soil_mlw(soil_code, mlw)
expected = np.array([1, 1, 2, 2, 3])
np.testing.assert_equal(expected, result)
def test_acidity_partial(self):
rainwater = np.array([0])
minerality = np.array([1])
inundation = np.array([1])
seepage = np.array([1])
soil_mlw = np.array([1])
a = niche_vlaanderen.Acidity()
result = a._get_acidity(rainwater, minerality, inundation,
seepage, soil_mlw)
np.testing.assert_equal(np.array([3]), result)
def test_seepage_code(self):
seepage = np.array([5, 0.3, 0.05, -0.04, -0.2, -5])
a = niche_vlaanderen.Acidity()
result = a._get_seepage(seepage)
expected = np.array([1, 1, 1, 1, 2, 3])
np.testing.assert_equal(expected, result)
def test_acidity(self):
rainwater = np.array([0])
minerality = np.array([0])
soilcode = np.array([14])
inundation = np.array([1])
seepage = np.array([20])
mlw = np.array([50])
a = niche_vlaanderen.Acidity()
result = a.calculate(soilcode, mlw, inundation, seepage, minerality,
rainwater)
np.testing.assert_equal(3, result)
def test_acidity_testcase(self):
a = niche_vlaanderen.Acidity()
inputdir = "testcase/zwarte_beek/input/"
soil_code = raster_to_numpy(inputdir + "soil_code.asc")
soil_code_r = soil_code
soil_code_r[soil_code > 0] = np.round(soil_code / 10000)[soil_code > 0]
mlw = raster_to_numpy(inputdir + "mlw.asc")
inundation = \
raster_to_numpy(inputdir + "inundation.asc")
rainwater = raster_to_numpy(inputdir + "nullgrid.asc")
seepage = raster_to_numpy(inputdir + "seepage.asc")
minerality = raster_to_numpy(inputdir + "minerality.asc")
acidity = raster_to_numpy("testcase/zwarte_beek/abiotic/acidity.asc")
acidity[np.isnan(acidity)] = 255
acidity[acidity == -99] = 255
result = a.calculate(soil_code_r, mlw, inundation, seepage,
minerality, rainwater)
np.testing.assert_equal(acidity, result)
def test_acidity_invalidsoil(self):
a = niche_vlaanderen.Acidity()
rainwater = np.array([0])
minerality = np.array([0])
soilcode = np.array([-1])
inundation = np.array([1])
seepage = np.array([20])
mlw = np.array([50])
a = niche_vlaanderen.Acidity()
with pytest.raises(NicheException):
a.calculate(soilcode, mlw, inundation, seepage, minerality,
rainwater)
def test_acidity_invalidminerality(self):
a = niche_vlaanderen.Acidity()
rainwater = np.array([0])
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for models."""
import functools
import pathlib
import gin
import jax.numpy as jnp
import jax.random as jrandom
import numpy as np
import tensorflow.compat.v1 as tf
from protein_lm import data
from protein_lm import domains
from protein_lm import evaluation
from protein_lm import models
from protein_lm import modules
tf.enable_eager_execution()
lm_cfg = dict(
batch_size=4, num_layers=1, num_heads=2, emb_dim=8, mlp_dim=8, qkv_dim=8)
lm_cls = functools.partial(models.FlaxLM, **lm_cfg)
def _test_domain():
return domains.FixedLengthDiscreteDomain(length=3, vocab_size=4)
class FlaxRegressionTest(tf.test.TestCase):
def setUp(self):
cls = functools.partial(
models.FlaxModel,
pmap=False,
with_bos=True,
output_head=('logits', 'regression'),
**lm_cfg)
self._domain = domains.FixedLengthDiscreteDomain(length=6, vocab_size=4)
lm = cls(domain=self._domain)
self.lm = lm
super().setUp()
def test_fit(self):
bos = self.lm.bos_token
xs = np.array([
[bos, 0, 1, 0, 1, 0, 1],
[bos, 1, 0, 1, 0, 1, 0],
])
inputs = xs[:, :-1]
targets = xs[:, 1:]
reg_targets = np.cumsum(targets, axis=-1)[:, :-1]
reg_targets = jnp.pad(
reg_targets, [[0, 0], [1, 0]],
mode='constant',
constant_values=jnp.asarray(0, dtype=jnp.float32))
batch = dict(
inputs=inputs,
targets=dict(classification=targets, regression=reg_targets),
weights=dict(
classification=np.ones_like(targets),
regression=np.ones_like(reg_targets)))
outputs = self.lm.preprocess(batch, mode=models.Mode.train, rng=None)
print(outputs)
metrics = self.lm.fit_batch(batch)
print(metrics)
metrics = self.lm.fit_batch(batch)
print(metrics)
class FlaxModelTaggingTest(tf.test.TestCase):
def test_tag_attention(self):
# TODO(ddohan): Consider making decorator which tracks tensor distributions.
def tagging_dot_product_attention(query, key, value, **kwargs):
modules.Tag(jnp.mean(query), name='mean_query')
modules.Tag(jnp.mean(query), name='mean_key')
modules.Tag(jnp.mean(query), name='mean_value')
return modules.nn.attention.dot_product_attention(query, key, value,
**kwargs)
domain = _test_domain()
lm = models.FlaxModel(
domain=domain, attention_fn=tagging_dot_product_attention, **lm_cfg)
xs = domain.sample_uniformly(4)
metrics = []
for _ in range(2):
step_metrics = lm.fit_batch((xs, xs))
metrics.append(step_metrics)
combined_metrics = evaluation.combine_metrics(metrics)
# Confirm metrics are included.
self.assertIn('nn/1/1/mean_query', combined_metrics)
self.assertIn('nn/1/1/mean_key', combined_metrics)
self.assertIn('nn/1/1/mean_value', combined_metrics)
# Check they are averaged rather than normalized by denominator.
# key = 'nn/1/1/mean_value'
# avg = (metrics[0][key] + metrics[1][key]) / 2.0
# self.assertAlmostEqual(avg, combined_metrics[key])
class FlaxModelBaseTest(tf.test.TestCase):
def setUp(self):
cls = functools.partial(models.FlaxModel, **lm_cfg)
self._domain = _test_domain()
lm = cls(domain=self._domain)
self.lm = lm
self.xs = np.array([
[1, 1, 0],
[1, 1, 1],
])
super().setUp()
def test_fit(self):
self.lm.fit((self.xs, self.xs), epochs=4, batch_size=2)
def test_score(self):
scores = self.lm.score(self.xs)
self.assertEqual((2, 3, self._domain.vocab_size), scores.shape)
def test_evaluate(self):
metrics = self.lm.evaluate_batch((self.xs, self.xs))
self.assertIn('accuracy', metrics)
self.assertEqual(jnp.sum(jnp.ones_like(self.xs)), metrics['denominator'])
metrics = self.lm.evaluate_batch(
(self.xs, self.xs, jnp.zeros_like(self.xs)))
self.assertEqual(0, metrics['denominator'])
class BERTTest(tf.test.TestCase):
def setUp(self):
cls = functools.partial(models.FlaxBERT, **lm_cfg)
self._domain = domains.VariableLengthDiscreteDomain(
vocab=domains.ProteinVocab(
include_anomalous_amino_acids=True,
include_bos=True,
include_eos=True,
include_pad=True,
include_mask=True),
length=3)
lm = cls(domain=self._domain, grad_clip=1.0)
self.lm = lm
self.xs = np.array([
[1, 1, 0],
])
super().setUp()
def test_fit(self):
self.lm.fit(self.xs, epochs=4, batch_size=2)
def test_score(self):
scores = self.lm.score(self.xs)
self.assertEqual((1, 3, self._domain.vocab_size), scores.shape)
def test_sample(self):
mask = self._domain.vocab.mask
xs = np.array([
[mask, 1, 0],
[1, mask, 1],
[1, 1, mask],
])
rng = jrandom.PRNGKey(0)
samples = self.lm.sample(xs, rng=rng)
self.assertAllEqual(xs.shape, samples.shape)
# Check masked positions are filled in.
self.assertNotEqual(samples[0, 0], mask)
self.assertNotEqual(samples[1, 1], mask)
self.assertNotEqual(samples[2, 2], mask)
unmasked = xs != mask
# Unmasked positions are the same.
self.assertAllEqual(xs[unmasked], samples[unmasked])
def test_evaluate(self):
metrics = self.lm.evaluate_batch(self.xs)
self.assertIn('accuracy', metrics)
class BERTMaskingTest(tf.test.TestCase):
def setUp(self):
self._domain = data.protein_domain
v = data.protein_domain.vocab
pad = v.pad
print(v.pad)
self._xs = jnp.array([[0, 0, 0, 0, 0, 0, pad, pad],
[0, 0, 0, 0, 0, pad, pad, pad]])
super().setUp()
def test_all_mask(self):
"""Test masking with MASK values."""
xs = self._xs
v = self._domain.vocab
masker = models.BertMasker(
self._domain,
mask_rate=1.0,
mask_token_proportion=1.0,
random_token_proportion=0.0)
for k in range(10):
rng = jrandom.PRNGKey(k)
inputs, outputs, weights = masker(xs, rng=rng, mode=models.Mode.train)
self.assertAllEqual((xs == v.pad), (inputs == v.pad))
self.assertAllEqual((xs != v.pad), (inputs == v.mask))
self.assertAllEqual(xs != v.pad, weights)
self.assertAllEqual(xs, outputs)
def test_all_normal(self):
"""Test masking with random values."""
xs = self._xs
v = self._domain.vocab
# Check
masker = models.BertMasker(
self._domain,
mask_rate=1.0,
mask_token_proportion=0.0,
random_token_proportion=1.0)
for k in range(10):
rng = jrandom.PRNGKey(k)
inputs, outputs, weights = masker(xs, rng=rng, mode=models.Mode.train)
is_normal = np.isin(inputs, masker._normal_tokens)
self.assertAllEqual((xs == v.pad), (inputs == v.pad))
self.assertAllEqual((xs != v.pad), is_normal)
self.assertAllEqual(xs != v.pad, weights)
self.assertAllEqual(xs, outputs)
def test_all_identity(self):
"""Test no-mask case (maintaining original values)."""
xs = self._xs
v = self._domain.vocab
masker = models.BertMasker(
self._domain,
mask_rate=1.0,
mask_token_proportion=0.0,
random_token_proportion=0.0)
for k in range(10):
rng = jrandom.PRNGKey(k)
inputs, outputs, weights = masker(xs, rng=rng, mode=models.Mode.train)
self.assertAllEqual(xs, inputs)
self.assertAllEqual(xs != v.pad, weights)
self.assertAllEqual(xs, outputs)
class UtilTest(tf.test.TestCase):
def test_lr_schedule(self):
"""Test passing learning rate function as learning_rate."""
cls = functools.partial(models.FlaxLM, **lm_cfg)
lm = cls(
domain=_test_domain(),
learning_rate=models.utils.create_learning_rate_scheduler(),
grad_clip=1.0)
xs = np.array([
[1, 1, 0],
])
lm.fit(xs, epochs=4, batch_size=2)
class LMTest(tf.test.TestCase):
def setUp(self):
cls = functools.partial(models.FlaxLM, **lm_cfg)
lm = cls(domain=_test_domain(), grad_clip=1.0)
self.lm = lm
self.xs = np.array([
[1, 1, 0],
])
super().setUp()
def test_fit(self):
self.lm.fit(self.xs, epochs=4, batch_size=2)
def test_sample(self):
samples = self.lm.sample(2)
self.assertEqual((2, 3), samples.shape)
def test_score(self):
scores = self.lm.score(self.xs)
self.assertEqual((1, 3, 5), scores.shape)
def test_evaluate(self):
metrics = self.lm.evaluate_batch(self.xs)
self.assertIn('accuracy', metrics)
def test_overfit(self):
domain = domains.VariableLengthDiscreteDomain(
vocab=domains.Vocabulary(
tokens=['a', 'b', 'c'], include_bos=True, include_eos=True),
length=9)
seqs = [
list('abcabcab'),
list('bbbbbb'),
list('cbacbacb'),
]
enc = domain.encode(seqs, pad=True)
self.assertAllEqual(
[[0, 1, 2, 0, 1, 2, 0, 1, 4],
[1, 1, 1, 1, 1, 1, 4, 4, 4],
[2, 1, 0, 2, 1, 0, 2, 1, 4]
], enc)
enc = np.array(enc)
model = lm_cls(
domain=domain,
learning_rate=0.01,
dropout_rate=0.0,
attention_dropout_rate=0.0)
for _ in range(100):
metrics = model.fit_batch(enc)
# 2 less than perfect because the first token is unpredictable given just
# <BOS>, and there are 3 total examples.
denom = metrics['denominator'][0]
correct = metrics['accuracy'][0]
self.assertEqual((denom - 2)/denom, correct / denom)
def test_bos_does_not_appear_in_fixed_len_output(self):
"""Tests that BOS is overridden in fixed length domain samples."""
domain = domains.FixedLengthDiscreteDomain(vocab_size=2, length=10)
lm = lm_cls(domain=domain)
samples = lm.sample(10)
for sample in samples:
self.assertNotIn(lm.bos_token, sample)
def test_bos_does_not_appear_in_var_len_output(self):
"""Tests that BOS is not used for padding in var-len domain samples."""
domain = domains.VariableLengthDiscreteDomain(
vocab=domains.Vocabulary(tokens=[0, 1], include_eos=True),
length=10,
)
lm = lm_cls(domain=domain)
samples = lm.sample(10)
for sample in samples:
self.assertNotIn(lm.bos_token, sample)
def test_only_eos_after_eos(self):
"""Tests that the characters found after EOS are all equal to EOS."""
domain = domains.VariableLengthDiscreteDomain(
vocab=domains.Vocabulary(tokens=[0, 1], include_eos=True),
length=10,
)
lm = lm_cls(domain=domain)
samples = lm.sample(10)
for sample in samples:
if lm.eos_token in sample:
start_eos = np.argwhere(sample == lm.eos_token)
import unittest
import qteasy as qt
import pandas as pd
from pandas import Timestamp
import numpy as np
import math
from numpy import int64
import itertools
import datetime
from qteasy.utilfuncs import list_to_str_format, regulate_date_format, time_str_format, str_to_list
from qteasy.utilfuncs import maybe_trade_day, is_market_trade_day, prev_trade_day, next_trade_day
from qteasy.utilfuncs import next_market_trade_day, unify, mask_to_signal, list_or_slice, labels_to_dict
from qteasy.utilfuncs import weekday_name, prev_market_trade_day, is_number_like, list_truncate, input_to_list
from qteasy.space import Space, Axis, space_around_centre, ResultPool
from qteasy.core import apply_loop
from qteasy.built_in import SelectingFinanceIndicator, TimingDMA, TimingMACD, TimingCDL, TimingTRIX
from qteasy.tsfuncs import income, indicators, name_change, get_bar
from qteasy.tsfuncs import stock_basic, trade_calendar, new_share, get_index
from qteasy.tsfuncs import balance, cashflow, top_list, index_indicators, composite
from qteasy.tsfuncs import future_basic, future_daily, options_basic, options_daily
from qteasy.tsfuncs import fund_basic, fund_net_value, index_basic, stock_company
from qteasy.evaluate import eval_alpha, eval_benchmark, eval_beta, eval_fv
from qteasy.evaluate import eval_info_ratio, eval_max_drawdown, eval_sharp
from qteasy.evaluate import eval_volatility
from qteasy.tafuncs import bbands, dema, ema, ht, kama, ma, mama, mavp, mid_point
from qteasy.tafuncs import mid_price, sar, sarext, sma, t3, tema, trima, wma, adx, adxr
from qteasy.tafuncs import apo, bop, cci, cmo, dx, macd, macdext, aroon, aroonosc
from qteasy.tafuncs import macdfix, mfi, minus_di, minus_dm, mom, plus_di, plus_dm
from qteasy.tafuncs import ppo, roc, rocp, rocr, rocr100, rsi, stoch, stochf, stochrsi
from qteasy.tafuncs import trix, ultosc, willr, ad, adosc, obv, atr, natr, trange
from qteasy.tafuncs import avgprice, medprice, typprice, wclprice, ht_dcperiod
from qteasy.tafuncs import ht_dcphase, ht_phasor, ht_sine, ht_trendmode, cdl2crows
from qteasy.tafuncs import cdl3blackcrows, cdl3inside, cdl3linestrike, cdl3outside
from qteasy.tafuncs import cdl3starsinsouth, cdl3whitesoldiers, cdlabandonedbaby
from qteasy.tafuncs import cdladvanceblock, cdlbelthold, cdlbreakaway, cdlclosingmarubozu
from qteasy.tafuncs import cdlconcealbabyswall, cdlcounterattack, cdldarkcloudcover
from qteasy.tafuncs import cdldoji, cdldojistar, cdldragonflydoji, cdlengulfing
from qteasy.tafuncs import cdleveningdojistar, cdleveningstar, cdlgapsidesidewhite
from qteasy.tafuncs import cdlgravestonedoji, cdlhammer, cdlhangingman, cdlharami
from qteasy.tafuncs import cdlharamicross, cdlhighwave, cdlhikkake, cdlhikkakemod
from qteasy.tafuncs import cdlhomingpigeon, cdlidentical3crows, cdlinneck
from qteasy.tafuncs import cdlinvertedhammer, cdlkicking, cdlkickingbylength
from qteasy.tafuncs import cdlladderbottom, cdllongleggeddoji, cdllongline, cdlmarubozu
from qteasy.tafuncs import cdlmatchinglow, cdlmathold, cdlmorningdojistar, cdlmorningstar
from qteasy.tafuncs import cdlonneck, cdlpiercing, cdlrickshawman, cdlrisefall3methods
from qteasy.tafuncs import cdlseparatinglines, cdlshootingstar, cdlshortline, cdlspinningtop
from qteasy.tafuncs import cdlstalledpattern, cdlsticksandwich, cdltakuri, cdltasukigap
from qteasy.tafuncs import cdlthrusting, cdltristar, cdlunique3river, cdlupsidegap2crows
from qteasy.tafuncs import cdlxsidegap3methods, beta, correl, linearreg, linearreg_angle
from qteasy.tafuncs import linearreg_intercept, linearreg_slope, stddev, tsf, var, acos
from qteasy.tafuncs import asin, atan, ceil, cos, cosh, exp, floor, ln, log10, sin, sinh
from qteasy.tafuncs import sqrt, tan, tanh, add, div, max, maxindex, min, minindex, minmax
from qteasy.tafuncs import minmaxindex, mult, sub, sum
from qteasy.history import get_financial_report_type_raw_data, get_price_type_raw_data
from qteasy.history import stack_dataframes, dataframe_to_hp, HistoryPanel
from qteasy.database import DataSource
from qteasy.strategy import Strategy, SimpleTiming, RollingTiming, SimpleSelecting, FactoralSelecting
from qteasy._arg_validators import _parse_string_kwargs, _valid_qt_kwargs
from qteasy.blender import _exp_to_token, blender_parser, signal_blend
class TestCost(unittest.TestCase):
def setUp(self):
self.amounts = np.array([10000., 20000., 10000.])
self.op = np.array([0., 1., -0.33333333])
self.amounts_to_sell = np.array([0., 0., -3333.3333])
self.cash_to_spend = np.array([0., 20000., 0.])
self.prices = np.array([10., 20., 10.])
self.r = qt.Cost(0.0)
def test_rate_creation(self):
"""测试对象生成"""
print('testing rates objects\n')
self.assertIsInstance(self.r, qt.Cost, 'Type should be Rate')
self.assertEqual(self.r.buy_fix, 0)
self.assertEqual(self.r.sell_fix, 0)
def test_rate_operations(self):
"""测试交易费率对象"""
self.assertEqual(self.r['buy_fix'], 0.0, 'Item got is incorrect')
self.assertEqual(self.r['sell_fix'], 0.0, 'Item got is wrong')
self.assertEqual(self.r['buy_rate'], 0.003, 'Item got is incorrect')
self.assertEqual(self.r['sell_rate'], 0.001, 'Item got is incorrect')
self.assertEqual(self.r['buy_min'], 5., 'Item got is incorrect')
self.assertEqual(self.r['sell_min'], 0.0, 'Item got is incorrect')
self.assertEqual(self.r['slipage'], 0.0, 'Item got is incorrect')
self.assertEqual(np.allclose(self.r.calculate(self.amounts),
[0.003, 0.003, 0.003]),
True,
'fee calculation wrong')
def test_rate_fee(self):
"""测试买卖交易费率"""
self.r.buy_rate = 0.003
self.r.sell_rate = 0.001
self.r.buy_fix = 0.
self.r.sell_fix = 0.
self.r.buy_min = 0.
self.r.sell_min = 0.
self.r.slipage = 0.
print('\nSell result with fixed rate = 0.001 and moq = 0:')
print(self.r.get_selling_result(self.prices, self.amounts_to_sell))
test_rate_fee_result = self.r.get_selling_result(self.prices, self.amounts_to_sell)
self.assertIs(np.allclose(test_rate_fee_result[0], [0., 0., -3333.3333]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_fee_result[1], 33299.999667, msg='result incorrect')
self.assertAlmostEqual(test_rate_fee_result[2], 33.333332999999996, msg='result incorrect')
print('\nSell result with fixed rate = 0.001 and moq = 1:')
print(self.r.get_selling_result(self.prices, self.amounts_to_sell, 1.))
test_rate_fee_result = self.r.get_selling_result(self.prices, self.amounts_to_sell, 1)
self.assertIs(np.allclose(test_rate_fee_result[0], [0., 0., -3333]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_fee_result[1], 33296.67, msg='result incorrect')
self.assertAlmostEqual(test_rate_fee_result[2], 33.33, msg='result incorrect')
print('\nSell result with fixed rate = 0.001 and moq = 100:')
print(self.r.get_selling_result(self.prices, self.amounts_to_sell, 100))
test_rate_fee_result = self.r.get_selling_result(self.prices, self.amounts_to_sell, 100)
self.assertIs(np.allclose(test_rate_fee_result[0], [0., 0., -3300]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_fee_result[1], 32967.0, msg='result incorrect')
self.assertAlmostEqual(test_rate_fee_result[2], 33, msg='result incorrect')
print('\nPurchase result with fixed rate = 0.003 and moq = 0:')
print(self.r.get_purchase_result(self.prices, self.cash_to_spend, 0))
test_rate_fee_result = self.r.get_purchase_result(self.prices, self.cash_to_spend, 0)
self.assertIs(np.allclose(test_rate_fee_result[0], [0., 997.00897308, 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_fee_result[1], -20000.0, msg='result incorrect')
self.assertAlmostEqual(test_rate_fee_result[2], 59.82053838484547, msg='result incorrect')
print('\nPurchase result with fixed rate = 0.003 and moq = 1:')
print(self.r.get_purchase_result(self.prices, self.cash_to_spend, 1))
test_rate_fee_result = self.r.get_purchase_result(self.prices, self.cash_to_spend, 1)
self.assertIs(np.allclose(test_rate_fee_result[0], [0., 997., 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_fee_result[1], -19999.82, msg='result incorrect')
self.assertAlmostEqual(test_rate_fee_result[2], 59.82, msg='result incorrect')
print('\nPurchase result with fixed rate = 0.003 and moq = 100:')
print(self.r.get_purchase_result(self.prices, self.cash_to_spend, 100))
test_rate_fee_result = self.r.get_purchase_result(self.prices, self.cash_to_spend, 100)
self.assertIs(np.allclose(test_rate_fee_result[0], [0., 900., 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_fee_result[1], -18054., msg='result incorrect')
self.assertAlmostEqual(test_rate_fee_result[2], 54.0, msg='result incorrect')
def test_min_fee(self):
"""测试最低交易费用"""
self.r.buy_rate = 0.
self.r.sell_rate = 0.
self.r.buy_fix = 0.
self.r.sell_fix = 0.
self.r.buy_min = 300
self.r.sell_min = 300
self.r.slipage = 0.
print('\npurchase result with fixed cost rate with min fee = 300 and moq = 0:')
print(self.r.get_purchase_result(self.prices, self.cash_to_spend, 0))
test_min_fee_result = self.r.get_purchase_result(self.prices, self.cash_to_spend, 0)
self.assertIs(np.allclose(test_min_fee_result[0], [0., 985, 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_min_fee_result[1], -20000.0, msg='result incorrect')
self.assertAlmostEqual(test_min_fee_result[2], 300.0, msg='result incorrect')
print('\npurchase result with fixed cost rate with min fee = 300 and moq = 10:')
print(self.r.get_purchase_result(self.prices, self.cash_to_spend, 10))
test_min_fee_result = self.r.get_purchase_result(self.prices, self.cash_to_spend, 10)
self.assertIs(np.allclose(test_min_fee_result[0], [0., 980, 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_min_fee_result[1], -19900.0, msg='result incorrect')
self.assertAlmostEqual(test_min_fee_result[2], 300.0, msg='result incorrect')
print('\npurchase result with fixed cost rate with min fee = 300 and moq = 100:')
print(self.r.get_purchase_result(self.prices, self.cash_to_spend, 100))
test_min_fee_result = self.r.get_purchase_result(self.prices, self.cash_to_spend, 100)
self.assertIs(np.allclose(test_min_fee_result[0], [0., 900, 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_min_fee_result[1], -18300.0, msg='result incorrect')
self.assertAlmostEqual(test_min_fee_result[2], 300.0, msg='result incorrect')
print('\nselling result with fixed cost rate with min fee = 300 and moq = 0:')
print(self.r.get_selling_result(self.prices, self.amounts_to_sell))
test_min_fee_result = self.r.get_selling_result(self.prices, self.amounts_to_sell)
self.assertIs(np.allclose(test_min_fee_result[0], [0, 0, -3333.3333]), True, 'result incorrect')
self.assertAlmostEqual(test_min_fee_result[1], 33033.333)
self.assertAlmostEqual(test_min_fee_result[2], 300.0)
print('\nselling result with fixed cost rate with min fee = 300 and moq = 1:')
print(self.r.get_selling_result(self.prices, self.amounts_to_sell, 1))
test_min_fee_result = self.r.get_selling_result(self.prices, self.amounts_to_sell, 1)
self.assertIs(np.allclose(test_min_fee_result[0], [0, 0, -3333]), True, 'result incorrect')
self.assertAlmostEqual(test_min_fee_result[1], 33030)
self.assertAlmostEqual(test_min_fee_result[2], 300.0)
print('\nselling result with fixed cost rate with min fee = 300 and moq = 100:')
print(self.r.get_selling_result(self.prices, self.amounts_to_sell, 100))
test_min_fee_result = self.r.get_selling_result(self.prices, self.amounts_to_sell, 100)
self.assertIs(np.allclose(test_min_fee_result[0], [0, 0, -3300]), True, 'result incorrect')
self.assertAlmostEqual(test_min_fee_result[1], 32700)
self.assertAlmostEqual(test_min_fee_result[2], 300.0)
def test_rate_with_min(self):
"""测试最低交易费用对其他交易费率参数的影响"""
self.r.buy_rate = 0.0153
self.r.sell_rate = 0.01
self.r.buy_fix = 0.
self.r.sell_fix = 0.
self.r.buy_min = 300
self.r.sell_min = 333
self.r.slipage = 0.
print('\npurchase result with fixed cost rate with buy_rate = 0.0153, min fee = 300 and moq = 0:')
print(self.r.get_purchase_result(self.prices, self.cash_to_spend, 0))
test_rate_with_min_result = self.r.get_purchase_result(self.prices, self.cash_to_spend, 0)
self.assertIs(np.allclose(test_rate_with_min_result[0], [0., 984.9305624, 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_with_min_result[1], -20000.0, msg='result incorrect')
self.assertAlmostEqual(test_rate_with_min_result[2], 301.3887520929774, msg='result incorrect')
print('\npurchase result with fixed cost rate with buy_rate = 0.0153, min fee = 300 and moq = 10:')
print(self.r.get_purchase_result(self.prices, self.cash_to_spend, 10))
test_rate_with_min_result = self.r.get_purchase_result(self.prices, self.cash_to_spend, 10)
self.assertIs(np.allclose(test_rate_with_min_result[0], [0., 980, 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_with_min_result[1], -19900.0, msg='result incorrect')
self.assertAlmostEqual(test_rate_with_min_result[2], 300.0, msg='result incorrect')
print('\npurchase result with fixed cost rate with buy_rate = 0.0153, min fee = 300 and moq = 100:')
print(self.r.get_purchase_result(self.prices, self.cash_to_spend, 100))
test_rate_with_min_result = self.r.get_purchase_result(self.prices, self.cash_to_spend, 100)
self.assertIs(np.allclose(test_rate_with_min_result[0], [0., 900, 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_with_min_result[1], -18300.0, msg='result incorrect')
self.assertAlmostEqual(test_rate_with_min_result[2], 300.0, msg='result incorrect')
print('\nselling result with fixed cost rate with sell_rate = 0.01, min fee = 333 and moq = 0:')
print(self.r.get_selling_result(self.prices, self.amounts_to_sell))
test_rate_with_min_result = self.r.get_selling_result(self.prices, self.amounts_to_sell)
self.assertIs(np.allclose(test_rate_with_min_result[0], [0, 0, -3333.3333]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_with_min_result[1], 32999.99967)
self.assertAlmostEqual(test_rate_with_min_result[2], 333.33333)
print('\nselling result with fixed cost rate with sell_rate = 0.01, min fee = 333 and moq = 1:')
print(self.r.get_selling_result(self.prices, self.amounts_to_sell, 1))
test_rate_with_min_result = self.r.get_selling_result(self.prices, self.amounts_to_sell, 1)
self.assertIs(np.allclose(test_rate_with_min_result[0], [0, 0, -3333]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_with_min_result[1], 32996.7)
self.assertAlmostEqual(test_rate_with_min_result[2], 333.3)
print('\nselling result with fixed cost rate with sell_rate = 0.01, min fee = 333 and moq = 100:')
print(self.r.get_selling_result(self.prices, self.amounts_to_sell, 100))
test_rate_with_min_result = self.r.get_selling_result(self.prices, self.amounts_to_sell, 100)
self.assertIs(np.allclose(test_rate_with_min_result[0], [0, 0, -3300]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_with_min_result[1], 32667.0)
self.assertAlmostEqual(test_rate_with_min_result[2], 333.0)
def test_fixed_fee(self):
"""测试固定交易费用"""
self.r.buy_rate = 0.
self.r.sell_rate = 0.
self.r.buy_fix = 200
self.r.sell_fix = 150
self.r.buy_min = 0
self.r.sell_min = 0
self.r.slipage = 0
print('\nselling result of fixed cost with fixed fee = 150 and moq=0:')
print(self.r.get_selling_result(self.prices, self.amounts_to_sell, 0))
test_fixed_fee_result = self.r.get_selling_result(self.prices, self.amounts_to_sell)
self.assertIs(np.allclose(test_fixed_fee_result[0], [0, 0, -3333.3333]), True, 'result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[1], 33183.333, msg='result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[2], 150.0, msg='result incorrect')
print('\nselling result of fixed cost with fixed fee = 150 and moq=100:')
print(self.r.get_selling_result(self.prices, self.amounts_to_sell, 100))
test_fixed_fee_result = self.r.get_selling_result(self.prices, self.amounts_to_sell, 100)
self.assertIs(np.allclose(test_fixed_fee_result[0], [0, 0, -3300.]), True,
f'result incorrect, {test_fixed_fee_result[0]} does not equal to [0,0,-3400]')
self.assertAlmostEqual(test_fixed_fee_result[1], 32850., msg='result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[2], 150., msg='result incorrect')
print('\npurchase result of fixed cost with fixed fee = 200:')
print(self.r.get_purchase_result(self.prices, self.cash_to_spend, 0))
test_fixed_fee_result = self.r.get_purchase_result(self.prices, self.cash_to_spend, 0)
self.assertIs(np.allclose(test_fixed_fee_result[0], [0., 990., 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[1], -20000.0, msg='result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[2], 200.0, msg='result incorrect')
print('\npurchase result of fixed cost with fixed fee = 200:')
print(self.r.get_purchase_result(self.prices, self.cash_to_spend, 100))
test_fixed_fee_result = self.r.get_purchase_result(self.prices, self.cash_to_spend, 100)
self.assertIs(np.allclose(test_fixed_fee_result[0], [0., 900., 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[1], -18200.0, msg='result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[2], 200.0, msg='result incorrect')
def test_slipage(self):
"""测试交易滑点"""
self.r.buy_fix = 0
self.r.sell_fix = 0
self.r.buy_min = 0
self.r.sell_min = 0
self.r.buy_rate = 0.003
self.r.sell_rate = 0.001
self.r.slipage = 1E-9
print('\npurchase result of fixed rate = 0.003 and slipage = 1E-10 and moq = 0:')
print(self.r.get_purchase_result(self.prices, self.cash_to_spend, 0))
print('\npurchase result of fixed rate = 0.003 and slipage = 1E-10 and moq = 100:')
print(self.r.get_purchase_result(self.prices, self.cash_to_spend, 100))
print('\nselling result with fixed rate = 0.001 and slipage = 1E-10:')
print(self.r.get_selling_result(self.prices, self.amounts_to_sell))
test_fixed_fee_result = self.r.get_selling_result(self.prices, self.amounts_to_sell)
self.assertIs(np.allclose(test_fixed_fee_result[0], [0, 0, -3333.3333]), True,
f'{test_fixed_fee_result[0]} does not equal to [0, 0, -10000]')
self.assertAlmostEqual(test_fixed_fee_result[1], 33298.88855591,
msg=f'{test_fixed_fee_result[1]} does not equal to 99890.')
self.assertAlmostEqual(test_fixed_fee_result[2], 34.44444409,
msg=f'{test_fixed_fee_result[2]} does not equal to -36.666663.')
test_fixed_fee_result = self.r.get_purchase_result(self.prices, self.cash_to_spend, 0)
self.assertIs(np.allclose(test_fixed_fee_result[0], [0., 996.98909294, 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[1], -20000.0, msg='result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[2], 60.21814121353513, msg='result incorrect')
test_fixed_fee_result = self.r.get_purchase_result(self.prices, self.cash_to_spend, 100)
self.assertIs(np.allclose(test_fixed_fee_result[0], [0., 900., 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[1], -18054.36, msg='result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[2], 54.36, msg='result incorrect')
class TestSpace(unittest.TestCase):
def test_creation(self):
"""
test if creation of space object is fine
"""
# first group of inputs, output Space with two discr axis from [0,10]
print('testing space objects\n')
# pars_list = [[(0, 10), (0, 10)],
# [[0, 10], [0, 10]]]
#
# types_list = ['discr',
# ['discr', 'discr']]
#
# input_pars = itertools.product(pars_list, types_list)
# for p in input_pars:
# # print(p)
# s = qt.Space(*p)
# b = s.boes
# t = s.types
# # print(s, t)
# self.assertIsInstance(s, qt.Space)
# self.assertEqual(b, [(0, 10), (0, 10)], 'boes incorrect!')
# self.assertEqual(t, ['discr', 'discr'], 'types incorrect')
#
pars_list = [[(0, 10), (0, 10)],
[[0, 10], [0, 10]]]
types_list = ['foo, bar',
['foo', 'bar']]
input_pars = itertools.product(pars_list, types_list)
for p in input_pars:
# print(p)
s = Space(*p)
b = s.boes
t = s.types
# print(s, t)
self.assertEqual(b, [(0, 10), (0, 10)], 'boes incorrect!')
self.assertEqual(t, ['enum', 'enum'], 'types incorrect')
pars_list = [[(0, 10), (0, 10)],
[[0, 10], [0, 10]]]
types_list = [['discr', 'foobar']]
input_pars = itertools.product(pars_list, types_list)
for p in input_pars:
# print(p)
s = Space(*p)
b = s.boes
t = s.types
# print(s, t)
self.assertEqual(b, [(0, 10), (0, 10)], 'boes incorrect!')
self.assertEqual(t, ['discr', 'enum'], 'types incorrect')
pars_list = [(0., 10), (0, 10)]
s = Space(pars=pars_list, par_types=None)
self.assertEqual(s.types, ['conti', 'discr'])
self.assertEqual(s.dim, 2)
self.assertEqual(s.size, (10.0, 11))
self.assertEqual(s.shape, (np.inf, 11))
self.assertEqual(s.count, np.inf)
self.assertEqual(s.boes, [(0., 10), (0, 10)])
pars_list = [(0., 10), (0, 10)]
s = Space(pars=pars_list, par_types='conti, enum')
self.assertEqual(s.types, ['conti', 'enum'])
self.assertEqual(s.dim, 2)
self.assertEqual(s.size, (10.0, 2))
self.assertEqual(s.shape, (np.inf, 2))
self.assertEqual(s.count, np.inf)
self.assertEqual(s.boes, [(0., 10), (0, 10)])
pars_list = [(1, 2), (2, 3), (3, 4)]
s = Space(pars=pars_list)
self.assertEqual(s.types, ['discr', 'discr', 'discr'])
self.assertEqual(s.dim, 3)
self.assertEqual(s.size, (2, 2, 2))
self.assertEqual(s.shape, (2, 2, 2))
self.assertEqual(s.count, 8)
self.assertEqual(s.boes, [(1, 2), (2, 3), (3, 4)])
pars_list = [(1, 2, 3), (2, 3, 4), (3, 4, 5)]
s = Space(pars=pars_list)
self.assertEqual(s.types, ['enum', 'enum', 'enum'])
self.assertEqual(s.dim, 3)
self.assertEqual(s.size, (3, 3, 3))
self.assertEqual(s.shape, (3, 3, 3))
self.assertEqual(s.count, 27)
self.assertEqual(s.boes, [(1, 2, 3), (2, 3, 4), (3, 4, 5)])
pars_list = [((1, 2, 3), (2, 3, 4), (3, 4, 5))]
s = Space(pars=pars_list)
self.assertEqual(s.types, ['enum'])
self.assertEqual(s.dim, 1)
self.assertEqual(s.size, (3,))
self.assertEqual(s.shape, (3,))
self.assertEqual(s.count, 3)
pars_list = ((1, 2, 3), (2, 3, 4), (3, 4, 5))
s = Space(pars=pars_list)
self.assertEqual(s.types, ['enum', 'enum', 'enum'])
self.assertEqual(s.dim, 3)
self.assertEqual(s.size, (3, 3, 3))
self.assertEqual(s.shape, (3, 3, 3))
self.assertEqual(s.count, 27)
self.assertEqual(s.boes, [(1, 2, 3), (2, 3, 4), (3, 4, 5)])
def test_extract(self):
"""
:return:
"""
pars_list = [(0, 10), (0, 10)]
types_list = ['discr', 'discr']
s = Space(pars=pars_list, par_types=types_list)
extracted_int, count = s.extract(3, 'interval')
extracted_int_list = list(extracted_int)
print('extracted int\n', extracted_int_list)
self.assertEqual(count, 16, 'extraction count wrong!')
self.assertEqual(extracted_int_list, [(0, 0), (0, 3), (0, 6), (0, 9), (3, 0), (3, 3),
(3, 6), (3, 9), (6, 0), (6, 3), (6, 6), (6, 9),
(9, 0), (9, 3), (9, 6), (9, 9)],
'space extraction wrong!')
extracted_rand, count = s.extract(10, 'rand')
extracted_rand_list = list(extracted_rand)
self.assertEqual(count, 10, 'extraction count wrong!')
print('extracted rand\n', extracted_rand_list)
for point in list(extracted_rand_list):
self.assertEqual(len(point), 2)
self.assertLessEqual(point[0], 10)
self.assertGreaterEqual(point[0], 0)
self.assertLessEqual(point[1], 10)
self.assertGreaterEqual(point[1], 0)
pars_list = [(0., 10), (0, 10)]
s = Space(pars=pars_list, par_types=None)
extracted_int2, count = s.extract(3, 'interval')
self.assertEqual(count, 16, 'extraction count wrong!')
extracted_int_list2 = list(extracted_int2)
self.assertEqual(extracted_int_list2, [(0, 0), (0, 3), (0, 6), (0, 9), (3, 0), (3, 3),
(3, 6), (3, 9), (6, 0), (6, 3), (6, 6), (6, 9),
(9, 0), (9, 3), (9, 6), (9, 9)],
'space extraction wrong!')
print('extracted int list 2\n', extracted_int_list2)
self.assertIsInstance(extracted_int_list2[0][0], float)
self.assertIsInstance(extracted_int_list2[0][1], (int, int64))
extracted_rand2, count = s.extract(10, 'rand')
self.assertEqual(count, 10, 'extraction count wrong!')
extracted_rand_list2 = list(extracted_rand2)
print('extracted rand list 2:\n', extracted_rand_list2)
for point in extracted_rand_list2:
self.assertEqual(len(point), 2)
self.assertIsInstance(point[0], float)
self.assertLessEqual(point[0], 10)
self.assertGreaterEqual(point[0], 0)
self.assertIsInstance(point[1], (int, int64))
self.assertLessEqual(point[1], 10)
self.assertGreaterEqual(point[1], 0)
pars_list = [(0., 10), ('a', 'b')]
s = Space(pars=pars_list, par_types='enum, enum')
extracted_int3, count = s.extract(1, 'interval')
self.assertEqual(count, 4, 'extraction count wrong!')
extracted_int_list3 = list(extracted_int3)
self.assertEqual(extracted_int_list3, [(0., 'a'), (0., 'b'), (10, 'a'), (10, 'b')],
'space extraction wrong!')
print('extracted int list 3\n', extracted_int_list3)
self.assertIsInstance(extracted_int_list3[0][0], float)
self.assertIsInstance(extracted_int_list3[0][1], str)
extracted_rand3, count = s.extract(3, 'rand')
self.assertEqual(count, 3, 'extraction count wrong!')
extracted_rand_list3 = list(extracted_rand3)
print('extracted rand list 3:\n', extracted_rand_list3)
for point in extracted_rand_list3:
self.assertEqual(len(point), 2)
self.assertIsInstance(point[0], (float, int))
self.assertLessEqual(point[0], 10)
self.assertGreaterEqual(point[0], 0)
self.assertIsInstance(point[1], str)
self.assertIn(point[1], ['a', 'b'])
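# A single enum axis whose members are themselves tuples; each extracted point appears to be one of the four member tuples.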
pars_list = [((0, 10), (1, 'c'), ('a', 'b'), (1, 14))]
s = Space(pars=pars_list, par_types='enum')
extracted_int4, count = s.extract(1, 'interval')
self.assertEqual(count, 4, 'extraction count wrong!')
extracted_int_list4 = list(extracted_int4)
it = zip(extracted_int_list4, [(0, 10), (1, 'c'), (0, 'b'), (1, 14)])
for item, item2 in it:
print(item, item2)
self.assertTrue(all([tuple(ext_item) == item for ext_item, item in it]))
print('extracted int list 4\n', extracted_int_list4)
self.assertIsInstance(extracted_int_list4[0], tuple)
extracted_rand4, count = s.extract(3, 'rand')
self.assertEqual(count, 3, 'extraction count wrong!')
extracted_rand_list4 = list(extracted_rand4)
print('extracted rand list 4:\n', extracted_rand_list4)
for point in extracted_rand_list4:
self.assertEqual(len(point), 2)
self.assertIsInstance(point[0], (int, str))
self.assertIn(point[0], [0, 1, 'a'])
self.assertIsInstance(point[1], (int, str))
self.assertIn(point[1], [10, 14, 'b', 'c'])
self.assertIn(point, [(0., 10), (1, 'c'), ('a', 'b'), (1, 14)])
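# Combine the tuple-valued enum axis with a discrete axis (1..4): interval extraction yields 4 * 4 == 16 points.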
pars_list = [((0, 10), (1, 'c'), ('a', 'b'), (1, 14)), (1, 4)]
s = Space(pars=pars_list, par_types='enum, discr')
extracted_int5, count = s.extract(1, 'interval')
self.assertEqual(count, 16, 'extraction count wrong!')
extracted_int_list5 = list(extracted_int5)
for item, item2 in extracted_int_list5:
print(item, item2)
self.assertTrue(all([tuple(ext_item) == item for ext_item, item in it]))
print('extracted int list 5\n', extracted_int_list5)
self.assertIsInstance(extracted_int_list5[0], tuple)
extracted_rand5, count = s.extract(5, 'rand')
self.assertEqual(count, 5, 'extraction count wrong!')
extracted_rand_list5 = list(extracted_rand5)
print('extracted rand list 5:\n', extracted_rand_list5)
for point in extracted_rand_list5:
self.assertEqual(len(point), 2)
self.assertIsInstance(point[0], tuple)
print(f'type of point[1] is {type(point[1])}')
self.assertIsInstance(point[1], (int, np.int64))
self.assertIn(point[0], [(0., 10), (1, 'c'), ('a', 'b'), (1, 14)])
print(f'test incremental extraction')
pars_list = [(10., 250), (10., 250), (10., 250), (10., 250), (10., 250), (10., 250)]
s = Space(pars_list)
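# Six continuous [10, 250] axes sampled with step 64 give 4 grid points per axis (10, 74, 138, 202), i.e. 4 ** 6 == 4096 points in total.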
ext, count = s.extract(64, 'interval')
self.assertEqual(count, 4096)
points = list(ext)
# All points have been extracted; build subspaces around ten of them
# Check that each subspace is a Space contained in s, extract points from it with step 32, and verify the resulting point count
for point in points[1000:1010]:
subspace = s.from_point(point, 64)
self.assertIsInstance(subspace, Space)
self.assertTrue(subspace in s)
self.assertEqual(subspace.dim, 6)
self.assertEqual(subspace.types, ['conti', 'conti', 'conti', 'conti', 'conti', 'conti'])
ext, count = subspace.extract(32)
points = list(ext)
self.assertGreaterEqual(count, 512)
self.assertLessEqual(count, 4096)
print(f'\n---------------------------------'
f'\nthe space created around point <{point}> is'
f'\n{subspace.boes}'
f'\nand extracted {count} points, the first 5 are:'
f'\n{points[:5]}')
def test_axis_extract(self):
# test axis object with conti type
axis = Axis((0., 5))
self.assertIsInstance(axis, Axis)
self.assertEqual(axis.axis_type, 'conti')
self.assertEqual(axis.axis_boe, (0., 5.))
self.assertEqual(axis.count, np.inf)
self.assertEqual(axis.size, 5.0)
self.assertTrue(np.allclose(axis.extract(1, 'int'), [0., 1., 2., 3., 4.]))
self.assertTrue(np.allclose(axis.extract(0.5, 'int'), [0., 0.5, 1., 1.5, 2., 2.5, 3., 3.5, 4., 4.5]))
extracted = axis.extract(8, 'rand')
self.assertEqual(len(extracted), 8)
self.assertTrue(all([(0 <= item <= 5) for item in extracted]))
# test axis object with discrete type
axis = Axis((1, 5))
self.assertIsInstance(axis, Axis)
self.assertEqual(axis.axis_type, 'discr')
self.assertEqual(axis.axis_boe, (1, 5))
self.assertEqual(axis.count, 5)
self.assertEqual(axis.size, 5)
self.assertTrue(np.allclose(axis.extract(1, 'int'), [1, 2, 3, 4, 5]))
self.assertRaises(ValueError, axis.extract, 0.5, 'int')
extracted = axis.extract(8, 'rand')
self.assertEqual(len(extracted), 8)
self.assertTrue(all([(item in [1, 2, 3, 4, 5]) for item in extracted]))
# test axis object with enumerate type
axis = Axis((1, 5, 7, 10, 'A', 'F'))
self.assertIsInstance(axis, Axis)
self.assertEqual(axis.axis_type, 'enum')
self.assertEqual(axis.axis_boe, (1, 5, 7, 10, 'A', 'F'))
self.assertEqual(axis.count, 6)
self.assertEqual(axis.size, 6)
self.assertEqual(axis.extract(1, 'int'), [1, 5, 7, 10, 'A', 'F'])
self.assertRaises(ValueError, axis.extract, 0.5, 'int')
extracted = axis.extract(8, 'rand')
self.assertEqual(len(extracted), 8)
self.assertTrue(all([(item in [1, 5, 7, 10, 'A', 'F']) for item in extracted]))
def test_from_point(self):
"""测试从一个点生成一个space"""
# 生成一个space,指定space中的一个点以及distance,生成一个sub-space
pars_list = [(0., 10), (0, 10)]
s = Space(pars=pars_list, par_types=None)
self.assertEqual(s.types, ['conti', 'discr'])
self.assertEqual(s.dim, 2)
self.assertEqual(s.size, (10., 11))
self.assertEqual(s.shape, (np.inf, 11))
self.assertEqual(s.count, np.inf)
self.assertEqual(s.boes, [(0., 10), (0, 10)])
print('create subspace from a point in space')
p = (3, 3)
distance = 2
subspace = s.from_point(p, distance)
self.assertIsInstance(subspace, Space)
self.assertEqual(subspace.types, ['conti', 'discr'])
self.assertEqual(subspace.dim, 2)
self.assertEqual(subspace.size, (4.0, 5))
self.assertEqual(subspace.shape, (np.inf, 5))
self.assertEqual(subspace.count, np.inf)
self.assertEqual(subspace.boes, [(1, 5), (1, 5)])
print('create subspace from a 6 dimensional discrete space')
s = Space(pars=[(10, 250), (10, 250), (10, 250), (10, 250), (10, 250), (10, 250)])
p = (15, 200, 150, 150, 150, 150)
d = 10
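# A scalar distance builds a hyper-cube of half-width d around p, clipped to the parent space (hence (10, 25) instead of (5, 25) on the first axis).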
subspace = s.from_point(p, d)
self.assertIsInstance(subspace, Space)
self.assertEqual(subspace.types, ['discr', 'discr', 'discr', 'discr', 'discr', 'discr'])
self.assertEqual(subspace.dim, 6)
self.assertEqual(subspace.volume, 65345616)
self.assertEqual(subspace.size, (16, 21, 21, 21, 21, 21))
self.assertEqual(subspace.shape, (16, 21, 21, 21, 21, 21))
self.assertEqual(subspace.count, 65345616)
self.assertEqual(subspace.boes, [(10, 25), (190, 210), (140, 160), (140, 160), (140, 160), (140, 160)])
print('create subspace from a 6 dimensional continuous space')
s = Space(pars=[(10., 250), (10., 250), (10., 250), (10., 250), (10., 250), (10., 250)])
p = (15, 200, 150, 150, 150, 150)
d = 10
subspace = s.from_point(p, d)
self.assertIsInstance(subspace, Space)
self.assertEqual(subspace.types, ['conti', 'conti', 'conti', 'conti', 'conti', 'conti'])
self.assertEqual(subspace.dim, 6)
self.assertEqual(subspace.volume, 48000000)
self.assertEqual(subspace.size, (15.0, 20.0, 20.0, 20.0, 20.0, 20.0))
self.assertEqual(subspace.shape, (np.inf, np.inf, np.inf, np.inf, np.inf, np.inf))
self.assertEqual(subspace.count, np.inf)
self.assertEqual(subspace.boes, [(10, 25), (190, 210), (140, 160), (140, 160), (140, 160), (140, 160)])
print('create subspace with different distances on each dimension')
s = Space(pars=[(10., 250), (10., 250), (10., 250), (10., 250), (10., 250), (10., 250)])
p = (15, 200, 150, 150, 150, 150)
d = [10, 5, 5, 10, 10, 5]
subspace = s.from_point(p, d)
self.assertIsInstance(subspace, Space)
self.assertEqual(subspace.types, ['conti', 'conti', 'conti', 'conti', 'conti', 'conti'])
self.assertEqual(subspace.dim, 6)
self.assertEqual(subspace.volume, 6000000)
self.assertEqual(subspace.size, (15.0, 10.0, 10.0, 20.0, 20.0, 10.0))
self.assertEqual(subspace.shape, (np.inf, np.inf, np.inf, np.inf, np.inf, np.inf))
self.assertEqual(subspace.count, np.inf)
self.assertEqual(subspace.boes, [(10, 25), (195, 205), (145, 155), (140, 160), (140, 160), (145, 155)])
class TestCashPlan(unittest.TestCase):
def setUp(self):
self.cp1 = qt.CashPlan(['2012-01-01', '2010-01-01'], [10000, 20000], 0.1)
self.cp1.info()
self.cp2 = qt.CashPlan(['20100501'], 10000)
self.cp2.info()
self.cp3 = qt.CashPlan(pd.date_range(start='2019-01-01',
freq='Y',
periods=12),
[i * 1000 + 10000 for i in range(12)],
0.035)
self.cp3.info()
def test_creation(self):
self.assertIsInstance(self.cp1, qt.CashPlan, 'CashPlan object creation wrong')
self.assertIsInstance(self.cp2, qt.CashPlan, 'CashPlan object creation wrong')
self.assertIsInstance(self.cp3, qt.CashPlan, 'CashPlan object creation wrong')
# test __repr__()
print(self.cp1)
print(self.cp2)
print(self.cp3)
# test __str__()
self.cp1.info()
self.cp2.info()
self.cp3.info()
# test assertion errors
self.assertRaises(AssertionError, qt.CashPlan, '2016-01-01', [10000, 10000])
self.assertRaises(KeyError, qt.CashPlan, '2020-20-20', 10000)
def test_properties(self):
self.assertEqual(self.cp1.amounts, [20000, 10000], 'property wrong')
self.assertEqual(self.cp1.first_day, Timestamp('2010-01-01'))
self.assertEqual(self.cp1.last_day, Timestamp('2012-01-01'))
self.assertEqual(self.cp1.investment_count, 2)
self.assertEqual(self.cp1.period, 730)
self.assertEqual(self.cp1.dates, [Timestamp('2010-01-01'), Timestamp('2012-01-01')])
self.assertEqual(self.cp1.ir, 0.1)
self.assertAlmostEqual(self.cp1.closing_value, 34200)
self.assertAlmostEqual(self.cp2.closing_value, 10000)
self.assertAlmostEqual(self.cp3.closing_value, 220385.3483685)
self.assertIsInstance(self.cp1.plan, pd.DataFrame)
self.assertIsInstance(self.cp2.plan, pd.DataFrame)
self.assertIsInstance(self.cp3.plan, pd.DataFrame)
def test_operation(self):
cp_self_add = self.cp1 + self.cp1
cp_add = self.cp1 + self.cp2
cp_add_int = self.cp1 + 10000
cp_mul_int = self.cp1 * 2
cp_mul_float = self.cp2 * 1.5
cp_mul_time = 3 * self.cp2
cp_mul_time2 = 2 * self.cp1
cp_mul_time3 = 2 * self.cp3
cp_mul_float2 = 2. * self.cp3
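# Judging by the assertions below: cp * number scales the amounts, int * cp replicates the plan's dates over future periods, and float * cp scales the amounts.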
self.assertIsInstance(cp_self_add, qt.CashPlan)
self.assertEqual(cp_self_add.amounts, [40000, 20000])
self.assertEqual(cp_add.amounts, [20000, 10000, 10000])
self.assertEqual(cp_add_int.amounts, [30000, 20000])
self.assertEqual(cp_mul_int.amounts, [40000, 20000])
self.assertEqual(cp_mul_float.amounts, [15000])
self.assertEqual(cp_mul_float.dates, [Timestamp('2010-05-01')])
self.assertEqual(cp_mul_time.amounts, [10000, 10000, 10000])
self.assertEqual(cp_mul_time.dates, [Timestamp('2010-05-01'),
Timestamp('2011-05-01'),
Timestamp('2012-04-30')])
self.assertEqual(cp_mul_time2.amounts, [20000, 10000, 20000, 10000])
self.assertEqual(cp_mul_time2.dates, [Timestamp('2010-01-01'),
Timestamp('2012-01-01'),
Timestamp('2014-01-01'),
Timestamp('2016-01-01')])
self.assertEqual(cp_mul_time3.dates, [Timestamp('2019-12-31'),
Timestamp('2020-12-31'),
Timestamp('2021-12-31'),
Timestamp('2022-12-31'),
Timestamp('2023-12-31'),
Timestamp('2024-12-31'),
Timestamp('2025-12-31'),
Timestamp('2026-12-31'),
Timestamp('2027-12-31'),
Timestamp('2028-12-31'),
Timestamp('2029-12-31'),
Timestamp('2030-12-31'),
Timestamp('2031-12-29'),
Timestamp('2032-12-29'),
Timestamp('2033-12-29'),
Timestamp('2034-12-29'),
Timestamp('2035-12-29'),
Timestamp('2036-12-29'),
Timestamp('2037-12-29'),
Timestamp('2038-12-29'),
Timestamp('2039-12-29'),
Timestamp('2040-12-29'),
Timestamp('2041-12-29'),
Timestamp('2042-12-29')])
self.assertEqual(cp_mul_float2.dates, [Timestamp('2019-12-31'),
Timestamp('2020-12-31'),
Timestamp('2021-12-31'),
Timestamp('2022-12-31'),
Timestamp('2023-12-31'),
Timestamp('2024-12-31'),
Timestamp('2025-12-31'),
Timestamp('2026-12-31'),
Timestamp('2027-12-31'),
Timestamp('2028-12-31'),
Timestamp('2029-12-31'),
Timestamp('2030-12-31')])
self.assertEqual(cp_mul_float2.amounts, [20000.0,
22000.0,
24000.0,
26000.0,
28000.0,
30000.0,
32000.0,
34000.0,
36000.0,
38000.0,
40000.0,
42000.0])
class TestPool(unittest.TestCase):
def setUp(self):
self.p = ResultPool(5)
self.items = ['first', 'second', (1, 2, 3), 'this', 24]
self.perfs = [1, 2, 3, 4, 5]
self.additional_result1 = ('abc', 12)
self.additional_result2 = ([1, 2], -1)
self.additional_result3 = (12, 5)
def test_create(self):
self.assertIsInstance(self.p, ResultPool)
def test_operation(self):
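# The pool was created with capacity 5; in_pool() appends an item with its perf score and cut() trims back to capacity, keeping the largest perfs unless keep_largest=False.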
self.p.in_pool(self.additional_result1[0], self.additional_result1[1])
self.p.cut()
self.assertEqual(self.p.item_count, 1)
self.assertEqual(self.p.items, ['abc'])
for item, perf in zip(self.items, self.perfs):
self.p.in_pool(item, perf)
self.assertEqual(self.p.item_count, 6)
self.assertEqual(self.p.items, ['abc', 'first', 'second', (1, 2, 3), 'this', 24])
self.p.cut()
self.assertEqual(self.p.items, ['second', (1, 2, 3), 'this', 24, 'abc'])
self.assertEqual(self.p.perfs, [2, 3, 4, 5, 12])
self.p.in_pool(self.additional_result2[0], self.additional_result2[1])
self.p.in_pool(self.additional_result3[0], self.additional_result3[1])
self.assertEqual(self.p.item_count, 7)
self.p.cut(keep_largest=False)
self.assertEqual(self.p.items, [[1, 2], 'second', (1, 2, 3), 'this', 24])
self.assertEqual(self.p.perfs, [-1, 2, 3, 4, 5])
class TestCoreSubFuncs(unittest.TestCase):
"""Test all functions in core.py"""
def setUp(self):
pass
def test_input_to_list(self):
print('Testing input_to_list() function')
input_str = 'first'
self.assertEqual(qt.utilfuncs.input_to_list(input_str, 3), ['first', 'first', 'first'])
self.assertEqual(qt.utilfuncs.input_to_list(input_str, 4), ['first', 'first', 'first', 'first'])
self.assertEqual(qt.utilfuncs.input_to_list(input_str, 2, None), ['first', 'first'])
input_list = ['first', 'second']
self.assertEqual(qt.utilfuncs.input_to_list(input_list, 3), ['first', 'second', None])
self.assertEqual(qt.utilfuncs.input_to_list(input_list, 4, 'padder'), ['first', 'second', 'padder', 'padder'])
self.assertEqual(qt.utilfuncs.input_to_list(input_list, 1), ['first', 'second'])
self.assertEqual(qt.utilfuncs.input_to_list(input_list, -5), ['first', 'second'])
def test_point_in_space(self):
sp = Space([(0., 10.), (0., 10.), (0., 10.)])
p1 = (5.5, 3.2, 7)
p2 = (-1, 3, 10)
self.assertTrue(p1 in sp)
print(f'point {p1} is in space {sp}')
self.assertFalse(p2 in sp)
print(f'point {p2} is not in space {sp}')
sp = Space([(0., 10.), (0., 10.), range(40, 3, -2)], 'conti, conti, enum')
p1 = (5.5, 3.2, 8)
self.assertTrue(p1 in sp)
print(f'point {p1} is in space {sp}')
def test_space_in_space(self):
print('test if a space is in another space')
sp = Space([(0., 10.), (0., 10.), (0., 10.)])
sp2 = Space([(0., 10.), (0., 10.), (0., 10.)])
self.assertTrue(sp2 in sp)
self.assertTrue(sp in sp2)
print(f'space {sp2} is in space {sp}\n'
f'and space {sp} is in space {sp2}\n'
f'they are equal to each other\n')
sp2 = Space([(0, 5.), (2, 7.), (3., 9.)])
self.assertTrue(sp2 in sp)
self.assertFalse(sp in sp2)
print(f'space {sp2} is in space {sp}\n'
f'and space {sp} is not in space {sp2}\n'
f'{sp2} is a sub space of {sp}\n')
sp2 = Space([(0, 5), (2, 7), (3., 9)])
self.assertFalse(sp2 in sp)
self.assertFalse(sp in sp2)
print(f'space {sp2} is not in space {sp}\n'
f'and space {sp} is not in space {sp2}\n'
f'they have different types of axes\n')
sp = Space([(0., 10.), (0., 10.), range(40, 3, -2)])
self.assertFalse(sp in sp2)
self.assertFalse(sp2 in sp)
print(f'space {sp2} is not in space {sp}\n'
f'and space {sp} is not in space {sp2}\n'
f'they have different types of axes\n')
def test_space_around_centre(self):
sp = Space([(0., 10.), (0., 10.), (0., 10.)])
p1 = (5.5, 3.2, 7)
ssp = space_around_centre(space=sp, centre=p1, radius=1.2)
print(ssp.boes)
self.assertEqual(ssp.boes, [(4.3, 6.7), (2.0, 4.4), (5.8, 8.2)])
print('\ntest multiple diameters:')
ssp = space_around_centre(space=sp, centre=p1, radius=[1, 2, 1])
print(ssp.boes)
self.assertEqual(ssp.boes, [(4.5, 6.5), (1.2000000000000002, 5.2), (6.0, 8.0)])
print('\ntest points on edge:')
p2 = (5.5, 3.2, 10)
ssp = space_around_centre(space=sp, centre=p1, radius=3.9)
print(ssp.boes)
self.assertEqual(ssp.boes, [(1.6, 9.4), (0.0, 7.1), (3.1, 10.0)])
print('\ntest enum spaces')
sp = Space([(0, 100), range(40, 3, -2)], 'discr, enum')
p1 = [34, 12]
ssp = space_around_centre(space=sp, centre=p1, radius=5, ignore_enums=False)
self.assertEqual(ssp.boes, [(29, 39), (22, 20, 18, 16, 14, 12, 10, 8, 6, 4)])
print(ssp.boes)
print('\ntest enum space and ignore enum axis')
ssp = space_around_centre(space=sp, centre=p1, radius=5)
self.assertEqual(ssp.boes, [(29, 39),
(40, 38, 36, 34, 32, 30, 28, 26, 24, 22, 20, 18, 16, 14, 12, 10, 8, 6, 4)])
print(sp.boes)
def test_get_stock_pool(self):
print(f'start test building stock pool function\n')
share_basics = stock_basic(fields='ts_code,symbol,name,area,industry,market,list_date,exchange')
print(f'\nselect all stocks by area')
stock_pool = qt.get_stock_pool(area='上海')
print(f'{len(stock_pool)} shares selected, first 5 are: {stock_pool[0:5]}\n'
f'check if all stock areas are "上海"\n'
f'{share_basics[np.isin(share_basics.ts_code, stock_pool)].head()}')
self.assertTrue(share_basics[np.isin(share_basics.ts_code, stock_pool)]['area'].eq('上海').all())
print(f'\nselect all stocks by multiple areas')
stock_pool = qt.get_stock_pool(area='贵州,北京,天津')
print(f'\n{len(stock_pool)} shares selected, first 5 are: {stock_pool[0:5]}\n'
f'check if all stock areas are in list of ["贵州", "北京", "天津"]\n'
f'{share_basics[np.isin(share_basics.ts_code, stock_pool)].head()}')
self.assertTrue(share_basics[np.isin(share_basics.ts_code, stock_pool)]['area'].isin(['贵州',
'北京',
'天津']).all())
print(f'\nselect all stocks by area and industry')
stock_pool = qt.get_stock_pool(area='四川', industry='银行, 金融')
print(f'\n{len(stock_pool)} shares selected, first 5 are: {stock_pool[0:5]}\n'
f'check if all stock areas are "四川", and industry in ["银行", "金融"]\n'
f'{share_basics[np.isin(share_basics.ts_code, stock_pool)].head()}')
self.assertTrue(share_basics[np.isin(share_basics.ts_code, stock_pool)]['industry'].isin(['银行', '金融']).all())
self.assertTrue(share_basics[np.isin(share_basics.ts_code, stock_pool)]['area'].isin(['四川']).all())
print(f'\nselect all stocks by industry')
stock_pool = qt.get_stock_pool(industry='银行, 金融')
print(f'\n{len(stock_pool)} shares selected, first 5 are: {stock_pool[0:5]}\n'
f'check if all stocks industry in ["银行", "金融"]\n'
f'{share_basics[np.isin(share_basics.ts_code, stock_pool)].head()}')
self.assertTrue(share_basics[np.isin(share_basics.ts_code, stock_pool)]['industry'].isin(['银行', '金融']).all())
print(f'\nselect all stocks by market')
stock_pool = qt.get_stock_pool(market='主板')
print(f'\n{len(stock_pool)} shares selected, first 5 are: {stock_pool[0:5]}\n'
f'check if all stock market is "主板"\n'
f'{share_basics[np.isin(share_basics.ts_code, stock_pool)].head()}')
self.assertTrue(share_basics[np.isin(share_basics.ts_code, stock_pool)]['market'].isin(['主板']).all())
print(f'\nselect all stocks by market and list date')
stock_pool = qt.get_stock_pool(date='2000-01-01', market='主板')
print(f'\n{len(stock_pool)} shares selected, first 5 are: {stock_pool[0:5]}\n'
f'check if all stock markets are "主板", and list dates are on or before "2000-01-01"\n'
f'{share_basics[np.isin(share_basics.ts_code, stock_pool)].head()}')
self.assertTrue(share_basics[np.isin(share_basics.ts_code, stock_pool)]['market'].isin(['主板']).all())
self.assertTrue(share_basics[np.isin(share_basics.ts_code, stock_pool)]['list_date'].le('2000-01-01').all())
print(f'\nselect all stocks by list date')
stock_pool = qt.get_stock_pool(date='1997-01-01')
print(f'\n{len(stock_pool)} shares selected, first 5 are: {stock_pool[0:5]}\n'
f'check if all list dates are on or before "1997-01-01"\n'
f'{share_basics[np.isin(share_basics.ts_code, stock_pool)].head()}')
self.assertTrue(share_basics[np.isin(share_basics.ts_code, stock_pool)]['list_date'].le('1997-01-01').all())
print(f'\nselect all stocks by exchange')
stock_pool = qt.get_stock_pool(exchange='SSE')
print(f'\n{len(stock_pool)} shares selected, first 5 are: {stock_pool[0:5]}\n'
f'check if all exchanges are "SSE"\n'
f'{share_basics[np.isin(share_basics.ts_code, stock_pool)].head()}')
self.assertTrue(share_basics[np.isin(share_basics.ts_code, stock_pool)]['exchange'].eq('SSE').all())
print(f'\nselect all stocks by industry, area and list date')
industry_list = ['银行', '全国地产', '互联网', '环境保护', '区域地产',
'酒店餐饮', '运输设备', '综合类', '建筑工程', '玻璃',
'家用电器', '文教休闲', '其他商业', '元器件', 'IT设备',
'其他建材', '汽车服务', '火力发电', '医药商业', '汽车配件',
'广告包装', '轻工机械', '新型电力', '多元金融', '饲料']
area_list = ['深圳', '北京', '吉林', '江苏', '辽宁', '广东',
'安徽', '四川', '浙江', '湖南', '河北', '新疆',
'山东', '河南', '山西', '江西', '青海', '湖北',
'内蒙', '海南', '重庆', '陕西', '福建', '广西',
'上海']
stock_pool = qt.get_stock_pool(date='19980101',
industry=industry_list,
area=area_list)
print(f'\n{len(stock_pool)} shares selected, first 5 are: {stock_pool[0:5]}\n'
f'check if all exchanges are "SSE"\n'
f'{share_basics[np.isin(share_basics.ts_code, stock_pool)].head()}')
self.assertTrue(share_basics[np.isin(share_basics.ts_code, stock_pool)]['list_date'].le('1998-01-01').all())
self.assertTrue(share_basics[np.isin(share_basics.ts_code, stock_pool)]['industry'].isin(industry_list).all())
self.assertTrue(share_basics[np.isin(share_basics.ts_code, stock_pool)]['area'].isin(area_list).all())
self.assertRaises(KeyError, qt.get_stock_pool, industry=25)
self.assertRaises(KeyError, qt.get_stock_pool, share_name='000300.SH')
self.assertRaises(KeyError, qt.get_stock_pool, markets='SSE')
class TestEvaluations(unittest.TestCase):
"""Test all evaluation functions in core.py"""
# The expected results below were calculated manually in an Excel file
def setUp(self):
"""用np.random生成测试用数据,使用cumsum()模拟股票走势"""
self.test_data1 = pd.DataFrame([5.34892759, 5.65768696, 5.79227076, 5.56266871, 5.88189632,
6.24795001, 5.92755558, 6.38748165, 6.31331899, 5.86001665,
5.61048472, 5.30696736, 5.40406792, 5.03180571, 5.37886353,
5.78608307, 6.26540339, 6.59348026, 6.90943801, 6.70911677,
6.33015954, 6.06697417, 5.9752499, 6.45786408, 6.95273763,
6.7691991, 6.70355481, 6.28048969, 6.61344541, 6.24620003,
6.47409983, 6.4522311, 6.8773094, 6.99727832, 6.59262674,
6.59014938, 6.63758237, 6.38331869, 6.09902105, 6.35390109,
6.51993567, 6.87244592, 6.83963485, 7.08797815, 6.88003144,
6.83657323, 6.97819483, 7.01600276, 7.12554256, 7.58941523,
7.61014457, 7.21224091, 7.48174399, 7.66490854, 7.51371968,
7.11586198, 6.97147399, 6.67453301, 6.2042138, 6.33967015,
6.22187938, 5.98426993, 6.37096079, 6.55897161, 6.26422645,
6.69363762, 7.12668015, 6.83232926, 7.30524081, 7.4262041,
7.54031383, 7.17545919, 7.20659257, 7.44886016, 7.37094393,
6.88011022, 7.08142491, 6.74992833, 6.5967097, 6.21336693,
6.35565105, 6.82347596, 6.44773408, 6.84538053, 6.47966466,
6.09699528, 5.63927014, 6.01081024, 6.20585303, 6.60528206,
7.01594726, 7.03684251, 6.76574977, 7.08740846, 6.65336462,
7.07126686, 6.80058956, 6.79241977, 6.47843472, 6.39245474],
columns=['value'])
self.test_data2 = pd.DataFrame([5.09276527, 4.83828592, 4.6000911, 4.63170487, 4.63566451,
4.50546921, 4.96390044, 4.64557907, 4.25787855, 3.76585551,
3.38826334, 3.76243422, 4.06365426, 3.87084726, 3.91400935,
4.13438822, 4.27064542, 4.56776104, 5.03800296, 5.31070529,
5.39902276, 5.21186286, 5.05683114, 4.68842046, 5.11895168,
5.27151571, 5.72294993, 6.09961056, 6.26569635, 6.48806151,
6.16058885, 6.2582459, 6.38934791, 6.57831057, 6.19508831,
5.70155153, 5.20435735, 5.36538825, 5.40450056, 5.2227697,
5.37828693, 5.53058991, 6.02996797, 5.76802181, 5.66166713,
6.07988994, 5.61794367, 5.63218151, 6.10728013, 6.0324168,
6.27164431, 6.27551239, 6.52329665, 7.00470007, 7.34163113,
7.33699083, 7.67661334, 8.09395749, 7.68086668, 7.58341161,
7.46219819, 7.58671899, 7.19348298, 7.40088323, 7.47562005,
7.93342043, 8.2286081, 8.3521632, 8.43590025, 8.34977395,
8.57563095, 8.81586328, 9.08738649, 9.01542031, 8.8653815,
9.21763111, 9.04233017, 8.59533999, 8.47590075, 8.70857222,
8.78890756, 8.92697606, 9.35743773, 9.68280866, 10.15622021,
10.55908549, 10.6337894, 10.55197128, 10.65435176, 10.54611045,
10.19432562, 10.48320884, 10.36176768, 10.03186854, 10.23656092,
10.0062843, 10.13669686, 10.30758958, 9.87904176, 10.05126375],
columns=['value'])
self.test_data3 = pd.DataFrame([5.02851874, 5.20700348, 5.02410709, 5.49836387, 5.06834371,
5.10956737, 5.15314979, 5.02256472, 5.09746382, 5.23909247,
4.93410336, 4.96316186, 5.40026682, 5.7353255, 5.53438319,
5.79092139, 5.67528173, 5.89840855, 5.75379463, 6.10855386,
5.77322365, 5.84538021, 5.6103973, 5.7518655, 5.49729695,
5.13610628, 5.30524121, 5.68093462, 5.73251319, 6.04420783,
6.26929843, 6.59610234, 6.09872345, 6.25475121, 6.72927396,
6.91395783, 7.00693283, 7.36217783, 7.71516676, 7.67580263,
7.62477511, 7.73600568, 7.53457914, 7.46170277, 7.83658014,
8.11481319, 8.03705544, 7.64948845, 7.52043731, 7.67247943,
7.46511982, 7.43541798, 7.58856517, 7.9392717, 8.25406287,
7.77031632, 8.03223447, 7.86799055, 7.57630999, 7.33230519,
7.22378732, 6.85972264, 7.17548456, 7.5387846, 7.2392632,
6.8455644, 6.59557185, 6.6496796, 6.73685623, 7.18598015,
7.13619128, 6.88060157, 7.1399681, 7.30308077, 6.94942434,
7.0247815, 7.37567798, 7.50080197, 7.59719284, 7.14520561,
7.29913484, 7.79551341, 8.15497781, 8.40456095, 8.86516528,
8.53042688, 8.94268762, 8.52048006, 8.80036284, 8.91602364,
9.19953385, 8.70828953, 8.24613093, 8.18770453, 7.79548389,
7.68627967, 7.23205036, 6.98302636, 7.06515819, 6.95068113],
columns=['value'])
self.test_data4 = pd.DataFrame([4.97926539, 5.44016005, 5.45122915, 5.74485615, 5.45600553,
5.44858945, 5.2435413, 5.47315161, 5.58464303, 5.36179749,
5.38236326, 5.29614981, 5.76523508, 5.75102892, 6.15316618,
6.03852528, 6.01442228, 5.70510182, 5.22748133, 5.46762379,
5.78926267, 5.8221362, 5.61236849, 5.30615725, 5.24200611,
5.41042642, 5.59940342, 5.28306781, 4.99451932, 5.08799266,
5.38865647, 5.58229139, 5.33492845, 5.48206276, 5.09721379,
5.39190493, 5.29965087, 5.0374415, 5.50798022, 5.43107577,
5.22759507, 4.991809, 5.43153084, 5.39966868, 5.59916352,
5.66412137, 6.00611838, 5.63564902, 5.66723484, 5.29863863,
4.91115153, 5.3749929, 5.75082334, 6.08308148, 6.58091182,
6.77848803, 7.19588758, 7.64862286, 7.99818347, 7.91824794,
8.30341071, 8.45984973, 7.98700002, 8.18924931, 8.60755649,
8.66233396, 8.91018407, 9.0782739, 9.33515448, 8.95870245,
8.98426422, 8.50340317, 8.64916085, 8.93592407, 8.63145745,
8.65322862, 8.39543204, 8.37969997, 8.23394504, 8.04062872,
7.91259763, 7.57252171, 7.72670114, 7.74486117, 8.06908188,
7.99166889, 7.92155906, 8.39956136, 8.80181323, 8.47464091,
8.06557064, 7.87145573, 8.0237959, 8.39481998, 8.68525692,
8.81185461, 8.98632237, 9.0989835, 8.89787405, 8.86508591],
columns=['value'])
self.test_data5 = pd.DataFrame([4.50258923, 4.35142568, 4.07459514, 3.87791297, 3.73715985,
3.98455684, 4.07587908, 4.00042472, 4.28276612, 4.01362051,
4.13713565, 4.49312372, 4.48633159, 4.4641207, 4.13444605,
3.79107217, 4.22941629, 4.56548511, 4.92472163, 5.27723158,
5.67409193, 6.00176917, 5.88889928, 5.55256103, 5.39308314,
5.2610492, 5.30738908, 5.22222408, 4.90332238, 4.57499908,
4.96097146, 4.81531011, 4.39115442, 4.63200662, 5.04588813,
4.67866025, 5.01705123, 4.83562258, 4.60381702, 4.66187576,
4.41292828, 4.86604507, 4.42280124, 4.07517294, 4.16317319,
4.10316596, 4.42913598, 4.06609666, 3.96725913, 4.15965746,
4.12379564, 4.04054068, 3.84342851, 3.45902867, 3.17649855,
3.09773586, 3.5502119, 3.66396995, 3.66306483, 3.29131401,
2.79558533, 2.88319542, 3.03671098, 3.44645857, 3.88167161,
3.57961874, 3.60180276, 3.96702102, 4.05429995, 4.40056979,
4.05653231, 3.59600456, 3.60792477, 4.09989922, 3.73503663,
4.01892626, 3.94597242, 3.81466605, 3.71417992, 3.93767156,
4.42806557, 4.06988106, 4.03713636, 4.34408673, 4.79810156,
5.18115011, 4.89798406, 5.3960077, 5.72504875, 5.61894017,
5.1958197, 4.85275896, 5.17550207, 4.71548987, 4.62408567,
4.55488535, 4.36532649, 4.26031979, 4.25225607, 4.58627048],
columns=['value'])
self.test_data6 = pd.DataFrame([5.08639513, 5.05761083, 4.76160923, 4.62166504, 4.62923183,
4.25070173, 4.13447513, 3.90890013, 3.76687608, 3.43342482,
3.67648224, 3.6274775, 3.9385404, 4.39771627, 4.03199346,
3.93265288, 3.50059789, 3.3851961, 3.29743973, 3.2544872,
2.93692949, 2.70893003, 2.55461976, 2.20922332, 2.29054475,
2.2144714, 2.03726827, 2.39007617, 2.29866155, 2.40607111,
2.40440444, 2.79374649, 2.66541922, 2.27018079, 2.08505127,
2.55478864, 2.22415625, 2.58517923, 2.58802256, 2.94870959,
2.69301739, 2.19991535, 2.69473146, 2.64704637, 2.62753542,
2.14240825, 2.38565154, 1.94592117, 2.32243877, 2.69337246,
2.51283854, 2.62484451, 2.15559054, 2.35410875, 2.31219177,
1.96018265, 2.34711266, 2.58083322, 2.40290041, 2.20439791,
2.31472425, 2.16228248, 2.16439749, 2.20080737, 1.73293206,
1.9264407, 2.25089861, 2.69269101, 2.59296687, 2.1420998,
1.67819153, 1.98419023, 2.14479494, 1.89055376, 1.96720648,
1.9916694, 2.37227761, 2.14446036, 2.34573903, 1.86162546,
2.1410721, 2.39204939, 2.52529064, 2.47079939, 2.9299031,
3.09452923, 2.93276708, 3.21731309, 3.06248964, 2.90413406,
2.67844632, 2.45621213, 2.41463398, 2.7373913, 3.14917045,
3.4033949, 3.82283446, 4.02285451, 3.7619638, 4.10346795],
columns=['value'])
self.test_data7 = pd.DataFrame([4.75233583, 4.47668283, 4.55894263, 4.61765848, 4.622892,
4.58941116, 4.32535872, 3.88112797, 3.47237806, 3.50898953,
3.82530406, 3.6718017, 3.78918195, 4.1800752, 4.01818557,
4.40822582, 4.65474654, 4.89287256, 4.40879274, 4.65505126,
4.36876403, 4.58418934, 4.75687172, 4.3689799, 4.16126498,
4.0203982, 3.77148242, 3.38198096, 3.07261764, 2.9014741,
2.5049543, 2.756105, 2.28779058, 2.16986991, 1.8415962,
1.83319008, 2.20898291, 2.00128981, 1.75747025, 1.26676663,
1.40316876, 1.11126484, 1.60376367, 1.22523829, 1.58816681,
1.49705679, 1.80244138, 1.55128293, 1.35339409, 1.50985759,
1.0808451, 1.05892796, 1.43414812, 1.43039101, 1.73631655,
1.43940867, 1.82864425, 1.71088265, 2.12015154, 2.45417128,
2.84777618, 2.7925612, 2.90975121, 3.25920745, 3.13801182,
3.52733677, 3.65468491, 3.69395211, 3.49862035, 3.24786017,
3.64463138, 4.00331929, 3.62509565, 3.78013949, 3.4174012,
3.76312271, 3.62054004, 3.67206716, 3.60596058, 3.38636199,
3.42580676, 3.32921095, 3.02976759, 3.28258676, 3.45760838,
3.24917528, 2.94618304, 2.86980011, 2.63191259, 2.39566759,
2.53159917, 2.96273967, 3.25626185, 2.97425402, 3.16412191,
3.58280763, 3.23257727, 3.62353556, 3.12806399, 2.92532313],
columns=['value'])
# Build a test series of 500 data points to exercise the evaluation process when there are more than 250 points
self.long_data = pd.DataFrame([9.879, 9.916, 10.109, 10.214, 10.361, 10.768, 10.594, 10.288,
10.082, 9.994, 10.125, 10.126, 10.384, 10.734, 10.4, 10.87,
11.338, 11.061, 11.415, 11.724, 12.077, 12.196, 12.064, 12.423,
12.19, 11.729, 11.677, 11.448, 11.485, 10.989, 11.242, 11.239,
11.113, 11.075, 11.471, 11.745, 11.754, 11.782, 12.079, 11.97,
12.178, 11.95, 12.438, 12.612, 12.804, 12.952, 12.612, 12.867,
12.832, 12.832, 13.015, 13.315, 13.249, 12.904, 12.776, 12.64,
12.543, 12.287, 12.225, 11.844, 11.985, 11.945, 11.542, 11.871,
12.245, 12.228, 12.362, 11.899, 11.962, 12.374, 12.816, 12.649,
12.252, 12.579, 12.3, 11.988, 12.177, 12.312, 12.744, 12.599,
12.524, 12.82, 12.67, 12.876, 12.986, 13.271, 13.606, 13.82,
14.161, 13.833, 13.831, 14.137, 13.705, 13.414, 13.037, 12.759,
12.642, 12.948, 13.297, 13.483, 13.836, 14.179, 13.709, 13.655,
13.198, 13.508, 13.953, 14.387, 14.043, 13.987, 13.561, 13.391,
12.923, 12.555, 12.503, 12.292, 11.877, 12.34, 12.141, 11.687,
11.992, 12.458, 12.131, 11.75, 11.739, 11.263, 11.762, 11.976,
11.578, 11.854, 12.136, 12.422, 12.311, 12.56, 12.879, 12.861,
12.973, 13.235, 13.53, 13.531, 13.137, 13.166, 13.31, 13.103,
13.007, 12.643, 12.69, 12.216, 12.385, 12.046, 12.321, 11.9,
11.772, 11.816, 11.871, 11.59, 11.518, 11.94, 11.803, 11.924,
12.183, 12.136, 12.361, 12.406, 11.932, 11.684, 11.292, 11.388,
11.874, 12.184, 12.002, 12.16, 11.741, 11.26, 11.123, 11.534,
11.777, 11.407, 11.275, 11.679, 11.62, 11.218, 11.235, 11.352,
11.366, 11.061, 10.661, 10.582, 10.899, 11.352, 11.792, 11.475,
11.263, 11.538, 11.183, 10.936, 11.399, 11.171, 11.214, 10.89,
10.728, 11.191, 11.646, 11.62, 11.195, 11.178, 11.18, 10.956,
11.205, 10.87, 11.098, 10.639, 10.487, 10.507, 10.92, 10.558,
10.119, 9.882, 9.573, 9.515, 9.845, 9.852, 9.495, 9.726,
10.116, 10.452, 10.77, 11.225, 10.92, 10.824, 11.096, 11.542,
11.06, 10.568, 10.585, 10.884, 10.401, 10.068, 9.964, 10.285,
10.239, 10.036, 10.417, 10.132, 9.839, 9.556, 9.084, 9.239,
9.304, 9.067, 8.587, 8.471, 8.007, 8.321, 8.55, 9.008,
9.138, 9.088, 9.434, 9.156, 9.65, 9.431, 9.654, 10.079,
10.411, 10.865, 10.51, 10.205, 10.519, 10.367, 10.855, 10.642,
10.298, 10.622, 10.173, 9.792, 9.995, 9.904, 9.771, 9.597,
9.506, 9.212, 9.688, 10.032, 9.723, 9.839, 9.918, 10.332,
10.236, 9.989, 10.192, 10.685, 10.908, 11.275, 11.72, 12.158,
12.045, 12.244, 12.333, 12.246, 12.552, 12.958, 13.11, 13.53,
13.123, 13.138, 13.57, 13.389, 13.511, 13.759, 13.698, 13.744,
13.467, 13.795, 13.665, 13.377, 13.423, 13.772, 13.295, 13.073,
12.718, 12.388, 12.399, 12.185, 11.941, 11.818, 11.465, 11.811,
12.163, 11.86, 11.935, 11.809, 12.145, 12.624, 12.768, 12.321,
12.277, 11.889, 12.11, 12.606, 12.943, 12.945, 13.112, 13.199,
13.664, 14.051, 14.189, 14.339, 14.611, 14.656, 15.112, 15.086,
15.263, 15.021, 15.346, 15.572, 15.607, 15.983, 16.151, 16.215,
16.096, 16.089, 16.32, 16.59, 16.657, 16.752, 16.583, 16.743,
16.373, 16.662, 16.243, 16.163, 16.491, 16.958, 16.977, 17.225,
17.637, 17.344, 17.684, 17.892, 18.036, 18.182, 17.803, 17.588,
17.101, 17.538, 17.124, 16.787, 17.167, 17.138, 16.955, 17.148,
17.135, 17.635, 17.718, 17.675, 17.622, 17.358, 17.754, 17.729,
17.576, 17.772, 18.239, 18.441, 18.729, 18.319, 18.608, 18.493,
18.069, 18.122, 18.314, 18.423, 18.709, 18.548, 18.384, 18.391,
17.988, 17.986, 17.653, 17.249, 17.298, 17.06, 17.36, 17.108,
17.348, 17.596, 17.46, 17.635, 17.275, 17.291, 16.933, 17.337,
17.231, 17.146, 17.148, 16.751, 16.891, 17.038, 16.735, 16.64,
16.231, 15.957, 15.977, 16.077, 16.054, 15.797, 15.67, 15.911,
16.077, 16.17, 15.722, 15.258, 14.877, 15.138, 15., 14.811,
14.698, 14.407, 14.583, 14.704, 15.153, 15.436, 15.634, 15.453,
15.877, 15.696, 15.563, 15.927, 16.255, 16.696, 16.266, 16.698,
16.365, 16.493, 16.973, 16.71, 16.327, 16.605, 16.486, 16.846,
16.935, 17.21, 17.389, 17.546, 17.773, 17.641, 17.485, 17.794,
17.354, 16.904, 16.675, 16.43, 16.898, 16.819, 16.921, 17.201,
17.617, 17.368, 17.864, 17.484],
columns=['value'])
self.long_bench = pd.DataFrame([9.7, 10.179, 10.321, 9.855, 9.936, 10.096, 10.331, 10.662,
10.59, 11.031, 11.154, 10.945, 10.625, 10.233, 10.284, 10.252,
10.221, 10.352, 10.444, 10.773, 10.904, 11.104, 10.797, 10.55,
10.943, 11.352, 11.641, 11.983, 11.696, 12.138, 12.365, 12.379,
11.969, 12.454, 12.947, 13.119, 13.013, 12.763, 12.632, 13.034,
12.681, 12.561, 12.938, 12.867, 13.202, 13.132, 13.539, 13.91,
13.456, 13.692, 13.771, 13.904, 14.069, 13.728, 13.97, 14.228,
13.84, 14.041, 13.963, 13.689, 13.543, 13.858, 14.118, 13.987,
13.611, 14.028, 14.229, 14.41, 14.74, 15.03, 14.915, 15.207,
15.354, 15.665, 15.877, 15.682, 15.625, 15.175, 15.105, 14.893,
14.86, 15.097, 15.178, 15.293, 15.238, 15., 15.283, 14.994,
14.907, 14.664, 14.888, 15.297, 15.313, 15.368, 14.956, 14.802,
14.506, 14.257, 14.619, 15.019, 15.049, 14.625, 14.894, 14.978,
15.434, 15.578, 16.038, 16.107, 16.277, 16.365, 16.204, 16.465,
16.401, 16.895, 17.057, 16.621, 16.225, 16.075, 15.863, 16.292,
16.551, 16.724, 16.817, 16.81, 17.192, 16.86, 16.745, 16.707,
16.552, 16.133, 16.301, 16.08, 15.81, 15.75, 15.909, 16.127,
16.457, 16.204, 16.329, 16.748, 16.624, 17.011, 16.548, 16.831,
16.653, 16.791, 16.57, 16.778, 16.928, 16.932, 17.22, 16.876,
17.301, 17.422, 17.689, 17.316, 17.547, 17.534, 17.409, 17.669,
17.416, 17.859, 17.477, 17.307, 17.245, 17.352, 17.851, 17.412,
17.144, 17.138, 17.085, 16.926, 16.674, 16.854, 17.064, 16.95,
16.609, 16.957, 16.498, 16.552, 16.175, 15.858, 15.697, 15.781,
15.583, 15.36, 15.558, 16.046, 15.968, 15.905, 16.358, 16.783,
17.048, 16.762, 17.224, 17.363, 17.246, 16.79, 16.608, 16.423,
15.991, 15.527, 15.147, 14.759, 14.792, 15.206, 15.148, 15.046,
15.429, 14.999, 15.407, 15.124, 14.72, 14.713, 15.022, 15.092,
14.982, 15.001, 14.734, 14.713, 14.841, 14.562, 15.005, 15.483,
15.472, 15.277, 15.503, 15.116, 15.12, 15.442, 15.476, 15.789,
15.36, 15.764, 16.218, 16.493, 16.642, 17.088, 16.816, 16.645,
16.336, 16.511, 16.2, 15.994, 15.86, 15.929, 16.316, 16.416,
16.746, 17.173, 17.531, 17.627, 17.407, 17.49, 17.768, 17.509,
17.795, 18.147, 18.63, 18.945, 19.021, 19.518, 19.6, 19.744,
19.63, 19.32, 18.933, 19.297, 19.598, 19.446, 19.236, 19.198,
19.144, 19.159, 19.065, 19.032, 18.586, 18.272, 18.119, 18.3,
17.894, 17.744, 17.5, 17.083, 17.092, 16.864, 16.453, 16.31,
16.681, 16.342, 16.447, 16.715, 17.068, 17.067, 16.822, 16.673,
16.675, 16.592, 16.686, 16.397, 15.902, 15.597, 15.357, 15.162,
15.348, 15.603, 15.283, 15.257, 15.082, 14.621, 14.366, 14.039,
13.957, 14.141, 13.854, 14.243, 14.414, 14.033, 13.93, 14.104,
14.461, 14.249, 14.053, 14.165, 14.035, 14.408, 14.501, 14.019,
14.265, 14.67, 14.797, 14.42, 14.681, 15.16, 14.715, 14.292,
14.411, 14.656, 15.094, 15.366, 15.055, 15.198, 14.762, 14.294,
13.854, 13.811, 13.549, 13.927, 13.897, 13.421, 13.037, 13.32,
13.721, 13.511, 13.999, 13.529, 13.418, 13.881, 14.326, 14.362,
13.987, 14.015, 13.599, 13.343, 13.307, 13.689, 13.851, 13.404,
13.577, 13.395, 13.619, 13.195, 12.904, 12.553, 12.294, 12.649,
12.425, 11.967, 12.062, 11.71, 11.645, 12.058, 12.136, 11.749,
11.953, 12.401, 12.044, 11.901, 11.631, 11.396, 11.036, 11.244,
10.864, 11.207, 11.135, 11.39, 11.723, 12.084, 11.8, 11.471,
11.33, 11.504, 11.295, 11.3, 10.901, 10.494, 10.825, 11.054,
10.866, 10.713, 10.875, 10.846, 10.947, 11.422, 11.158, 10.94,
10.521, 10.36, 10.411, 10.792, 10.472, 10.305, 10.525, 10.853,
10.556, 10.72, 10.54, 10.583, 10.299, 10.061, 10.004, 9.903,
9.796, 9.472, 9.246, 9.54, 9.456, 9.177, 9.484, 9.557,
9.493, 9.968, 9.536, 9.39, 8.922, 8.423, 8.518, 8.686,
8.771, 9.098, 9.281, 8.858, 9.027, 8.553, 8.784, 8.996,
9.379, 9.846, 9.855, 9.502, 9.608, 9.761, 9.409, 9.4,
9.332, 9.34, 9.284, 8.844, 8.722, 8.376, 8.775, 8.293,
8.144, 8.63, 8.831, 8.957, 9.18, 9.601, 9.695, 10.018,
9.841, 9.743, 9.292, 8.85, 9.316, 9.288, 9.519, 9.738,
9.289, 9.785, 9.804, 10.06, 10.188, 10.095, 9.739, 9.881,
9.7, 9.991, 10.391, 10.002],
columns=['value'])
def test_performance_stats(self):
"""test the function performance_statistics()
"""
pass
def test_fv(self):
print(f'test with test data and empty DataFrame')
self.assertAlmostEqual(eval_fv(self.test_data1), 6.39245474)
self.assertAlmostEqual(eval_fv(self.test_data2), 10.05126375)
self.assertAlmostEqual(eval_fv(self.test_data3), 6.95068113)
self.assertAlmostEqual(eval_fv(self.test_data4), 8.86508591)
self.assertAlmostEqual(eval_fv(self.test_data5), 4.58627048)
self.assertAlmostEqual(eval_fv(self.test_data6), 4.10346795)
self.assertAlmostEqual(eval_fv(self.test_data7), 2.92532313)
self.assertAlmostEqual(eval_fv(pd.DataFrame()), -np.inf)
print(f'Error testing')
self.assertRaises(AssertionError, eval_fv, 15)
self.assertRaises(KeyError,
eval_fv,
pd.DataFrame([1, 2, 3], columns=['non_value']))
def test_max_drawdown(self):
print(f'test with test data and empty DataFrame')
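# eval_max_drawdown() appears to return a 4-tuple: (max drawdown ratio, peak index, valley index, recovery index or NaN if the value never recovers).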
self.assertAlmostEqual(eval_max_drawdown(self.test_data1)[0], 0.264274308)
self.assertEqual(eval_max_drawdown(self.test_data1)[1], 53)
self.assertEqual(eval_max_drawdown(self.test_data1)[2], 86)
self.assertTrue(np.isnan(eval_max_drawdown(self.test_data1)[3]))
self.assertAlmostEqual(eval_max_drawdown(self.test_data2)[0], 0.334690849)
self.assertEqual(eval_max_drawdown(self.test_data2)[1], 0)
self.assertEqual(eval_max_drawdown(self.test_data2)[2], 10)
self.assertEqual(eval_max_drawdown(self.test_data2)[3], 19)
self.assertAlmostEqual(eval_max_drawdown(self.test_data3)[0], 0.244452899)
self.assertEqual(eval_max_drawdown(self.test_data3)[1], 90)
self.assertEqual(eval_max_drawdown(self.test_data3)[2], 99)
self.assertTrue(np.isnan(eval_max_drawdown(self.test_data3)[3]))
self.assertAlmostEqual(eval_max_drawdown(self.test_data4)[0], 0.201849684)
self.assertEqual(eval_max_drawdown(self.test_data4)[1], 14)
self.assertEqual(eval_max_drawdown(self.test_data4)[2], 50)
self.assertEqual(eval_max_drawdown(self.test_data4)[3], 54)
self.assertAlmostEqual(eval_max_drawdown(self.test_data5)[0], 0.534206456)
self.assertEqual(eval_max_drawdown(self.test_data5)[1], 21)
self.assertEqual(eval_max_drawdown(self.test_data5)[2], 60)
self.assertTrue(np.isnan(eval_max_drawdown(self.test_data5)[3]))
self.assertAlmostEqual(eval_max_drawdown(self.test_data6)[0], 0.670062689)
self.assertEqual(eval_max_drawdown(self.test_data6)[1], 0)
self.assertEqual(eval_max_drawdown(self.test_data6)[2], 70)
self.assertTrue(np.isnan(eval_max_drawdown(self.test_data6)[3]))
self.assertAlmostEqual(eval_max_drawdown(self.test_data7)[0], 0.783577449)
self.assertEqual(eval_max_drawdown(self.test_data7)[1], 17)
self.assertEqual(eval_max_drawdown(self.test_data7)[2], 51)
self.assertTrue(np.isnan(eval_max_drawdown(self.test_data7)[3]))
self.assertEqual(eval_max_drawdown(pd.DataFrame()), -np.inf)
print(f'Error testing')
self.assertRaises(AssertionError, eval_fv, 15)
self.assertRaises(KeyError,
eval_fv,
pd.DataFrame([1, 2, 3], columns=['non_value']))
# test max drawdown == 0:
# TODO: investigate how the drawdown ratio behaves when peak values reach zero or turn negative (division by zero)
self.assertAlmostEqual(eval_max_drawdown(self.test_data4 - 5)[0], 1.0770474121951792)
self.assertEqual(eval_max_drawdown(self.test_data4 - 5)[1], 14)
self.assertEqual(eval_max_drawdown(self.test_data4 - 5)[2], 50)
def test_info_ratio(self):
reference = self.test_data1
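# Each test series is evaluated against test_data1 as the benchmark, using its 'value' column.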
self.assertAlmostEqual(eval_info_ratio(self.test_data2, reference, 'value'), 0.075553316)
self.assertAlmostEqual(eval_info_ratio(self.test_data3, reference, 'value'), 0.018949457)
self.assertAlmostEqual(eval_info_ratio(self.test_data4, reference, 'value'), 0.056328143)
self.assertAlmostEqual(eval_info_ratio(self.test_data5, reference, 'value'), -0.004270068)
self.assertAlmostEqual(eval_info_ratio(self.test_data6, reference, 'value'), 0.009198027)
self.assertAlmostEqual(eval_info_ratio(self.test_data7, reference, 'value'), -0.000890283)
def test_volatility(self):
self.assertAlmostEqual(eval_volatility(self.test_data1), 0.748646166)
self.assertAlmostEqual(eval_volatility(self.test_data2), 0.75527442)
self.assertAlmostEqual(eval_volatility(self.test_data3), 0.654188853)
self.assertAlmostEqual(eval_volatility(self.test_data4), 0.688375814)
self.assertAlmostEqual(eval_volatility(self.test_data5), 1.089989522)
self.assertAlmostEqual(eval_volatility(self.test_data6), 1.775419308)
self.assertAlmostEqual(eval_volatility(self.test_data7), 1.962758406)
self.assertAlmostEqual(eval_volatility(self.test_data1, logarithm=False), 0.750993311)
self.assertAlmostEqual(eval_volatility(self.test_data2, logarithm=False), 0.75571473)
self.assertAlmostEqual(eval_volatility(self.test_data3, logarithm=False), 0.655331424)
self.assertAlmostEqual(eval_volatility(self.test_data4, logarithm=False), 0.692683021)
self.assertAlmostEqual(eval_volatility(self.test_data5, logarithm=False), 1.09602969)
self.assertAlmostEqual(eval_volatility(self.test_data6, logarithm=False), 1.774789504)
self.assertAlmostEqual(eval_volatility(self.test_data7, logarithm=False), 2.003329156)
self.assertEqual(eval_volatility(pd.DataFrame()), -np.inf)
self.assertRaises(AssertionError, eval_volatility, [1, 2, 3])
# Test the volatility calculation on the long data series
expected_volatility = np.array([np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
0.39955371, 0.39974258, 0.40309866, 0.40486593, 0.4055514,
0.40710639, 0.40708157, 0.40609006, 0.4073625, 0.40835305,
0.41155304, 0.41218193, 0.41207489, 0.41300276, 0.41308415,
0.41292392, 0.41207645, 0.41238397, 0.41229291, 0.41164056,
0.41316317, 0.41348842, 0.41462249, 0.41474574, 0.41652625,
0.41649176, 0.41701556, 0.4166593, 0.41684221, 0.41491689,
0.41435209, 0.41549087, 0.41849338, 0.41998049, 0.41959106,
0.41907311, 0.41916103, 0.42120773, 0.42052391, 0.42111225,
0.42124589, 0.42356445, 0.42214672, 0.42324022, 0.42476639,
0.42621689, 0.42549439, 0.42533678, 0.42539414, 0.42545038,
0.42593637, 0.42652095, 0.42665489, 0.42699563, 0.42798159,
0.42784512, 0.42898006, 0.42868781, 0.42874188, 0.42789631,
0.4277768, 0.42776827, 0.42685216, 0.42660989, 0.42563155,
0.42618281, 0.42606281, 0.42505222, 0.42653242, 0.42555378,
0.42500842, 0.42561939, 0.42442059, 0.42395414, 0.42384356,
0.42319135, 0.42397497, 0.42488579, 0.42449729, 0.42508766,
0.42509878, 0.42456616, 0.42535577, 0.42681884, 0.42688552,
0.42779918, 0.42706058, 0.42792887, 0.42762114, 0.42894045,
0.42977398, 0.42919859, 0.42829041, 0.42780946, 0.42825318,
0.42858952, 0.42858315, 0.42805601, 0.42764751, 0.42744107,
0.42775518, 0.42707283, 0.4258592, 0.42615335, 0.42526286,
0.4248906, 0.42368986, 0.4232565, 0.42265079, 0.42263954,
0.42153046, 0.42132051, 0.41995353, 0.41916605, 0.41914271,
0.41876945, 0.41740175, 0.41583884, 0.41614026, 0.41457908,
0.41472411, 0.41310876, 0.41261041, 0.41212369, 0.41211677,
0.4100645, 0.40852504, 0.40860297, 0.40745338, 0.40698661,
0.40644546, 0.40591375, 0.40640744, 0.40620663, 0.40656649,
0.40727154, 0.40797605, 0.40807137, 0.40808913, 0.40809676,
0.40711767, 0.40724628, 0.40713077, 0.40772698, 0.40765157,
0.40658297, 0.4065991, 0.405011, 0.40537645, 0.40432626,
0.40390177, 0.40237701, 0.40291623, 0.40301797, 0.40324145,
0.40312864, 0.40328316, 0.40190955, 0.40246506, 0.40237663,
0.40198407, 0.401969, 0.40185623, 0.40198313, 0.40005643,
0.39940743, 0.39850438, 0.39845398, 0.39695093, 0.39697295,
0.39663201, 0.39675444, 0.39538699, 0.39331959, 0.39326074,
0.39193287, 0.39157266, 0.39021327, 0.39062591, 0.38917591,
0.38976991, 0.38864187, 0.38872158, 0.38868096, 0.38868377,
0.38842057, 0.38654784, 0.38649517, 0.38600464, 0.38408115,
0.38323049, 0.38260215, 0.38207663, 0.38142669, 0.38003262,
0.37969367, 0.37768092, 0.37732108, 0.37741991, 0.37617779,
0.37698504, 0.37606784, 0.37499276, 0.37533731, 0.37350437,
0.37375172, 0.37385382, 0.37384003, 0.37338938, 0.37212288,
0.37273075, 0.370559, 0.37038506, 0.37062153, 0.36964661,
0.36818564, 0.3656634, 0.36539259, 0.36428672, 0.36502487,
0.3647148, 0.36551435, 0.36409919, 0.36348181, 0.36254383,
0.36166601, 0.36142665, 0.35954942, 0.35846915, 0.35886759,
0.35813867, 0.35642888, 0.35375231, 0.35061783, 0.35078463,
0.34995508, 0.34688918, 0.34548257, 0.34633158, 0.34622833,
0.34652111, 0.34622774, 0.34540951, 0.34418809, 0.34276593,
0.34160916, 0.33811193, 0.33822709, 0.3391685, 0.33883381])
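# eval_volatility() appears to also store a rolling 'volatility' column in the DataFrame; the scalar result should equal the NaN-ignoring mean of the expected rolling values.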
test_volatility = eval_volatility(self.long_data)
test_volatility_roll = self.long_data['volatility'].values
self.assertAlmostEqual(test_volatility, np.nanmean(expected_volatility))
self.assertTrue(np.allclose(expected_volatility, test_volatility_roll, equal_nan=True))
def test_sharp(self):
self.assertAlmostEqual(eval_sharp(self.test_data1, 5, 0), 0.06135557)
self.assertAlmostEqual(eval_sharp(self.test_data2, 5, 0), 0.167858667)
self.assertAlmostEqual(eval_sharp(self.test_data3, 5, 0), 0.09950547)
self.assertAlmostEqual(eval_sharp(self.test_data4, 5, 0), 0.154928241)
self.assertAlmostEqual(eval_sharp(self.test_data5, 5, 0.002), 0.007868673)
self.assertAlmostEqual(eval_sharp(self.test_data6, 5, 0.002), 0.018306537)
self.assertAlmostEqual(eval_sharp(self.test_data7, 5, 0.002), 0.006259971)
# Test the Sharpe ratio calculation on the long data series
expected_sharp = np.array([np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
-0.02346815, -0.02618783, -0.03763912, -0.03296276, -0.03085698,
-0.02851101, -0.02375842, -0.02016746, -0.01107885, -0.01426613,
-0.00787204, -0.01135784, -0.01164232, -0.01003481, -0.00022512,
-0.00046792, -0.01209378, -0.01278892, -0.01298135, -0.01938214,
-0.01671044, -0.02120509, -0.0244281, -0.02416067, -0.02763238,
-0.027579, -0.02372774, -0.02215294, -0.02467094, -0.02091266,
-0.02590194, -0.03049876, -0.02077131, -0.01483653, -0.02488144,
-0.02671638, -0.02561547, -0.01957986, -0.02479803, -0.02703162,
-0.02658087, -0.01641755, -0.01946472, -0.01647757, -0.01280889,
-0.00893643, -0.00643275, -0.00698457, -0.00549962, -0.00654677,
-0.00494757, -0.0035633, -0.00109037, 0.00750654, 0.00451208,
0.00625502, 0.01221367, 0.01326454, 0.01535037, 0.02269538,
0.02028715, 0.02127712, 0.02333264, 0.02273159, 0.01670643,
0.01376513, 0.01265342, 0.02211647, 0.01612449, 0.00856706,
-0.00077147, -0.00268848, 0.00210993, -0.00443934, -0.00411912,
-0.0018756, -0.00867461, -0.00581601, -0.00660835, -0.00861137,
-0.00678614, -0.01188408, -0.00589617, -0.00244323, -0.00201891,
-0.01042846, -0.01471016, -0.02167034, -0.02258554, -0.01306809,
-0.00909086, -0.01233746, -0.00595166, -0.00184208, 0.00750497,
0.01481886, 0.01761972, 0.01562886, 0.01446414, 0.01285826,
0.01357719, 0.00967613, 0.01636272, 0.01458437, 0.02280183,
0.02151903, 0.01700276, 0.01597368, 0.02114336, 0.02233297,
0.02585631, 0.02768459, 0.03519235, 0.04204535, 0.04328161,
0.04672855, 0.05046191, 0.04619848, 0.04525853, 0.05381529,
0.04598861, 0.03947394, 0.04665006, 0.05586077, 0.05617728,
0.06495018, 0.06205172, 0.05665466, 0.06500615, 0.0632062,
0.06084328, 0.05851466, 0.05659229, 0.05159347, 0.0432977,
0.0474047, 0.04231723, 0.03613176, 0.03618391, 0.03591012,
0.03885674, 0.0402686, 0.03846423, 0.04534014, 0.04721458,
0.05130912, 0.05026281, 0.05394312, 0.05529349, 0.05949243,
0.05463304, 0.06195165, 0.06767606, 0.06880985, 0.07048996,
0.07078815, 0.07420767, 0.06773439, 0.0658441, 0.06470875,
0.06302349, 0.06456876, 0.06411282, 0.06216669, 0.067094,
0.07055075, 0.07254976, 0.07119253, 0.06173308, 0.05393352,
0.05681246, 0.05250643, 0.06099845, 0.0655544, 0.06977334,
0.06636514, 0.06177949, 0.06869908, 0.06719767, 0.06178738,
0.05915714, 0.06882277, 0.06756821, 0.06507994, 0.06489791,
0.06553941, 0.073123, 0.07576757, 0.06805446, 0.06063571,
0.05033801, 0.05206971, 0.05540306, 0.05249118, 0.05755587,
0.0586174, 0.05051288, 0.0564852, 0.05757284, 0.06358355,
0.06130082, 0.04925482, 0.03834472, 0.04163981, 0.04648316,
0.04457858, 0.04324626, 0.04328791, 0.04156207, 0.04818652,
0.04972634, 0.06024123, 0.06489556, 0.06255485, 0.06069815,
0.06466389, 0.07081163, 0.07895358, 0.0881782, 0.09374151,
0.08336506, 0.08764795, 0.09080174, 0.08808926, 0.08641158,
0.07811943, 0.06885318, 0.06479503, 0.06851185, 0.07382819,
0.07047903, 0.06658251, 0.07638379, 0.08667974, 0.08867918,
0.08245323, 0.08961866, 0.09905298, 0.0961908, 0.08562706,
0.0839014, 0.0849072, 0.08338395, 0.08783487, 0.09463609,
0.10332336, 0.11806497, 0.11220297, 0.11589097, 0.11678405])
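# As with volatility, eval_sharp() appears to populate a rolling 'sharp' column in place; the scalar result matches the NaN-ignoring mean of the rolling values.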
test_sharp = eval_sharp(self.long_data, 5, 0.00035)
self.assertAlmostEqual(np.nanmean(expected_sharp), test_sharp)
self.assertTrue(np.allclose(self.long_data['sharp'].values, expected_sharp, equal_nan=True))
def test_beta(self):
reference = self.test_data1
self.assertAlmostEqual(eval_beta(self.test_data2, reference, 'value'), -0.017148939)
self.assertAlmostEqual(eval_beta(self.test_data3, reference, 'value'), -0.042204233)
self.assertAlmostEqual(eval_beta(self.test_data4, reference, 'value'), -0.15652986)
self.assertAlmostEqual(eval_beta(self.test_data5, reference, 'value'), -0.049195532)
self.assertAlmostEqual(eval_beta(self.test_data6, reference, 'value'), -0.026995082)
self.assertAlmostEqual(eval_beta(self.test_data7, reference, 'value'), -0.01147809)
self.assertRaises(TypeError, eval_beta, [1, 2, 3], reference, 'value')
self.assertRaises(TypeError, eval_beta, self.test_data3, [1, 2, 3], 'value')
self.assertRaises(KeyError, eval_beta, self.test_data3, reference, 'not_found_value')
# Test the beta calculation on the long data series
expected_beta = np.array([np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
-0.04988841, -0.05127618, -0.04692104, -0.04272652, -0.04080598,
-0.0493347, -0.0460858, -0.0416761, -0.03691527, -0.03724924,
-0.03678865, -0.03987324, -0.03488321, -0.02567672, -0.02690303,
-0.03010128, -0.02437967, -0.02571932, -0.02455681, -0.02839811,
-0.03358653, -0.03396697, -0.03466321, -0.03050966, -0.0247583,
-0.01629325, -0.01880895, -0.01480403, -0.01348783, -0.00544294,
-0.00648176, -0.00467036, -0.01135331, -0.0156841, -0.02340763,
-0.02615705, -0.02730771, -0.02906174, -0.02860664, -0.02412914,
-0.02066416, -0.01744816, -0.02185133, -0.02145285, -0.02681765,
-0.02827694, -0.02394581, -0.02744096, -0.02778825, -0.02703065,
-0.03160023, -0.03615371, -0.03681072, -0.04265126, -0.04344738,
-0.04232421, -0.04705272, -0.04533344, -0.04605934, -0.05272737,
-0.05156463, -0.05134196, -0.04730733, -0.04425352, -0.03869831,
-0.04159571, -0.04223998, -0.04346747, -0.04229844, -0.04740093,
-0.04992507, -0.04621232, -0.04477644, -0.0486915, -0.04598224,
-0.04943463, -0.05006391, -0.05362256, -0.04994067, -0.05464769,
-0.05443275, -0.05513493, -0.05173594, -0.04500994, -0.04662891,
-0.03903505, -0.0419592, -0.04307773, -0.03925718, -0.03711574,
-0.03992631, -0.0433058, -0.04533641, -0.0461183, -0.05600344,
-0.05758377, -0.05959874, -0.05605942, -0.06002859, -0.06253002,
-0.06747014, -0.06427915, -0.05931947, -0.05769974, -0.04791515,
-0.05175088, -0.05748039, -0.05385232, -0.05072975, -0.05052637,
-0.05125567, -0.05005785, -0.05325104, -0.04977727, -0.04947867,
-0.05148544, -0.05739156, -0.05742069, -0.06047279, -0.0558414,
-0.06086126, -0.06265151, -0.06411129, -0.06828052, -0.06781762,
-0.07083409, -0.07211207, -0.06799162, -0.06913295, -0.06775162,
-0.0696265, -0.06678248, -0.06867502, -0.06581961, -0.07055823,
-0.06448184, -0.06097973, -0.05795587, -0.0618383, -0.06130145,
-0.06050652, -0.05936661, -0.05749424, -0.0499, -0.05050495,
-0.04962687, -0.05033439, -0.05070116, -0.05422009, -0.05369759,
-0.05548943, -0.05907353, -0.05933035, -0.05927918, -0.06227663,
-0.06011455, -0.05650432, -0.05828134, -0.05620949, -0.05715323,
-0.05482478, -0.05387113, -0.05095559, -0.05377999, -0.05334267,
-0.05220438, -0.04001521, -0.03892434, -0.03660782, -0.04282708,
-0.04324623, -0.04127048, -0.04227559, -0.04275226, -0.04347049,
-0.04125853, -0.03806295, -0.0330632, -0.03155531, -0.03277152,
-0.03304518, -0.03878731, -0.03830672, -0.03727434, -0.0370571,
-0.04509224, -0.04207632, -0.04116198, -0.04545179, -0.04584584,
-0.05287341, -0.05417433, -0.05175836, -0.05005509, -0.04268674,
-0.03442321, -0.03457309, -0.03613426, -0.03524391, -0.03629479,
-0.04361312, -0.02626705, -0.02406115, -0.03046384, -0.03181044,
-0.03375164, -0.03661673, -0.04520779, -0.04926951, -0.05726738,
-0.0584486, -0.06220608, -0.06800563, -0.06797431, -0.07562211,
-0.07481996, -0.07731229, -0.08413381, -0.09031826, -0.09691925,
-0.11018071, -0.11952675, -0.10826026, -0.11173895, -0.10756359,
-0.10775916, -0.11664559, -0.10505051, -0.10606547, -0.09855355,
-0.10004159, -0.10857084, -0.12209301, -0.11605758, -0.11105113,
-0.1155195, -0.11569505, -0.10513348, -0.09611072, -0.10719791,
-0.10843965, -0.11025856, -0.10247839, -0.10554044, -0.10927647,
-0.10645088, -0.09982498, -0.10542734, -0.09631372, -0.08229695])
test_beta_mean = eval_beta(self.long_data, self.long_bench, 'value')
test_beta_roll = self.long_data['beta'].values
self.assertAlmostEqual(test_beta_mean, np.nanmean(expected_beta))
self.assertTrue(np.allclose(test_beta_roll, expected_beta, equal_nan=True))
def test_alpha(self):
reference = self.test_data1
self.assertAlmostEqual(eval_alpha(self.test_data2, 5, reference, 'value', 0.5), 11.63072977)
self.assertAlmostEqual(eval_alpha(self.test_data3, 5, reference, 'value', 0.5), 1.886590071)
self.assertAlmostEqual(eval_alpha(self.test_data4, 5, reference, 'value', 0.5), 6.827021872)
self.assertAlmostEqual(eval_alpha(self.test_data5, 5, reference, 'value', 0.92), -1.192265168)
self.assertAlmostEqual(eval_alpha(self.test_data6, 5, reference, 'value', 0.92), -1.437142359)
self.assertAlmostEqual(eval_alpha(self.test_data7, 5, reference, 'value', 0.92), -1.781311545)
        # test alpha calculation on the long data set
expected_alpha = np.array([np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
-0.09418119, -0.11188463, -0.17938358, -0.15588172, -0.1462678,
-0.13089586, -0.10780125, -0.09102891, -0.03987585, -0.06075686,
-0.02459503, -0.04104284, -0.0444565, -0.04074585, 0.02191275,
0.02255955, -0.05583375, -0.05875539, -0.06055551, -0.09648245,
-0.07913737, -0.10627829, -0.12320965, -0.12368335, -0.1506743,
-0.15768033, -0.13638829, -0.13065298, -0.14537834, -0.127428,
-0.15504529, -0.18184636, -0.12652146, -0.09190138, -0.14847221,
-0.15840648, -0.1525789, -0.11859418, -0.14700954, -0.16295761,
-0.16051645, -0.10364859, -0.11961134, -0.10258267, -0.08090148,
-0.05727746, -0.0429945, -0.04672356, -0.03581408, -0.0439215,
-0.03429495, -0.0260362, -0.01075022, 0.04931808, 0.02779388,
0.03984083, 0.08311951, 0.08995566, 0.10522428, 0.16159058,
0.14238174, 0.14759783, 0.16257712, 0.158908, 0.11302115,
0.0909566, 0.08272888, 0.15261884, 0.10546376, 0.04990313,
-0.01284111, -0.02720704, 0.00454725, -0.03965491, -0.03818265,
-0.02186992, -0.06574751, -0.04846454, -0.05204211, -0.06316498,
-0.05095099, -0.08502656, -0.04681162, -0.02362027, -0.02205091,
-0.07706374, -0.10371841, -0.14434688, -0.14797935, -0.09055402,
-0.06739549, -0.08824959, -0.04855888, -0.02291244, 0.04027138,
0.09370505, 0.11472939, 0.10243593, 0.0921445, 0.07662648,
0.07946651, 0.05450718, 0.10497677, 0.09068334, 0.15462924,
0.14231034, 0.10544952, 0.09980256, 0.14035223, 0.14942974,
0.17624102, 0.19035477, 0.2500807, 0.30724652, 0.31768915,
0.35007521, 0.38412975, 0.34356521, 0.33614463, 0.41206165,
0.33999177, 0.28045963, 0.34076789, 0.42220356, 0.42314636,
0.50790423, 0.47713348, 0.42520169, 0.50488411, 0.48705211,
0.46252601, 0.44325578, 0.42640573, 0.37986783, 0.30652822,
0.34503393, 0.2999069, 0.24928617, 0.24730218, 0.24326897,
0.26657905, 0.27861168, 0.26392824, 0.32552649, 0.34177792,
0.37837011, 0.37025267, 0.4030612, 0.41339361, 0.45076809,
0.40383354, 0.47093422, 0.52505036, 0.53614256, 0.5500943,
0.55319293, 0.59021451, 0.52358459, 0.50605947, 0.49359168,
0.47895956, 0.49320243, 0.4908336, 0.47310767, 0.51821564,
0.55105932, 0.57291504, 0.5599809, 0.46868842, 0.39620087,
0.42086934, 0.38317217, 0.45934108, 0.50048866, 0.53941991,
0.50676751, 0.46500915, 0.52993663, 0.51668366, 0.46405428,
0.44100603, 0.52726147, 0.51565458, 0.49186248, 0.49001081,
0.49367648, 0.56422294, 0.58882785, 0.51334664, 0.44386256,
0.35056709, 0.36490029, 0.39205071, 0.3677061, 0.41134736,
0.42315067, 0.35356394, 0.40324562, 0.41340007, 0.46503322,
0.44355762, 0.34854314, 0.26412842, 0.28633753, 0.32335224,
0.30761141, 0.29709569, 0.29570487, 0.28000063, 0.32802547,
0.33967726, 0.42511212, 0.46252357, 0.44244974, 0.42152907,
0.45436727, 0.50482359, 0.57339198, 0.6573356, 0.70912003,
0.60328917, 0.6395092, 0.67015805, 0.64241557, 0.62779142,
0.55028063, 0.46448736, 0.43709245, 0.46777983, 0.51789439,
0.48594916, 0.4456216, 0.52008189, 0.60548684, 0.62792473,
0.56645031, 0.62766439, 0.71829315, 0.69481356, 0.59550329,
0.58133754, 0.59014148, 0.58026655, 0.61719273, 0.67373203,
0.75573056, 0.89501633, 0.8347253, 0.87964685, 0.89015835])
test_alpha_mean = eval_alpha(self.long_data, 100, self.long_bench, 'value')
test_alpha_roll = self.long_data['alpha'].values
self.assertAlmostEqual(test_alpha_mean, np.nanmean(expected_alpha))
self.assertTrue(np.allclose(test_alpha_roll, expected_alpha, equal_nan=True))
def test_calmar(self):
"""test evaluate function eval_calmar()"""
pass
def test_benchmark(self):
reference = self.test_data1
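        # all six data sets below are evaluated against the same reference (test_data1),
        # so the benchmark total return and annual return are expected to be identical
        # (eval_benchmark presumably depends only on the reference series)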
tr, yr = eval_benchmark(self.test_data2, reference, 'value')
self.assertAlmostEqual(tr, 0.19509091)
self.assertAlmostEqual(yr, 0.929154957)
tr, yr = eval_benchmark(self.test_data3, reference, 'value')
self.assertAlmostEqual(tr, 0.19509091)
self.assertAlmostEqual(yr, 0.929154957)
tr, yr = eval_benchmark(self.test_data4, reference, 'value')
self.assertAlmostEqual(tr, 0.19509091)
self.assertAlmostEqual(yr, 0.929154957)
tr, yr = eval_benchmark(self.test_data5, reference, 'value')
self.assertAlmostEqual(tr, 0.19509091)
self.assertAlmostEqual(yr, 0.929154957)
tr, yr = eval_benchmark(self.test_data6, reference, 'value')
self.assertAlmostEqual(tr, 0.19509091)
self.assertAlmostEqual(yr, 0.929154957)
tr, yr = eval_benchmark(self.test_data7, reference, 'value')
self.assertAlmostEqual(tr, 0.19509091)
self.assertAlmostEqual(yr, 0.929154957)
def test_evaluate(self):
pass
class TestLoop(unittest.TestCase):
"""通过一个假设但精心设计的例子来测试loop_step以及loop方法的正确性"""
def setUp(self):
        # carefully designed simulated share names, trading dates, and share prices
self.shares = ['share1', 'share2', 'share3', 'share4', 'share5', 'share6', 'share7']
self.dates = ['2016/07/01', '2016/07/04', '2016/07/05', '2016/07/06', '2016/07/07',
'2016/07/08', '2016/07/11', '2016/07/12', '2016/07/13', '2016/07/14',
'2016/07/15', '2016/07/18', '2016/07/19', '2016/07/20', '2016/07/21',
'2016/07/22', '2016/07/25', '2016/07/26', '2016/07/27', '2016/07/28',
'2016/07/29', '2016/08/01', '2016/08/02', '2016/08/03', '2016/08/04',
'2016/08/05', '2016/08/08', '2016/08/09', '2016/08/10', '2016/08/11',
'2016/08/12', '2016/08/15', '2016/08/16', '2016/08/17', '2016/08/18',
'2016/08/19', '2016/08/22', '2016/08/23', '2016/08/24', '2016/08/25',
'2016/08/26', '2016/08/29', '2016/08/30', '2016/08/31', '2016/09/01',
'2016/09/02', '2016/09/05', '2016/09/06', '2016/09/07', '2016/09/08',
'2016/09/09', '2016/09/12', '2016/09/13', '2016/09/14', '2016/09/15',
'2016/09/16', '2016/09/19', '2016/09/20', '2016/09/21', '2016/09/22',
'2016/09/23', '2016/09/26', '2016/09/27', '2016/09/28', '2016/09/29',
'2016/09/30', '2016/10/10', '2016/10/11', '2016/10/12', '2016/10/13',
'2016/10/14', '2016/10/17', '2016/10/18', '2016/10/19', '2016/10/20',
'2016/10/21', '2016/10/23', '2016/10/24', '2016/10/25', '2016/10/26',
'2016/10/27', '2016/10/29', '2016/10/30', '2016/10/31', '2016/11/01',
'2016/11/02', '2016/11/05', '2016/11/06', '2016/11/07', '2016/11/08',
'2016/11/09', '2016/11/12', '2016/11/13', '2016/11/14', '2016/11/15',
'2016/11/16', '2016/11/19', '2016/11/20', '2016/11/21', '2016/11/22']
self.dates = [pd.Timestamp(date_text) for date_text in self.dates]
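        # price matrix: 100 trading days (rows) x 7 shares (columns)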
self.prices = np.array([[5.35, 5.09, 5.03, 4.98, 4.50, 5.09, 4.75],
[5.66, 4.84, 5.21, 5.44, 4.35, 5.06, 4.48],
[5.79, 4.60, 5.02, 5.45, 4.07, 4.76, 4.56],
[5.56, 4.63, 5.50, 5.74, 3.88, 4.62, 4.62],
[5.88, 4.64, 5.07, 5.46, 3.74, 4.63, 4.62],
[6.25, 4.51, 5.11, 5.45, 3.98, 4.25, 4.59],
[5.93, 4.96, 5.15, 5.24, 4.08, 4.13, 4.33],
[6.39, 4.65, 5.02, 5.47, 4.00, 3.91, 3.88],
[6.31, 4.26, 5.10, 5.58, 4.28, 3.77, 3.47],
[5.86, 3.77, 5.24, 5.36, 4.01, 3.43, 3.51],
[5.61, 3.39, 4.93, 5.38, 4.14, 3.68, 3.83],
[5.31, 3.76, 4.96, 5.30, 4.49, 3.63, 3.67],
[5.40, 4.06, 5.40, 5.77, 4.49, 3.94, 3.79],
[5.03, 3.87, 5.74, 5.75, 4.46, 4.40, 4.18],
[5.38, 3.91, 5.53, 6.15, 4.13, 4.03, 4.02],
[5.79, 4.13, 5.79, 6.04, 3.79, 3.93, 4.41],
[6.27, 4.27, 5.68, 6.01, 4.23, 3.50, 4.65],
[6.59, 4.57, 5.90, 5.71, 4.57, 3.39, 4.89],
[6.91, 5.04, 5.75, 5.23, 4.92, 3.30, 4.41],
[6.71, 5.31, 6.11, 5.47, 5.28, 3.25, 4.66],
[6.33, 5.40, 5.77, 5.79, 5.67, 2.94, 4.37],
[6.07, 5.21, 5.85, 5.82, 6.00, 2.71, 4.58],
[5.98, 5.06, 5.61, 5.61, 5.89, 2.55, 4.76],
[6.46, 4.69, 5.75, 5.31, 5.55, 2.21, 4.37],
[6.95, 5.12, 5.50, 5.24, 5.39, 2.29, 4.16],
[6.77, 5.27, 5.14, 5.41, 5.26, 2.21, 4.02],
[6.70, 5.72, 5.31, 5.60, 5.31, 2.04, 3.77],
[6.28, 6.10, 5.68, 5.28, 5.22, 2.39, 3.38],
[6.61, 6.27, 5.73, 4.99, 4.90, 2.30, 3.07],
[6.25, 6.49, 6.04, 5.09, 4.57, 2.41, 2.90],
[6.47, 6.16, 6.27, 5.39, 4.96, 2.40, 2.50],
[6.45, 6.26, 6.60, 5.58, 4.82, 2.79, 2.76],
[6.88, 6.39, 6.10, 5.33, 4.39, 2.67, 2.29],
[7.00, 6.58, 6.25, 5.48, 4.63, 2.27, 2.17],
[6.59, 6.20, 6.73, 5.10, 5.05, 2.09, 1.84],
[6.59, 5.70, 6.91, 5.39, 4.68, 2.55, 1.83],
[6.64, 5.20, 7.01, 5.30, 5.02, 2.22, 2.21],
[6.38, 5.37, 7.36, 5.04, 4.84, 2.59, 2.00],
[6.10, 5.40, 7.72, 5.51, 4.60, 2.59, 1.76],
[6.35, 5.22, 7.68, 5.43, 4.66, 2.95, 1.27],
[6.52, 5.38, 7.62, 5.23, 4.41, 2.69, 1.40],
[6.87, 5.53, 7.74, 4.99, 4.87, 2.20, 1.11],
[6.84, 6.03, 7.53, 5.43, 4.42, 2.69, 1.60],
[7.09, 5.77, 7.46, 5.40, 4.08, 2.65, 1.23],
[6.88, 5.66, 7.84, 5.60, 4.16, 2.63, 1.59],
[6.84, 6.08, 8.11, 5.66, 4.10, 2.14, 1.50],
[6.98, 5.62, 8.04, 6.01, 4.43, 2.39, 1.80],
[7.02, 5.63, 7.65, 5.64, 4.07, 1.95, 1.55],
[7.13, 6.11, 7.52, 5.67, 3.97, 2.32, 1.35],
[7.59, 6.03, 7.67, 5.30, 4.16, 2.69, 1.51],
[7.61, 6.27, 7.47, 4.91, 4.12, 2.51, 1.08],
[7.21, 6.28, 7.44, 5.37, 4.04, 2.62, 1.06],
[7.48, 6.52, 7.59, 5.75, 3.84, 2.16, 1.43],
[7.66, 7.00, 7.94, 6.08, 3.46, 2.35, 1.43],
[7.51, 7.34, 8.25, 6.58, 3.18, 2.31, 1.74],
[7.12, 7.34, 7.77, 6.78, 3.10, 1.96, 1.44],
[6.97, 7.68, 8.03, 7.20, 3.55, 2.35, 1.83],
[6.67, 8.09, 7.87, 7.65, 3.66, 2.58, 1.71],
[6.20, 7.68, 7.58, 8.00, 3.66, 2.40, 2.12],
[6.34, 7.58, 7.33, 7.92, 3.29, 2.20, 2.45],
[6.22, 7.46, 7.22, 8.30, 2.80, 2.31, 2.85],
[5.98, 7.59, 6.86, 8.46, 2.88, 2.16, 2.79],
[6.37, 7.19, 7.18, 7.99, 3.04, 2.16, 2.91],
[6.56, 7.40, 7.54, 8.19, 3.45, 2.20, 3.26],
[6.26, 7.48, 7.24, 8.61, 3.88, 1.73, 3.14],
[6.69, 7.93, 6.85, 8.66, 3.58, 1.93, 3.53],
[7.13, 8.23, 6.60, 8.91, 3.60, 2.25, 3.65],
[6.83, 8.35, 6.65, 9.08, 3.97, 2.69, 3.69],
[7.31, 8.44, 6.74, 9.34, 4.05, 2.59, 3.50],
[7.43, 8.35, 7.19, 8.96, 4.40, 2.14, 3.25],
[7.54, 8.58, 7.14, 8.98, 4.06, 1.68, 3.64],
[7.18, 8.82, 6.88, 8.50, 3.60, 1.98, 4.00],
[7.21, 9.09, 7.14, 8.65, 3.61, 2.14, 3.63],
[7.45, 9.02, 7.30, 8.94, 4.10, 1.89, 3.78],
[7.37, 8.87, 6.95, 8.63, 3.74, 1.97, 3.42],
[6.88, 9.22, 7.02, 8.65, 4.02, 1.99, 3.76],
[7.08, 9.04, 7.38, 8.40, 3.95, 2.37, 3.62],
[6.75, 8.60, 7.50, 8.38, 3.81, 2.14, 3.67],
[6.60, 8.48, 7.60, 8.23, 3.71, 2.35, 3.61],
[6.21, 8.71, 7.15, 8.04, 3.94, 1.86, 3.39],
[6.36, 8.79, 7.30, 7.91, 4.43, 2.14, 3.43],
[6.82, 8.93, 7.80, 7.57, 4.07, 2.39, 3.33],
[6.45, 9.36, 8.15, 7.73, 4.04, 2.53, 3.03],
[6.85, 9.68, 8.40, 7.74, 4.34, 2.47, 3.28],
[6.48, 10.16, 8.87, 8.07, 4.80, 2.93, 3.46],
[6.10, 10.56, 8.53, 7.99, 5.18, 3.09, 3.25],
[5.64, 10.63, 8.94, 7.92, 4.90, 2.93, 2.95],
[6.01, 10.55, 8.52, 8.40, 5.40, 3.22, 2.87],
[6.21, 10.65, 8.80, 8.80, 5.73, 3.06, 2.63],
[6.61, 10.55, 8.92, 8.47, 5.62, 2.90, 2.40],
[7.02, 10.19, 9.20, 8.07, 5.20, 2.68, 2.53],
[7.04, 10.48, 8.71, 7.87, 4.85, 2.46, 2.96],
[6.77, 10.36, 8.25, 8.02, 5.18, 2.41, 3.26],
[7.09, 10.03, 8.19, 8.39, 4.72, 2.74, 2.97],
[6.65, 10.24, 7.80, 8.69, 4.62, 3.15, 3.16],
[7.07, 10.01, 7.69, 8.81, 4.55, 3.40, 3.58],
[6.80, 10.14, 7.23, 8.99, 4.37, 3.82, 3.23],
[6.79, 10.31, 6.98, 9.10, 4.26, 4.02, 3.62],
[6.48, 9.88, 7.07, 8.90, 4.25, 3.76, 3.13],
[6.39, 10.05, 6.95, 8.87, 4.59, 4.10, 2.93]])
        # carefully designed simulated PT (position target) signals:
self.pt_signals = np.array([[0.000, 0.000, 0.000, 0.000, 0.250, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.250, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.250, 0.100, 0.150],
[0.200, 0.200, 0.000, 0.000, 0.250, 0.100, 0.150],
[0.200, 0.200, 0.100, 0.000, 0.250, 0.100, 0.150],
[0.200, 0.200, 0.100, 0.000, 0.062, 0.100, 0.150],
[0.200, 0.200, 0.100, 0.000, 0.062, 0.100, 0.150],
[0.200, 0.200, 0.100, 0.000, 0.062, 0.100, 0.150],
[0.200, 0.200, 0.100, 0.000, 0.062, 0.100, 0.150],
[0.200, 0.200, 0.100, 0.000, 0.062, 0.100, 0.150],
[0.200, 0.200, 0.100, 0.000, 0.062, 0.100, 0.150],
[0.200, 0.200, 0.100, 0.000, 0.062, 0.100, 0.150],
[0.200, 0.200, 0.100, 0.000, 0.062, 0.100, 0.150],
[0.133, 0.200, 0.100, 0.000, 0.062, 0.100, 0.150],
[0.133, 0.200, 0.100, 0.000, 0.062, 0.100, 0.150],
[0.133, 0.200, 0.100, 0.000, 0.062, 0.100, 0.150],
[0.133, 0.200, 0.100, 0.000, 0.062, 0.100, 0.150],
[0.133, 0.200, 0.050, 0.000, 0.062, 0.100, 0.150],
[0.133, 0.200, 0.050, 0.000, 0.062, 0.100, 0.150],
[0.133, 0.200, 0.050, 0.000, 0.062, 0.100, 0.150],
[0.133, 0.200, 0.050, 0.000, 0.062, 0.100, 0.150],
[0.133, 0.200, 0.050, 0.000, 0.062, 0.100, 0.000],
[0.133, 0.200, 0.050, 0.000, 0.262, 0.100, 0.000],
[0.133, 0.200, 0.050, 0.000, 0.262, 0.100, 0.000],
[0.133, 0.200, 0.050, 0.000, 0.262, 0.100, 0.000],
[0.066, 0.200, 0.050, 0.150, 0.262, 0.100, 0.000],
[0.066, 0.200, 0.050, 0.150, 0.262, 0.100, 0.000],
[0.066, 0.200, 0.050, 0.150, 0.262, 0.100, 0.000],
[0.066, 0.200, 0.050, 0.150, 0.262, 0.100, 0.000],
[0.066, 0.200, 0.050, 0.150, 0.262, 0.100, 0.000],
[0.066, 0.200, 0.250, 0.150, 0.000, 0.300, 0.000],
[0.386, 0.136, 0.170, 0.102, 0.000, 0.204, 0.000],
[0.386, 0.136, 0.170, 0.102, 0.000, 0.204, 0.000],
[0.386, 0.136, 0.170, 0.102, 0.000, 0.204, 0.000],
[0.386, 0.136, 0.170, 0.102, 0.000, 0.204, 0.000],
[0.386, 0.136, 0.170, 0.102, 0.000, 0.204, 0.000],
[0.386, 0.136, 0.170, 0.102, 0.000, 0.204, 0.000],
[0.386, 0.136, 0.170, 0.102, 0.000, 0.204, 0.000],
[0.386, 0.136, 0.170, 0.102, 0.000, 0.204, 0.000],
[0.386, 0.136, 0.170, 0.102, 0.000, 0.204, 0.000],
[0.386, 0.136, 0.170, 0.102, 0.000, 0.204, 0.000],
[0.386, 0.136, 0.170, 0.102, 0.000, 0.204, 0.000],
[0.460, 0.119, 0.149, 0.089, 0.000, 0.179, 0.000],
[0.460, 0.119, 0.149, 0.089, 0.000, 0.179, 0.000],
[0.460, 0.119, 0.149, 0.089, 0.000, 0.179, 0.000],
[0.446, 0.116, 0.145, 0.087, 0.000, 0.087, 0.116],
[0.446, 0.116, 0.145, 0.087, 0.000, 0.087, 0.116],
[0.446, 0.116, 0.145, 0.087, 0.000, 0.087, 0.116],
[0.446, 0.116, 0.145, 0.087, 0.000, 0.087, 0.116],
[0.446, 0.116, 0.145, 0.087, 0.000, 0.087, 0.116],
[0.400, 0.208, 0.130, 0.078, 0.000, 0.078, 0.104],
[0.400, 0.208, 0.130, 0.078, 0.000, 0.078, 0.104],
[0.400, 0.208, 0.130, 0.078, 0.000, 0.078, 0.104],
[0.400, 0.208, 0.130, 0.078, 0.000, 0.078, 0.104],
[0.400, 0.208, 0.130, 0.078, 0.000, 0.078, 0.104],
[0.400, 0.208, 0.130, 0.078, 0.000, 0.078, 0.104],
[0.400, 0.208, 0.130, 0.078, 0.000, 0.078, 0.104],
[0.400, 0.208, 0.130, 0.078, 0.000, 0.078, 0.104],
[0.400, 0.208, 0.130, 0.078, 0.000, 0.078, 0.104],
[0.400, 0.208, 0.130, 0.078, 0.000, 0.078, 0.104],
[0.370, 0.193, 0.120, 0.072, 0.072, 0.072, 0.096],
[0.000, 0.222, 0.138, 0.222, 0.083, 0.222, 0.111],
[0.000, 0.222, 0.138, 0.222, 0.083, 0.222, 0.111],
[0.121, 0.195, 0.121, 0.195, 0.073, 0.195, 0.097],
[0.121, 0.195, 0.121, 0.195, 0.073, 0.195, 0.097],
[0.121, 0.195, 0.121, 0.195, 0.073, 0.195, 0.097],
[0.121, 0.195, 0.121, 0.195, 0.073, 0.195, 0.097],
[0.121, 0.195, 0.121, 0.195, 0.073, 0.195, 0.097],
[0.121, 0.195, 0.121, 0.195, 0.073, 0.195, 0.097],
[0.121, 0.195, 0.121, 0.195, 0.073, 0.195, 0.097],
[0.121, 0.195, 0.121, 0.195, 0.073, 0.195, 0.097],
[0.200, 0.320, 0.200, 0.000, 0.120, 0.000, 0.160],
[0.200, 0.320, 0.200, 0.000, 0.120, 0.000, 0.160],
[0.200, 0.320, 0.200, 0.000, 0.120, 0.000, 0.160],
[0.200, 0.320, 0.200, 0.000, 0.120, 0.000, 0.160],
[0.200, 0.320, 0.200, 0.000, 0.120, 0.000, 0.160],
[0.200, 0.320, 0.200, 0.000, 0.120, 0.000, 0.160],
[0.200, 0.320, 0.200, 0.000, 0.120, 0.000, 0.160],
[0.200, 0.320, 0.200, 0.000, 0.120, 0.000, 0.160],
[0.047, 0.380, 0.238, 0.000, 0.142, 0.000, 0.190],
[0.047, 0.380, 0.238, 0.000, 0.142, 0.000, 0.190],
[0.043, 0.434, 0.217, 0.000, 0.130, 0.000, 0.173],
[0.043, 0.434, 0.217, 0.000, 0.130, 0.000, 0.173],
[0.043, 0.434, 0.217, 0.000, 0.130, 0.000, 0.173],
[0.043, 0.434, 0.217, 0.000, 0.130, 0.000, 0.173],
[0.043, 0.434, 0.217, 0.000, 0.130, 0.000, 0.173],
[0.043, 0.434, 0.217, 0.000, 0.130, 0.000, 0.173],
[0.045, 0.454, 0.227, 0.000, 0.000, 0.000, 0.272],
[0.045, 0.454, 0.227, 0.000, 0.000, 0.000, 0.272],
[0.050, 0.000, 0.250, 0.000, 0.000, 0.000, 0.300],
[0.050, 0.000, 0.250, 0.000, 0.000, 0.000, 0.300],
[0.050, 0.000, 0.250, 0.000, 0.000, 0.000, 0.300],
[0.050, 0.000, 0.250, 0.000, 0.000, 0.000, 0.300],
[0.050, 0.000, 0.250, 0.000, 0.000, 0.000, 0.300],
[0.050, 0.000, 0.250, 0.000, 0.000, 0.000, 0.300],
[0.050, 0.000, 0.250, 0.000, 0.000, 0.000, 0.300],
[0.050, 0.000, 0.250, 0.000, 0.000, 0.000, 0.300],
[0.000, 0.000, 0.400, 0.000, 0.000, 0.000, 0.300],
[0.000, 0.000, 0.400, 0.000, 0.000, 0.000, 0.300],
[0.000, 0.000, 0.400, 0.000, 0.000, 0.000, 0.300]])
        # carefully designed simulated PS (proportional trading) signals, closely mirroring the PT signals
self.ps_signals = np.array([[0.000, 0.000, 0.000, 0.000, 0.250, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.100, 0.150],
[0.200, 0.200, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.100, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, -0.750, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[-0.333, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, -0.500, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, -1.000],
[0.000, 0.000, 0.000, 0.000, 0.200, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[-0.500, 0.000, 0.000, 0.150, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.200, 0.000, -1.000, 0.200, 0.000],
[0.500, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.200, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, -0.500, 0.200],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.200, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.150, 0.000, 0.000],
[-1.000, 0.000, 0.000, 0.250, 0.000, 0.250, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.250, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, -1.000, 0.000, -1.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[-0.800, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.100, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, -1.000, 0.000, 0.100],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, -1.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[-1.000, 0.000, 0.150, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000]])
        # carefully designed simulated VS (share volume) trading signals, similar to the PS signals
self.vs_signals = np.array([[000, 000, 000, 000, 500, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 300, 300],
[400, 400, 000, 000, 000, 000, 000],
[000, 000, 250, 000, 000, 000, 000],
[000, 000, 000, 000, -400, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[-200, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, -200, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, -300],
[000, 000, 000, 000, 500, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[-200, 000, 000, 300, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 400, 000, -300, 600, 000],
[500, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[600, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, -400, 600],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 500, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 300, 000, 000],
[-500, 000, 000, 500, 000, 200, 000],
[000, 000, 000, 000, 000, 000, 000],
[500, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, -700, 000, -600, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[-400, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 300, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, -600, 000, 300],
[000, 000, 000, 000, 000, 000, 000],
[000, -300, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[-200, 000, 700, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000]])
        # carefully designed simulated multi-price trading signals: 50 trading days of operations on three shares
self.multi_shares = ['000010', '000030', '000039']
self.multi_dates = ['2016/07/01', '2016/07/04', '2016/07/05', '2016/07/06', '2016/07/07',
'2016/07/08', '2016/07/11', '2016/07/12', '2016/07/13', '2016/07/14',
'2016/07/15', '2016/07/18', '2016/07/19', '2016/07/20', '2016/07/21',
'2016/07/22', '2016/07/25', '2016/07/26', '2016/07/27', '2016/07/28',
'2016/07/29', '2016/08/01', '2016/08/02', '2016/08/03', '2016/08/04',
'2016/08/05', '2016/08/08', '2016/08/09', '2016/08/10', '2016/08/11',
'2016/08/12', '2016/08/15', '2016/08/16', '2016/08/17', '2016/08/18',
'2016/08/19', '2016/08/22', '2016/08/23', '2016/08/24', '2016/08/25',
'2016/08/26', '2016/08/29', '2016/08/30', '2016/08/31', '2016/09/01',
'2016/09/02', '2016/09/05', '2016/09/06', '2016/09/07', '2016/09/08']
self.multi_dates = [pd.Timestamp(date_text) for date_text in self.multi_dates]
        # the trading prices used for the operations include the open, high, and close prices
self.multi_prices_open = np.array([[10.02, 9.88, 7.26],
[10.00, 9.88, 7.00],
[9.98, 9.89, 6.88],
[9.97, 9.75, 6.91],
[9.99, 9.74, np.nan],
[10.01, 9.80, 6.81],
[10.04, 9.62, 6.63],
[10.06, 9.65, 6.45],
[10.06, 9.58, 6.16],
[10.11, 9.67, 6.24],
[10.11, 9.81, 5.96],
[10.07, 9.80, 5.97],
[10.06, 10.00, 5.96],
[10.09, 9.95, 6.20],
[10.03, 10.10, 6.35],
[10.02, 10.06, 6.11],
[10.06, 10.14, 6.37],
[10.08, 9.90, 5.58],
[9.99, 10.20, 5.65],
[10.00, 10.29, 5.65],
[10.03, 9.86, 5.19],
[10.02, 9.48, 5.42],
[10.06, 10.01, 6.30],
[10.03, 10.24, 6.15],
[9.97, 10.26, 6.05],
[9.94, 10.24, 5.89],
[9.83, 10.12, 5.22],
[9.78, 10.65, 5.20],
[9.77, 10.64, 5.07],
[9.91, 10.56, 6.04],
[9.92, 10.42, 6.12],
[9.97, 10.43, 5.85],
[9.91, 10.29, 5.67],
[9.90, 10.30, 6.02],
[9.88, 10.44, 6.04],
[9.91, 10.60, 7.07],
[9.63, 10.67, 7.64],
[9.64, 10.46, 7.99],
[9.57, 10.39, 7.59],
[9.55, 10.90, 8.73],
[9.58, 11.01, 8.72],
[9.61, 11.01, 8.97],
[9.62, np.nan, 8.58],
[9.55, np.nan, 8.71],
[9.57, 10.82, 8.77],
[9.61, 11.02, 8.40],
[9.63, 10.96, 7.95],
[9.64, 11.55, 7.76],
[9.61, 11.74, 8.25],
[9.56, 11.80, 7.51]])
self.multi_prices_high = np.array([[10.07, 9.91, 7.41],
[10.00, 10.04, 7.31],
[10.00, 9.93, 7.14],
[10.00, 10.04, 7.00],
[10.03, 9.84, np.nan],
[10.03, 9.88, 6.82],
[10.04, 9.99, 6.96],
[10.09, 9.70, 6.85],
[10.10, 9.67, 6.50],
[10.14, 9.71, 6.34],
[10.11, 9.85, 6.04],
[10.10, 9.90, 6.02],
[10.09, 10.00, 6.12],
[10.09, 10.20, 6.38],
[10.10, 10.11, 6.43],
[10.05, 10.18, 6.46],
[10.07, 10.21, 6.43],
[10.09, 10.26, 6.27],
[10.10, 10.38, 5.77],
[10.00, 10.47, 6.01],
[10.04, 10.42, 5.67],
[10.04, 10.07, 5.67],
[10.06, 10.24, 6.35],
[10.09, 10.27, 6.32],
[10.05, 10.38, 6.43],
[9.97, 10.43, 6.36],
[9.96, 10.39, 5.79],
[9.86, 10.65, 5.47],
[9.77, 10.84, 5.65],
[9.92, 10.65, 6.04],
[9.94, 10.73, 6.14],
[9.97, 10.63, 6.23],
[9.97, 10.51, 5.83],
[9.92, 10.35, 6.25],
[9.92, 10.46, 6.27],
[9.92, 10.63, 7.12],
[9.93, 10.74, 7.82],
[9.64, 10.76, 8.14],
[9.58, 10.54, 8.27],
[9.60, 11.02, 8.92],
[9.58, 11.12, 8.76],
[9.62, 11.17, 9.15],
[9.62, np.nan, 8.90],
[9.64, np.nan, 9.01],
[9.59, 10.92, 9.16],
[9.62, 11.15, 9.00],
[9.63, 11.11, 8.27],
[9.70, 11.55, 7.99],
[9.66, 11.95, 8.33],
[9.64, 11.93, 8.25]])
self.multi_prices_close = np.array([[10.04, 9.68, 6.64],
[10.00, 9.87, 7.26],
[10.00, 9.86, 7.03],
[9.99, 9.87, 6.87],
[9.97, 9.79, np.nan],
[9.99, 9.82, 6.64],
[10.03, 9.80, 6.85],
[10.03, 9.66, 6.70],
[10.06, 9.62, 6.39],
[10.06, 9.58, 6.22],
[10.11, 9.69, 5.92],
[10.09, 9.78, 5.91],
[10.07, 9.75, 6.11],
[10.06, 9.96, 5.91],
[10.09, 9.90, 6.23],
[10.03, 10.04, 6.28],
[10.03, 10.06, 6.28],
[10.06, 10.08, 6.27],
[10.08, 10.24, 5.70],
[10.00, 10.24, 5.56],
[9.99, 10.24, 5.67],
[10.03, 9.86, 5.16],
[10.03, 10.13, 5.69],
[10.06, 10.12, 6.32],
[10.03, 10.10, 6.14],
[9.97, 10.25, 6.25],
[9.94, 10.24, 5.79],
[9.83, 10.22, 5.26],
[9.77, 10.75, 5.05],
[9.84, 10.64, 5.45],
[9.91, 10.56, 6.06],
[9.93, 10.60, 6.21],
[9.96, 10.42, 5.69],
[9.91, 10.25, 5.46],
[9.91, 10.24, 6.02],
[9.88, 10.49, 6.69],
[9.91, 10.57, 7.43],
[9.64, 10.63, 7.72],
[9.56, 10.48, 8.16],
[9.57, 10.37, 7.83],
[9.55, 10.96, 8.70],
[9.57, 11.02, 8.71],
[9.61, np.nan, 8.88],
[9.61, np.nan, 8.54],
[9.55, 10.88, 8.87],
[9.57, 10.87, 8.87],
[9.63, 11.01, 8.18],
[9.64, 11.01, 7.80],
[9.65, 11.58, 7.97],
[9.62, 11.80, 8.25]])
        # the trading signals come in three groups, acting on the open, high, and close prices respectively
        # the key point here is the handling of the share delivery period: when it is non-zero,
        # delivery is settled in units of trading days
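        # (illustration, assuming day-based settlement: with a delivery period of 1 trading day,
        #  shares bought on day t would only become available for sale from day t+1 onward)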
self.multi_signals = []
        # the first group of multi-signals acts on the open price
self.multi_signals.append(
pd.DataFrame(np.array([[0.000, 0.000, 0.000],
[0.000, -0.500, 0.000],
[0.000, -0.500, 0.000],
[0.000, 0.000, 0.000],
[0.150, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.300, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.300],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.350, 0.250],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.100, 0.000, 0.350],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.200, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.050, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000]]),
columns=self.multi_shares,
index=self.multi_dates
)
)
        # the second group acts on the high price
self.multi_signals.append(
pd.DataFrame(np.array([[0.000, 0.150, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, -0.200, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.200],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000]]),
columns=self.multi_shares,
index=self.multi_dates
)
)
        # the third group acts on the close price
self.multi_signals.append(
pd.DataFrame(np.array([[0.000, 0.200, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[-0.500, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, -0.800],
[0.000, 0.000, 0.000],
[0.000, -1.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[-0.750, 0.000, 0.000],
[0.000, 0.000, -0.850],
[0.000, 0.000, 0.000],
[0.000, -0.700, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, -1.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[-1.000, 0.000, 0.000],
[0.000, -1.000, 0.000],
[0.000, 0.000, 0.000]]),
columns=self.multi_shares,
index=self.multi_dates
)
)
        # the backtest also needs three groups of prices: open, high, and close
self.multi_histories = []
        # the first group of price histories: open prices
self.multi_histories.append(
pd.DataFrame(self.multi_prices_open,
columns=self.multi_shares,
index=self.multi_dates
)
)
        # the second group of price histories: high prices
self.multi_histories.append(
pd.DataFrame(self.multi_prices_high,
columns=self.multi_shares,
index=self.multi_dates
)
)
        # the third group of price histories: close prices
self.multi_histories.append(
pd.DataFrame(self.multi_prices_close,
columns=self.multi_shares,
index=self.multi_dates
)
)
        # set up backtest parameters
self.cash = qt.CashPlan(['2016/07/01', '2016/08/12', '2016/09/23'], [10000, 10000, 10000])
self.rate = qt.Cost(buy_fix=0,
sell_fix=0,
buy_rate=0,
sell_rate=0,
buy_min=0,
sell_min=0,
slipage=0)
self.rate2 = qt.Cost(buy_fix=0,
sell_fix=0,
buy_rate=0,
sell_rate=0,
buy_min=10,
sell_min=5,
slipage=0)
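        # two cost models: self.rate charges no fees at all, while self.rate2 is identical
        # except for minimum fees per trade (buy_min=10, sell_min=5)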
self.pt_signal_hp = dataframe_to_hp(
pd.DataFrame(self.pt_signals, index=self.dates, columns=self.shares),
htypes='close'
)
self.ps_signal_hp = dataframe_to_hp(
pd.DataFrame(self.ps_signals, index=self.dates, columns=self.shares),
htypes='close'
)
self.vs_signal_hp = dataframe_to_hp(
pd.DataFrame(self.vs_signals, index=self.dates, columns=self.shares),
htypes='close'
)
self.multi_signal_hp = stack_dataframes(
self.multi_signals,
stack_along='htypes',
htypes='open, high, close'
)
self.history_list = dataframe_to_hp(
pd.DataFrame(self.prices, index=self.dates, columns=self.shares),
htypes='close'
)
self.multi_history_list = stack_dataframes(
self.multi_histories,
stack_along='htypes',
htypes='open, high, close'
)
        # simulated backtest results for the PT signals
        # PT signals, sell before buy, delivery period = 0
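        # each result row appears to list the 7 share holdings followed by cash, fee, and total
        # value (e.g. first row: 555.5556 shares of share5 at 4.50 = 2500, plus 7500 cash, gives
        # a total of 10000) -- this column layout is inferred from the data, not from the docs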
self.pt_res_sb00 = np.array(
[[0.0000, 0.0000, 0.0000, 0.0000, 555.5556, 0.0000, 0.0000, 7500.0000, 0.0000, 10000.0000],
[0.0000, 0.0000, 0.0000, 0.0000, 555.5556, 0.0000, 0.0000, 7500.0000, 0.0000, 9916.6667],
[0.0000, 0.0000, 0.0000, 0.0000, 555.5556, 0.0000, 321.0892, 6035.8333, 0.0000, 9761.1111],
[348.0151, 417.9188, 0.0000, 0.0000, 555.5556, 0.0000, 321.0892, 2165.9050, 0.0000, 9674.8209],
[348.0151, 417.9188, 0.0000, 0.0000, 555.5556, 0.0000, 321.0892, 2165.9050, 0.0000, 9712.5872],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9910.7240],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9919.3782],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9793.0692],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9513.8217],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9123.5935],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9000.5995],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9053.4865],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9248.7142],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9161.1372],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9197.3369],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9504.6981],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9875.2461],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 10241.5400],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 10449.2398],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 10628.3269],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 10500.7893],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 0.0000, 5233.1396, 0.0000, 10449.2776],
[348.0151, 417.9188, 0.0000, 0.0000, 459.8694, 0.0000, 0.0000, 3433.8551, 0.0000, 10338.2857],
[348.0151, 417.9188, 0.0000, 0.0000, 459.8694, 0.0000, 0.0000, 3433.8551, 0.0000, 10194.3474],
[348.0151, 417.9188, 0.0000, 0.0000, 459.8694, 0.0000, 0.0000, 3433.8551, 0.0000, 10471.0008],
[101.4983, 417.9188, 0.0000, 288.6672, 459.8694, 0.0000, 0.0000, 3541.0848, 0.0000, 10411.2629],
[101.4983, 417.9188, 0.0000, 288.6672, 459.8694, 0.0000, 0.0000, 3541.0848, 0.0000, 10670.0618],
[101.4983, 417.9188, 0.0000, 288.6672, 459.8694, 0.0000, 0.0000, 3541.0848, 0.0000, 10652.4799],
[101.4983, 417.9188, 0.0000, 288.6672, 459.8694, 0.0000, 0.0000, 3541.0848, 0.0000, 10526.1488],
[101.4983, 417.9188, 0.0000, 288.6672, 459.8694, 0.0000, 0.0000, 3541.0848, 0.0000, 10458.6614],
[101.4983, 417.9188, 821.7315, 288.6672, 0.0000, 2576.1284, 0.0000, 4487.0722, 0.0000, 20609.0270],
[1216.3282, 417.9188, 821.7315, 288.6672, 0.0000, 1607.1030, 0.0000, 0.0000, 0.0000, 21979.4972],
[1216.3282, 417.9188, 821.7315, 288.6672, 0.0000, 1607.1030, 0.0000, 0.0000, 0.0000, 21880.9628],
[1216.3282, 417.9188, 821.7315, 288.6672, 0.0000, 1607.1030, 0.0000, 0.0000, 0.0000, 21630.0454],
[1216.3282, 417.9188, 821.7315, 288.6672, 0.0000, 1607.1030, 0.0000, 0.0000, 0.0000, 20968.0007],
[1216.3282, 417.9188, 821.7315, 288.6672, 0.0000, 1607.1030, 0.0000, 0.0000, 0.0000, 21729.9339],
[1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 1607.1030, 0.0000, 2172.0393, 0.0000, 21107.6400],
[1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 1607.1030, 0.0000, 2172.0393, 0.0000, 21561.1745],
[1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 1607.1030, 0.0000, 2172.0393, 0.0000, 21553.0916],
[1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 1607.1030, 0.0000, 2172.0393, 0.0000, 22316.9366],
[1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 1607.1030, 0.0000, 2172.0393, 0.0000, 22084.2862],
[1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 1607.1030, 0.0000, 2172.0393, 0.0000, 21777.3543],
[1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 1607.1030, 0.0000, 2172.0393, 0.0000, 22756.8225],
[1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 1607.1030, 0.0000, 2172.0393, 0.0000, 22843.4697],
[1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 1607.1030, 0.0000, 2172.0393, 0.0000, 22762.1766],
[1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 1607.1030, 1448.0262, 0.0000, 0.0000, 22257.0973],
[1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 1607.1030, 1448.0262, 0.0000, 0.0000, 23136.5259],
[1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 1607.1030, 1448.0262, 0.0000, 0.0000, 21813.7852],
[1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 1607.1030, 1448.0262, 0.0000, 0.0000, 22395.3204],
[1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 1607.1030, 1448.0262, 0.0000, 0.0000, 23717.6858],
[1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 1607.1030, 1448.0262, 0.0000, 0.0000, 22715.4263],
[1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 669.7975, 1448.0262, 2455.7405, 0.0000, 22498.3254],
[1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 669.7975, 1448.0262, 2455.7405, 0.0000, 23341.1733],
[1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 669.7975, 1448.0262, 2455.7405, 0.0000, 24162.3941],
[1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 669.7975, 1448.0262, 2455.7405, 0.0000, 24847.1508],
[1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 669.7975, 1448.0262, 2455.7405, 0.0000, 23515.9755],
[1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 669.7975, 1448.0262, 2455.7405, 0.0000, 24555.8997],
[1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 669.7975, 1448.0262, 2455.7405, 0.0000, 24390.6372],
[1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 669.7975, 1448.0262, 2455.7405, 0.0000, 24073.3309],
[1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 669.7975, 1448.0262, 2455.7405, 0.0000, 24394.6500],
[2076.3314, 903.0334, 511.8829, 288.6672, 0.0000, 669.7975, 1448.0262, 3487.5655, 0.0000, 34904.8150],
[0.0000, 903.0334, 511.8829, 897.4061, 0.0000, 3514.8404, 1448.0262, 4608.8037, 0.0000, 34198.4475],
[0.0000, 903.0334, 511.8829, 897.4061, 0.0000, 3514.8404, 1448.0262, 4608.8037, 0.0000, 33753.0190],
[644.7274, 903.0334, 511.8829, 897.4061, 0.0000, 3514.8404, 1448.0262, 379.3918, 0.0000, 34953.8178],
[644.7274, 903.0334, 511.8829, 897.4061, 0.0000, 3514.8404, 1448.0262, 379.3918, 0.0000, 33230.2498],
[644.7274, 903.0334, 511.8829, 897.4061, 0.0000, 3514.8404, 1448.0262, 379.3918, 0.0000, 35026.7819],
[644.7274, 903.0334, 511.8829, 897.4061, 0.0000, 3514.8404, 1448.0262, 379.3918, 0.0000, 36976.2649],
[644.7274, 903.0334, 511.8829, 897.4061, 0.0000, 3514.8404, 1448.0262, 379.3918, 0.0000, 38673.8147],
[644.7274, 903.0334, 511.8829, 897.4061, 0.0000, 3514.8404, 1448.0262, 379.3918, 0.0000, 38717.3429],
[644.7274, 903.0334, 511.8829, 897.4061, 0.0000, 3514.8404, 1448.0262, 379.3918, 0.0000, 36659.0854],
[644.7274, 903.0334, 511.8829, 897.4061, 0.0000, 3514.8404, 1448.0262, 379.3918, 0.0000, 35877.9607],
[644.7274, 1337.8498, 1071.9327, 0.0000, 1229.1495, 0.0000, 1448.0262, 2853.5665, 0.0000, 36874.4840],
[644.7274, 1337.8498, 1071.9327, 0.0000, 1229.1495, 0.0000, 1448.0262, 2853.5665, 0.0000, 37010.2695],
[644.7274, 1337.8498, 1071.9327, 0.0000, 1229.1495, 0.0000, 1448.0262, 2853.5665, 0.0000, 38062.3510],
[644.7274, 1337.8498, 1071.9327, 0.0000, 1229.1495, 0.0000, 1448.0262, 2853.5665, 0.0000, 36471.1357],
[644.7274, 1337.8498, 1071.9327, 0.0000, 1229.1495, 0.0000, 1448.0262, 2853.5665, 0.0000, 37534.9927],
[644.7274, 1337.8498, 1071.9327, 0.0000, 1229.1495, 0.0000, 1448.0262, 2853.5665, 0.0000, 37520.2569],
[644.7274, 1337.8498, 1071.9327, 0.0000, 1229.1495, 0.0000, 1448.0262, 2853.5665, 0.0000, 36747.7952],
[644.7274, 1337.8498, 1071.9327, 0.0000, 1229.1495, 0.0000, 1448.0262, 2853.5665, 0.0000, 36387.9409],
[644.7274, 1337.8498, 1071.9327, 0.0000, 1229.1495, 0.0000, 1448.0262, 2853.5665, 0.0000, 35925.9715],
[644.7274, 1337.8498, 1071.9327, 0.0000, 1229.1495, 0.0000, 1448.0262, 2853.5665, 0.0000, 36950.7028],
[644.7274, 1657.3981, 1071.9327, 0.0000, 1229.1495, 0.0000, 1448.0262, 0.0000, 0.0000, 37383.2463],
[644.7274, 1657.3981, 1071.9327, 0.0000, 1229.1495, 0.0000, 1448.0262, 0.0000, 0.0000, 37761.2724],
[644.7274, 1657.3981, 1071.9327, 0.0000, 1229.1495, 0.0000, 1448.0262, 0.0000, 0.0000, 39548.2653],
[644.7274, 1657.3981, 1071.9327, 0.0000, 1229.1495, 0.0000, 1448.0262, 0.0000, 0.0000, 41435.1291],
[644.7274, 1657.3981, 1071.9327, 0.0000, 1229.1495, 0.0000, 1448.0262, 0.0000, 0.0000, 41651.6261],
[644.7274, 1657.3981, 1071.9327, 0.0000, 1229.1495, 0.0000, 1448.0262, 0.0000, 0.0000, 41131.9920],
[644.7274, 1657.3981, 1071.9327, 0.0000, 0.0000, 0.0000, 3760.7116, 0.0000, 0.0000, 41286.4702],
[644.7274, 1657.3981, 1071.9327, 0.0000, 0.0000, 0.0000, 3760.7116, 0.0000, 0.0000, 40978.7259],
[644.7274, 0.0000, 1071.9327, 0.0000, 0.0000, 0.0000, 3760.7116, 17485.5497, 0.0000, 40334.5453],
[644.7274, 0.0000, 1071.9327, 0.0000, 0.0000, 0.0000, 3760.7116, 17485.5497, 0.0000, 41387.9172],
[644.7274, 0.0000, 1071.9327, 0.0000, 0.0000, 0.0000, 3760.7116, 17485.5497, 0.0000, 42492.6707],
[644.7274, 0.0000, 1071.9327, 0.0000, 0.0000, 0.0000, 3760.7116, 17485.5497, 0.0000, 42953.7188],
[644.7274, 0.0000, 1071.9327, 0.0000, 0.0000, 0.0000, 3760.7116, 17485.5497, 0.0000, 42005.1092],
[644.7274, 0.0000, 1071.9327, 0.0000, 0.0000, 0.0000, 3760.7116, 17485.5497, 0.0000, 42017.9106],
[644.7274, 0.0000, 1071.9327, 0.0000, 0.0000, 0.0000, 3760.7116, 17485.5497, 0.0000, 43750.2824],
[644.7274, 0.0000, 1071.9327, 0.0000, 0.0000, 0.0000, 3760.7116, 17485.5497, 0.0000, 41766.8679],
[0.0000, 0.0000, 2461.8404, 0.0000, 0.0000, 0.0000, 3760.7116, 12161.6930, 0.0000, 42959.1150],
[0.0000, 0.0000, 2461.8404, 0.0000, 0.0000, 0.0000, 3760.7116, 12161.6930, 0.0000, 41337.9320],
[0.0000, 0.0000, 2461.8404, 0.0000, 0.0000, 0.0000, 3760.7116, 12161.6930, 0.0000, 40290.3688]])
        # PT signals, buy before sell, delivery period = 0
self.pt_res_bs00 = np.array(
[[0.0000, 0.0000, 0.0000, 0.0000, 555.5556, 0.0000, 0.0000, 7500.0000, 0.0000, 10000.0000],
[0.0000, 0.0000, 0.0000, 0.0000, 555.5556, 0.0000, 0.0000, 7500.0000, 0.0000, 9916.6667],
[0.0000, 0.0000, 0.0000, 0.0000, 555.5556, 0.0000, 321.0892, 6035.8333, 0.0000, 9761.1111],
[348.0151, 417.9188, 0.0000, 0.0000, 555.5556, 0.0000, 321.0892, 2165.9050, 0.0000, 9674.8209],
[348.0151, 417.9188, 0.0000, 0.0000, 555.5556, 0.0000, 321.0892, 2165.9050, 0.0000, 9712.5872],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9910.7240],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9919.3782],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9793.0692],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9513.8217],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9123.5935],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9000.5995],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9053.4865],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9248.7142],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9161.1372],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9197.3369],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9504.6981],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9875.2461],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 10241.5400],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 10449.2398],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 10628.3269],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 10500.7893],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 0.0000, 5233.1396, 0.0000, 10449.2776],
[348.0151, 417.9188, 0.0000, 0.0000, 459.8694, 0.0000, 0.0000, 3433.8551, 0.0000, 10338.2857],
[348.0151, 417.9188, 0.0000, 0.0000, 459.8694, 0.0000, 0.0000, 3433.8551, 0.0000, 10194.3474],
[348.0151, 417.9188, 0.0000, 0.0000, 459.8694, 0.0000, 0.0000, 3433.8551, 0.0000, 10471.0008],
[101.4983, 417.9188, 0.0000, 288.6672, 459.8694, 0.0000, 0.0000, 3541.0848, 0.0000, 10411.2629],
[101.4983, 417.9188, 0.0000, 288.6672, 459.8694, 0.0000, 0.0000, 3541.0848, 0.0000, 10670.0618],
[101.4983, 417.9188, 0.0000, 288.6672, 459.8694, 0.0000, 0.0000, 3541.0848, 0.0000, 10652.4799],
[101.4983, 417.9188, 0.0000, 288.6672, 459.8694, 0.0000, 0.0000, 3541.0848, 0.0000, 10526.1488],
[101.4983, 417.9188, 0.0000, 288.6672, 459.8694, 0.0000, 0.0000, 3541.0848, 0.0000, 10458.6614],
[101.4983, 417.9188, 821.7315, 288.6672, 0.0000, 2576.1284, 0.0000, 4487.0722, 0.0000, 20609.0270],
[797.1684, 417.9188, 821.7315, 288.6672, 0.0000, 1607.1030, 0.0000, 2703.5808, 0.0000, 21979.4972],
[1190.1307, 417.9188, 821.7315, 288.6672, 0.0000, 1607.1030, 0.0000, 0.0000, 0.0000, 21700.7241],
[1190.1307, 417.9188, 821.7315, 288.6672, 0.0000, 1607.1030, 0.0000, 0.0000, 0.0000, 21446.6630],
[1190.1307, 417.9188, 821.7315, 288.6672, 0.0000, 1607.1030, 0.0000, 0.0000, 0.0000, 20795.3593],
[1190.1307, 417.9188, 821.7315, 288.6672, 0.0000, 1607.1030, 0.0000, 0.0000, 0.0000, 21557.2924],
[1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 1607.1030, 0.0000, 2201.6110, 0.0000, 20933.6887],
[1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 1607.1030, 0.0000, 2201.6110, 0.0000, 21392.5581],
[1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 1607.1030, 0.0000, 2201.6110, 0.0000, 21390.2918],
[1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 1607.1030, 0.0000, 2201.6110, 0.0000, 22147.7562],
[1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 1607.1030, 0.0000, 2201.6110, 0.0000, 21910.9053],
[1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 1607.1030, 0.0000, 2201.6110, 0.0000, 21594.2980],
[1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 1607.1030, 0.0000, 2201.6110, 0.0000, 22575.4380],
[1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 1607.1030, 0.0000, 2201.6110, 0.0000, 22655.8312],
[1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 1607.1030, 0.0000, 2201.6110, 0.0000, 22578.4365],
[1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 1607.1030, 1467.7407, 0.0000, 0.0000, 22073.2661],
[1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 1607.1030, 1467.7407, 0.0000, 0.0000, 22955.2367],
[1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 1607.1030, 1467.7407, 0.0000, 0.0000, 21628.1647],
[1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 1607.1030, 1467.7407, 0.0000, 0.0000, 22203.4237],
[1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 1607.1030, 1467.7407, 0.0000, 0.0000, 23516.2598],
[1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 699.3848, 1467.7407, 2278.3728, 0.0000, 22505.8428],
[1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 699.3848, 1467.7407, 2278.3728, 0.0000, 22199.1042],
[1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 699.3848, 1467.7407, 2278.3728, 0.0000, 23027.9302],
[1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 699.3848, 1467.7407, 2278.3728, 0.0000, 23848.5806],
[1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 699.3848, 1467.7407, 2278.3728, 0.0000, 24540.8871],
[1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 699.3848, 1467.7407, 2278.3728, 0.0000, 23205.6838],
[1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 699.3848, 1467.7407, 2278.3728, 0.0000, 24267.6685],
[1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 699.3848, 1467.7407, 2278.3728, 0.0000, 24115.3796],
[1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 699.3848, 1467.7407, 2278.3728, 0.0000, 23814.3667],
[1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 699.3848, 1467.7407, 2278.3728, 0.0000, 24133.6611],
[2061.6837, 896.6628, 507.6643, 288.6672, 0.0000, 699.3848, 1467.7407, 3285.8830, 0.0000, 34658.5742],
[0.0000, 896.6628, 507.6643, 466.6033, 0.0000, 1523.7106, 1467.7407, 12328.8684, 0.0000, 33950.7917],
[0.0000, 896.6628, 507.6643, 936.6623, 0.0000, 3464.7832, 1467.7407, 4380.3797, 0.0000, 33711.4045],
[644.1423, 896.6628, 507.6643, 936.6623, 0.0000, 3464.7832, 1467.7407, 154.8061, 0.0000, 34922.0959],
[644.1423, 896.6628, 507.6643, 936.6623, 0.0000, 3464.7832, 1467.7407, 154.8061, 0.0000, 33237.1081],
[644.1423, 896.6628, 507.6643, 936.6623, 0.0000, 3464.7832, 1467.7407, 154.8061, 0.0000, 35031.8071],
[644.1423, 896.6628, 507.6643, 936.6623, 0.0000, 3464.7832, 1467.7407, 154.8061, 0.0000, 36976.3376],
[644.1423, 896.6628, 507.6643, 936.6623, 0.0000, 3464.7832, 1467.7407, 154.8061, 0.0000, 38658.5245],
[644.1423, 896.6628, 507.6643, 936.6623, 0.0000, 3464.7832, 1467.7407, 154.8061, 0.0000, 38712.2854],
[644.1423, 896.6628, 507.6643, 936.6623, 0.0000, 3464.7832, 1467.7407, 154.8061, 0.0000, 36655.3125],
[644.1423, 896.6628, 507.6643, 936.6623, 0.0000, 3464.7832, 1467.7407, 154.8061, 0.0000, 35904.3692],
[644.1423, 902.2617, 514.8253, 0.0000, 15.5990, 0.0000, 1467.7407, 14821.9004, 0.0000, 36873.9080],
[644.1423, 902.2617, 514.8253, 0.0000, 1220.8683, 0.0000, 1467.7407, 10470.8781, 0.0000, 36727.7895],
[644.1423, 1338.1812, 1033.4242, 0.0000, 1220.8683, 0.0000, 1467.7407, 2753.1120, 0.0000, 37719.9840],
[644.1423, 1338.1812, 1033.4242, 0.0000, 1220.8683, 0.0000, 1467.7407, 2753.1120, 0.0000, 36138.1277],
[644.1423, 1338.1812, 1033.4242, 0.0000, 1220.8683, 0.0000, 1467.7407, 2753.1120, 0.0000, 37204.0760],
[644.1423, 1338.1812, 1033.4242, 0.0000, 1220.8683, 0.0000, 1467.7407, 2753.1120, 0.0000, 37173.1201],
[644.1423, 1338.1812, 1033.4242, 0.0000, 1220.8683, 0.0000, 1467.7407, 2753.1120, 0.0000, 36398.2298],
[644.1423, 1338.1812, 1033.4242, 0.0000, 1220.8683, 0.0000, 1467.7407, 2753.1120, 0.0000, 36034.2178],
[644.1423, 1338.1812, 1033.4242, 0.0000, 1220.8683, 0.0000, 1467.7407, 2753.1120, 0.0000, 35583.6399],
[644.1423, 1338.1812, 1033.4242, 0.0000, 1220.8683, 0.0000, 1467.7407, 2753.1120, 0.0000, 36599.2645],
[644.1423, 1646.4805, 1033.4242, 0.0000, 1220.8683, 0.0000, 1467.7407, 0.0000, 0.0000, 37013.3408],
[644.1423, 1646.4805, 1033.4242, 0.0000, 1220.8683, 0.0000, 1467.7407, 0.0000, 0.0000, 37367.7449],
[644.1423, 1646.4805, 1033.4242, 0.0000, 1220.8683, 0.0000, 1467.7407, 0.0000, 0.0000, 39143.8273],
[644.1423, 1646.4805, 1033.4242, 0.0000, 1220.8683, 0.0000, 1467.7407, 0.0000, 0.0000, 41007.3074],
[644.1423, 1646.4805, 1033.4242, 0.0000, 1220.8683, 0.0000, 1467.7407, 0.0000, 0.0000, 41225.4657],
[644.1423, 1646.4805, 1033.4242, 0.0000, 1220.8683, 0.0000, 1467.7407, 0.0000, 0.0000, 40685.9525],
[644.1423, 1646.4805, 1033.4242, 0.0000, 0.0000, 0.0000, 1467.7407, 6592.6891, 0.0000, 40851.5435],
[644.1423, 1646.4805, 1033.4242, 0.0000, 0.0000, 0.0000, 3974.4666, 0.0000, 0.0000, 41082.1210],
[644.1423, 0.0000, 1033.4242, 0.0000, 0.0000, 0.0000, 3974.4666, 17370.3689, 0.0000, 40385.0135],
[644.1423, 0.0000, 1033.4242, 0.0000, 0.0000, 0.0000, 3974.4666, 17370.3689, 0.0000, 41455.1513],
[644.1423, 0.0000, 1033.4242, 0.0000, 0.0000, 0.0000, 3974.4666, 17370.3689, 0.0000, 42670.6769],
[644.1423, 0.0000, 1033.4242, 0.0000, 0.0000, 0.0000, 3974.4666, 17370.3689, 0.0000, 43213.7233],
[644.1423, 0.0000, 1033.4242, 0.0000, 0.0000, 0.0000, 3974.4666, 17370.3689, 0.0000, 42205.2480],
[644.1423, 0.0000, 1033.4242, 0.0000, 0.0000, 0.0000, 3974.4666, 17370.3689, 0.0000, 42273.9386],
[644.1423, 0.0000, 1033.4242, 0.0000, 0.0000, 0.0000, 3974.4666, 17370.3689, 0.0000, 44100.0777],
[644.1423, 0.0000, 1033.4242, 0.0000, 0.0000, 0.0000, 3974.4666, 17370.3689, 0.0000, 42059.7208],
[0.0000, 0.0000, 2483.9522, 0.0000, 0.0000, 0.0000, 3974.4666, 11619.4102, 0.0000, 43344.9653],
[0.0000, 0.0000, 2483.9522, 0.0000, 0.0000, 0.0000, 3974.4666, 11619.4102, 0.0000, 41621.0324],
[0.0000, 0.0000, 2483.9522, 0.0000, 0.0000, 0.0000, 3974.4666, 11619.4102, 0.0000, 40528.0648]])
# PT signal, sell before buy, delivery period of 2 days (stocks) and 0 days (cash), so that the cash freed by selling first can be reused for the subsequent buys
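# (Note, inferred rather than documented here: each row of these expected-result
# matrices appears to hold the per-stock share counts, followed by cash-related
# columns and the total portfolio value for one simulated trading day. With a
# 0-day cash delivery period the proceeds of a sell can be spent on the same
# day's buys, whereas a 1- or 2-day period makes them available only after that
# many days; this is what the fixture variants below differ in, besides the
# buy/sell ordering.)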
self.pt_res_sb20 = np.array(
[[0.000, 0.000, 0.000, 0.000, 555.556, 0.000, 0.000, 7500.000, 0.000, 10000.000],
[0.000, 0.000, 0.000, 0.000, 555.556, 0.000, 0.000, 7500.000, 0.000, 9916.667],
[0.000, 0.000, 0.000, 0.000, 555.556, 0.000, 321.089, 6035.833, 0.000, 9761.111],
[348.015, 417.919, 0.000, 0.000, 555.556, 0.000, 321.089, 2165.905, 0.000, 9674.821],
[348.015, 417.919, 0.000, 0.000, 555.556, 0.000, 321.089, 2165.905, 0.000, 9712.587],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9910.724],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9919.378],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9793.069],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9513.822],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9123.593],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9000.600],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9053.487],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9248.714],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9161.137],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9197.337],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9504.698],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9875.246],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 10241.540],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 10449.240],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 10628.327],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 10500.789],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 0.000, 5233.140, 0.000, 10449.278],
[348.015, 417.919, 0.000, 0.000, 459.869, 0.000, 0.000, 3433.855, 0.000, 10338.286],
[348.015, 417.919, 0.000, 0.000, 459.869, 0.000, 0.000, 3433.855, 0.000, 10194.347],
[348.015, 417.919, 0.000, 0.000, 459.869, 0.000, 0.000, 3433.855, 0.000, 10471.001],
[101.498, 417.919, 0.000, 288.667, 459.869, 0.000, 0.000, 3541.085, 0.000, 10411.263],
[101.498, 417.919, 0.000, 288.667, 459.869, 0.000, 0.000, 3541.085, 0.000, 10670.062],
[101.498, 417.919, 0.000, 288.667, 459.869, 0.000, 0.000, 3541.085, 0.000, 10652.480],
[101.498, 417.919, 0.000, 288.667, 459.869, 0.000, 0.000, 3541.085, 0.000, 10526.149],
[101.498, 417.919, 0.000, 288.667, 459.869, 0.000, 0.000, 3541.085, 0.000, 10458.661],
[101.498, 417.919, 821.732, 288.667, 0.000, 2576.128, 0.000, 4487.072, 0.000, 20609.027],
[797.168, 417.919, 821.732, 288.667, 0.000, 2576.128, 0.000, 0.000, 0.000, 21979.497],
[1156.912, 417.919, 821.732, 288.667, 0.000, 1649.148, 0.000, 0.000, 0.000, 21584.441],
[1156.912, 417.919, 821.732, 288.667, 0.000, 1649.148, 0.000, 0.000, 0.000, 21309.576],
[1156.912, 417.919, 821.732, 288.667, 0.000, 1649.148, 0.000, 0.000, 0.000, 20664.323],
[1156.912, 417.919, 821.732, 288.667, 0.000, 1649.148, 0.000, 0.000, 0.000, 21445.597],
[1156.912, 417.919, 504.579, 288.667, 0.000, 1649.148, 0.000, 2223.240, 0.000, 20806.458],
[1156.912, 417.919, 504.579, 288.667, 0.000, 1649.148, 0.000, 2223.240, 0.000, 21288.441],
[1156.912, 417.919, 504.579, 288.667, 0.000, 1649.148, 0.000, 2223.240, 0.000, 21294.365],
[1156.912, 417.919, 504.579, 288.667, 0.000, 1649.148, 0.000, 2223.240, 0.000, 22058.784],
[1156.912, 417.919, 504.579, 288.667, 0.000, 1649.148, 0.000, 2223.240, 0.000, 21805.540],
[1156.912, 417.919, 504.579, 288.667, 0.000, 1649.148, 0.000, 2223.240, 0.000, 21456.333],
[1481.947, 417.919, 504.579, 288.667, 0.000, 1649.148, 0.000, 0.000, 0.000, 22459.720],
[1481.947, 417.919, 504.579, 288.667, 0.000, 1649.148, 0.000, 0.000, 0.000, 22611.602],
[1481.947, 417.919, 504.579, 288.667, 0.000, 1649.148, 0.000, 0.000, 0.000, 22470.912],
[1481.947, 417.919, 504.579, 288.667, 0.000, 1649.148, 0.000, 0.000, 0.000, 21932.634],
[1481.947, 417.919, 504.579, 288.667, 0.000, 1649.148, 0.000, 0.000, 0.000, 22425.864],
[1481.947, 417.919, 504.579, 288.667, 0.000, 1649.148, 0.000, 0.000, 0.000, 21460.103],
[1481.947, 417.919, 504.579, 288.667, 0.000, 1649.148, 0.000, 0.000, 0.000, 22376.968],
[1481.947, 417.919, 504.579, 288.667, 0.000, 763.410, 1577.904, 0.000, 0.000, 23604.295],
[1481.947, 417.919, 504.579, 288.667, 0.000, 763.410, 1577.904, 0.000, 0.000, 22704.826],
[1481.947, 417.919, 504.579, 288.667, 0.000, 763.410, 1577.904, 0.000, 0.000, 22286.293],
[1481.947, 417.919, 504.579, 288.667, 0.000, 763.410, 1577.904, 0.000, 0.000, 23204.755],
[1481.947, 417.919, 504.579, 288.667, 0.000, 763.410, 1577.904, 0.000, 0.000, 24089.017],
[1481.947, 417.919, 504.579, 288.667, 0.000, 763.410, 1577.904, 0.000, 0.000, 24768.185],
[1481.947, 417.919, 504.579, 288.667, 0.000, 763.410, 1577.904, 0.000, 0.000, 23265.196],
[1481.947, 417.919, 504.579, 288.667, 0.000, 763.410, 1577.904, 0.000, 0.000, 24350.540],
[1481.947, 417.919, 504.579, 288.667, 0.000, 763.410, 1577.904, 0.000, 0.000, 24112.706],
[1481.947, 417.919, 504.579, 288.667, 0.000, 763.410, 1577.904, 0.000, 0.000, 23709.076],
[1481.947, 417.919, 504.579, 288.667, 0.000, 763.410, 1577.904, 0.000, 0.000, 24093.545],
[2060.275, 896.050, 504.579, 288.667, 0.000, 763.410, 1577.904, 2835.944, 0.000, 34634.888],
[578.327, 896.050, 504.579, 889.896, 0.000, 3485.427, 1577.904, 732.036, 0.000, 33912.261],
[0.000, 896.050, 504.579, 889.896, 0.000, 3485.427, 1577.904, 4415.981, 0.000, 33711.951],
[644.683, 896.050, 504.579, 889.896, 0.000, 3485.427, 1577.904, 186.858, 0.000, 34951.433],
[644.683, 896.050, 504.579, 889.896, 0.000, 3485.427, 1577.904, 186.858, 0.000, 33224.596],
[644.683, 896.050, 504.579, 889.896, 0.000, 3485.427, 1577.904, 186.858, 0.000, 35065.209],
[644.683, 896.050, 504.579, 889.896, 0.000, 3485.427, 1577.904, 186.858, 0.000, 37018.699],
[644.683, 896.050, 504.579, 889.896, 0.000, 3485.427, 1577.904, 186.858, 0.000, 38706.035],
[644.683, 896.050, 504.579, 889.896, 0.000, 3485.427, 1577.904, 186.858, 0.000, 38724.569],
[644.683, 896.050, 504.579, 889.896, 0.000, 3485.427, 1577.904, 186.858, 0.000, 36647.268],
[644.683, 896.050, 504.579, 889.896, 0.000, 3485.427, 1577.904, 186.858, 0.000, 35928.930],
[644.683, 1341.215, 1074.629, 0.000, 1232.241, 0.000, 1577.904, 2367.759, 0.000, 36967.229],
[644.683, 1341.215, 1074.629, 0.000, 1232.241, 0.000, 1577.904, 2367.759, 0.000, 37056.598],
[644.683, 1341.215, 1074.629, 0.000, 1232.241, 0.000, 1577.904, 2367.759, 0.000, 38129.862],
[644.683, 1341.215, 1074.629, 0.000, 1232.241, 0.000, 1577.904, 2367.759, 0.000, 36489.333],
[644.683, 1341.215, 1074.629, 0.000, 1232.241, 0.000, 1577.904, 2367.759, 0.000, 37599.602],
[644.683, 1341.215, 1074.629, 0.000, 1232.241, 0.000, 1577.904, 2367.759, 0.000, 37566.823],
[644.683, 1341.215, 1074.629, 0.000, 1232.241, 0.000, 1577.904, 2367.759, 0.000, 36799.280],
[644.683, 1341.215, 1074.629, 0.000, 1232.241, 0.000, 1577.904, 2367.759, 0.000, 36431.196],
[644.683, 1341.215, 1074.629, 0.000, 1232.241, 0.000, 1577.904, 2367.759, 0.000, 35940.942],
[644.683, 1341.215, 1074.629, 0.000, 1232.241, 0.000, 1577.904, 2367.759, 0.000, 36973.050],
[644.683, 1606.361, 1074.629, 0.000, 1232.241, 0.000, 1577.904, 0.000, 0.000, 37393.292],
[644.683, 1606.361, 1074.629, 0.000, 1232.241, 0.000, 1577.904, 0.000, 0.000, 37711.276],
[644.683, 1606.361, 1074.629, 0.000, 1232.241, 0.000, 1577.904, 0.000, 0.000, 39515.991],
[644.683, 1606.361, 1074.629, 0.000, 1232.241, 0.000, 1577.904, 0.000, 0.000, 41404.440],
[644.683, 1606.361, 1074.629, 0.000, 1232.241, 0.000, 1577.904, 0.000, 0.000, 41573.523],
[644.683, 1606.361, 1074.629, 0.000, 1232.241, 0.000, 1577.904, 0.000, 0.000, 41011.613],
[644.683, 1606.361, 1074.629, 0.000, 0.000, 0.000, 3896.406, 0.000, 0.000, 41160.181],
[644.683, 1606.361, 1074.629, 0.000, 0.000, 0.000, 3896.406, 0.000, 0.000, 40815.512],
[644.683, 0.000, 1074.629, 0.000, 0.000, 0.000, 3896.406, 16947.110, 0.000, 40145.531],
[644.683, 0.000, 1074.629, 0.000, 0.000, 0.000, 3896.406, 16947.110, 0.000, 41217.281],
[644.683, 0.000, 1074.629, 0.000, 0.000, 0.000, 3896.406, 16947.110, 0.000, 42379.061],
[644.683, 0.000, 1074.629, 0.000, 0.000, 0.000, 3896.406, 16947.110, 0.000, 42879.589],
[644.683, 0.000, 1074.629, 0.000, 0.000, 0.000, 3896.406, 16947.110, 0.000, 41891.452],
[644.683, 0.000, 1074.629, 0.000, 0.000, 0.000, 3896.406, 16947.110, 0.000, 41929.003],
[644.683, 0.000, 1074.629, 0.000, 0.000, 0.000, 3896.406, 16947.110, 0.000, 43718.052],
[644.683, 0.000, 1074.629, 0.000, 0.000, 0.000, 3896.406, 16947.110, 0.000, 41685.916],
[0.000, 0.000, 2460.195, 0.000, 0.000, 0.000, 3896.406, 11653.255, 0.000, 42930.410],
[0.000, 0.000, 2460.195, 0.000, 0.000, 0.000, 3896.406, 11653.255, 0.000, 41242.589],
[0.000, 0.000, 2460.195, 0.000, 0.000, 0.000, 3896.406, 11653.255, 0.000, 40168.084]])
# PT signal, buy before sell, delivery period of 2 days (stocks) and 1 day (cash)
self.pt_res_bs21 = np.array([
[0.000, 0.000, 0.000, 0.000, 555.556, 0.000, 0.000, 7500.000, 0.000, 10000.000],
[0.000, 0.000, 0.000, 0.000, 555.556, 0.000, 0.000, 7500.000, 0.000, 9916.667],
[0.000, 0.000, 0.000, 0.000, 555.556, 0.000, 321.089, 6035.833, 0.000, 9761.111],
[348.015, 417.919, 0.000, 0.000, 555.556, 0.000, 321.089, 2165.905, 0.000, 9674.821],
[348.015, 417.919, 0.000, 0.000, 555.556, 0.000, 321.089, 2165.905, 0.000, 9712.587],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9910.724],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9919.378],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9793.069],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9513.822],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9123.593],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9000.600],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9053.487],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9248.714],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9161.137],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9197.337],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9504.698],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9875.246],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 10241.540],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 10449.240],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 10628.327],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 10500.789],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 0.000, 5233.140, 0.000, 10449.278],
[348.015, 417.919, 0.000, 0.000, 459.869, 0.000, 0.000, 3433.855, 0.000, 10338.286],
[348.015, 417.919, 0.000, 0.000, 459.869, 0.000, 0.000, 3433.855, 0.000, 10194.347],
[348.015, 417.919, 0.000, 0.000, 459.869, 0.000, 0.000, 3433.855, 0.000, 10471.001],
[101.498, 417.919, 0.000, 288.667, 459.869, 0.000, 0.000, 3541.085, 0.000, 10411.263],
[101.498, 417.919, 0.000, 288.667, 459.869, 0.000, 0.000, 3541.085, 0.000, 10670.062],
[101.498, 417.919, 0.000, 288.667, 459.869, 0.000, 0.000, 3541.085, 0.000, 10652.480],
[101.498, 417.919, 0.000, 288.667, 459.869, 0.000, 0.000, 3541.085, 0.000, 10526.149],
[101.498, 417.919, 0.000, 288.667, 459.869, 0.000, 0.000, 3541.085, 0.000, 10458.661],
[101.498, 417.919, 821.732, 288.667, 0.000, 2576.128, 0.000, 4487.072, 0.000, 20609.027],
[797.168, 417.919, 821.732, 288.667, 0.000, 2576.128, 0.000, 0.000, 0.000, 21979.497],
[797.168, 417.919, 821.732, 288.667, 0.000, 1649.148, 0.000, 2475.037, 0.000, 21584.441],
[1150.745, 417.919, 821.732, 288.667, 0.000, 1649.148, 0.000, 0.000, 0.000, 21266.406],
[1150.745, 417.919, 821.732, 288.667, 0.000, 1649.148, 0.000, 0.000, 0.000, 20623.683],
[1150.745, 417.919, 821.732, 288.667, 0.000, 1649.148, 0.000, 0.000, 0.000, 21404.957],
[1150.745, 417.919, 503.586, 288.667, 0.000, 1649.148, 0.000, 2230.202, 0.000, 20765.509],
[1150.745, 417.919, 503.586, 288.667, 0.000, 1649.148, 0.000, 2230.202, 0.000, 21248.748],
[1150.745, 417.919, 503.586, 288.667, 0.000, 1649.148, 0.000, 2230.202, 0.000, 21256.041],
[1150.745, 417.919, 503.586, 288.667, 0.000, 1649.148, 0.000, 2230.202, 0.000, 22018.958],
[1150.745, 417.919, 503.586, 288.667, 0.000, 1649.148, 0.000, 2230.202, 0.000, 21764.725],
[1150.745, 417.919, 503.586, 288.667, 0.000, 1649.148, 0.000, 2230.202, 0.000, 21413.241],
[1476.798, 417.919, 503.586, 288.667, 0.000, 1649.148, 0.000, 0.000, 0.000, 22417.021],
[1476.798, 417.919, 503.586, 288.667, 0.000, 1649.148, 0.000, 0.000, 0.000, 22567.685],
[1476.798, 417.919, 503.586, 288.667, 0.000, 1649.148, 0.000, 0.000, 0.000, 22427.699],
[1476.798, 417.919, 503.586, 288.667, 0.000, 1649.148, 0.000, 0.000, 0.000, 21889.359],
[1476.798, 417.919, 503.586, 288.667, 0.000, 1649.148, 0.000, 0.000, 0.000, 22381.938],
[1476.798, 417.919, 503.586, 288.667, 0.000, 1649.148, 0.000, 0.000, 0.000, 21416.358],
[1476.798, 417.919, 503.586, 288.667, 0.000, 1649.148, 0.000, 0.000, 0.000, 22332.786],
[1476.798, 417.919, 503.586, 288.667, 0.000, 761.900, 0.000, 2386.698, 0.000, 23557.595],
[1476.798, 417.919, 503.586, 288.667, 0.000, 761.900, 2209.906, 0.000, 0.000, 23336.992],
[1476.798, 417.919, 503.586, 288.667, 0.000, 761.900, 2209.906, 0.000, 0.000, 22907.742],
[1476.798, 417.919, 503.586, 288.667, 0.000, 761.900, 2209.906, 0.000, 0.000, 24059.201],
[1476.798, 417.919, 503.586, 288.667, 0.000, 761.900, 2209.906, 0.000, 0.000, 24941.902],
[1476.798, 417.919, 503.586, 288.667, 0.000, 761.900, 2209.906, 0.000, 0.000, 25817.514],
[1476.798, 417.919, 503.586, 288.667, 0.000, 761.900, 2209.906, 0.000, 0.000, 24127.939],
[1476.798, 417.919, 503.586, 288.667, 0.000, 761.900, 2209.906, 0.000, 0.000, 25459.688],
[1476.798, 417.919, 503.586, 288.667, 0.000, 761.900, 2209.906, 0.000, 0.000, 25147.370],
[1476.798, 417.919, 503.586, 288.667, 0.000, 761.900, 2209.906, 0.000, 0.000, 25005.842],
[1476.798, 417.919, 503.586, 288.667, 0.000, 761.900, 1086.639, 2752.004, 0.000, 25598.700],
[2138.154, 929.921, 503.586, 288.667, 0.000, 761.900, 1086.639, 4818.835, 0.000, 35944.098],
[661.356, 929.921, 503.586, 553.843, 0.000, 1954.237, 1086.639, 8831.252, 0.000, 35237.243],
[0.000, 929.921, 503.586, 553.843, 0.000, 3613.095, 1086.639, 9460.955, 0.000, 35154.442],
[667.098, 929.921, 503.586, 553.843, 0.000, 3613.095, 1086.639, 5084.792, 0.000, 36166.632],
[667.098, 929.921, 503.586, 553.843, 0.000, 3613.095, 1086.639, 5084.792, 0.000, 34293.883],
[667.098, 929.921, 503.586, 553.843, 0.000, 3613.095, 1086.639, 5084.792, 0.000, 35976.901],
[667.098, 929.921, 503.586, 553.843, 0.000, 3613.095, 1086.639, 5084.792, 0.000, 37848.552],
[667.098, 929.921, 503.586, 553.843, 0.000, 3613.095, 1086.639, 5084.792, 0.000, 39512.574],
[667.098, 929.921, 503.586, 553.843, 0.000, 3613.095, 1086.639, 5084.792, 0.000, 39538.024],
[667.098, 929.921, 503.586, 553.843, 0.000, 3613.095, 1086.639, 5084.792, 0.000, 37652.984],
[667.098, 929.921, 503.586, 553.843, 0.000, 3613.095, 1086.639, 5084.792, 0.000, 36687.909],
[667.098, 1108.871, 745.260, 0.000, 512.148, 0.000, 1086.639, 11861.593, 0.000, 37749.277],
[667.098, 1108.871, 745.260, 0.000, 512.148, 0.000, 1086.639, 11861.593, 0.000, 37865.518],
[667.098, 1108.871, 745.260, 0.000, 512.148, 0.000, 1086.639, 11861.593, 0.000, 38481.190],
[667.098, 1108.871, 745.260, 0.000, 512.148, 0.000, 1086.639, 11861.593, 0.000, 37425.087],
[667.098, 1108.871, 745.260, 0.000, 512.148, 0.000, 1086.639, 11861.593, 0.000, 38051.341],
[667.098, 1108.871, 745.260, 0.000, 512.148, 0.000, 1086.639, 11861.593, 0.000, 38065.478],
[667.098, 1108.871, 745.260, 0.000, 512.148, 0.000, 1086.639, 11861.593, 0.000, 37429.495],
[667.098, 1108.871, 745.260, 0.000, 512.148, 0.000, 1086.639, 11861.593, 0.000, 37154.479],
[667.098, 1600.830, 745.260, 0.000, 512.148, 0.000, 1086.639, 7576.628, 0.000, 36692.717],
[667.098, 1600.830, 745.260, 0.000, 512.148, 0.000, 1086.639, 7576.628, 0.000, 37327.055],
[667.098, 1600.830, 745.260, 0.000, 512.148, 0.000, 1086.639, 7576.628, 0.000, 37937.630],
[667.098, 1600.830, 745.260, 0.000, 512.148, 0.000, 1086.639, 7576.628, 0.000, 38298.645],
[667.098, 1600.830, 745.260, 0.000, 512.148, 0.000, 1086.639, 7576.628, 0.000, 39689.369],
[667.098, 1600.830, 745.260, 0.000, 512.148, 0.000, 1086.639, 7576.628, 0.000, 40992.397],
[667.098, 1600.830, 745.260, 0.000, 512.148, 0.000, 1086.639, 7576.628, 0.000, 41092.265],
[667.098, 1600.830, 745.260, 0.000, 512.148, 0.000, 1086.639, 7576.628, 0.000, 40733.622],
[667.098, 1600.830, 745.260, 0.000, 512.148, 0.000, 3726.579, 0.000, 0.000, 40708.515],
[667.098, 1600.830, 745.260, 0.000, 512.148, 0.000, 3726.579, 0.000, 0.000, 40485.321],
[667.098, 0.000, 745.260, 0.000, 512.148, 0.000, 3726.579, 16888.760, 0.000, 39768.059],
[667.098, 0.000, 745.260, 0.000, 512.148, 0.000, 3726.579, 16888.760, 0.000, 40519.595],
[667.098, 0.000, 745.260, 0.000, 512.148, 0.000, 3726.579, 16888.760, 0.000, 41590.937],
[667.098, 0.000, 1283.484, 0.000, 512.148, 0.000, 3726.579, 12448.413, 0.000, 42354.983],
[667.098, 0.000, 1283.484, 0.000, 512.148, 0.000, 3726.579, 12448.413, 0.000, 41175.149],
[667.098, 0.000, 1283.484, 0.000, 512.148, 0.000, 3726.579, 12448.413, 0.000, 41037.902],
[667.098, 0.000, 1283.484, 0.000, 512.148, 0.000, 3726.579, 12448.413, 0.000, 42706.213],
[667.098, 0.000, 1283.484, 0.000, 512.148, 0.000, 3726.579, 12448.413, 0.000, 40539.205],
[0.000, 0.000, 2384.452, 0.000, 512.148, 0.000, 3726.579, 9293.252, 0.000, 41608.692],
[0.000, 0.000, 2384.452, 0.000, 512.148, 0.000, 3726.579, 9293.252, 0.000, 39992.148],
[0.000, 0.000, 2384.452, 0.000, 512.148, 0.000, 3726.579, 9293.252, 0.000, 39134.828]])
# Expected backtest results for the simulated PS signals
# PS signal, sell before buy, delivery period of 0 days
self.ps_res_sb00 = np.array(
[[0.0000, 0.0000, 0.0000, 0.0000, 555.5556, 0.0000, 0.0000, 7500.0000, 0.0000, 10000.0000],
[0.0000, 0.0000, 0.0000, 0.0000, 555.5556, 0.0000, 0.0000, 7500.0000, 0.0000, 9916.6667],
[0.0000, 0.0000, 0.0000, 0.0000, 555.5556, 205.0654, 321.0892, 5059.7222, 0.0000, 9761.1111],
[346.9824, 416.6787, 0.0000, 0.0000, 555.5556, 205.0654, 321.0892, 1201.2775, 0.0000, 9646.1118],
[346.9824, 416.6787, 191.0372, 0.0000, 555.5556, 205.0654, 321.0892, 232.7189, 0.0000, 9685.5858],
[346.9824, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 1891.0523, 0.0000, 9813.2184],
[346.9824, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 1891.0523, 0.0000, 9803.1288],
[346.9824, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 1891.0523, 0.0000, 9608.0198],
[346.9824, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 1891.0523, 0.0000, 9311.5727],
[346.9824, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 1891.0523, 0.0000, 8883.6246],
[346.9824, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 1891.0523, 0.0000, 8751.3900],
[346.9824, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 1891.0523, 0.0000, 8794.1811],
[346.9824, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 1891.0523, 0.0000, 9136.5704],
[231.4373, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 2472.2444, 0.0000, 9209.3588],
[231.4373, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 2472.2444, 0.0000, 9093.8294],
[231.4373, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 2472.2444, 0.0000, 9387.5537],
[231.4373, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 2472.2444, 0.0000, 9585.9589],
[231.4373, 416.6787, 95.5186, 0.0000, 138.8889, 205.0654, 321.0892, 3035.8041, 0.0000, 9928.7771],
[231.4373, 416.6787, 95.5186, 0.0000, 138.8889, 205.0654, 321.0892, 3035.8041, 0.0000, 10060.3806],
[231.4373, 416.6787, 95.5186, 0.0000, 138.8889, 205.0654, 321.0892, 3035.8041, 0.0000, 10281.0021],
[231.4373, 416.6787, 95.5186, 0.0000, 138.8889, 205.0654, 321.0892, 3035.8041, 0.0000, 10095.5613],
[231.4373, 416.6787, 95.5186, 0.0000, 138.8889, 205.0654, 0.0000, 4506.3926, 0.0000, 10029.9571],
[231.4373, 416.6787, 95.5186, 0.0000, 474.2238, 205.0654, 0.0000, 2531.2699, 0.0000, 9875.6133],
[231.4373, 416.6787, 95.5186, 0.0000, 474.2238, 205.0654, 0.0000, 2531.2699, 0.0000, 9614.9463],
[231.4373, 416.6787, 95.5186, 0.0000, 474.2238, 205.0654, 0.0000, 2531.2699, 0.0000, 9824.1722],
[115.7186, 416.6787, 95.5186, 269.8496, 474.2238, 205.0654, 0.0000, 1854.7990, 0.0000, 9732.5743],
[115.7186, 416.6787, 95.5186, 269.8496, 474.2238, 205.0654, 0.0000, 1854.7990, 0.0000, 9968.3391],
[115.7186, 416.6787, 95.5186, 269.8496, 474.2238, 205.0654, 0.0000, 1854.7990, 0.0000, 10056.1579],
[115.7186, 416.6787, 95.5186, 269.8496, 474.2238, 205.0654, 0.0000, 1854.7990, 0.0000, 9921.4925],
[115.7186, 416.6787, 95.5186, 269.8496, 474.2238, 205.0654, 0.0000, 1854.7990, 0.0000, 9894.1621],
[115.7186, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 6179.7742, 0.0000, 20067.9370],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 21133.5080],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 20988.8485],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 20596.7429],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 19910.7730],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 20776.7070],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 20051.7969],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 20725.3884],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 20828.8795],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 21647.1811],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 21310.1687],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 20852.0993],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 21912.3952],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 21937.8282],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 21962.4576],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 938.6967, 1339.2073, 0.0000, 0.0000, 21389.4018],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 938.6967, 1339.2073, 0.0000, 0.0000, 22027.4535],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 938.6967, 1339.2073, 0.0000, 0.0000, 20939.9992],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 938.6967, 1339.2073, 0.0000, 0.0000, 21250.0636],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 938.6967, 1339.2073, 0.0000, 0.0000, 22282.7812],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 938.6967, 1339.2073, 0.0000, 0.0000, 21407.0658],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 938.6967, 1339.2073, 0.0000, 0.0000, 21160.2373],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 938.6967, 1339.2073, 0.0000, 0.0000, 21826.7682],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 938.6967, 1339.2073, 0.0000, 0.0000, 22744.9403],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 938.6967, 1339.2073, 0.0000, 0.0000, 23466.1185],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 938.6967, 1339.2073, 0.0000, 0.0000, 22017.8821],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 938.6967, 1339.2073, 0.0000, 0.0000, 23191.4662],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 938.6967, 1339.2073, 0.0000, 0.0000, 23099.0822],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 938.6967, 1339.2073, 0.0000, 0.0000, 22684.7671],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 938.6967, 1339.2073, 0.0000, 0.0000, 22842.1346],
[1073.8232, 416.6787, 735.6442, 269.8496, 1785.2055, 938.6967, 1339.2073, 5001.4246, 0.0000, 33323.8359],
[0.0000, 416.6787, 735.6442, 944.9611, 1785.2055, 3582.8836, 1339.2073, 0.0000, 0.0000, 32820.2901],
[0.0000, 416.6787, 735.6442, 944.9611, 1785.2055, 3582.8836, 1339.2073, 0.0000, 0.0000, 32891.2308],
[0.0000, 416.6787, 735.6442, 944.9611, 1785.2055, 3582.8836, 1339.2073, 0.0000, 0.0000, 34776.5296],
[0.0000, 416.6787, 735.6442, 944.9611, 1785.2055, 3582.8836, 1339.2073, 0.0000, 0.0000, 33909.0325],
[0.0000, 416.6787, 735.6442, 944.9611, 1785.2055, 3582.8836, 1339.2073, 0.0000, 0.0000, 34560.1906],
[0.0000, 416.6787, 735.6442, 944.9611, 1785.2055, 3582.8836, 1339.2073, 0.0000, 0.0000, 36080.4552],
[0.0000, 416.6787, 735.6442, 944.9611, 1785.2055, 3582.8836, 1339.2073, 0.0000, 0.0000, 38618.4454],
[0.0000, 416.6787, 735.6442, 944.9611, 1785.2055, 3582.8836, 1339.2073, 0.0000, 0.0000, 38497.9230],
[0.0000, 416.6787, 735.6442, 944.9611, 1785.2055, 3582.8836, 1339.2073, 0.0000, 0.0000, 37110.0991],
[0.0000, 416.6787, 735.6442, 944.9611, 1785.2055, 3582.8836, 1339.2073, 0.0000, 0.0000, 35455.2467],
[0.0000, 416.6787, 735.6442, 0.0000, 1785.2055, 0.0000, 1339.2073, 15126.2788, 0.0000, 35646.1860],
[0.0000, 416.6787, 735.6442, 0.0000, 1785.2055, 0.0000, 1339.2073, 15126.2788, 0.0000, 35472.3020],
[0.0000, 416.6787, 735.6442, 0.0000, 1785.2055, 0.0000, 1339.2073, 15126.2788, 0.0000, 36636.4694],
[0.0000, 416.6787, 735.6442, 0.0000, 1785.2055, 0.0000, 1339.2073, 15126.2788, 0.0000, 35191.7035],
[0.0000, 416.6787, 735.6442, 0.0000, 1785.2055, 0.0000, 1339.2073, 15126.2788, 0.0000, 36344.2242],
[0.0000, 416.6787, 735.6442, 0.0000, 1785.2055, 0.0000, 1339.2073, 15126.2788, 0.0000, 36221.6005],
[0.0000, 416.6787, 735.6442, 0.0000, 1785.2055, 0.0000, 1339.2073, 15126.2788, 0.0000, 35943.5708],
[0.0000, 416.6787, 735.6442, 0.0000, 1785.2055, 0.0000, 1339.2073, 15126.2788, 0.0000, 35708.2608],
[0.0000, 416.6787, 735.6442, 0.0000, 1785.2055, 0.0000, 1339.2073, 15126.2788, 0.0000, 35589.0286],
[0.0000, 416.6787, 735.6442, 0.0000, 1785.2055, 0.0000, 1339.2073, 15126.2788, 0.0000, 36661.0285],
[0.0000, 823.2923, 735.6442, 0.0000, 1785.2055, 0.0000, 1339.2073, 11495.2197, 0.0000, 36310.5909],
[0.0000, 823.2923, 735.6442, 0.0000, 1785.2055, 0.0000, 1339.2073, 11495.2197, 0.0000, 36466.7637],
[0.0000, 823.2923, 735.6442, 0.0000, 1785.2055, 0.0000, 1339.2073, 11495.2197, 0.0000, 37784.4918],
[0.0000, 823.2923, 735.6442, 0.0000, 1785.2055, 0.0000, 1339.2073, 11495.2197, 0.0000, 39587.6766],
[0.0000, 823.2923, 735.6442, 0.0000, 1785.2055, 0.0000, 1339.2073, 11495.2197, 0.0000, 40064.0191],
[0.0000, 823.2923, 735.6442, 0.0000, 1785.2055, 0.0000, 1339.2073, 11495.2197, 0.0000, 39521.6439],
[0.0000, 823.2923, 735.6442, 0.0000, 0.0000, 0.0000, 2730.5758, 17142.1018, 0.0000, 39932.2761],
[0.0000, 823.2923, 735.6442, 0.0000, 0.0000, 0.0000, 2730.5758, 17142.1018, 0.0000, 39565.2475],
[0.0000, 0.0000, 735.6442, 0.0000, 0.0000, 0.0000, 2730.5758, 25827.8351, 0.0000, 38943.1632],
[0.0000, 0.0000, 735.6442, 0.0000, 0.0000, 0.0000, 2730.5758, 25827.8351, 0.0000, 39504.1184],
[0.0000, 0.0000, 735.6442, 0.0000, 0.0000, 0.0000, 2730.5758, 25827.8351, 0.0000, 40317.8004],
[0.0000, 0.0000, 735.6442, 0.0000, 0.0000, 0.0000, 2730.5758, 25827.8351, 0.0000, 40798.5768],
[0.0000, 0.0000, 735.6442, 0.0000, 0.0000, 0.0000, 2730.5758, 25827.8351, 0.0000, 39962.5711],
[0.0000, 0.0000, 735.6442, 0.0000, 0.0000, 0.0000, 2730.5758, 25827.8351, 0.0000, 40194.4793],
[0.0000, 0.0000, 735.6442, 0.0000, 0.0000, 0.0000, 2730.5758, 25827.8351, 0.0000, 41260.4003],
[0.0000, 0.0000, 735.6442, 0.0000, 0.0000, 0.0000, 2730.5758, 25827.8351, 0.0000, 39966.3024],
[0.0000, 0.0000, 1613.4518, 0.0000, 0.0000, 0.0000, 2730.5758, 19700.7377, 0.0000, 40847.3160],
[0.0000, 0.0000, 1613.4518, 0.0000, 0.0000, 0.0000, 2730.5758, 19700.7377, 0.0000, 39654.5445],
[0.0000, 0.0000, 1613.4518, 0.0000, 0.0000, 0.0000, 2730.5758, 19700.7377, 0.0000, 38914.8151]])
# PS signal, buy before sell, delivery period of 0 days
self.ps_res_bs00 = np.array(
[[0.0000, 0.0000, 0.0000, 0.0000, 555.5556, 0.0000, 0.0000, 7500.0000, 0.0000, 10000.0000],
[0.0000, 0.0000, 0.0000, 0.0000, 555.5556, 0.0000, 0.0000, 7500.0000, 0.0000, 9916.6667],
[0.0000, 0.0000, 0.0000, 0.0000, 555.5556, 205.0654, 321.0892, 5059.7222, 0.0000, 9761.1111],
[346.9824, 416.6787, 0.0000, 0.0000, 555.5556, 205.0654, 321.0892, 1201.2775, 0.0000, 9646.1118],
[346.9824, 416.6787, 191.0372, 0.0000, 555.5556, 205.0654, 321.0892, 232.7189, 0.0000, 9685.5858],
[346.9824, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 1891.0523, 0.0000, 9813.2184],
[346.9824, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 1891.0523, 0.0000, 9803.1288],
[346.9824, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 1891.0523, 0.0000, 9608.0198],
[346.9824, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 1891.0523, 0.0000, 9311.5727],
[346.9824, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 1891.0523, 0.0000, 8883.6246],
[346.9824, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 1891.0523, 0.0000, 8751.3900],
[346.9824, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 1891.0523, 0.0000, 8794.1811],
[346.9824, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 1891.0523, 0.0000, 9136.5704],
[231.4373, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 2472.2444, 0.0000, 9209.3588],
[231.4373, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 2472.2444, 0.0000, 9093.8294],
[231.4373, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 2472.2444, 0.0000, 9387.5537],
[231.4373, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 2472.2444, 0.0000, 9585.9589],
[231.4373, 416.6787, 95.5186, 0.0000, 138.8889, 205.0654, 321.0892, 3035.8041, 0.0000, 9928.7771],
[231.4373, 416.6787, 95.5186, 0.0000, 138.8889, 205.0654, 321.0892, 3035.8041, 0.0000, 10060.3806],
[231.4373, 416.6787, 95.5186, 0.0000, 138.8889, 205.0654, 321.0892, 3035.8041, 0.0000, 10281.0021],
[231.4373, 416.6787, 95.5186, 0.0000, 138.8889, 205.0654, 321.0892, 3035.8041, 0.0000, 10095.5613],
[231.4373, 416.6787, 95.5186, 0.0000, 138.8889, 205.0654, 0.0000, 4506.3926, 0.0000, 10029.9571],
[231.4373, 416.6787, 95.5186, 0.0000, 474.2238, 205.0654, 0.0000, 2531.2699, 0.0000, 9875.6133],
[231.4373, 416.6787, 95.5186, 0.0000, 474.2238, 205.0654, 0.0000, 2531.2699, 0.0000, 9614.9463],
[231.4373, 416.6787, 95.5186, 0.0000, 474.2238, 205.0654, 0.0000, 2531.2699, 0.0000, 9824.1722],
[115.7186, 416.6787, 95.5186, 269.8496, 474.2238, 205.0654, 0.0000, 1854.7990, 0.0000, 9732.5743],
[115.7186, 416.6787, 95.5186, 269.8496, 474.2238, 205.0654, 0.0000, 1854.7990, 0.0000, 9968.3391],
[115.7186, 416.6787, 95.5186, 269.8496, 474.2238, 205.0654, 0.0000, 1854.7990, 0.0000, 10056.1579],
[115.7186, 416.6787, 95.5186, 269.8496, 474.2238, 205.0654, 0.0000, 1854.7990, 0.0000, 9921.4925],
[115.7186, 416.6787, 95.5186, 269.8496, 474.2238, 205.0654, 0.0000, 1854.7990, 0.0000, 9894.1621],
[115.7186, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 6179.7742, 0.0000, 20067.9370],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 21133.5080],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 20988.8485],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 20596.7429],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 19910.7730],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 20776.7070],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 20051.7969],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 20725.3884],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 20828.8795],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 21647.1811],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 21310.1687],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 20852.0993],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 21912.3952],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 21937.8282],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 21962.4576],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 938.6967, 0.0000, 2008.8110, 0.0000, 21389.4018],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 938.6967, 0.0000, 2008.8110, 0.0000, 21625.6913],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 938.6967, 0.0000, 2008.8110, 0.0000, 20873.0389],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 938.6967, 0.0000, 2008.8110, 0.0000, 21450.9447],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 938.6967, 0.0000, 2008.8110, 0.0000, 22269.3892],
[1073.8232, 737.0632, 735.6442, 269.8496, 0.0000, 938.6967, 0.0000, 0.0000, 0.0000, 21969.5329],
[1073.8232, 737.0632, 735.6442, 269.8496, 0.0000, 938.6967, 0.0000, 0.0000, 0.0000, 21752.6924],
[1073.8232, 737.0632, 735.6442, 269.8496, 0.0000, 938.6967, 0.0000, 0.0000, 0.0000, 22000.6088],
[1073.8232, 737.0632, 735.6442, 269.8496, 0.0000, 938.6967, 0.0000, 0.0000, 0.0000, 23072.5655],
[1073.8232, 737.0632, 735.6442, 269.8496, 0.0000, 938.6967, 0.0000, 0.0000, 0.0000, 23487.5201],
[1073.8232, 737.0632, 735.6442, 269.8496, 0.0000, 938.6967, 0.0000, 0.0000, 0.0000, 22441.0460],
[1073.8232, 737.0632, 735.6442, 269.8496, 0.0000, 938.6967, 0.0000, 0.0000, 0.0000, 23201.2700],
[1073.8232, 737.0632, 735.6442, 269.8496, 0.0000, 938.6967, 0.0000, 0.0000, 0.0000, 23400.9485],
[1073.8232, 737.0632, 735.6442, 269.8496, 0.0000, 938.6967, 0.0000, 0.0000, 0.0000, 22306.2008],
[1073.8232, 737.0632, 735.6442, 269.8496, 0.0000, 938.6967, 0.0000, 0.0000, 0.0000, 21989.5913],
[1073.8232, 737.0632, 735.6442, 269.8496, 1708.7766, 938.6967, 0.0000, 5215.4255, 0.0000, 31897.1636],
[0.0000, 737.0632, 735.6442, 578.0898, 1708.7766, 2145.9711, 0.0000, 6421.4626, 0.0000, 31509.5059],
[0.0000, 737.0632, 735.6442, 578.0898, 1708.7766, 2145.9711, 0.0000, 6421.4626, 0.0000, 31451.7888],
[978.8815, 737.0632, 735.6442, 578.0898, 1708.7766, 2145.9711, 0.0000, 0.0000, 0.0000, 32773.4592],
[978.8815, 737.0632, 735.6442, 578.0898, 1708.7766, 2145.9711, 0.0000, 0.0000, 0.0000, 32287.0318],
[978.8815, 737.0632, 735.6442, 578.0898, 1708.7766, 2145.9711, 0.0000, 0.0000, 0.0000, 32698.1938],
[978.8815, 737.0632, 735.6442, 578.0898, 1708.7766, 2145.9711, 0.0000, 0.0000, 0.0000, 34031.5183],
[978.8815, 737.0632, 735.6442, 578.0898, 1708.7766, 2145.9711, 0.0000, 0.0000, 0.0000, 35537.8336],
[978.8815, 737.0632, 735.6442, 578.0898, 1708.7766, 2145.9711, 0.0000, 0.0000, 0.0000, 36212.6487],
[978.8815, 737.0632, 735.6442, 578.0898, 1708.7766, 2145.9711, 0.0000, 0.0000, 0.0000, 36007.5294],
[978.8815, 737.0632, 735.6442, 578.0898, 1708.7766, 2145.9711, 0.0000, 0.0000, 0.0000, 34691.3797],
[978.8815, 737.0632, 735.6442, 0.0000, 1708.7766, 0.0000, 0.0000, 9162.7865, 0.0000, 33904.8810],
[978.8815, 737.0632, 735.6442, 0.0000, 1708.7766, 0.0000, 0.0000, 9162.7865, 0.0000, 34341.6098],
[978.8815, 737.0632, 735.6442, 0.0000, 1708.7766, 0.0000, 0.0000, 9162.7865, 0.0000, 35479.9505],
[978.8815, 737.0632, 735.6442, 0.0000, 1708.7766, 0.0000, 0.0000, 9162.7865, 0.0000, 34418.4455],
[978.8815, 737.0632, 735.6442, 0.0000, 1708.7766, 0.0000, 0.0000, 9162.7865, 0.0000, 34726.7182],
[978.8815, 737.0632, 735.6442, 0.0000, 1708.7766, 0.0000, 0.0000, 9162.7865, 0.0000, 34935.0407],
[978.8815, 737.0632, 735.6442, 0.0000, 1708.7766, 0.0000, 0.0000, 9162.7865, 0.0000, 34136.7505],
[978.8815, 737.0632, 735.6442, 0.0000, 1708.7766, 0.0000, 0.0000, 9162.7865, 0.0000, 33804.1575],
[195.7763, 737.0632, 735.6442, 0.0000, 1708.7766, 0.0000, 0.0000, 14025.8697, 0.0000, 33653.8970],
[195.7763, 737.0632, 735.6442, 0.0000, 1708.7766, 0.0000, 0.0000, 14025.8697, 0.0000, 34689.8757],
[195.7763, 1124.9219, 735.6442, 0.0000, 1708.7766, 0.0000, 0.0000, 10562.2913, 0.0000, 34635.7841],
[195.7763, 1124.9219, 735.6442, 0.0000, 1708.7766, 0.0000, 0.0000, 10562.2913, 0.0000, 35253.2755],
[195.7763, 1124.9219, 735.6442, 0.0000, 1708.7766, 0.0000, 0.0000, 10562.2913, 0.0000, 36388.1051],
[195.7763, 1124.9219, 735.6442, 0.0000, 1708.7766, 0.0000, 0.0000, 10562.2913, 0.0000, 37987.4204],
[195.7763, 1124.9219, 735.6442, 0.0000, 1708.7766, 0.0000, 0.0000, 10562.2913, 0.0000, 38762.2103],
[195.7763, 1124.9219, 735.6442, 0.0000, 1708.7766, 0.0000, 0.0000, 10562.2913, 0.0000, 38574.0544],
[195.7763, 1124.9219, 735.6442, 0.0000, 0.0000, 0.0000, 1362.4361, 15879.4935, 0.0000, 39101.9156],
[195.7763, 1124.9219, 735.6442, 0.0000, 0.0000, 0.0000, 1362.4361, 15879.4935, 0.0000, 39132.5587],
[195.7763, 0.0000, 735.6442, 0.0000, 0.0000, 0.0000, 1362.4361, 27747.4200, 0.0000, 38873.2941],
[195.7763, 0.0000, 735.6442, 0.0000, 0.0000, 0.0000, 1362.4361, 27747.4200, 0.0000, 39336.6594],
[195.7763, 0.0000, 735.6442, 0.0000, 0.0000, 0.0000, 1362.4361, 27747.4200, 0.0000, 39565.9568],
[195.7763, 0.0000, 735.6442, 0.0000, 0.0000, 0.0000, 1362.4361, 27747.4200, 0.0000, 39583.4317],
[195.7763, 0.0000, 735.6442, 0.0000, 0.0000, 0.0000, 1362.4361, 27747.4200, 0.0000, 39206.8350],
[195.7763, 0.0000, 735.6442, 0.0000, 0.0000, 0.0000, 1362.4361, 27747.4200, 0.0000, 39092.6551],
[195.7763, 0.0000, 735.6442, 0.0000, 0.0000, 0.0000, 1362.4361, 27747.4200, 0.0000, 39666.1834],
[195.7763, 0.0000, 735.6442, 0.0000, 0.0000, 0.0000, 1362.4361, 27747.4200, 0.0000, 38798.0749],
[0.0000, 0.0000, 1576.8381, 0.0000, 0.0000, 0.0000, 1362.4361, 23205.2077, 0.0000, 39143.5561],
[0.0000, 0.0000, 1576.8381, 0.0000, 0.0000, 0.0000, 1362.4361, 23205.2077, 0.0000, 38617.8779],
[0.0000, 0.0000, 1576.8381, 0.0000, 0.0000, 0.0000, 1362.4361, 23205.2077, 0.0000, 38156.1701]])
# PS signal, sell before buy, delivery period of 2 days (stocks) and 1 day (cash)
self.ps_res_sb20 = np.array(
[[0.000, 0.000, 0.000, 0.000, 555.556, 0.000, 0.000, 7500.000, 0.000, 10000.000],
[0.000, 0.000, 0.000, 0.000, 555.556, 0.000, 0.000, 7500.000, 0.000, 9916.667],
[0.000, 0.000, 0.000, 0.000, 555.556, 205.065, 321.089, 5059.722, 0.000, 9761.111],
[346.982, 416.679, 0.000, 0.000, 555.556, 205.065, 321.089, 1201.278, 0.000, 9646.112],
[346.982, 416.679, 191.037, 0.000, 555.556, 205.065, 321.089, 232.719, 0.000, 9685.586],
[346.982, 416.679, 191.037, 0.000, 138.889, 205.065, 321.089, 1891.052, 0.000, 9813.218],
[346.982, 416.679, 191.037, 0.000, 138.889, 205.065, 321.089, 1891.052, 0.000, 9803.129],
[346.982, 416.679, 191.037, 0.000, 138.889, 205.065, 321.089, 1891.052, 0.000, 9608.020],
[346.982, 416.679, 191.037, 0.000, 138.889, 205.065, 321.089, 1891.052, 0.000, 9311.573],
[346.982, 416.679, 191.037, 0.000, 138.889, 205.065, 321.089, 1891.052, 0.000, 8883.625],
[346.982, 416.679, 191.037, 0.000, 138.889, 205.065, 321.089, 1891.052, 0.000, 8751.390],
[346.982, 416.679, 191.037, 0.000, 138.889, 205.065, 321.089, 1891.052, 0.000, 8794.181],
[346.982, 416.679, 191.037, 0.000, 138.889, 205.065, 321.089, 1891.052, 0.000, 9136.570],
[231.437, 416.679, 191.037, 0.000, 138.889, 205.065, 321.089, 2472.244, 0.000, 9209.359],
[231.437, 416.679, 191.037, 0.000, 138.889, 205.065, 321.089, 2472.244, 0.000, 9093.829],
[231.437, 416.679, 191.037, 0.000, 138.889, 205.065, 321.089, 2472.244, 0.000, 9387.554],
[231.437, 416.679, 191.037, 0.000, 138.889, 205.065, 321.089, 2472.244, 0.000, 9585.959],
[231.437, 416.679, 95.519, 0.000, 138.889, 205.065, 321.089, 3035.804, 0.000, 9928.777],
[231.437, 416.679, 95.519, 0.000, 138.889, 205.065, 321.089, 3035.804, 0.000, 10060.381],
[231.437, 416.679, 95.519, 0.000, 138.889, 205.065, 321.089, 3035.804, 0.000, 10281.002],
[231.437, 416.679, 95.519, 0.000, 138.889, 205.065, 321.089, 3035.804, 0.000, 10095.561],
[231.437, 416.679, 95.519, 0.000, 138.889, 205.065, 0.000, 4506.393, 0.000, 10029.957],
[231.437, 416.679, 95.519, 0.000, 474.224, 205.065, 0.000, 2531.270, 0.000, 9875.613],
[231.437, 416.679, 95.519, 0.000, 474.224, 205.065, 0.000, 2531.270, 0.000, 9614.946],
[231.437, 416.679, 95.519, 0.000, 474.224, 205.065, 0.000, 2531.270, 0.000, 9824.172],
[115.719, 416.679, 95.519, 269.850, 474.224, 205.065, 0.000, 1854.799, 0.000, 9732.574],
[115.719, 416.679, 95.519, 269.850, 474.224, 205.065, 0.000, 1854.799, 0.000, 9968.339],
[115.719, 416.679, 95.519, 269.850, 474.224, 205.065, 0.000, 1854.799, 0.000, 10056.158],
[115.719, 416.679, 95.519, 269.850, 474.224, 205.065, 0.000, 1854.799, 0.000, 9921.492],
[115.719, 416.679, 95.519, 269.850, 474.224, 205.065, 0.000, 1854.799, 0.000, 9894.162],
[115.719, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 6179.774, 0.000, 20067.937],
[1073.823, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 0.000, 0.000, 21133.508],
[1073.823, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 0.000, 0.000, 20988.848],
[1073.823, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 0.000, 0.000, 20596.743],
[1073.823, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 0.000, 0.000, 19910.773],
[1073.823, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 0.000, 0.000, 20776.707],
[1073.823, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 0.000, 0.000, 20051.797],
[1073.823, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 0.000, 0.000, 20725.388],
[1073.823, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 0.000, 0.000, 20828.880],
[1073.823, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 0.000, 0.000, 21647.181],
[1073.823, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 0.000, 0.000, 21310.169],
[1073.823, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 0.000, 0.000, 20852.099],
[1073.823, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 0.000, 0.000, 21912.395],
[1073.823, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 0.000, 0.000, 21937.828],
[1073.823, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 0.000, 0.000, 21962.458],
[1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 21389.402],
[1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 22027.453],
[1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 20939.999],
[1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 21250.064],
[1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 22282.781],
[1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 21407.066],
[1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 21160.237],
[1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 21826.768],
[1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 22744.940],
[1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 23466.118],
[1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 22017.882],
[1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 23191.466],
[1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 23099.082],
[1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 22684.767],
[1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 22842.135],
[1073.823, 416.679, 735.644, 269.850, 1785.205, 938.697, 1339.207, 5001.425, 0.000, 33323.836],
[0.000, 416.679, 735.644, 944.961, 1785.205, 3582.884, 1339.207, 0.000, 0.000, 32820.290],
[0.000, 416.679, 735.644, 944.961, 1785.205, 3582.884, 1339.207, 0.000, 0.000, 32891.231],
[0.000, 416.679, 735.644, 944.961, 1785.205, 3582.884, 1339.207, 0.000, 0.000, 34776.530],
[0.000, 416.679, 735.644, 944.961, 1785.205, 3582.884, 1339.207, 0.000, 0.000, 33909.032],
[0.000, 416.679, 735.644, 944.961, 1785.205, 3582.884, 1339.207, 0.000, 0.000, 34560.191],
[0.000, 416.679, 735.644, 944.961, 1785.205, 3582.884, 1339.207, 0.000, 0.000, 36080.455],
[0.000, 416.679, 735.644, 944.961, 1785.205, 3582.884, 1339.207, 0.000, 0.000, 38618.445],
[0.000, 416.679, 735.644, 944.961, 1785.205, 3582.884, 1339.207, 0.000, 0.000, 38497.923],
[0.000, 416.679, 735.644, 944.961, 1785.205, 3582.884, 1339.207, 0.000, 0.000, 37110.099],
[0.000, 416.679, 735.644, 944.961, 1785.205, 3582.884, 1339.207, 0.000, 0.000, 35455.247],
[0.000, 416.679, 735.644, 0.000, 1785.205, 0.000, 1339.207, 15126.279, 0.000, 35646.186],
[0.000, 416.679, 735.644, 0.000, 1785.205, 0.000, 1339.207, 15126.279, 0.000, 35472.302],
[0.000, 416.679, 735.644, 0.000, 1785.205, 0.000, 1339.207, 15126.279, 0.000, 36636.469],
[0.000, 416.679, 735.644, 0.000, 1785.205, 0.000, 1339.207, 15126.279, 0.000, 35191.704],
[0.000, 416.679, 735.644, 0.000, 1785.205, 0.000, 1339.207, 15126.279, 0.000, 36344.224],
[0.000, 416.679, 735.644, 0.000, 1785.205, 0.000, 1339.207, 15126.279, 0.000, 36221.601],
[0.000, 416.679, 735.644, 0.000, 1785.205, 0.000, 1339.207, 15126.279, 0.000, 35943.571],
[0.000, 416.679, 735.644, 0.000, 1785.205, 0.000, 1339.207, 15126.279, 0.000, 35708.261],
[0.000, 416.679, 735.644, 0.000, 1785.205, 0.000, 1339.207, 15126.279, 0.000, 35589.029],
[0.000, 416.679, 735.644, 0.000, 1785.205, 0.000, 1339.207, 15126.279, 0.000, 36661.029],
[0.000, 823.292, 735.644, 0.000, 1785.205, 0.000, 1339.207, 11495.220, 0.000, 36310.591],
[0.000, 823.292, 735.644, 0.000, 1785.205, 0.000, 1339.207, 11495.220, 0.000, 36466.764],
[0.000, 823.292, 735.644, 0.000, 1785.205, 0.000, 1339.207, 11495.220, 0.000, 37784.492],
[0.000, 823.292, 735.644, 0.000, 1785.205, 0.000, 1339.207, 11495.220, 0.000, 39587.677],
[0.000, 823.292, 735.644, 0.000, 1785.205, 0.000, 1339.207, 11495.220, 0.000, 40064.019],
[0.000, 823.292, 735.644, 0.000, 1785.205, 0.000, 1339.207, 11495.220, 0.000, 39521.644],
[0.000, 823.292, 735.644, 0.000, 0.000, 0.000, 2730.576, 17142.102, 0.000, 39932.276],
[0.000, 823.292, 735.644, 0.000, 0.000, 0.000, 2730.576, 17142.102, 0.000, 39565.248],
[0.000, 0.000, 735.644, 0.000, 0.000, 0.000, 2730.576, 25827.835, 0.000, 38943.163],
[0.000, 0.000, 735.644, 0.000, 0.000, 0.000, 2730.576, 25827.835, 0.000, 39504.118],
[0.000, 0.000, 735.644, 0.000, 0.000, 0.000, 2730.576, 25827.835, 0.000, 40317.800],
[0.000, 0.000, 735.644, 0.000, 0.000, 0.000, 2730.576, 25827.835, 0.000, 40798.577],
[0.000, 0.000, 735.644, 0.000, 0.000, 0.000, 2730.576, 25827.835, 0.000, 39962.571],
[0.000, 0.000, 735.644, 0.000, 0.000, 0.000, 2730.576, 25827.835, 0.000, 40194.479],
[0.000, 0.000, 735.644, 0.000, 0.000, 0.000, 2730.576, 25827.835, 0.000, 41260.400],
[0.000, 0.000, 735.644, 0.000, 0.000, 0.000, 2730.576, 25827.835, 0.000, 39966.302],
[0.000, 0.000, 1613.452, 0.000, 0.000, 0.000, 2730.576, 19700.738, 0.000, 40847.316],
[0.000, 0.000, 1613.452, 0.000, 0.000, 0.000, 2730.576, 19700.738, 0.000, 39654.544],
[0.000, 0.000, 1613.452, 0.000, 0.000, 0.000, 2730.576, 19700.738, 0.000, 38914.815]])
# PS signal, buy before sell, delivery period of 2 days (stocks) and 1 day (cash)
self.ps_res_bs21 = np.array(
[[0.000, 0.000, 0.000, 0.000, 555.556, 0.000, 0.000, 7500.000, 0.000, 10000.000],
[0.000, 0.000, 0.000, 0.000, 555.556, 0.000, 0.000, 7500.000, 0.000, 9916.667],
[0.000, 0.000, 0.000, 0.000, 555.556, 208.333, 326.206, 5020.833, 0.000, 9761.111],
[351.119, 421.646, 0.000, 0.000, 555.556, 208.333, 326.206, 1116.389, 0.000, 9645.961],
[351.119, 421.646, 190.256, 0.000, 555.556, 208.333, 326.206, 151.793, 0.000, 9686.841],
[351.119, 421.646, 190.256, 0.000, 138.889, 208.333, 326.206, 1810.126, 0.000, 9813.932],
[351.119, 421.646, 190.256, 0.000, 138.889, 208.333, 326.206, 1810.126, 0.000, 9803.000],
[351.119, 421.646, 190.256, 0.000, 138.889, 208.333, 326.206, 1810.126, 0.000, 9605.334],
[351.119, 421.646, 190.256, 0.000, 138.889, 208.333, 326.206, 1810.126, 0.000, 9304.001],
[351.119, 421.646, 190.256, 0.000, 138.889, 208.333, 326.206, 1810.126, 0.000, 8870.741],
[351.119, 421.646, 190.256, 0.000, 138.889, 208.333, 326.206, 1810.126, 0.000, 8738.282],
[351.119, 421.646, 190.256, 0.000, 138.889, 208.333, 326.206, 1810.126, 0.000, 8780.664],
[351.119, 421.646, 190.256, 0.000, 138.889, 208.333, 326.206, 1810.126, 0.000, 9126.199],
[234.196, 421.646, 190.256, 0.000, 138.889, 208.333, 326.206, 2398.247, 0.000, 9199.746],
[234.196, 421.646, 190.256, 0.000, 138.889, 208.333, 326.206, 2398.247, 0.000, 9083.518],
[234.196, 421.646, 190.256, 0.000, 138.889, 208.333, 326.206, 2398.247, 0.000, 9380.932],
[234.196, 421.646, 190.256, 0.000, 138.889, 208.333, 326.206, 2398.247, 0.000, 9581.266],
[234.196, 421.646, 95.128, 0.000, 138.889, 208.333, 326.206, 2959.501, 0.000, 9927.154],
[234.196, 421.646, 95.128, 0.000, 138.889, 208.333, 326.206, 2959.501, 0.000, 10059.283],
[234.196, 421.646, 95.128, 0.000, 138.889, 208.333, 326.206, 2959.501, 0.000, 10281.669],
[234.196, 421.646, 95.128, 0.000, 138.889, 208.333, 326.206, 2959.501, 0.000, 10093.263],
[234.196, 421.646, 95.128, 0.000, 138.889, 208.333, 0.000, 4453.525, 0.000, 10026.289],
[234.196, 421.646, 95.128, 0.000, 479.340, 208.333, 0.000, 2448.268, 0.000, 9870.523],
[234.196, 421.646, 95.128, 0.000, 479.340, 208.333, 0.000, 2448.268, 0.000, 9606.437],
[234.196, 421.646, 95.128, 0.000, 479.340, 208.333, 0.000, 2448.268, 0.000, 9818.691],
[117.098, 421.646, 95.128, 272.237, 479.340, 208.333, 0.000, 1768.219, 0.000, 9726.556],
[117.098, 421.646, 95.128, 272.237, 479.340, 208.333, 0.000, 1768.219, 0.000, 9964.547],
[117.098, 421.646, 95.128, 272.237, 479.340, 208.333, 0.000, 1768.219, 0.000, 10053.449],
[117.098, 421.646, 95.128, 272.237, 479.340, 208.333, 0.000, 1768.219, 0.000, 9917.440],
[117.098, 421.646, 95.128, 272.237, 479.340, 208.333, 0.000, 1768.219, 0.000, 9889.495],
[117.098, 421.646, 729.561, 272.237, 0.000, 1865.791, 0.000, 6189.948, 0.000, 20064.523],
[708.171, 421.646, 729.561, 272.237, 0.000, 1865.791, 0.000, 2377.527, 0.000, 21124.484],
[708.171, 421.646, 729.561, 272.237, 0.000, 1865.791, 0.000, 2377.527, 0.000, 20827.077],
[708.171, 421.646, 729.561, 272.237, 0.000, 1865.791, 0.000, 2377.527, 0.000, 20396.124],
[708.171, 421.646, 729.561, 272.237, 0.000, 1865.791, 0.000, 2377.527, 0.000, 19856.445],
[708.171, 421.646, 729.561, 272.237, 0.000, 1865.791, 0.000, 2377.527, 0.000, 20714.156],
[708.171, 421.646, 729.561, 272.237, 0.000, 1865.791, 0.000, 2377.527, 0.000, 19971.485],
[708.171, 421.646, 729.561, 272.237, 0.000, 1865.791, 0.000, 2377.527, 0.000, 20733.948],
[708.171, 421.646, 729.561, 272.237, 0.000, 1865.791, 0.000, 2377.527, 0.000, 20938.903],
[708.171, 421.646, 729.561, 272.237, 0.000, 1865.791, 0.000, 2377.527, 0.000, 21660.772],
[708.171, 421.646, 729.561, 272.237, 0.000, 1865.791, 0.000, 2377.527, 0.000, 21265.298],
[708.171, 421.646, 729.561, 272.237, 0.000, 1865.791, 0.000, 2377.527, 0.000, 20684.378],
[1055.763, 421.646, 729.561, 272.237, 0.000, 1865.791, 0.000, 0.000, 0.000, 21754.770],
[1055.763, 421.646, 729.561, 272.237, 0.000, 1865.791, 0.000, 0.000, 0.000, 21775.215],
[1055.763, 421.646, 729.561, 272.237, 0.000, 1865.791, 0.000, 0.000, 0.000, 21801.488],
[1055.763, 421.646, 729.561, 272.237, 0.000, 932.896, 0.000, 1996.397, 0.000, 21235.427],
[1055.763, 421.646, 729.561, 272.237, 0.000, 932.896, 0.000, 1996.397, 0.000, 21466.714],
[1055.763, 421.646, 729.561, 272.237, 0.000, 932.896, 0.000, 1996.397, 0.000, 20717.431],
[1055.763, 421.646, 729.561, 272.237, 0.000, 932.896, 0.000, 1996.397, 0.000, 21294.450],
[1055.763, 421.646, 729.561, 272.237, 0.000, 932.896, 0.000, 1996.397, 0.000, 22100.247],
[1055.763, 740.051, 729.561, 272.237, 0.000, 932.896, 0.000, 0.000, 0.000, 21802.552],
[1055.763, 740.051, 729.561, 272.237, 0.000, 932.896, 0.000, 0.000, 0.000, 21593.608],
[1055.763, 740.051, 729.561, 272.237, 0.000, 932.896, 0.000, 0.000, 0.000, 21840.028],
[1055.763, 740.051, 729.561, 272.237, 0.000, 932.896, 0.000, 0.000, 0.000, 22907.725],
[1055.763, 740.051, 729.561, 272.237, 0.000, 932.896, 0.000, 0.000, 0.000, 23325.945],
[1055.763, 740.051, 729.561, 272.237, 0.000, 932.896, 0.000, 0.000, 0.000, 22291.942],
[1055.763, 740.051, 729.561, 272.237, 0.000, 932.896, 0.000, 0.000, 0.000, 23053.050],
[1055.763, 740.051, 729.561, 272.237, 0.000, 932.896, 0.000, 0.000, 0.000, 23260.084],
[1055.763, 740.051, 729.561, 272.237, 0.000, 932.896, 0.000, 0.000, 0.000, 22176.244],
[1055.763, 740.051, 729.561, 272.237, 0.000, 932.896, 0.000, 0.000, 0.000, 21859.297],
[1055.763, 740.051, 729.561, 272.237, 1706.748, 932.896, 0.000, 5221.105, 0.000, 31769.617],
[0.000, 740.051, 729.561, 580.813, 1706.748, 2141.485, 0.000, 6313.462, 0.000, 31389.961],
[0.000, 740.051, 729.561, 580.813, 1706.748, 2141.485, 0.000, 6313.462, 0.000, 31327.498],
[962.418, 740.051, 729.561, 580.813, 1706.748, 2141.485, 0.000, 0.000, 0.000, 32647.140],
[962.418, 740.051, 729.561, 580.813, 1706.748, 2141.485, 0.000, 0.000, 0.000, 32170.095],
[962.418, 740.051, 729.561, 580.813, 1706.748, 2141.485, 0.000, 0.000, 0.000, 32577.742],
[962.418, 740.051, 729.561, 580.813, 1706.748, 2141.485, 0.000, 0.000, 0.000, 33905.444],
[962.418, 740.051, 729.561, 580.813, 1706.748, 2141.485, 0.000, 0.000, 0.000, 35414.492],
[962.418, 740.051, 729.561, 580.813, 1706.748, 2141.485, 0.000, 0.000, 0.000, 36082.120],
[962.418, 740.051, 729.561, 580.813, 1706.748, 2141.485, 0.000, 0.000, 0.000, 35872.293],
[962.418, 740.051, 729.561, 580.813, 1706.748, 2141.485, 0.000, 0.000, 0.000, 34558.132],
[962.418, 740.051, 729.561, 0.000, 1706.748, 0.000, 0.000, 9177.053, 0.000, 33778.138],
[962.418, 740.051, 729.561, 0.000, 1706.748, 0.000, 0.000, 9177.053, 0.000, 34213.578],
[962.418, 740.051, 729.561, 0.000, 1706.748, 0.000, 0.000, 9177.053, 0.000, 35345.791],
[962.418, 740.051, 729.561, 0.000, 1706.748, 0.000, 0.000, 9177.053, 0.000, 34288.014],
[962.418, 740.051, 729.561, 0.000, 1706.748, 0.000, 0.000, 9177.053, 0.000, 34604.406],
[962.418, 740.051, 729.561, 0.000, 1706.748, 0.000, 0.000, 9177.053, 0.000, 34806.850],
[962.418, 740.051, 729.561, 0.000, 1706.748, 0.000, 0.000, 9177.053, 0.000, 34012.232],
[962.418, 740.051, 729.561, 0.000, 1706.748, 0.000, 0.000, 9177.053, 0.000, 33681.345],
[192.484, 740.051, 729.561, 0.000, 1706.748, 0.000, 0.000, 13958.345, 0.000, 33540.463],
[192.484, 740.051, 729.561, 0.000, 1706.748, 0.000, 0.000, 13958.345, 0.000, 34574.280],
[192.484, 1127.221, 729.561, 0.000, 1706.748, 0.000, 0.000, 10500.917, 0.000, 34516.781],
[192.484, 1127.221, 729.561, 0.000, 1706.748, 0.000, 0.000, 10500.917, 0.000, 35134.412],
[192.484, 1127.221, 729.561, 0.000, 1706.748, 0.000, 0.000, 10500.917, 0.000, 36266.530],
[192.484, 1127.221, 729.561, 0.000, 1706.748, 0.000, 0.000, 10500.917, 0.000, 37864.376],
[192.484, 1127.221, 729.561, 0.000, 1706.748, 0.000, 0.000, 10500.917, 0.000, 38642.633],
[192.484, 1127.221, 729.561, 0.000, 1706.748, 0.000, 0.000, 10500.917, 0.000, 38454.227],
[192.484, 1127.221, 729.561, 0.000, 0.000, 0.000, 1339.869, 15871.934, 0.000, 38982.227],
[192.484, 1127.221, 729.561, 0.000, 0.000, 0.000, 1339.869, 15871.934, 0.000, 39016.154],
[192.484, 0.000, 729.561, 0.000, 0.000, 0.000, 1339.869, 27764.114, 0.000, 38759.803],
[192.484, 0.000, 729.561, 0.000, 0.000, 0.000, 1339.869, 27764.114, 0.000, 39217.182],
[192.484, 0.000, 729.561, 0.000, 0.000, 0.000, 1339.869, 27764.114, 0.000, 39439.690],
[192.484, 0.000, 729.561, 0.000, 0.000, 0.000, 1339.869, 27764.114, 0.000, 39454.081],
[192.484, 0.000, 729.561, 0.000, 0.000, 0.000, 1339.869, 27764.114, 0.000, 39083.341],
[192.484, 0.000, 729.561, 0.000, 0.000, 0.000, 1339.869, 27764.114, 0.000, 38968.694],
[192.484, 0.000, 729.561, 0.000, 0.000, 0.000, 1339.869, 27764.114, 0.000, 39532.030],
[192.484, 0.000, 729.561, 0.000, 0.000, 0.000, 1339.869, 27764.114, 0.000, 38675.507],
[0.000, 0.000, 1560.697, 0.000, 0.000, 0.000, 1339.869, 23269.751, 0.000, 39013.741],
[0.000, 0.000, 1560.697, 0.000, 0.000, 0.000, 1339.869, 23269.751, 0.000, 38497.668],
[0.000, 0.000, 1560.697, 0.000, 0.000, 0.000, 1339.869, 23269.751, 0.000, 38042.410]])
        # Simulated VS signal backtest results
        # VS signal, sell first then buy, delivery period is 0 days
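        # Column layout of the 10-column result arrays (inferred from how the tests
        # below index them): columns 0-6 hold the positions of the seven stocks and
        # column 7 holds cash (e.g. pt_res_sb00[i][0:7] and pt_res_sb00[i][7]);
        # columns 8 and 9 are assumed to be the accumulated fee and the total
        # portfolio value, respectively.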
self.vs_res_sb00 = np.array(
[[0.0000, 0.0000, 0.0000, 0.0000, 500.0000, 0.0000, 0.0000, 7750.0000, 0.0000, 10000.0000],
[0.0000, 0.0000, 0.0000, 0.0000, 500.0000, 0.0000, 0.0000, 7750.0000, 0.0000, 9925.0000],
[0.0000, 0.0000, 0.0000, 0.0000, 500.0000, 300.0000, 300.0000, 4954.0000, 0.0000, 9785.0000],
[400.0000, 400.0000, 0.0000, 0.0000, 500.0000, 300.0000, 300.0000, 878.0000, 0.0000, 9666.0000],
[400.0000, 400.0000, 173.1755, 0.0000, 500.0000, 300.0000, 300.0000, 0.0000, 0.0000, 9731.0000],
[400.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 1592.0000, 0.0000, 9830.9270],
[400.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 1592.0000, 0.0000, 9785.8540],
[400.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 1592.0000, 0.0000, 9614.3412],
[400.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 1592.0000, 0.0000, 9303.1953],
[400.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 1592.0000, 0.0000, 8834.4398],
[400.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 1592.0000, 0.0000, 8712.7554],
[400.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 1592.0000, 0.0000, 8717.9507],
[400.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 1592.0000, 0.0000, 9079.1479],
[200.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 2598.0000, 0.0000, 9166.0276],
[200.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 2598.0000, 0.0000, 9023.6607],
[200.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 2598.0000, 0.0000, 9291.6864],
[200.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 2598.0000, 0.0000, 9411.6371],
[200.0000, 400.0000, 0.0000, 0.0000, 100.0000, 300.0000, 300.0000, 3619.7357, 0.0000, 9706.7357],
[200.0000, 400.0000, 0.0000, 0.0000, 100.0000, 300.0000, 300.0000, 3619.7357, 0.0000, 9822.7357],
[200.0000, 400.0000, 0.0000, 0.0000, 100.0000, 300.0000, 300.0000, 3619.7357, 0.0000, 9986.7357],
[200.0000, 400.0000, 0.0000, 0.0000, 100.0000, 300.0000, 300.0000, 3619.7357, 0.0000, 9805.7357],
[200.0000, 400.0000, 0.0000, 0.0000, 100.0000, 300.0000, 0.0000, 4993.7357, 0.0000, 9704.7357],
[200.0000, 400.0000, 0.0000, 0.0000, 600.0000, 300.0000, 0.0000, 2048.7357, 0.0000, 9567.7357],
[200.0000, 400.0000, 0.0000, 0.0000, 600.0000, 300.0000, 0.0000, 2048.7357, 0.0000, 9209.7357],
[200.0000, 400.0000, 0.0000, 0.0000, 600.0000, 300.0000, 0.0000, 2048.7357, 0.0000, 9407.7357],
[0.0000, 400.0000, 0.0000, 300.0000, 600.0000, 300.0000, 0.0000, 1779.7357, 0.0000, 9329.7357],
[0.0000, 400.0000, 0.0000, 300.0000, 600.0000, 300.0000, 0.0000, 1779.7357, 0.0000, 9545.7357],
[0.0000, 400.0000, 0.0000, 300.0000, 600.0000, 300.0000, 0.0000, 1779.7357, 0.0000, 9652.7357],
[0.0000, 400.0000, 0.0000, 300.0000, 600.0000, 300.0000, 0.0000, 1779.7357, 0.0000, 9414.7357],
[0.0000, 400.0000, 0.0000, 300.0000, 600.0000, 300.0000, 0.0000, 1779.7357, 0.0000, 9367.7357],
[0.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 9319.7357, 0.0000, 19556.7357],
[500.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 6094.7357, 0.0000, 20094.7357],
[500.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 6094.7357, 0.0000, 19849.7357],
[500.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 6094.7357, 0.0000, 19802.7357],
[500.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 6094.7357, 0.0000, 19487.7357],
[500.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 6094.7357, 0.0000, 19749.7357],
[500.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 6094.7357, 0.0000, 19392.7357],
[500.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 6094.7357, 0.0000, 19671.7357],
[500.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 6094.7357, 0.0000, 19756.7357],
[500.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 6094.7357, 0.0000, 20111.7357],
[500.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 6094.7357, 0.0000, 19867.7357],
[500.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 6094.7357, 0.0000, 19775.7357],
[1100.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 1990.7357, 0.0000, 20314.7357],
[1100.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 1990.7357, 0.0000, 20310.7357],
[1100.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 1990.7357, 0.0000, 20253.7357],
[1100.0000, 400.0000, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 1946.7357, 0.0000, 20044.7357],
[1100.0000, 400.0000, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 1946.7357, 0.0000, 20495.7357],
[1100.0000, 400.0000, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 1946.7357, 0.0000, 19798.7357],
[1100.0000, 400.0000, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 1946.7357, 0.0000, 20103.7357],
[1100.0000, 400.0000, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 1946.7357, 0.0000, 20864.7357],
[1100.0000, 710.4842, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 0.0000, 0.0000, 20425.7357],
[1100.0000, 710.4842, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 0.0000, 0.0000, 20137.8405],
[1100.0000, 710.4842, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 0.0000, 0.0000, 20711.3567],
[1100.0000, 710.4842, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 0.0000, 0.0000, 21470.3891],
[1100.0000, 710.4842, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 0.0000, 0.0000, 21902.9538],
[1100.0000, 710.4842, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 0.0000, 0.0000, 20962.9538],
[1100.0000, 710.4842, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 0.0000, 0.0000, 21833.5184],
[1100.0000, 710.4842, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 0.0000, 0.0000, 21941.8169],
[1100.0000, 710.4842, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 0.0000, 0.0000, 21278.5184],
[1100.0000, 710.4842, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 0.0000, 0.0000, 21224.4700],
[1100.0000, 710.4842, 400.0000, 300.0000, 600.0000, 500.0000, 600.0000, 9160.0000, 0.0000, 31225.2119],
[600.0000, 710.4842, 400.0000, 800.0000, 600.0000, 700.0000, 600.0000, 7488.0000, 0.0000, 30894.5748],
[600.0000, 710.4842, 400.0000, 800.0000, 600.0000, 700.0000, 600.0000, 7488.0000, 0.0000, 30764.3811],
[1100.0000, 710.4842, 400.0000, 800.0000, 600.0000, 700.0000, 600.0000, 4208.0000, 0.0000, 31815.5828],
[1100.0000, 710.4842, 400.0000, 800.0000, 600.0000, 700.0000, 600.0000, 4208.0000, 0.0000, 31615.4215],
[1100.0000, 710.4842, 400.0000, 800.0000, 600.0000, 700.0000, 600.0000, 4208.0000, 0.0000, 32486.1394],
[1100.0000, 710.4842, 400.0000, 800.0000, 600.0000, 700.0000, 600.0000, 4208.0000, 0.0000, 33591.2847],
[1100.0000, 710.4842, 400.0000, 800.0000, 600.0000, 700.0000, 600.0000, 4208.0000, 0.0000, 34056.5428],
[1100.0000, 710.4842, 400.0000, 800.0000, 600.0000, 700.0000, 600.0000, 4208.0000, 0.0000, 34756.4863],
[1100.0000, 710.4842, 400.0000, 800.0000, 600.0000, 700.0000, 600.0000, 4208.0000, 0.0000, 34445.5428],
[1100.0000, 710.4842, 400.0000, 800.0000, 600.0000, 700.0000, 600.0000, 4208.0000, 0.0000, 34433.9541],
[1100.0000, 710.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11346.0000, 0.0000,
33870.4703],
[1100.0000, 710.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11346.0000, 0.0000,
34014.3010],
[1100.0000, 710.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11346.0000, 0.0000,
34680.5671],
[1100.0000, 710.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11346.0000, 0.0000,
33890.9945],
[1100.0000, 710.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11346.0000, 0.0000,
34004.6640],
[1100.0000, 710.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11346.0000, 0.0000,
34127.7768],
[1100.0000, 710.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11346.0000, 0.0000,
33421.1638],
[1100.0000, 710.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11346.0000, 0.0000,
33120.9057],
[700.0000, 710.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 13830.0000, 0.0000, 32613.3171],
[700.0000, 710.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 13830.0000, 0.0000, 33168.1558],
[700.0000, 1010.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11151.0000, 0.0000,
33504.6236],
[700.0000, 1010.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11151.0000, 0.0000,
33652.1318],
[700.0000, 1010.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11151.0000, 0.0000,
34680.4867],
[700.0000, 1010.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11151.0000, 0.0000,
35557.5191],
[700.0000, 1010.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11151.0000, 0.0000,
35669.7128],
[700.0000, 1010.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11151.0000, 0.0000,
35211.4466],
[700.0000, 1010.4842, 400.0000, 100.0000, 0.0000, 100.0000, 900.0000, 13530.0000, 0.0000, 35550.6079],
[700.0000, 1010.4842, 400.0000, 100.0000, 0.0000, 100.0000, 900.0000, 13530.0000, 0.0000, 35711.6563],
[700.0000, 710.4842, 400.0000, 100.0000, 0.0000, 100.0000, 900.0000, 16695.0000, 0.0000, 35682.6079],
[700.0000, 710.4842, 400.0000, 100.0000, 0.0000, 100.0000, 900.0000, 16695.0000, 0.0000, 35880.8336],
[700.0000, 710.4842, 400.0000, 100.0000, 0.0000, 100.0000, 900.0000, 16695.0000, 0.0000, 36249.8740],
[700.0000, 710.4842, 400.0000, 100.0000, 0.0000, 100.0000, 900.0000, 16695.0000, 0.0000, 36071.6159],
[700.0000, 710.4842, 400.0000, 100.0000, 0.0000, 100.0000, 900.0000, 16695.0000, 0.0000, 35846.1562],
[700.0000, 710.4842, 400.0000, 100.0000, 0.0000, 100.0000, 900.0000, 16695.0000, 0.0000, 35773.3578],
[700.0000, 710.4842, 400.0000, 100.0000, 0.0000, 100.0000, 900.0000, 16695.0000, 0.0000, 36274.9465],
[700.0000, 710.4842, 400.0000, 100.0000, 0.0000, 100.0000, 900.0000, 16695.0000, 0.0000, 35739.3094],
[500.0000, 710.4842, 1100.0000, 100.0000, 0.0000, 100.0000, 900.0000, 13167.0000, 0.0000, 36135.0917],
[500.0000, 710.4842, 1100.0000, 100.0000, 0.0000, 100.0000, 900.0000, 13167.0000, 0.0000, 35286.5835],
[500.0000, 710.4842, 1100.0000, 100.0000, 0.0000, 100.0000, 900.0000, 13167.0000, 0.0000, 35081.3658]])
        # VS signal, buy first then sell, delivery period is 0 days
self.vs_res_bs00 = np.array(
[[0.0000, 0.0000, 0.0000, 0.0000, 500.0000, 0.0000, 0.0000, 7750, 0.0000, 10000],
[0.0000, 0.0000, 0.0000, 0.0000, 500.0000, 0.0000, 0.0000, 7750, 0.0000, 9925],
[0.0000, 0.0000, 0.0000, 0.0000, 500.0000, 300.0000, 300.0000, 4954, 0.0000, 9785],
[400.0000, 400.0000, 0.0000, 0.0000, 500.0000, 300.0000, 300.0000, 878, 0.0000, 9666],
[400.0000, 400.0000, 173.1755, 0.0000, 500.0000, 300.0000, 300.0000, 0, 0.0000, 9731],
[400.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 1592, 0.0000, 9830.927022],
[400.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 1592, 0.0000, 9785.854043],
[400.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 1592, 0.0000, 9614.341223],
[400.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 1592, 0.0000, 9303.195266],
[400.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 1592, 0.0000, 8834.439842],
[400.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 1592, 0.0000, 8712.755424],
[400.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 1592, 0.0000, 8717.95069],
[400.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 1592, 0.0000, 9079.147929],
[200.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 2598, 0.0000, 9166.027613],
[200.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 2598, 0.0000, 9023.66075],
[200.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 2598, 0.0000, 9291.686391],
[200.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 2598, 0.0000, 9411.637081],
[200.0000, 400.0000, 0.0000, 0.0000, 100.0000, 300.0000, 300.0000, 3619.7357, 0.0000, 9706.7357],
[200.0000, 400.0000, 0.0000, 0.0000, 100.0000, 300.0000, 300.0000, 3619.7357, 0.0000, 9822.7357],
[200.0000, 400.0000, 0.0000, 0.0000, 100.0000, 300.0000, 300.0000, 3619.7357, 0.0000, 9986.7357],
[200.0000, 400.0000, 0.0000, 0.0000, 100.0000, 300.0000, 300.0000, 3619.7357, 0.0000, 9805.7357],
[200.0000, 400.0000, 0.0000, 0.0000, 100.0000, 300.0000, 0.0000, 4993.7357, 0.0000, 9704.7357],
[200.0000, 400.0000, 0.0000, 0.0000, 600.0000, 300.0000, 0.0000, 2048.7357, 0.0000, 9567.7357],
[200.0000, 400.0000, 0.0000, 0.0000, 600.0000, 300.0000, 0.0000, 2048.7357, 0.0000, 9209.7357],
[200.0000, 400.0000, 0.0000, 0.0000, 600.0000, 300.0000, 0.0000, 2048.7357, 0.0000, 9407.7357],
[0.0000, 400.0000, 0.0000, 300.0000, 600.0000, 300.0000, 0.0000, 1779.7357, 0.0000, 9329.7357],
[0.0000, 400.0000, 0.0000, 300.0000, 600.0000, 300.0000, 0.0000, 1779.7357, 0.0000, 9545.7357],
[0.0000, 400.0000, 0.0000, 300.0000, 600.0000, 300.0000, 0.0000, 1779.7357, 0.0000, 9652.7357],
[0.0000, 400.0000, 0.0000, 300.0000, 600.0000, 300.0000, 0.0000, 1779.7357, 0.0000, 9414.7357],
[0.0000, 400.0000, 0.0000, 300.0000, 600.0000, 300.0000, 0.0000, 1779.7357, 0.0000, 9367.7357],
[0.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 9319.7357, 0.0000, 19556.7357],
[500.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 6094.7357, 0.0000, 20094.7357],
[500.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 6094.7357, 0.0000, 19849.7357],
[500.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 6094.7357, 0.0000, 19802.7357],
[500.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 6094.7357, 0.0000, 19487.7357],
[500.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 6094.7357, 0.0000, 19749.7357],
[500.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 6094.7357, 0.0000, 19392.7357],
[500.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 6094.7357, 0.0000, 19671.7357],
[500.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 6094.7357, 0.0000, 19756.7357],
[500.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 6094.7357, 0.0000, 20111.7357],
[500.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 6094.7357, 0.0000, 19867.7357],
[500.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 6094.7357, 0.0000, 19775.7357],
[1100.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 1990.7357, 0.0000, 20314.7357],
[1100.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 1990.7357, 0.0000, 20310.7357],
[1100.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 1990.7357, 0.0000, 20253.7357],
[1100.0000, 400.0000, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 1946.7357, 0.0000, 20044.7357],
[1100.0000, 400.0000, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 1946.7357, 0.0000, 20495.7357],
[1100.0000, 400.0000, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 1946.7357, 0.0000, 19798.7357],
[1100.0000, 400.0000, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 1946.7357, 0.0000, 20103.7357],
[1100.0000, 400.0000, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 1946.7357, 0.0000, 20864.7357],
[1100.0000, 710.4842, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 0, 0.0000, 20425.7357],
[1100.0000, 710.4842, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 0, 0.0000, 20137.84054],
[1100.0000, 710.4842, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 0, 0.0000, 20711.35674],
[1100.0000, 710.4842, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 0, 0.0000, 21470.38914],
[1100.0000, 710.4842, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 0, 0.0000, 21902.95375],
[1100.0000, 710.4842, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 0, 0.0000, 20962.95375],
[1100.0000, 710.4842, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 0, 0.0000, 21833.51837],
[1100.0000, 710.4842, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 0, 0.0000, 21941.81688],
[1100.0000, 710.4842, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 0, 0.0000, 21278.51837],
[1100.0000, 710.4842, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 0, 0.0000, 21224.46995],
[1100.0000, 710.4842, 400.0000, 300.0000, 600.0000, 500.0000, 600.0000, 9160, 0.0000, 31225.21185],
[600.0000, 710.4842, 400.0000, 800.0000, 600.0000, 700.0000, 600.0000, 7488, 0.0000, 30894.57479],
[600.0000, 710.4842, 400.0000, 800.0000, 600.0000, 700.0000, 600.0000, 7488, 0.0000, 30764.38113],
[1100.0000, 710.4842, 400.0000, 800.0000, 600.0000, 700.0000, 600.0000, 4208, 0.0000, 31815.5828],
[1100.0000, 710.4842, 400.0000, 800.0000, 600.0000, 700.0000, 600.0000, 4208, 0.0000, 31615.42154],
[1100.0000, 710.4842, 400.0000, 800.0000, 600.0000, 700.0000, 600.0000, 4208, 0.0000, 32486.13941],
[1100.0000, 710.4842, 400.0000, 800.0000, 600.0000, 700.0000, 600.0000, 4208, 0.0000, 33591.28466],
[1100.0000, 710.4842, 400.0000, 800.0000, 600.0000, 700.0000, 600.0000, 4208, 0.0000, 34056.54276],
[1100.0000, 710.4842, 400.0000, 800.0000, 600.0000, 700.0000, 600.0000, 4208, 0.0000, 34756.48633],
[1100.0000, 710.4842, 400.0000, 800.0000, 600.0000, 700.0000, 600.0000, 4208, 0.0000, 34445.54276],
[1100.0000, 710.4842, 400.0000, 800.0000, 600.0000, 700.0000, 600.0000, 4208, 0.0000, 34433.95412],
[1100.0000, 710.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11346, 0.0000, 33870.47032],
[1100.0000, 710.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11346, 0.0000, 34014.30104],
[1100.0000, 710.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11346, 0.0000, 34680.56715],
[1100.0000, 710.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11346, 0.0000, 33890.99452],
[1100.0000, 710.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11346, 0.0000, 34004.66398],
[1100.0000, 710.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11346, 0.0000, 34127.77683],
[1100.0000, 710.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11346, 0.0000, 33421.1638],
[1100.0000, 710.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11346, 0.0000, 33120.9057],
[700.0000, 710.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 13830, 0.0000, 32613.31706],
[700.0000, 710.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 13830, 0.0000, 33168.15579],
[700.0000, 1010.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11151, 0.0000, 33504.62357],
[700.0000, 1010.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11151, 0.0000, 33652.13176],
[700.0000, 1010.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11151, 0.0000, 34680.4867],
[700.0000, 1010.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11151, 0.0000, 35557.51909],
[700.0000, 1010.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11151, 0.0000, 35669.71276],
[700.0000, 1010.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11151, 0.0000, 35211.44665],
[700.0000, 1010.4842, 400.0000, 100.0000, 0.0000, 100.0000, 900.0000, 13530, 0.0000, 35550.60792],
[700.0000, 1010.4842, 400.0000, 100.0000, 0.0000, 100.0000, 900.0000, 13530, 0.0000, 35711.65633],
[700.0000, 710.4842, 400.0000, 100.0000, 0.0000, 100.0000, 900.0000, 16695, 0.0000, 35682.60792],
[700.0000, 710.4842, 400.0000, 100.0000, 0.0000, 100.0000, 900.0000, 16695, 0.0000, 35880.83362],
[700.0000, 710.4842, 400.0000, 100.0000, 0.0000, 100.0000, 900.0000, 16695, 0.0000, 36249.87403],
[700.0000, 710.4842, 400.0000, 100.0000, 0.0000, 100.0000, 900.0000, 16695, 0.0000, 36071.61593],
[700.0000, 710.4842, 400.0000, 100.0000, 0.0000, 100.0000, 900.0000, 16695, 0.0000, 35846.15615],
[700.0000, 710.4842, 400.0000, 100.0000, 0.0000, 100.0000, 900.0000, 16695, 0.0000, 35773.35783],
[700.0000, 710.4842, 400.0000, 100.0000, 0.0000, 100.0000, 900.0000, 16695, 0.0000, 36274.94647],
[700.0000, 710.4842, 400.0000, 100.0000, 0.0000, 100.0000, 900.0000, 16695, 0.0000, 35739.30941],
[500.0000, 710.4842, 1100.0000, 100.0000, 0.0000, 100.0000, 900.0000, 13167, 0.0000, 36135.09172],
[500.0000, 710.4842, 1100.0000, 100.0000, 0.0000, 100.0000, 900.0000, 13167, 0.0000, 35286.58353],
[500.0000, 710.4842, 1100.0000, 100.0000, 0.0000, 100.0000, 900.0000, 13167, 0.0000, 35081.36584]])
        # VS signal, sell first then buy, delivery period is 2 days (stock) and 1 day (cash)
self.vs_res_sb20 = np.array(
[[0.000, 0.000, 0.000, 0.000, 500.000, 0.000, 0.000, 7750.000, 0.000, 10000.000],
[0.000, 0.000, 0.000, 0.000, 500.000, 0.000, 0.000, 7750.000, 0.000, 9925.000],
[0.000, 0.000, 0.000, 0.000, 500.000, 300.000, 300.000, 4954.000, 0.000, 9785.000],
[400.000, 400.000, 0.000, 0.000, 500.000, 300.000, 300.000, 878.000, 0.000, 9666.000],
[400.000, 400.000, 173.176, 0.000, 500.000, 300.000, 300.000, 0.000, 0.000, 9731.000],
[400.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 1592.000, 0.000, 9830.927],
[400.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 1592.000, 0.000, 9785.854],
[400.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 1592.000, 0.000, 9614.341],
[400.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 1592.000, 0.000, 9303.195],
[400.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 1592.000, 0.000, 8834.440],
[400.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 1592.000, 0.000, 8712.755],
[400.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 1592.000, 0.000, 8717.951],
[400.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 1592.000, 0.000, 9079.148],
[200.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 2598.000, 0.000, 9166.028],
[200.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 2598.000, 0.000, 9023.661],
[200.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 2598.000, 0.000, 9291.686],
[200.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 2598.000, 0.000, 9411.637],
[200.000, 400.000, 0.000, 0.000, 100.000, 300.000, 300.000, 3619.736, 0.000, 9706.736],
[200.000, 400.000, 0.000, 0.000, 100.000, 300.000, 300.000, 3619.736, 0.000, 9822.736],
[200.000, 400.000, 0.000, 0.000, 100.000, 300.000, 300.000, 3619.736, 0.000, 9986.736],
[200.000, 400.000, 0.000, 0.000, 100.000, 300.000, 300.000, 3619.736, 0.000, 9805.736],
[200.000, 400.000, 0.000, 0.000, 100.000, 300.000, 0.000, 4993.736, 0.000, 9704.736],
[200.000, 400.000, 0.000, 0.000, 600.000, 300.000, 0.000, 2048.736, 0.000, 9567.736],
[200.000, 400.000, 0.000, 0.000, 600.000, 300.000, 0.000, 2048.736, 0.000, 9209.736],
[200.000, 400.000, 0.000, 0.000, 600.000, 300.000, 0.000, 2048.736, 0.000, 9407.736],
[0.000, 400.000, 0.000, 300.000, 600.000, 300.000, 0.000, 1779.736, 0.000, 9329.736],
[0.000, 400.000, 0.000, 300.000, 600.000, 300.000, 0.000, 1779.736, 0.000, 9545.736],
[0.000, 400.000, 0.000, 300.000, 600.000, 300.000, 0.000, 1779.736, 0.000, 9652.736],
[0.000, 400.000, 0.000, 300.000, 600.000, 300.000, 0.000, 1779.736, 0.000, 9414.736],
[0.000, 400.000, 0.000, 300.000, 600.000, 300.000, 0.000, 1779.736, 0.000, 9367.736],
[0.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 9319.736, 0.000, 19556.736],
[500.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 6094.736, 0.000, 20094.736],
[500.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 6094.736, 0.000, 19849.736],
[500.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 6094.736, 0.000, 19802.736],
[500.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 6094.736, 0.000, 19487.736],
[500.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 6094.736, 0.000, 19749.736],
[500.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 6094.736, 0.000, 19392.736],
[500.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 6094.736, 0.000, 19671.736],
[500.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 6094.736, 0.000, 19756.736],
[500.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 6094.736, 0.000, 20111.736],
[500.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 6094.736, 0.000, 19867.736],
[500.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 6094.736, 0.000, 19775.736],
[1100.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 1990.736, 0.000, 20314.736],
[1100.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 1990.736, 0.000, 20310.736],
[1100.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 1990.736, 0.000, 20253.736],
[1100.000, 400.000, 400.000, 300.000, 300.000, 500.000, 600.000, 1946.736, 0.000, 20044.736],
[1100.000, 400.000, 400.000, 300.000, 300.000, 500.000, 600.000, 1946.736, 0.000, 20495.736],
[1100.000, 400.000, 400.000, 300.000, 300.000, 500.000, 600.000, 1946.736, 0.000, 19798.736],
[1100.000, 400.000, 400.000, 300.000, 300.000, 500.000, 600.000, 1946.736, 0.000, 20103.736],
[1100.000, 400.000, 400.000, 300.000, 300.000, 500.000, 600.000, 1946.736, 0.000, 20864.736],
[1100.000, 710.484, 400.000, 300.000, 300.000, 500.000, 600.000, 0.000, 0.000, 20425.736],
[1100.000, 710.484, 400.000, 300.000, 300.000, 500.000, 600.000, 0.000, 0.000, 20137.841],
[1100.000, 710.484, 400.000, 300.000, 300.000, 500.000, 600.000, 0.000, 0.000, 20711.357],
[1100.000, 710.484, 400.000, 300.000, 300.000, 500.000, 600.000, 0.000, 0.000, 21470.389],
[1100.000, 710.484, 400.000, 300.000, 300.000, 500.000, 600.000, 0.000, 0.000, 21902.954],
[1100.000, 710.484, 400.000, 300.000, 300.000, 500.000, 600.000, 0.000, 0.000, 20962.954],
[1100.000, 710.484, 400.000, 300.000, 300.000, 500.000, 600.000, 0.000, 0.000, 21833.518],
[1100.000, 710.484, 400.000, 300.000, 300.000, 500.000, 600.000, 0.000, 0.000, 21941.817],
[1100.000, 710.484, 400.000, 300.000, 300.000, 500.000, 600.000, 0.000, 0.000, 21278.518],
[1100.000, 710.484, 400.000, 300.000, 300.000, 500.000, 600.000, 0.000, 0.000, 21224.470],
[1100.000, 710.484, 400.000, 300.000, 600.000, 500.000, 600.000, 9160.000, 0.000, 31225.212],
[600.000, 710.484, 400.000, 800.000, 600.000, 700.000, 600.000, 7488.000, 0.000, 30894.575],
[600.000, 710.484, 400.000, 800.000, 600.000, 700.000, 600.000, 7488.000, 0.000, 30764.381],
[1100.000, 710.484, 400.000, 800.000, 600.000, 700.000, 600.000, 4208.000, 0.000, 31815.583],
[1100.000, 710.484, 400.000, 800.000, 600.000, 700.000, 600.000, 4208.000, 0.000, 31615.422],
[1100.000, 710.484, 400.000, 800.000, 600.000, 700.000, 600.000, 4208.000, 0.000, 32486.139],
[1100.000, 710.484, 400.000, 800.000, 600.000, 700.000, 600.000, 4208.000, 0.000, 33591.285],
[1100.000, 710.484, 400.000, 800.000, 600.000, 700.000, 600.000, 4208.000, 0.000, 34056.543],
[1100.000, 710.484, 400.000, 800.000, 600.000, 700.000, 600.000, 4208.000, 0.000, 34756.486],
[1100.000, 710.484, 400.000, 800.000, 600.000, 700.000, 600.000, 4208.000, 0.000, 34445.543],
[1100.000, 710.484, 400.000, 800.000, 600.000, 700.000, 600.000, 4208.000, 0.000, 34433.954],
[1100.000, 710.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11346.000, 0.000, 33870.470],
[1100.000, 710.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11346.000, 0.000, 34014.301],
[1100.000, 710.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11346.000, 0.000, 34680.567],
[1100.000, 710.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11346.000, 0.000, 33890.995],
[1100.000, 710.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11346.000, 0.000, 34004.664],
[1100.000, 710.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11346.000, 0.000, 34127.777],
[1100.000, 710.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11346.000, 0.000, 33421.164],
[1100.000, 710.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11346.000, 0.000, 33120.906],
[700.000, 710.484, 400.000, 100.000, 600.000, 100.000, 600.000, 13830.000, 0.000, 32613.317],
[700.000, 710.484, 400.000, 100.000, 600.000, 100.000, 600.000, 13830.000, 0.000, 33168.156],
[700.000, 1010.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11151.000, 0.000, 33504.624],
[700.000, 1010.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11151.000, 0.000, 33652.132],
[700.000, 1010.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11151.000, 0.000, 34680.487],
[700.000, 1010.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11151.000, 0.000, 35557.519],
[700.000, 1010.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11151.000, 0.000, 35669.713],
[700.000, 1010.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11151.000, 0.000, 35211.447],
[700.000, 1010.484, 400.000, 100.000, 0.000, 100.000, 900.000, 13530.000, 0.000, 35550.608],
[700.000, 1010.484, 400.000, 100.000, 0.000, 100.000, 900.000, 13530.000, 0.000, 35711.656],
[700.000, 710.484, 400.000, 100.000, 0.000, 100.000, 900.000, 16695.000, 0.000, 35682.608],
[700.000, 710.484, 400.000, 100.000, 0.000, 100.000, 900.000, 16695.000, 0.000, 35880.834],
[700.000, 710.484, 400.000, 100.000, 0.000, 100.000, 900.000, 16695.000, 0.000, 36249.874],
[700.000, 710.484, 400.000, 100.000, 0.000, 100.000, 900.000, 16695.000, 0.000, 36071.616],
[700.000, 710.484, 400.000, 100.000, 0.000, 100.000, 900.000, 16695.000, 0.000, 35846.156],
[700.000, 710.484, 400.000, 100.000, 0.000, 100.000, 900.000, 16695.000, 0.000, 35773.358],
[700.000, 710.484, 400.000, 100.000, 0.000, 100.000, 900.000, 16695.000, 0.000, 36274.946],
[700.000, 710.484, 400.000, 100.000, 0.000, 100.000, 900.000, 16695.000, 0.000, 35739.309],
[500.000, 710.484, 1100.000, 100.000, 0.000, 100.000, 900.000, 13167.000, 0.000, 36135.092],
[500.000, 710.484, 1100.000, 100.000, 0.000, 100.000, 900.000, 13167.000, 0.000, 35286.584],
[500.000, 710.484, 1100.000, 100.000, 0.000, 100.000, 900.000, 13167.000, 0.000, 35081.366]])
        # VS signal, buy first then sell, delivery period is 2 days (stock) and 1 day (cash)
self.vs_res_bs21 = np.array(
[[0.000, 0.000, 0.000, 0.000, 500.000, 0.000, 0.000, 7750.000, 0.000, 10000.000],
[0.000, 0.000, 0.000, 0.000, 500.000, 0.000, 0.000, 7750.000, 0.000, 9925.000],
[0.000, 0.000, 0.000, 0.000, 500.000, 300.000, 300.000, 4954.000, 0.000, 9785.000],
[400.000, 400.000, 0.000, 0.000, 500.000, 300.000, 300.000, 878.000, 0.000, 9666.000],
[400.000, 400.000, 173.176, 0.000, 500.000, 300.000, 300.000, 0.000, 0.000, 9731.000],
[400.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 1592.000, 0.000, 9830.927],
[400.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 1592.000, 0.000, 9785.854],
[400.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 1592.000, 0.000, 9614.341],
[400.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 1592.000, 0.000, 9303.195],
[400.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 1592.000, 0.000, 8834.440],
[400.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 1592.000, 0.000, 8712.755],
[400.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 1592.000, 0.000, 8717.951],
[400.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 1592.000, 0.000, 9079.148],
[200.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 2598.000, 0.000, 9166.028],
[200.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 2598.000, 0.000, 9023.661],
[200.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 2598.000, 0.000, 9291.686],
[200.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 2598.000, 0.000, 9411.637],
[200.000, 400.000, 0.000, 0.000, 100.000, 300.000, 300.000, 3619.736, 0.000, 9706.736],
[200.000, 400.000, 0.000, 0.000, 100.000, 300.000, 300.000, 3619.736, 0.000, 9822.736],
[200.000, 400.000, 0.000, 0.000, 100.000, 300.000, 300.000, 3619.736, 0.000, 9986.736],
[200.000, 400.000, 0.000, 0.000, 100.000, 300.000, 300.000, 3619.736, 0.000, 9805.736],
[200.000, 400.000, 0.000, 0.000, 100.000, 300.000, 0.000, 4993.736, 0.000, 9704.736],
[200.000, 400.000, 0.000, 0.000, 600.000, 300.000, 0.000, 2048.736, 0.000, 9567.736],
[200.000, 400.000, 0.000, 0.000, 600.000, 300.000, 0.000, 2048.736, 0.000, 9209.736],
[200.000, 400.000, 0.000, 0.000, 600.000, 300.000, 0.000, 2048.736, 0.000, 9407.736],
[0.000, 400.000, 0.000, 300.000, 600.000, 300.000, 0.000, 1779.736, 0.000, 9329.736],
[0.000, 400.000, 0.000, 300.000, 600.000, 300.000, 0.000, 1779.736, 0.000, 9545.736],
[0.000, 400.000, 0.000, 300.000, 600.000, 300.000, 0.000, 1779.736, 0.000, 9652.736],
[0.000, 400.000, 0.000, 300.000, 600.000, 300.000, 0.000, 1779.736, 0.000, 9414.736],
[0.000, 400.000, 0.000, 300.000, 600.000, 300.000, 0.000, 1779.736, 0.000, 9367.736],
[0.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 9319.736, 0.000, 19556.736],
[500.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 6094.736, 0.000, 20094.736],
[500.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 6094.736, 0.000, 19849.736],
[500.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 6094.736, 0.000, 19802.736],
[500.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 6094.736, 0.000, 19487.736],
[500.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 6094.736, 0.000, 19749.736],
[500.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 6094.736, 0.000, 19392.736],
[500.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 6094.736, 0.000, 19671.736],
[500.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 6094.736, 0.000, 19756.736],
[500.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 6094.736, 0.000, 20111.736],
[500.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 6094.736, 0.000, 19867.736],
[500.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 6094.736, 0.000, 19775.736],
[1100.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 1990.736, 0.000, 20314.736],
[1100.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 1990.736, 0.000, 20310.736],
[1100.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 1990.736, 0.000, 20253.736],
[1100.000, 400.000, 400.000, 300.000, 300.000, 500.000, 600.000, 1946.736, 0.000, 20044.736],
[1100.000, 400.000, 400.000, 300.000, 300.000, 500.000, 600.000, 1946.736, 0.000, 20495.736],
[1100.000, 400.000, 400.000, 300.000, 300.000, 500.000, 600.000, 1946.736, 0.000, 19798.736],
[1100.000, 400.000, 400.000, 300.000, 300.000, 500.000, 600.000, 1946.736, 0.000, 20103.736],
[1100.000, 400.000, 400.000, 300.000, 300.000, 500.000, 600.000, 1946.736, 0.000, 20864.736],
[1100.000, 710.484, 400.000, 300.000, 300.000, 500.000, 600.000, 0.000, 0.000, 20425.736],
[1100.000, 710.484, 400.000, 300.000, 300.000, 500.000, 600.000, 0.000, 0.000, 20137.841],
[1100.000, 710.484, 400.000, 300.000, 300.000, 500.000, 600.000, 0.000, 0.000, 20711.357],
[1100.000, 710.484, 400.000, 300.000, 300.000, 500.000, 600.000, 0.000, 0.000, 21470.389],
[1100.000, 710.484, 400.000, 300.000, 300.000, 500.000, 600.000, 0.000, 0.000, 21902.954],
[1100.000, 710.484, 400.000, 300.000, 300.000, 500.000, 600.000, 0.000, 0.000, 20962.954],
[1100.000, 710.484, 400.000, 300.000, 300.000, 500.000, 600.000, 0.000, 0.000, 21833.518],
[1100.000, 710.484, 400.000, 300.000, 300.000, 500.000, 600.000, 0.000, 0.000, 21941.817],
[1100.000, 710.484, 400.000, 300.000, 300.000, 500.000, 600.000, 0.000, 0.000, 21278.518],
[1100.000, 710.484, 400.000, 300.000, 300.000, 500.000, 600.000, 0.000, 0.000, 21224.470],
[1100.000, 710.484, 400.000, 300.000, 600.000, 500.000, 600.000, 9160.000, 0.000, 31225.212],
[600.000, 710.484, 400.000, 800.000, 600.000, 700.000, 600.000, 7488.000, 0.000, 30894.575],
[600.000, 710.484, 400.000, 800.000, 600.000, 700.000, 600.000, 7488.000, 0.000, 30764.381],
[1100.000, 710.484, 400.000, 800.000, 600.000, 700.000, 600.000, 4208.000, 0.000, 31815.583],
[1100.000, 710.484, 400.000, 800.000, 600.000, 700.000, 600.000, 4208.000, 0.000, 31615.422],
[1100.000, 710.484, 400.000, 800.000, 600.000, 700.000, 600.000, 4208.000, 0.000, 32486.139],
[1100.000, 710.484, 400.000, 800.000, 600.000, 700.000, 600.000, 4208.000, 0.000, 33591.285],
[1100.000, 710.484, 400.000, 800.000, 600.000, 700.000, 600.000, 4208.000, 0.000, 34056.543],
[1100.000, 710.484, 400.000, 800.000, 600.000, 700.000, 600.000, 4208.000, 0.000, 34756.486],
[1100.000, 710.484, 400.000, 800.000, 600.000, 700.000, 600.000, 4208.000, 0.000, 34445.543],
[1100.000, 710.484, 400.000, 800.000, 600.000, 700.000, 600.000, 4208.000, 0.000, 34433.954],
[1100.000, 710.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11346.000, 0.000, 33870.470],
[1100.000, 710.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11346.000, 0.000, 34014.301],
[1100.000, 710.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11346.000, 0.000, 34680.567],
[1100.000, 710.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11346.000, 0.000, 33890.995],
[1100.000, 710.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11346.000, 0.000, 34004.664],
[1100.000, 710.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11346.000, 0.000, 34127.777],
[1100.000, 710.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11346.000, 0.000, 33421.164],
[1100.000, 710.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11346.000, 0.000, 33120.906],
[700.000, 710.484, 400.000, 100.000, 600.000, 100.000, 600.000, 13830.000, 0.000, 32613.317],
[700.000, 710.484, 400.000, 100.000, 600.000, 100.000, 600.000, 13830.000, 0.000, 33168.156],
[700.000, 1010.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11151.000, 0.000, 33504.624],
[700.000, 1010.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11151.000, 0.000, 33652.132],
[700.000, 1010.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11151.000, 0.000, 34680.487],
[700.000, 1010.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11151.000, 0.000, 35557.519],
[700.000, 1010.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11151.000, 0.000, 35669.713],
[700.000, 1010.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11151.000, 0.000, 35211.447],
[700.000, 1010.484, 400.000, 100.000, 0.000, 100.000, 900.000, 13530.000, 0.000, 35550.608],
[700.000, 1010.484, 400.000, 100.000, 0.000, 100.000, 900.000, 13530.000, 0.000, 35711.656],
[700.000, 710.484, 400.000, 100.000, 0.000, 100.000, 900.000, 16695.000, 0.000, 35682.608],
[700.000, 710.484, 400.000, 100.000, 0.000, 100.000, 900.000, 16695.000, 0.000, 35880.834],
[700.000, 710.484, 400.000, 100.000, 0.000, 100.000, 900.000, 16695.000, 0.000, 36249.874],
[700.000, 710.484, 400.000, 100.000, 0.000, 100.000, 900.000, 16695.000, 0.000, 36071.616],
[700.000, 710.484, 400.000, 100.000, 0.000, 100.000, 900.000, 16695.000, 0.000, 35846.156],
[700.000, 710.484, 400.000, 100.000, 0.000, 100.000, 900.000, 16695.000, 0.000, 35773.358],
[700.000, 710.484, 400.000, 100.000, 0.000, 100.000, 900.000, 16695.000, 0.000, 36274.946],
[700.000, 710.484, 400.000, 100.000, 0.000, 100.000, 900.000, 16695.000, 0.000, 35739.309],
[500.000, 710.484, 1100.000, 100.000, 0.000, 100.000, 900.000, 13167.000, 0.000, 36135.092],
[500.000, 710.484, 1100.000, 100.000, 0.000, 100.000, 900.000, 13167.000, 0.000, 35286.584],
[500.000, 710.484, 1100.000, 100.000, 0.000, 100.000, 900.000, 13167.000, 0.000, 35081.366]])
        # Multi-signal processing results, sell first then buy, using the cash proceeds from sales for purchases, delivery period is 2 days (stock) and 0 days (cash)
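        # The multi-signal result rows have 6 columns; by analogy with the 10-column
        # arrays above they are assumed to hold the positions of three stocks
        # (columns 0-2), cash (column 3), fee (column 4) and total value (column 5).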
self.multi_res = np.array(
[[0.0000, 357.2545, 0.0000, 6506.9627, 0.0000, 9965.1867],
[0.0000, 357.2545, 0.0000, 6506.9627, 0.0000, 10033.0650],
[0.0000, 178.6273, 0.0000, 8273.5864, 0.0000, 10034.8513],
[0.0000, 178.6273, 0.0000, 8273.5864, 0.0000, 10036.6376],
[150.3516, 178.6273, 0.0000, 6771.5740, 0.0000, 10019.3404],
[150.3516, 178.6273, 0.0000, 6771.5740, 0.0000, 10027.7062],
[150.3516, 178.6273, 0.0000, 6771.5740, 0.0000, 10030.1477],
[150.3516, 178.6273, 0.0000, 6771.5740, 0.0000, 10005.1399],
[150.3516, 178.6273, 0.0000, 6771.5740, 0.0000, 10002.5054],
[150.3516, 489.4532, 0.0000, 3765.8877, 0.0000, 9967.3860],
[75.1758, 391.5625, 0.0000, 5490.1377, 0.0000, 10044.4059],
[75.1758, 391.5625, 0.0000, 5490.1377, 0.0000, 10078.1430],
[75.1758, 391.5625, 846.3525, 392.3025, 0.0000, 10138.2709],
[75.1758, 391.5625, 846.3525, 392.3025, 0.0000, 10050.4768],
[75.1758, 391.5625, 846.3525, 392.3025, 0.0000, 10300.0711],
[75.1758, 391.5625, 846.3525, 392.3025, 0.0000, 10392.6970],
[75.1758, 391.5625, 169.2705, 4644.3773, 0.0000, 10400.5282],
[75.1758, 391.5625, 169.2705, 4644.3773, 0.0000, 10408.9220],
[75.1758, 0.0000, 169.2705, 8653.9776, 0.0000, 10376.5914],
[75.1758, 0.0000, 169.2705, 8653.9776, 0.0000, 10346.8794],
[75.1758, 0.0000, 169.2705, 8653.9776, 0.0000, 10364.7474],
[75.1758, 381.1856, 645.5014, 2459.1665, 0.0000, 10302.4570],
[18.7939, 381.1856, 645.5014, 3024.6764, 0.0000, 10747.4929],
[18.7939, 381.1856, 96.8252, 6492.3097, 0.0000, 11150.9107],
[18.7939, 381.1856, 96.8252, 6492.3097, 0.0000, 11125.2946],
[18.7939, 114.3557, 96.8252, 9227.3166, 0.0000, 11191.9956],
[18.7939, 114.3557, 96.8252, 9227.3166, 0.0000, 11145.7486],
[18.7939, 114.3557, 96.8252, 9227.3166, 0.0000, 11090.0768],
[132.5972, 114.3557, 864.3802, 4223.9548, 0.0000, 11113.8733],
[132.5972, 114.3557, 864.3802, 4223.9548, 0.0000, 11456.3281],
[132.5972, 114.3557, 864.3802, 14223.9548, 0.0000, 21983.7333],
[132.5972, 114.3557, 864.3802, 14223.9548, 0.0000, 22120.6165],
[132.5972, 114.3557, 864.3802, 14223.9548, 0.0000, 21654.5327],
[132.5972, 114.3557, 864.3802, 14223.9548, 0.0000, 21429.6550],
[132.5972, 114.3557, 864.3802, 14223.9548, 0.0000, 21912.5643],
[132.5972, 114.3557, 864.3802, 14223.9548, 0.0000, 22516.3100],
[132.5972, 114.3557, 864.3802, 14223.9548, 0.0000, 23169.0777],
[132.5972, 114.3557, 864.3802, 14223.9548, 0.0000, 23390.8080],
[132.5972, 114.3557, 864.3802, 14223.9548, 0.0000, 23743.3742],
[132.5972, 559.9112, 864.3802, 9367.3999, 0.0000, 23210.7311],
[132.5972, 559.9112, 864.3802, 9367.3999, 0.0000, 24290.4375],
[132.5972, 559.9112, 864.3802, 9367.3999, 0.0000, 24335.3279],
[132.5972, 559.9112, 864.3802, 9367.3999, 0.0000, 18317.3553],
[132.5972, 559.9112, 864.3802, 9367.3999, 0.0000, 18023.4660],
[259.4270, 559.9112, 0.0000, 15820.6915, 0.0000, 24390.0527],
[259.4270, 559.9112, 0.0000, 15820.6915, 0.0000, 24389.6421],
[259.4270, 559.9112, 0.0000, 15820.6915, 0.0000, 24483.5953],
[0.0000, 559.9112, 0.0000, 18321.5674, 0.0000, 24486.1895],
[0.0000, 0.0000, 0.0000, 24805.3389, 0.0000, 24805.3389],
[0.0000, 0.0000, 0.0000, 24805.3389, 0.0000, 24805.3389]])
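
    # The tests below drive qt.core._loop_step() one trading day at a time. Each call
    # returns (cash_gained, cash_spent, amounts_purchased, amounts_sold, fee); the new
    # position is obtained by adding these deltas to the previous day's cash and
    # holdings before comparing against the pre-computed result rows above.
    # The helper below is an illustrative sketch (an assumption, not part of the
    # original test suite and not called by the tests) of that accumulation pattern,
    # reusing the same rate/threshold settings as the explicit calls in the tests.
    def _step_once(self, signal_type, signals, day, own_cash, own_amounts, sell_first):
        """Run a single _loop_step for ``day`` and return the updated (cash, amounts)."""
        c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=signal_type,
                                                     own_cash=own_cash,
                                                     own_amounts=own_amounts,
                                                     available_cash=own_cash,
                                                     available_amounts=own_amounts,
                                                     op=signals[day],
                                                     prices=self.prices[day],
                                                     rate=self.rate,
                                                     pt_buy_threshold=0.1,
                                                     pt_sell_threshold=0.1,
                                                     maximize_cash_usage=sell_first,
                                                     allow_sell_short=False,
                                                     moq_buy=0,
                                                     moq_sell=0,
                                                     print_log=False)
        return own_cash + c_g + c_s, own_amounts + a_p + a_s
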
def test_loop_step_pt_sb00(self):
""" test loop step PT-signal, sell first"""
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=0,
own_cash=10000,
own_amounts=np.zeros(7, dtype='float'),
available_cash=10000,
available_amounts=np.zeros(7, dtype='float'),
op=self.pt_signals[0],
prices=self.prices[0],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=True,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 1 result in complete looping: \n'
f'cash_change: +{c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = 10000 + c_g + c_s
amounts = np.zeros(7, dtype='float') + a_p + a_s
self.assertAlmostEqual(cash, 7500)
self.assertTrue(np.allclose(amounts, np.array([0, 0, 0, 0, 555.5555556, 0, 0])))
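        # Day-1 sanity check: roughly 2,500 of the initial 10,000 in cash is spent
        # buying ~555.56 shares of the fifth stock (consistent with a unit price of
        # about 4.5, assuming any buy-in fee is already folded into the returned
        # cash change), leaving 7,500 in cash.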
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=0,
own_cash=self.pt_res_sb00[2][7],
own_amounts=self.pt_res_sb00[2][0:7],
available_cash=self.pt_res_sb00[2][7],
available_amounts=self.pt_res_sb00[2][0:7],
op=self.pt_signals[3],
prices=self.prices[3],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=True,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 4 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = self.pt_res_sb00[2][7] + c_g + c_s
amounts = self.pt_res_sb00[2][0:7] + a_p + a_s
self.assertAlmostEqual(cash, self.pt_res_sb00[3][7], 2)
self.assertTrue(np.allclose(amounts, self.pt_res_sb00[3][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=0,
own_cash=self.pt_res_sb00[30][7],
own_amounts=self.pt_res_sb00[30][0:7],
available_cash=self.pt_res_sb00[30][7],
available_amounts=self.pt_res_sb00[30][0:7],
op=self.pt_signals[31],
prices=self.prices[31],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=True,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 32 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = self.pt_res_sb00[30][7] + c_g + c_s
amounts = self.pt_res_sb00[30][0:7] + a_p + a_s
self.assertAlmostEqual(cash, self.pt_res_sb00[31][7], 2)
self.assertTrue(np.allclose(amounts, self.pt_res_sb00[31][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=0,
own_cash=self.pt_res_sb00[59][7] + 10000,
own_amounts=self.pt_res_sb00[59][0:7],
available_cash=self.pt_res_sb00[59][7] + 10000,
available_amounts=self.pt_res_sb00[59][0:7],
op=self.pt_signals[60],
prices=self.prices[60],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=True,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 61 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = self.pt_res_sb00[59][7] + c_g + c_s + 10000
amounts = self.pt_res_sb00[59][0:7] + a_p + a_s
self.assertAlmostEqual(cash, self.pt_res_sb00[60][7], 2)
self.assertTrue(np.allclose(amounts, self.pt_res_sb00[60][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=0,
own_cash=cash,
own_amounts=amounts,
available_cash=cash,
available_amounts=amounts,
op=self.pt_signals[61],
prices=self.prices[61],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=True,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 62 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = cash + c_g + c_s
amounts = amounts + a_p + a_s
self.assertAlmostEqual(cash, self.pt_res_sb00[61][7], 2)
self.assertTrue(np.allclose(amounts, self.pt_res_sb00[61][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=0,
own_cash=self.pt_res_sb00[95][7],
own_amounts=self.pt_res_sb00[95][0:7],
available_cash=self.pt_res_sb00[95][7],
available_amounts=self.pt_res_sb00[95][0:7],
op=self.pt_signals[96],
prices=self.prices[96],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=True,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 97 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = self.pt_res_sb00[96][7] + c_g + c_s
amounts = self.pt_res_sb00[96][0:7] + a_p + a_s
self.assertAlmostEqual(cash, self.pt_res_sb00[96][7], 2)
self.assertTrue(np.allclose(amounts, self.pt_res_sb00[96][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=0,
own_cash=cash,
own_amounts=amounts,
available_cash=cash,
available_amounts=amounts,
op=self.pt_signals[97],
prices=self.prices[97],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=True,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 98 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = cash + c_g + c_s
amounts = amounts + a_p + a_s
self.assertAlmostEqual(cash, self.pt_res_sb00[97][7], 2)
self.assertTrue(np.allclose(amounts, self.pt_res_sb00[97][0:7]))
def test_loop_step_pt_bs00(self):
""" test loop step PT-signal, buy first"""
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=0,
own_cash=10000,
own_amounts=np.zeros(7, dtype='float'),
available_cash=10000,
available_amounts=np.zeros(7, dtype='float'),
op=self.pt_signals[0],
prices=self.prices[0],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=False,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 1 result in complete looping: \n'
f'cash_change: +{c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = 10000 + c_g + c_s
amounts = np.zeros(7, dtype='float') + a_p + a_s
self.assertAlmostEqual(cash, 7500)
self.assertTrue(np.allclose(amounts, np.array([0, 0, 0, 0, 555.5555556, 0, 0])))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=0,
own_cash=self.pt_res_bs00[2][7],
own_amounts=self.pt_res_bs00[2][0:7],
available_cash=self.pt_res_bs00[2][7],
available_amounts=self.pt_res_bs00[2][0:7],
op=self.pt_signals[3],
prices=self.prices[3],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=False,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 4 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = self.pt_res_bs00[2][7] + c_g + c_s
amounts = self.pt_res_bs00[2][0:7] + a_p + a_s
self.assertAlmostEqual(cash, self.pt_res_bs00[3][7], 2)
self.assertTrue(np.allclose(amounts, self.pt_res_bs00[3][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=0,
own_cash=self.pt_res_bs00[30][7],
own_amounts=self.pt_res_bs00[30][0:7],
available_cash=self.pt_res_bs00[30][7],
available_amounts=self.pt_res_bs00[30][0:7],
op=self.pt_signals[31],
prices=self.prices[31],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=False,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 32 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = self.pt_res_bs00[30][7] + c_g + c_s
amounts = self.pt_res_bs00[30][0:7] + a_p + a_s
self.assertAlmostEqual(cash, self.pt_res_bs00[31][7], 2)
self.assertTrue(np.allclose(amounts, self.pt_res_bs00[31][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=0,
own_cash=self.pt_res_bs00[59][7] + 10000,
own_amounts=self.pt_res_bs00[59][0:7],
available_cash=self.pt_res_bs00[59][7] + 10000,
available_amounts=self.pt_res_bs00[59][0:7],
op=self.pt_signals[60],
prices=self.prices[60],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=False,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 61 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = self.pt_res_bs00[59][7] + c_g + c_s + 10000
amounts = self.pt_res_bs00[59][0:7] + a_p + a_s
self.assertAlmostEqual(cash, self.pt_res_bs00[60][7], 2)
self.assertTrue(np.allclose(amounts, self.pt_res_bs00[60][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=0,
own_cash=cash,
own_amounts=amounts,
available_cash=cash,
available_amounts=amounts,
op=self.pt_signals[61],
prices=self.prices[61],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=False,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 62 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = cash + c_g + c_s
amounts = amounts + a_p + a_s
self.assertAlmostEqual(cash, self.pt_res_bs00[61][7], 2)
self.assertTrue(np.allclose(amounts, self.pt_res_bs00[61][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=0,
own_cash=self.pt_res_bs00[95][7],
own_amounts=self.pt_res_bs00[95][0:7],
available_cash=self.pt_res_bs00[95][7],
available_amounts=self.pt_res_bs00[95][0:7],
op=self.pt_signals[96],
prices=self.prices[96],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=False,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 97 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = self.pt_res_bs00[96][7] + c_g + c_s
amounts = self.pt_res_bs00[96][0:7] + a_p + a_s
self.assertAlmostEqual(cash, self.pt_res_bs00[96][7], 2)
self.assertTrue(np.allclose(amounts, self.pt_res_bs00[96][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=0,
own_cash=cash,
own_amounts=amounts,
available_cash=cash,
available_amounts=amounts,
op=self.pt_signals[97],
prices=self.prices[97],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=False,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 98 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = cash + c_g + c_s
amounts = amounts + a_p + a_s
self.assertAlmostEqual(cash, self.pt_res_bs00[97][7], 2)
self.assertTrue(np.allclose(amounts, self.pt_res_bs00[97][0:7]))
def test_loop_step_ps_sb00(self):
""" test loop step PS-signal, sell first"""
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=1,
own_cash=10000,
own_amounts=np.zeros(7, dtype='float'),
available_cash=10000,
available_amounts=np.zeros(7, dtype='float'),
op=self.ps_signals[0],
prices=self.prices[0],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=True,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 1 result in complete looping: \n'
f'cash_change: +{c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = 10000 + c_g + c_s
amounts = np.zeros(7, dtype='float') + a_p + a_s
self.assertAlmostEqual(cash, 7500)
self.assertTrue(np.allclose(amounts, np.array([0, 0, 0, 0, 555.5555556, 0, 0])))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=1,
own_cash=self.ps_res_sb00[2][7],
own_amounts=self.ps_res_sb00[2][0:7],
available_cash=self.ps_res_sb00[2][7],
available_amounts=self.ps_res_sb00[2][0:7],
op=self.ps_signals[3],
prices=self.prices[3],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=True,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 4 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = self.ps_res_sb00[2][7] + c_g + c_s
amounts = self.ps_res_sb00[2][0:7] + a_p + a_s
self.assertAlmostEqual(cash, self.ps_res_sb00[3][7], 2)
self.assertTrue(np.allclose(amounts, self.ps_res_sb00[3][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=1,
own_cash=self.ps_res_sb00[30][7],
own_amounts=self.ps_res_sb00[30][0:7],
available_cash=self.ps_res_sb00[30][7],
available_amounts=self.ps_res_sb00[30][0:7],
op=self.ps_signals[31],
prices=self.prices[31],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=True,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 32 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = self.ps_res_sb00[30][7] + c_g + c_s
amounts = self.ps_res_sb00[30][0:7] + a_p + a_s
self.assertAlmostEqual(cash, self.ps_res_sb00[31][7], 2)
self.assertTrue(np.allclose(amounts, self.ps_res_sb00[31][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=1,
own_cash=self.ps_res_sb00[59][7] + 10000,
own_amounts=self.ps_res_sb00[59][0:7],
available_cash=self.ps_res_sb00[59][7] + 10000,
available_amounts=self.ps_res_sb00[59][0:7],
op=self.ps_signals[60],
prices=self.prices[60],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=True,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 61 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = self.ps_res_sb00[59][7] + c_g + c_s + 10000
amounts = self.ps_res_sb00[59][0:7] + a_p + a_s
self.assertAlmostEqual(cash, self.ps_res_sb00[60][7], 2)
self.assertTrue(np.allclose(amounts, self.ps_res_sb00[60][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=1,
own_cash=cash,
own_amounts=amounts,
available_cash=cash,
available_amounts=amounts,
op=self.ps_signals[61],
prices=self.prices[61],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=True,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 62 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = cash + c_g + c_s
amounts = amounts + a_p + a_s
self.assertAlmostEqual(cash, self.ps_res_sb00[61][7], 2)
self.assertTrue(np.allclose(amounts, self.ps_res_sb00[61][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=1,
own_cash=self.ps_res_sb00[95][7],
own_amounts=self.ps_res_sb00[95][0:7],
available_cash=self.ps_res_sb00[95][7],
available_amounts=self.ps_res_sb00[95][0:7],
op=self.ps_signals[96],
prices=self.prices[96],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=True,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 97 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = self.ps_res_sb00[96][7] + c_g + c_s
amounts = self.ps_res_sb00[96][0:7] + a_p + a_s
self.assertAlmostEqual(cash, self.ps_res_sb00[96][7], 2)
self.assertTrue(np.allclose(amounts, self.ps_res_sb00[96][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=1,
own_cash=cash,
own_amounts=amounts,
available_cash=cash,
available_amounts=amounts,
op=self.ps_signals[97],
prices=self.prices[97],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=True,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 98 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = cash + c_g + c_s
amounts = amounts + a_p + a_s
self.assertAlmostEqual(cash, self.ps_res_sb00[97][7], 2)
self.assertTrue(np.allclose(amounts, self.ps_res_sb00[97][0:7]))
def test_loop_step_ps_bs00(self):
""" test loop step PS-signal, buy first"""
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=1,
own_cash=10000,
own_amounts=np.zeros(7, dtype='float'),
available_cash=10000,
available_amounts=np.zeros(7, dtype='float'),
op=self.ps_signals[0],
prices=self.prices[0],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=False,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 1 result in complete looping: \n'
f'cash_change: +{c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = 10000 + c_g + c_s
amounts = np.zeros(7, dtype='float') + a_p + a_s
self.assertAlmostEqual(cash, 7500)
self.assertTrue(np.allclose(amounts, np.array([0, 0, 0, 0, 555.5555556, 0, 0])))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=1,
own_cash=self.ps_res_bs00[2][7],
own_amounts=self.ps_res_bs00[2][0:7],
available_cash=self.ps_res_bs00[2][7],
available_amounts=self.ps_res_bs00[2][0:7],
op=self.ps_signals[3],
prices=self.prices[3],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=False,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 4 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = self.ps_res_bs00[2][7] + c_g + c_s
amounts = self.ps_res_bs00[2][0:7] + a_p + a_s
self.assertAlmostEqual(cash, self.ps_res_bs00[3][7], 2)
self.assertTrue(np.allclose(amounts, self.ps_res_bs00[3][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=1,
own_cash=self.ps_res_bs00[30][7],
own_amounts=self.ps_res_bs00[30][0:7],
available_cash=self.ps_res_bs00[30][7],
available_amounts=self.ps_res_bs00[30][0:7],
op=self.ps_signals[31],
prices=self.prices[31],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=False,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 32 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = self.ps_res_bs00[30][7] + c_g + c_s
amounts = self.ps_res_bs00[30][0:7] + a_p + a_s
self.assertAlmostEqual(cash, self.ps_res_bs00[31][7], 2)
self.assertTrue(np.allclose(amounts, self.ps_res_bs00[31][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=1,
own_cash=self.ps_res_bs00[59][7] + 10000,
own_amounts=self.ps_res_bs00[59][0:7],
available_cash=self.ps_res_bs00[59][7] + 10000,
available_amounts=self.ps_res_bs00[59][0:7],
op=self.ps_signals[60],
prices=self.prices[60],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=False,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 61 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = self.ps_res_bs00[59][7] + c_g + c_s + 10000
amounts = self.ps_res_bs00[59][0:7] + a_p + a_s
self.assertAlmostEqual(cash, self.ps_res_bs00[60][7], 2)
self.assertTrue(np.allclose(amounts, self.ps_res_bs00[60][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=1,
own_cash=cash,
own_amounts=amounts,
available_cash=cash,
available_amounts=amounts,
op=self.ps_signals[61],
prices=self.prices[61],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=False,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 62 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = cash + c_g + c_s
amounts = amounts + a_p + a_s
self.assertAlmostEqual(cash, self.ps_res_bs00[61][7], 2)
self.assertTrue(np.allclose(amounts, self.ps_res_bs00[61][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=1,
own_cash=self.ps_res_bs00[95][7],
own_amounts=self.ps_res_bs00[95][0:7],
available_cash=self.ps_res_bs00[95][7],
available_amounts=self.ps_res_bs00[95][0:7],
op=self.ps_signals[96],
prices=self.prices[96],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=False,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 97 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = self.ps_res_bs00[96][7] + c_g + c_s
amounts = self.ps_res_bs00[96][0:7] + a_p + a_s
self.assertAlmostEqual(cash, self.ps_res_bs00[96][7], 2)
self.assertTrue(np.allclose(amounts, self.ps_res_bs00[96][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=1,
own_cash=cash,
own_amounts=amounts,
available_cash=cash,
available_amounts=amounts,
op=self.ps_signals[97],
prices=self.prices[97],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=False,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 98 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = cash + c_g + c_s
amounts = amounts + a_p + a_s
self.assertAlmostEqual(cash, self.ps_res_bs00[97][7], 2)
self.assertTrue(np.allclose(amounts, self.ps_res_bs00[97][0:7]))
def test_loop_step_vs_sb00(self):
"""test loop step of Volume Signal type of signals"""
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=2,
own_cash=10000,
own_amounts=np.zeros(7, dtype='float'),
available_cash=10000,
available_amounts=np.zeros(7, dtype='float'),
op=self.vs_signals[0],
prices=self.prices[0],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=True,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 1 result in complete looping: \n'
f'cash_change: +{c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = 10000 + c_g + c_s
amounts = np.zeros(7, dtype='float') + a_p + a_s
self.assertAlmostEqual(cash, 7750)
self.assertTrue(np.allclose(amounts, np.array([0, 0, 0, 0, 500., 0, 0])))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=2,
own_cash=self.vs_res_sb00[2][7],
own_amounts=self.vs_res_sb00[2][0:7],
available_cash=self.vs_res_sb00[2][7],
available_amounts=self.vs_res_sb00[2][0:7],
op=self.vs_signals[3],
prices=self.prices[3],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=True,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 4 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = self.vs_res_sb00[2][7] + c_g + c_s
amounts = self.vs_res_sb00[2][0:7] + a_p + a_s
self.assertAlmostEqual(cash, self.vs_res_sb00[3][7], 2)
self.assertTrue(np.allclose(amounts, self.vs_res_sb00[3][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=2,
own_cash=self.vs_res_sb00[30][7],
own_amounts=self.vs_res_sb00[30][0:7],
available_cash=self.vs_res_sb00[30][7],
available_amounts=self.vs_res_sb00[30][0:7],
op=self.vs_signals[31],
prices=self.prices[31],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=True,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 32 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = self.vs_res_sb00[30][7] + c_g + c_s
amounts = self.vs_res_sb00[30][0:7] + a_p + a_s
self.assertAlmostEqual(cash, self.vs_res_sb00[31][7], 2)
self.assertTrue(np.allclose(amounts, self.vs_res_sb00[31][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=2,
own_cash=self.vs_res_sb00[59][7] + 10000,
own_amounts=self.vs_res_sb00[59][0:7],
available_cash=self.vs_res_sb00[59][7] + 10000,
available_amounts=self.vs_res_sb00[59][0:7],
op=self.vs_signals[60],
prices=self.prices[60],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=True,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 61 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = self.vs_res_sb00[59][7] + c_g + c_s + 10000
amounts = self.vs_res_sb00[59][0:7] + a_p + a_s
self.assertAlmostEqual(cash, self.vs_res_sb00[60][7], 2)
self.assertTrue(np.allclose(amounts, self.vs_res_sb00[60][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=2,
own_cash=cash,
own_amounts=amounts,
available_cash=cash,
available_amounts=amounts,
op=self.vs_signals[61],
prices=self.prices[61],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=True,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 62 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = cash + c_g + c_s
amounts = amounts + a_p + a_s
self.assertAlmostEqual(cash, self.vs_res_sb00[61][7], 2)
self.assertTrue(np.allclose(amounts, self.vs_res_sb00[61][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=2,
own_cash=self.vs_res_sb00[95][7],
own_amounts=self.vs_res_sb00[95][0:7],
available_cash=self.vs_res_sb00[95][7],
available_amounts=self.vs_res_sb00[95][0:7],
op=self.vs_signals[96],
prices=self.prices[96],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=True,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 97 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = self.vs_res_sb00[96][7] + c_g + c_s
amounts = self.vs_res_sb00[96][0:7] + a_p + a_s
self.assertAlmostEqual(cash, self.vs_res_sb00[96][7], 2)
self.assertTrue(np.allclose(amounts, self.vs_res_sb00[96][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=2,
own_cash=cash,
own_amounts=amounts,
available_cash=cash,
available_amounts=amounts,
op=self.vs_signals[97],
prices=self.prices[97],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=True,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 98 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = cash + c_g + c_s
amounts = amounts + a_p + a_s
self.assertAlmostEqual(cash, self.vs_res_sb00[97][7], 2)
self.assertTrue(np.allclose(amounts, self.vs_res_sb00[97][0:7]))
def test_loop_step_vs_bs00(self):
"""test loop step of Volume Signal type of signals"""
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=2,
own_cash=10000,
own_amounts=np.zeros(7, dtype='float'),
available_cash=10000,
available_amounts=np.zeros(7, dtype='float'),
op=self.vs_signals[0],
prices=self.prices[0],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=False,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 1 result in complete looping: \n'
f'cash_change: +{c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = 10000 + c_g + c_s
amounts = np.zeros(7, dtype='float') + a_p + a_s
self.assertAlmostEqual(cash, 7750)
self.assertTrue(np.allclose(amounts, np.array([0, 0, 0, 0, 500., 0, 0])))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=2,
own_cash=self.vs_res_bs00[2][7],
own_amounts=self.vs_res_bs00[2][0:7],
available_cash=self.vs_res_bs00[2][7],
available_amounts=self.vs_res_bs00[2][0:7],
op=self.vs_signals[3],
prices=self.prices[3],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=False,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 4 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = self.vs_res_bs00[2][7] + c_g + c_s
amounts = self.vs_res_bs00[2][0:7] + a_p + a_s
self.assertAlmostEqual(cash, self.vs_res_bs00[3][7], 2)
self.assertTrue(np.allclose(amounts, self.vs_res_bs00[3][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=2,
own_cash=self.vs_res_bs00[30][7],
own_amounts=self.vs_res_bs00[30][0:7],
available_cash=self.vs_res_bs00[30][7],
available_amounts=self.vs_res_bs00[30][0:7],
op=self.vs_signals[31],
prices=self.prices[31],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=False,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 32 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = self.vs_res_bs00[30][7] + c_g + c_s
amounts = self.vs_res_bs00[30][0:7] + a_p + a_s
self.assertAlmostEqual(cash, self.vs_res_bs00[31][7], 2)
self.assertTrue(np.allclose(amounts, self.vs_res_bs00[31][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=2,
own_cash=self.vs_res_bs00[59][7] + 10000,
own_amounts=self.vs_res_bs00[59][0:7],
available_cash=self.vs_res_bs00[59][7] + 10000,
available_amounts=self.vs_res_bs00[59][0:7],
op=self.vs_signals[60],
prices=self.prices[60],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=False,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 61 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = self.vs_res_bs00[59][7] + c_g + c_s + 10000
amounts = self.vs_res_bs00[59][0:7] + a_p + a_s
self.assertAlmostEqual(cash, self.vs_res_bs00[60][7], 2)
self.assertTrue(np.allclose(amounts, self.vs_res_bs00[60][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=2,
own_cash=cash,
own_amounts=amounts,
available_cash=cash,
available_amounts=amounts,
op=self.vs_signals[61],
prices=self.prices[61],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=False,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 62 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = cash + c_g + c_s
amounts = amounts + a_p + a_s
self.assertAlmostEqual(cash, self.vs_res_bs00[61][7], 2)
self.assertTrue(np.allclose(amounts, self.vs_res_bs00[61][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=2,
own_cash=self.vs_res_bs00[95][7],
own_amounts=self.vs_res_bs00[95][0:7],
available_cash=self.vs_res_bs00[95][7],
available_amounts=self.vs_res_bs00[95][0:7],
op=self.vs_signals[96],
prices=self.prices[96],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=False,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 97 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = self.vs_res_bs00[96][7] + c_g + c_s
amounts = self.vs_res_bs00[96][0:7] + a_p + a_s
self.assertAlmostEqual(cash, self.vs_res_bs00[96][7], 2)
self.assertTrue(np.allclose(amounts, self.vs_res_bs00[96][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=2,
own_cash=cash,
own_amounts=amounts,
available_cash=cash,
available_amounts=amounts,
op=self.vs_signals[97],
prices=self.prices[97],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=False,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 98 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = cash + c_g + c_s
amounts = amounts + a_p + a_s
self.assertAlmostEqual(cash, self.vs_res_bs00[97][7], 2)
self.assertTrue(np.allclose(amounts, self.vs_res_bs00[97][0:7]))
def test_loop_pt(self):
""" Test looping of PT proportion target signals, with
stock delivery delay = 0 days
cash delivery delay = 0 day
buy-sell sequence = sell first
"""
print('Test looping of PT proportion target signals, with:\n'
'stock delivery delay = 0 days \n'
'cash delivery delay = 0 day \n'
'buy-sell sequence = sell first')
res = apply_loop(op_type=0,
op_list=self.pt_signal_hp,
history_list=self.history_list,
cash_plan=self.cash,
cost_rate=self.rate,
moq_buy=0,
moq_sell=0,
inflation_rate=0,
print_log=False)
self.assertIsInstance(res, pd.DataFrame)
# print(f'in test_loop:\nresult of loop test is \n{res}')
self.assertTrue(np.allclose(res, self.pt_res_bs00, 2))
print(f'test assertion errors in apply_loop: detect moqs that are not compatible')
self.assertRaises(AssertionError,
apply_loop,
0,
self.ps_signal_hp,
self.history_list,
self.cash,
self.rate,
0, 1,
0,
False)
self.assertRaises(AssertionError,
apply_loop,
0,
self.ps_signal_hp,
self.history_list,
self.cash,
self.rate,
1, 5,
0,
False)
print(f'test loop results with moq equal to 100')
res = apply_loop(op_type=0,
op_list=self.ps_signal_hp,
history_list=self.history_list,
cash_plan=self.cash,
cost_rate=self.rate2,
moq_buy=100,
moq_sell=1,
inflation_rate=0,
print_log=False)
self.assertIsInstance(res, pd.DataFrame)
# print(f'in test_loop:\nresult of loop test is \n{res}')
def test_loop_pt_with_delay(self):
""" Test looping of PT proportion target signals, with:
stock delivery delay = 2 days
cash delivery delay = 1 day
use_sell_cash = False
"""
print('Test looping of PT proportion target signals, with:\n'
'stock delivery delay = 2 days \n'
'cash delivery delay = 1 day \n'
'maximize_cash = False (buy and sell at the same time)')
res = apply_loop(
op_type=0,
op_list=self.pt_signal_hp,
history_list=self.history_list,
cash_plan=self.cash,
cost_rate=self.rate,
moq_buy=0,
moq_sell=0,
inflation_rate=0,
cash_delivery_period=1,
stock_delivery_period=2,
print_log=False)
self.assertIsInstance(res, pd.DataFrame)
print(f'in test_loop:\nresult of loop test is \n{res}\n'
f'result comparison line by line:')
for i in range(len(res)):
print(np.around(res.values[i]))
print(np.around(self.pt_res_bs21[i]))
print()
self.assertTrue(np.allclose(res, self.pt_res_bs21, 3))
print(f'test assertion errors in apply_loop: detect moqs that are not compatible')
self.assertRaises(AssertionError,
apply_loop,
0,
self.ps_signal_hp,
self.history_list,
self.cash,
self.rate,
0, 1,
0,
False)
self.assertRaises(AssertionError,
apply_loop,
0,
self.ps_signal_hp,
self.history_list,
self.cash,
self.rate,
1, 5,
0,
False)
print(f'test loop results with moq equal to 100')
res = apply_loop(
op_type=1,
op_list=self.ps_signal_hp,
history_list=self.history_list,
cash_plan=self.cash,
cost_rate=self.rate2,
moq_buy=100,
moq_sell=1,
inflation_rate=0,
print_log=False)
self.assertIsInstance(res, pd.DataFrame)
print(f'in test_loop:\nresult of loop test is \n{res}')
def test_loop_pt_with_delay_use_cash(self):
""" Test looping of PT proportion target signals, with:
stock delivery delay = 2 days
cash delivery delay = 0 day
use sell cash = True (sell stock first so that sale proceeds can fund purchases;
only possible when the cash delivery period is 0)
"""
print('Test looping of PT proportion target signals, with:\n'
'stock delivery delay = 2 days \n'
'cash delivery delay = 0 day \n'
'maximize cash usage = True \n'
'(applicable because the cash delivery period is 0)')
res = apply_loop(
op_type=0,
op_list=self.pt_signal_hp,
history_list=self.history_list,
cash_plan=self.cash,
cost_rate=self.rate,
moq_buy=0,
moq_sell=0,
cash_delivery_period=0,
stock_delivery_period=2,
inflation_rate=0,
max_cash_usage=True,
print_log=True)
self.assertIsInstance(res, pd.DataFrame)
print(f'in test_loop:\nresult of loop test is \n{res}\n'
f'result comparison line by line:')
for i in range(len(res)):
print(np.around(res.values[i]))
print(np.around(self.pt_res_sb20[i]))
print()
self.assertTrue(np.allclose(res, self.pt_res_sb20, 3))
print(f'test assertion errors in apply_loop: detect moqs that are not compatible')
self.assertRaises(AssertionError,
apply_loop,
0,
self.ps_signal_hp,
self.history_list,
self.cash,
self.rate,
0, 1,
0,
False)
self.assertRaises(AssertionError,
apply_loop,
0,
self.ps_signal_hp,
self.history_list,
self.cash,
self.rate,
1, 5,
0,
False)
print(f'test loop results with moq equal to 100')
res = apply_loop(
op_type=1,
op_list=self.ps_signal_hp,
history_list=self.history_list,
cash_plan=self.cash,
cost_rate=self.rate2,
moq_buy=100,
moq_sell=1,
cash_delivery_period=1,
stock_delivery_period=2,
inflation_rate=0,
print_log=True)
self.assertIsInstance(res, pd.DataFrame)
print(f'in test_loop:\nresult of loop test is \n{res}')
def test_loop_ps(self):
""" Test looping of PS Proportion Signal type of signals
"""
res = apply_loop(op_type=1,
op_list=self.ps_signal_hp,
history_list=self.history_list,
cash_plan=self.cash,
cost_rate=self.rate,
moq_buy=0,
moq_sell=0,
inflation_rate=0,
print_log=False)
self.assertIsInstance(res, pd.DataFrame)
print(f'in test_loop:\nresult of loop test is \n{res}')
self.assertTrue(np.allclose(res, self.ps_res_bs00, 5))
print(f'test assertion errors in apply_loop: detect moqs that are not compatible')
self.assertRaises(AssertionError,
apply_loop,
0,
self.ps_signal_hp,
self.history_list,
self.cash,
self.rate,
0, 1,
0,
False)
self.assertRaises(AssertionError,
apply_loop,
0,
self.ps_signal_hp,
self.history_list,
self.cash,
self.rate,
1, 5,
0,
False)
print(f'test loop results with moq equal to 100')
res = apply_loop(op_type=1,
op_list=self.ps_signal_hp,
history_list=self.history_list,
cash_plan=self.cash,
cost_rate=self.rate2,
moq_buy=100,
moq_sell=1,
inflation_rate=0,
print_log=False)
self.assertIsInstance(res, pd.DataFrame)
print(f'in test_loop:\nresult of loop test is \n{res}')
def test_loop_ps_with_delay(self):
""" Test looping of PT proportion target signals, with:
stock delivery delay = 2 days
cash delivery delay = 1 day
use_sell_cash = False
"""
print('Test looping of PS proportion signals, with:\n'
'stock delivery delay = 2 days \n'
'cash delivery delay = 1 day \n'
'maximize_cash = False (buy and sell at the same time)')
res = apply_loop(
op_type=1,
op_list=self.ps_signal_hp,
history_list=self.history_list,
cash_plan=self.cash,
cost_rate=self.rate,
moq_buy=0,
moq_sell=0,
inflation_rate=0,
cash_delivery_period=1,
stock_delivery_period=2,
print_log=False)
self.assertIsInstance(res, pd.DataFrame)
print(f'in test_loop:\nresult of loop test is \n{res}\n'
f'result comparison line by line:')
for i in range(len(res)):
print(np.around(res.values[i]))
print(np.around(self.ps_res_bs21[i]))
print()
self.assertTrue(np.allclose(res, self.ps_res_bs21, 3))
print(f'test assertion errors in apply_loop: detect moqs that are not compatible')
self.assertRaises(AssertionError,
apply_loop,
0,
self.ps_signal_hp,
self.history_list,
self.cash,
self.rate,
0, 1,
0,
False)
self.assertRaises(AssertionError,
apply_loop,
0,
self.ps_signal_hp,
self.history_list,
self.cash,
self.rate,
1, 5,
0,
False)
print(f'test loop results with moq equal to 100')
res = apply_loop(
op_type=1,
op_list=self.ps_signal_hp,
history_list=self.history_list,
cash_plan=self.cash,
cost_rate=self.rate2,
moq_buy=100,
moq_sell=1,
inflation_rate=0,
print_log=False)
self.assertIsInstance(res, pd.DataFrame)
print(f'in test_loop:\nresult of loop test is \n{res}')
def test_loop_ps_with_delay_use_cash(self):
""" Test looping of PT proportion target signals, with:
stock delivery delay = 2 days
cash delivery delay = 0 day
use sell cash = True (sell stock first to use cash when possible
(not possible when cash delivery period != 0))
"""
print('Test looping of PS proportion signals, with:\n'
'stock delivery delay = 2 days \n'
'cash delivery delay = 0 day \n'
'maximize cash usage = True \n'
'(applicable because the cash delivery period is 0)')
res = apply_loop(
op_type=1,
op_list=self.ps_signal_hp,
history_list=self.history_list,
cash_plan=self.cash,
cost_rate=self.rate,
moq_buy=0,
moq_sell=0,
cash_delivery_period=0,
stock_delivery_period=2,
inflation_rate=0,
max_cash_usage=True,
print_log=True)
self.assertIsInstance(res, pd.DataFrame)
print(f'in test_loop:\nresult of loop test is \n{res}\n'
f'result comparison line by line:')
for i in range(len(res)):
print(np.around(res.values[i]))
print(np.around(self.ps_res_sb20[i]))
print()
self.assertTrue(np.allclose(res, self.ps_res_sb20, 3))
print(f'test assertion errors in apply_loop: detect moqs that are not compatible')
self.assertRaises(AssertionError,
apply_loop,
0,
self.ps_signal_hp,
self.history_list,
self.cash,
self.rate,
0, 1,
0,
False)
self.assertRaises(AssertionError,
apply_loop,
0,
self.ps_signal_hp,
self.history_list,
self.cash,
self.rate,
1, 5,
0,
False)
print(f'test loop results with moq equal to 100')
res = apply_loop(
op_type=1,
op_list=self.ps_signal_hp,
history_list=self.history_list,
cash_plan=self.cash,
cost_rate=self.rate2,
moq_buy=100,
moq_sell=1,
cash_delivery_period=1,
stock_delivery_period=2,
inflation_rate=0,
print_log=True)
self.assertIsInstance(res, pd.DataFrame)
print(f'in test_loop:\nresult of loop test is \n{res}')
def test_loop_vs(self):
""" Test looping of VS Volume Signal type of signals
"""
res = apply_loop(op_type=2,
op_list=self.vs_signal_hp,
history_list=self.history_list,
cash_plan=self.cash,
cost_rate=self.rate,
moq_buy=0,
moq_sell=0,
inflation_rate=0,
print_log=False)
self.assertIsInstance(res, pd.DataFrame)
print(f'in test_loop:\nresult of loop test is \n{res}')
self.assertTrue(np.allclose(res, self.vs_res_bs00, 5))
print(f'test assertion errors in apply_loop: detect moqs that are not compatible')
self.assertRaises(AssertionError,
apply_loop,
0,
self.ps_signal_hp,
self.history_list,
self.cash,
self.rate,
0, 1,
0,
False)
self.assertRaises(AssertionError,
apply_loop,
0,
self.ps_signal_hp,
self.history_list,
self.cash,
self.rate,
1, 5,
0,
False)
print(f'test loop results with moq equal to 100')
res = apply_loop(op_type=2,
op_list=self.vs_signal_hp,
history_list=self.history_list,
cash_plan=self.cash,
cost_rate=self.rate2,
moq_buy=100,
moq_sell=1,
inflation_rate=0,
print_log=False)
self.assertIsInstance(res, pd.DataFrame)
print(f'in test_loop:\nresult of loop test is \n{res}')
def test_loop_vs_with_delay(self):
""" Test looping of PT proportion target signals, with:
stock delivery delay = 2 days
cash delivery delay = 1 day
use_sell_cash = False
"""
print('Test looping of VS volume signals, with:\n'
'stock delivery delay = 2 days \n'
'cash delivery delay = 1 day \n'
'maximize_cash = False (buy and sell at the same time)')
res = apply_loop(
op_type=2,
op_list=self.vs_signal_hp,
history_list=self.history_list,
cash_plan=self.cash,
cost_rate=self.rate,
moq_buy=0,
moq_sell=0,
inflation_rate=0,
cash_delivery_period=1,
stock_delivery_period=2,
print_log=True)
self.assertIsInstance(res, pd.DataFrame)
print(f'in test_loop:\nresult of loop test is \n{res}\n'
f'result comparison line by line:')
for i in range(len(res)):
print(np.around(res.values[i]))
print(np.around(self.vs_res_bs21[i]))
print()
self.assertTrue(np.allclose(res, self.vs_res_bs21, 3))
print(f'test assertion errors in apply_loop: detect moqs that are not compatible')
self.assertRaises(AssertionError,
apply_loop,
0,
self.vs_signal_hp,
self.history_list,
self.cash,
self.rate,
0, 1,
0,
False)
self.assertRaises(AssertionError,
apply_loop,
0,
self.vs_signal_hp,
self.history_list,
self.cash,
self.rate,
1, 5,
0,
False)
print(f'test loop results with moq equal to 100')
res = apply_loop(
op_type=1,
op_list=self.vs_signal_hp,
history_list=self.history_list,
cash_plan=self.cash,
cost_rate=self.rate2,
moq_buy=100,
moq_sell=1,
inflation_rate=0,
print_log=False)
self.assertIsInstance(res, pd.DataFrame)
print(f'in test_loop:\nresult of loop test is \n{res}')
def test_loop_vs_with_delay_use_cash(self):
""" Test looping of PT proportion target signals, with:
stock delivery delay = 2 days
cash delivery delay = 0 day
use sell cash = True (sell stock first to use cash when possible
(not possible when cash delivery period != 0))
"""
print('Test looping of VS volume signals, with:\n'
'stock delivery delay = 2 days \n'
'cash delivery delay = 0 day \n'
'maximize cash usage = True \n'
'(applicable because the cash delivery period is 0)')
res = apply_loop(
op_type=2,
op_list=self.vs_signal_hp,
history_list=self.history_list,
cash_plan=self.cash,
cost_rate=self.rate,
moq_buy=0,
moq_sell=0,
cash_delivery_period=0,
stock_delivery_period=2,
inflation_rate=0,
max_cash_usage=True,
print_log=False)
self.assertIsInstance(res, pd.DataFrame)
print(f'in test_loop:\nresult of loop test is \n{res}\n'
f'result comparison line by line:')
for i in range(len(res)):
print(np.around(res.values[i]))
print(np.around(self.vs_res_sb20[i]))
print()
self.assertTrue(np.allclose(res, self.vs_res_sb20, 3))
print(f'test assertion errors in apply_loop: detect moqs that are not compatible')
self.assertRaises(AssertionError,
apply_loop,
0,
self.vs_signal_hp,
self.history_list,
self.cash,
self.rate,
0, 1,
0,
False)
self.assertRaises(AssertionError,
apply_loop,
0,
self.vs_signal_hp,
self.history_list,
self.cash,
self.rate,
1, 5,
0,
False)
print(f'test loop results with moq equal to 100')
res = apply_loop(
op_type=1,
op_list=self.vs_signal_hp,
history_list=self.history_list,
cash_plan=self.cash,
cost_rate=self.rate2,
moq_buy=100,
moq_sell=1,
cash_delivery_period=1,
stock_delivery_period=2,
inflation_rate=0,
print_log=False)
self.assertIsInstance(res, pd.DataFrame)
print(f'in test_loop:\nresult of loop test is \n{res}')
def test_loop_multiple_signal(self):
""" Test looping of PS Proportion Signal type of signals
"""
res = apply_loop(op_type=1,
op_list=self.multi_signal_hp,
history_list=self.multi_history_list,
cash_plan=self.cash,
cost_rate=self.rate,
moq_buy=0,
moq_sell=0,
cash_delivery_period=0,
stock_delivery_period=2,
max_cash_usage=True,
inflation_rate=0,
print_log=False)
self.assertIsInstance(res, pd.DataFrame)
print(f'in test_loop:\nresult of loop test is \n{res}\n'
f'result comparison line by line:')
for i in range(len(res)):
print(np.around(res.values[i]))
print(np.around(self.multi_res[i]))
print()
self.assertTrue(np.allclose(res, self.multi_res, 5))
print(f'test assertion errors in apply_loop: detect moqs that are not compatible')
self.assertRaises(AssertionError,
apply_loop,
0,
self.ps_signal_hp,
self.history_list,
self.cash,
self.rate,
0, 1,
0,
False)
self.assertRaises(AssertionError,
apply_loop,
0,
self.ps_signal_hp,
self.history_list,
self.cash,
self.rate,
1, 5,
0,
False)
print(f'test loop results with moq equal to 100')
res = apply_loop(op_type=1,
op_list=self.multi_signal_hp,
history_list=self.multi_history_list,
cash_plan=self.cash,
cost_rate=self.rate2,
moq_buy=100,
moq_sell=1,
cash_delivery_period=0,
stock_delivery_period=2,
max_cash_usage=False,
inflation_rate=0,
print_log=True)
self.assertIsInstance(res, pd.DataFrame)
print(f'in test_loop:\nresult of loop test is \n{res}')
class TestStrategy(unittest.TestCase):
""" test all properties and methods of strategy base class"""
def setUp(self) -> None:
pass
class TestLSStrategy(RollingTiming):
"""用于test测试的简单多空蒙板生成策略。基于RollingTiming滚动择时方法生成
该策略有两个参数,N与Price
N用于计算OHLC价格平均值的N日简单移动平均,判断,当移动平均值大于等于Price时,状态为看多,否则为看空
"""
def __init__(self):
super().__init__(stg_name='test_LS',
stg_text='test long/short strategy',
par_count=2,
par_types='discr, conti',
par_bounds_or_enums=([1, 5], [2, 10]),
data_types='close, open, high, low',
data_freq='d',
window_length=5)
pass
def _realize(self, hist_data: np.ndarray, params: tuple):
n, price = params
h = hist_data.T
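# rows of h follow the declared data_types order (close, open, high, low),
# so avg below is the daily OHLC mean price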
avg = (h[0] + h[1] + h[2] + h[3]) / 4
ma = sma(avg, n)
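# long (1) when the latest N-day SMA of the OHLC mean is at or above the price threshold,
# otherwise short (0)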
if ma[-1] < price:
return 0
else:
return 1
class TestSelStrategy(SimpleSelecting):
"""用于Test测试的简单选股策略,基于Selecting策略生成
策略没有参数,选股周期为5D
在每个选股周期内,从股票池的三只股票中选出今日变化率 = (今收-昨收)/平均股价(OHLC平均股价)最高的两支,放入中选池,否则落选。
选股比例为平均分配
"""
def __init__(self):
super().__init__(stg_name='test_SEL',
stg_text='test portfolio selection strategy',
par_count=0,
par_types='',
par_bounds_or_enums=(),
data_types='high, low, close',
data_freq='d',
sample_freq='10d',
window_length=5)
pass
def _realize(self, hist_data: np.ndarray, params: tuple):
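# avg: mean price of each share over the whole window and across all data types
# dif: day-over-day change of the close price (column 2); keep the last non-nan value per share
# difper: change rate relative to the average price; argsort is ascending, so dropping the
# smallest index keeps the two shares with the largest change rate, each weighted 0.5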
avg = np.nanmean(hist_data, axis=(1, 2))
dif = (hist_data[:, :, 2] - np.roll(hist_data[:, :, 2], 1, 1))
dif_no_nan = np.array([arr[~np.isnan(arr)][-1] for arr in dif])
difper = dif_no_nan / avg
large2 = difper.argsort()[1:]
chosen = np.zeros_like(avg)
chosen[large2] = 0.5
return chosen
class TestSelStrategyDiffTime(SimpleSelecting):
"""用于Test测试的简单选股策略,基于Selecting策略生成
策略没有参数,选股周期为5D
在每个选股周期内,从股票池的三只股票中选出今日变化率 = (今收-昨收)/平均股价(OHLC平均股价)最高的两支,放入中选池,否则落选。
选股比例为平均分配
"""
# TODO: This strategy is not working, find out why and improve
def __init__(self):
super().__init__(stg_name='test_SEL',
stg_text='test portfolio selection strategy',
par_count=0,
par_types='',
par_bounds_or_enums=(),
data_types='close, low, open',
data_freq='d',
sample_freq='w',
window_length=2)
pass
def _realize(self, hist_data: np.ndarray, params: tuple):
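# same selection idea as TestSelStrategy, applied to close/low/open data;
# see the TODO at the top of this class - this implementation is known not to work yet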
avg = hist_data.mean(axis=1).squeeze()
difper = (hist_data[:, :, 0] - np.roll(hist_data[:, :, 0], 1))[:, -1] / avg
large2 = difper.argsort()[0:2]
chosen = np.zeros_like(avg)
chosen[large2] = 0.5
return chosen
class TestSigStrategy(SimpleTiming):
"""用于Test测试的简单信号生成策略,基于SimpleTiming策略生成
策略有三个参数,第一个参数为ratio,另外两个参数为price1以及price2
ratio是k线形状比例的阈值,定义为abs((C-O)/(H-L))。当这个比值小于ratio阈值时,判断该K线为十字交叉(其实还有丁字等多种情形,但这里做了
简化处理。
信号生成的规则如下:
1,当某个K线出现十字交叉,且昨收与今收之差大于price1时,买入信号
2,当某个K线出现十字交叉,且昨收与今收之差小于price2时,卖出信号
"""
def __init__(self):
super().__init__(stg_name='test_SIG',
stg_text='test signal creation strategy',
par_count=3,
par_types='conti, conti, conti',
par_bounds_or_enums=([2, 10], [0, 3], [0, 3]),
data_types='close, open, high, low',
window_length=2)
pass
def _realize(self, hist_data: np.ndarray, params: tuple):
r, price1, price2 = params
h = hist_data.T
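# h rows follow data_types order (close, open, high, low);
# ratio = |close - open| / |high - low| is the candle body-to-range ratio, small for doji-like candles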
ratio = np.abs((h[0] - h[1]) / (h[3] - h[2]))
diff = h[0] - np.roll(h[0], 1)
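# diff is the day-over-day change of the close; a doji candle with diff > price1 emits a buy (1),
# a doji candle with diff < price2 emits a sell (-1), anything else is 0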
sig = np.where((ratio < r) & (diff > price1),
1,
np.where((ratio < r) & (diff < price2), -1, 0))
return sig
class MyStg(qt.RollingTiming):
"""自定义双均线择时策略策略"""
def __init__(self):
"""这个均线择时策略只有三个参数:
- SMA 慢速均线,所选择的股票
- FMA 快速均线
- M 边界值
策略的其他说明
"""
"""
List of key strategy parameters that must be initialized:
"""
super().__init__(
pars=(20, 100, 0.01),
par_count=3,
par_types=['discr', 'discr', 'conti'],
par_bounds_or_enums=[(10, 250), (10, 250), (0.0, 0.5)],
stg_name='CUSTOM ROLLING TIMING STRATEGY',
stg_text='Customized Rolling Timing Strategy for Testing',
data_types='close',
window_length=100,
)
print(f'=====================\n====================\n'
f'custom strategy initialized, \npars: {self.pars}\npar_count:{self.par_count}\npar_types:'
f'{self.par_types}\n'
f'{self.info()}')
# The concrete implementation of the strategy is written in its _realize() function
# This function always takes two arguments: hist_price is the historical data of the given portfolio, params holds the concrete strategy parameters
def _realize(self, hist_price, params):
"""策略的具体实现代码:
s:短均线计算日期;l:长均线计算日期;m:均线边界宽度;hesitate:均线跨越类型"""
f, s, m = params
# Temporary workaround: slice the incoming data inside the strategy implementation; the slicing should eventually happen outside the strategy so that the incoming data already matches the data_types parameter
h = hist_price.T
# compute the current values of the fast and slow moving averages
s_ma = qt.sma(h[0], s)[-1]
f_ma = qt.sma(h[0], f)[-1]
# compute the stop band around the slow MA; while the fast MA stays inside this band, positions are closed and no buy/sell signal is emitted
s_ma_u = s_ma * (1 + m)
s_ma_l = s_ma * (1 - m)
# produce long/short/empty flags depending on where the fast MA sits relative to the band
if f_ma > s_ma_u:  # fast MA above the band around the slow MA: hold a long position
return 1
elif s_ma_l < f_ma < s_ma_u:  # fast MA inside the band: close all positions
return 0
else:  # f_ma < s_ma_l, fast MA below the band: hold a short position
return -1
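# A minimal usage sketch for the custom strategy above (illustrative only; it assumes that
# qt.Operator / add_strategy also accept strategy instances, which is not verified here):
#     op = qt.Operator()
#     op.add_strategy(MyStg())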
class TestOperator(unittest.TestCase):
"""全面测试Operator对象的所有功能。包括:
1, Strategy 参数的设置
2, 历史数据的获取与分配提取
3, 策略优化参数的批量设置和优化空间的获取
4, 策略输出值的正确性验证
5, 策略结果的混合结果确认
"""
def setUp(self):
"""prepare data for Operator test"""
print('start testing HistoryPanel object\n')
# build up test data: a 4-type, 3-share, 50-day matrix of prices that contains nan values in some days
# for some share_pool
# for share1:
data_rows = 50
share1_close = [10.04, 10, 10, 9.99, 9.97, 9.99, 10.03, 10.03, 10.06, 10.06, 10.11,
10.09, 10.07, 10.06, 10.09, 10.03, 10.03, 10.06, 10.08, 10, 9.99,
10.03, 10.03, 10.06, 10.03, 9.97, 9.94, 9.83, 9.77, 9.84, 9.91, 9.93,
9.96, 9.91, 9.91, 9.88, 9.91, 9.64, 9.56, 9.57, 9.55, 9.57, 9.61, 9.61,
9.55, 9.57, 9.63, 9.64, 9.65, 9.62]
share1_open = [10.02, 10, 9.98, 9.97, 9.99, 10.01, 10.04, 10.06, 10.06, 10.11,
10.11, 10.07, 10.06, 10.09, 10.03, 10.02, 10.06, 10.08, 9.99, 10,
10.03, 10.02, 10.06, 10.03, 9.97, 9.94, 9.83, 9.78, 9.77, 9.91, 9.92,
9.97, 9.91, 9.9, 9.88, 9.91, 9.63, 9.64, 9.57, 9.55, 9.58, 9.61, 9.62,
9.55, 9.57, 9.61, 9.63, 9.64, 9.61, 9.56]
share1_high = [10.07, 10, 10, 10, 10.03, 10.03, 10.04, 10.09, 10.1, 10.14, 10.11, 10.1,
10.09, 10.09, 10.1, 10.05, 10.07, 10.09, 10.1, 10, 10.04, 10.04, 10.06,
10.09, 10.05, 9.97, 9.96, 9.86, 9.77, 9.92, 9.94, 9.97, 9.97, 9.92, 9.92,
9.92, 9.93, 9.64, 9.58, 9.6, 9.58, 9.62, 9.62, 9.64, 9.59, 9.62, 9.63,
9.7, 9.66, 9.64]
share1_low = [9.99, 10, 9.97, 9.97, 9.97, 9.98, 9.99, 10.03, 10.03, 10.04, 10.11, 10.07,
10.05, 10.03, 10.03, 10.01, 9.99, 10.03, 9.95, 10, 9.95, 10, 10.01, 9.99,
9.96, 9.89, 9.83, 9.77, 9.77, 9.8, 9.9, 9.91, 9.89, 9.89, 9.87, 9.85, 9.6,
9.64, 9.53, 9.55, 9.54, 9.55, 9.58, 9.54, 9.53, 9.53, 9.63, 9.64, 9.59, 9.56]
# for share2:
share2_close = [9.68, 9.87, 9.86, 9.87, 9.79, 9.82, 9.8, 9.66, 9.62, 9.58, 9.69, 9.78, 9.75,
9.96, 9.9, 10.04, 10.06, 10.08, 10.24, 10.24, 10.24, 9.86, 10.13, 10.12,
10.1, 10.25, 10.24, 10.22, 10.75, 10.64, 10.56, 10.6, 10.42, 10.25, 10.24,
10.49, 10.57, 10.63, 10.48, 10.37, 10.96, 11.02, np.nan, np.nan, 10.88, 10.87, 11.01,
11.01, 11.58, 11.8]
share2_open = [9.88, 9.88, 9.89, 9.75, 9.74, 9.8, 9.62, 9.65, 9.58, 9.67, 9.81, 9.8, 10,
9.95, 10.1, 10.06, 10.14, 9.9, 10.2, 10.29, 9.86, 9.48, 10.01, 10.24, 10.26,
10.24, 10.12, 10.65, 10.64, 10.56, 10.42, 10.43, 10.29, 10.3, 10.44, 10.6,
10.67, 10.46, 10.39, 10.9, 11.01, 11.01, np.nan, np.nan, 10.82, 11.02, 10.96,
11.55, 11.74, 11.8]
share2_high = [9.91, 10.04, 9.93, 10.04, 9.84, 9.88, 9.99, 9.7, 9.67, 9.71, 9.85, 9.9, 10,
10.2, 10.11, 10.18, 10.21, 10.26, 10.38, 10.47, 10.42, 10.07, 10.24, 10.27,
10.38, 10.43, 10.39, 10.65, 10.84, 10.65, 10.73, 10.63, 10.51, 10.35, 10.46,
10.63, 10.74, 10.76, 10.54, 11.02, 11.12, 11.17, np.nan, np.nan, 10.92, 11.15,
11.11, 11.55, 11.95, 11.93]
share2_low = [9.63, 9.84, 9.81, 9.74, 9.67, 9.72, 9.57, 9.54, 9.51, 9.47, 9.68, 9.63, 9.75,
9.65, 9.9, 9.93, 10.03, 9.8, 10.14, 10.09, 9.78, 9.21, 9.11, 9.68, 10.05,
10.12, 9.89, 9.89, 10.59, 10.43, 10.34, 10.32, 10.21, 10.2, 10.18, 10.36,
10.51, 10.41, 10.32, 10.37, 10.87, 10.95, np.nan, np.nan, 10.65, 10.71, 10.75,
10.91, 11.31, 11.58]
# for share3:
share3_close = [6.64, 7.26, 7.03, 6.87, np.nan, 6.64, 6.85, 6.7, 6.39, 6.22, 5.92, 5.91, 6.11,
5.91, 6.23, 6.28, 6.28, 6.27, np.nan, 5.56, 5.67, 5.16, 5.69, 6.32, 6.14, 6.25,
5.79, 5.26, 5.05, 5.45, 6.06, 6.21, 5.69, 5.46, 6.02, 6.69, 7.43, 7.72, 8.16,
7.83, 8.7, 8.71, 8.88, 8.54, 8.87, 8.87, 8.18, 7.8, 7.97, 8.25]
share3_open = [7.26, 7, 6.88, 6.91, np.nan, 6.81, 6.63, 6.45, 6.16, 6.24, 5.96, 5.97, 5.96,
6.2, 6.35, 6.11, 6.37, 5.58, np.nan, 5.65, 5.19, 5.42, 6.3, 6.15, 6.05, 5.89,
5.22, 5.2, 5.07, 6.04, 6.12, 5.85, 5.67, 6.02, 6.04, 7.07, 7.64, 7.99, 7.59,
8.73, 8.72, 8.97, 8.58, 8.71, 8.77, 8.4, 7.95, 7.76, 8.25, 7.51]
share3_high = [7.41, 7.31, 7.14, 7, np.nan, 6.82, 6.96, 6.85, 6.5, 6.34, 6.04, 6.02, 6.12, 6.38,
6.43, 6.46, 6.43, 6.27, np.nan, 6.01, 5.67, 5.67, 6.35, 6.32, 6.43, 6.36, 5.79,
5.47, 5.65, 6.04, 6.14, 6.23, 5.83, 6.25, 6.27, 7.12, 7.82, 8.14, 8.27, 8.92,
8.76, 9.15, 8.9, 9.01, 9.16, 9, 8.27, 7.99, 8.33, 8.25]
share3_low = [6.53, 6.87, 6.83, 6.7, np.nan, 6.63, 6.57, 6.41, 6.15, 6.07, 5.89, 5.82, 5.73, 5.81,
6.1, 6.06, 6.16, 5.57, np.nan, 5.51, 5.19, 5.12, 5.69, 6.01, 5.97, 5.86, 5.18, 5.19,
4.96, 5.45, 5.84, 5.85, 5.28, 5.42, 6.02, 6.69, 7.28, 7.64, 7.25, 7.83, 8.41, 8.66,
8.53, 8.54, 8.73, 8.27, 7.95, 7.67, 7.8, 7.51]
# for sel_finance test
shares_eps = np.array([[np.nan, np.nan, np.nan],
[0.1, np.nan, np.nan],
[np.nan, 0.2, np.nan],
[np.nan, np.nan, 0.3],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, 0.2],
[0.1, np.nan, np.nan],
[np.nan, 0.3, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[0.3, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, 0.3, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, 0.3],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, 0, 0.2],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[0.1, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, 0.2],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[0.15, np.nan, np.nan],
[np.nan, 0.1, np.nan],
[np.nan, np.nan, np.nan],
[0.1, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, 0.3],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[0.2, np.nan, np.nan],
[np.nan, 0.5, np.nan],
[0.4, np.nan, 0.3],
[np.nan, np.nan, np.nan],
[np.nan, 0.3, np.nan],
[0.9, np.nan, np.nan],
[np.nan, np.nan, 0.1]])
self.date_indices = ['2016-07-01', '2016-07-04', '2016-07-05', '2016-07-06',
'2016-07-07', '2016-07-08', '2016-07-11', '2016-07-12',
'2016-07-13', '2016-07-14', '2016-07-15', '2016-07-18',
'2016-07-19', '2016-07-20', '2016-07-21', '2016-07-22',
'2016-07-25', '2016-07-26', '2016-07-27', '2016-07-28',
'2016-07-29', '2016-08-01', '2016-08-02', '2016-08-03',
'2016-08-04', '2016-08-05', '2016-08-08', '2016-08-09',
'2016-08-10', '2016-08-11', '2016-08-12', '2016-08-15',
'2016-08-16', '2016-08-17', '2016-08-18', '2016-08-19',
'2016-08-22', '2016-08-23', '2016-08-24', '2016-08-25',
'2016-08-26', '2016-08-29', '2016-08-30', '2016-08-31',
'2016-09-01', '2016-09-02', '2016-09-05', '2016-09-06',
'2016-09-07', '2016-09-08']
self.shares = ['000010', '000030', '000039']
self.types = ['close', 'open', 'high', 'low']
self.sel_finance_tyeps = ['eps']
self.test_data_3D = np.zeros((3, data_rows, 4))
self.test_data_2D = np.zeros((data_rows, 3))
self.test_data_2D2 = np.zeros((data_rows, 4))
self.test_data_sel_finance = np.empty((3, data_rows, 1))
# Build up 3D data
self.test_data_3D[0, :, 0] = share1_close
self.test_data_3D[0, :, 1] = share1_open
self.test_data_3D[0, :, 2] = share1_high
self.test_data_3D[0, :, 3] = share1_low
self.test_data_3D[1, :, 0] = share2_close
self.test_data_3D[1, :, 1] = share2_open
self.test_data_3D[1, :, 2] = share2_high
self.test_data_3D[1, :, 3] = share2_low
self.test_data_3D[2, :, 0] = share3_close
self.test_data_3D[2, :, 1] = share3_open
self.test_data_3D[2, :, 2] = share3_high
self.test_data_3D[2, :, 3] = share3_low
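# the eps table above is laid out as (day, share); transpose it to (share, day) to match
# the (level, row, column) layout of the HistoryPanel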
self.test_data_sel_finance[:, :, 0] = shares_eps.T
self.hp1 = qt.HistoryPanel(values=self.test_data_3D,
levels=self.shares,
columns=self.types,
rows=self.date_indices)
print(f'in test Operator, history panel is created for timing test')
self.hp1.info()
self.hp2 = qt.HistoryPanel(values=self.test_data_sel_finance,
levels=self.shares,
columns=self.sel_finance_tyeps,
rows=self.date_indices)
print(f'in test_Operator, history panel is created for selection finance test:')
self.hp2.info()
self.op = qt.Operator(strategies='dma', signal_type='PS')
self.op2 = qt.Operator(strategies='dma, macd, trix')
def test_init(self):
""" test initialization of Operator class"""
op = qt.Operator()
self.assertIsInstance(op, qt.Operator)
self.assertEqual(op.signal_type, 'pt')
self.assertIsInstance(op.strategies, list)
self.assertEqual(len(op.strategies), 0)
op = qt.Operator('dma')
self.assertIsInstance(op, qt.Operator)
self.assertIsInstance(op.strategies, list)
self.assertIsInstance(op.strategies[0], TimingDMA)
op = qt.Operator('dma, macd')
self.assertIsInstance(op, qt.Operator)
op = qt.Operator(['dma', 'macd'])
self.assertIsInstance(op, qt.Operator)
def test_repr(self):
""" test basic representation of Opeartor class"""
op = qt.Operator()
self.assertEqual(op.__repr__(), 'Operator()')
op = qt.Operator('macd, dma, trix, random, avg_low')
self.assertEqual(op.__repr__(), 'Operator(macd, dma, trix, random, avg_low)')
self.assertEqual(op['dma'].__repr__(), 'Q-TIMING(DMA)')
self.assertEqual(op['macd'].__repr__(), 'R-TIMING(MACD)')
self.assertEqual(op['trix'].__repr__(), 'R-TIMING(TRIX)')
self.assertEqual(op['random'].__repr__(), 'SELECT(RANDOM)')
self.assertEqual(op['avg_low'].__repr__(), 'FACTOR(AVG LOW)')
def test_info(self):
"""Test information output of Operator"""
print(f'test printing information of operator object')
self.op.info()
def test_get_strategy_by_id(self):
""" test get_strategy_by_id()"""
op = qt.Operator()
self.assertIsInstance(op, qt.Operator)
self.assertEqual(op.strategy_count, 0)
self.assertEqual(op.strategy_ids, [])
op = qt.Operator('macd, dma, trix')
self.assertEqual(op.strategy_ids, ['macd', 'dma', 'trix'])
self.assertIs(op.get_strategy_by_id('macd'), op.strategies[0])
self.assertIs(op.get_strategy_by_id(1), op.strategies[1])
self.assertIs(op.get_strategy_by_id('trix'), op.strategies[2])
def test_get_items(self):
""" test method __getitem__(), it should be the same as geting strategies by id"""
op = qt.Operator()
self.assertIsInstance(op, qt.Operator)
self.assertEqual(op.strategy_count, 0)
self.assertEqual(op.strategy_ids, [])
op = qt.Operator('macd, dma, trix')
self.assertEqual(op.strategy_ids, ['macd', 'dma', 'trix'])
self.assertIs(op['macd'], op.strategies[0])
self.assertIs(op['trix'], op.strategies[2])
self.assertIs(op[1], op.strategies[1])
self.assertIs(op[3], op.strategies[2])
def test_get_strategies_by_price_type(self):
""" test get_strategies_by_price_type"""
op = qt.Operator()
self.assertIsInstance(op, qt.Operator)
self.assertEqual(op.strategy_count, 0)
self.assertEqual(op.strategy_ids, [])
op = qt.Operator('macd, dma, trix')
op.set_parameter('macd', price_type='open')
op.set_parameter('dma', price_type='close')
op.set_parameter('trix', price_type='open')
stg_close = op.get_strategies_by_price_type('close')
stg_open = op.get_strategies_by_price_type('open')
stg_high = op.get_strategies_by_price_type('high')
self.assertIsInstance(stg_close, list)
self.assertIsInstance(stg_open, list)
self.assertIsInstance(stg_high, list)
self.assertEqual(stg_close, [op.strategies[1]])
self.assertEqual(stg_open, [op.strategies[0], op.strategies[2]])
self.assertEqual(stg_high, [])
stg_wrong = op.get_strategies_by_price_type(123)
self.assertIsInstance(stg_wrong, list)
self.assertEqual(stg_wrong, [])
def test_get_strategy_count_by_price_type(self):
""" test get_strategy_count_by_price_type"""
op = qt.Operator()
self.assertIsInstance(op, qt.Operator)
self.assertEqual(op.strategy_count, 0)
self.assertEqual(op.strategy_ids, [])
op = qt.Operator('macd, dma, trix')
op.set_parameter('macd', price_type='open')
op.set_parameter('dma', price_type='close')
op.set_parameter('trix', price_type='open')
stg_close = op.get_strategy_count_by_price_type('close')
stg_open = op.get_strategy_count_by_price_type('open')
stg_high = op.get_strategy_count_by_price_type('high')
self.assertIsInstance(stg_close, int)
self.assertIsInstance(stg_open, int)
self.assertIsInstance(stg_high, int)
self.assertEqual(stg_close, 1)
self.assertEqual(stg_open, 2)
self.assertEqual(stg_high, 0)
stg_wrong = op.get_strategy_count_by_price_type(123)
self.assertIsInstance(stg_wrong, int)
self.assertEqual(stg_wrong, 0)
def test_get_strategy_names_by_price_type(self):
""" test get_strategy_names_by_price_type"""
op = qt.Operator()
self.assertIsInstance(op, qt.Operator)
self.assertEqual(op.strategy_count, 0)
self.assertEqual(op.strategy_ids, [])
op = qt.Operator('macd, dma, trix')
op.set_parameter('macd', price_type='open')
op.set_parameter('dma', price_type='close')
op.set_parameter('trix', price_type='open')
stg_close = op.get_strategy_names_by_price_type('close')
stg_open = op.get_strategy_names_by_price_type('open')
stg_high = op.get_strategy_names_by_price_type('high')
self.assertIsInstance(stg_close, list)
self.assertIsInstance(stg_open, list)
self.assertIsInstance(stg_high, list)
self.assertEqual(stg_close, ['DMA'])
self.assertEqual(stg_open, ['MACD', 'TRIX'])
self.assertEqual(stg_high, [])
stg_wrong = op.get_strategy_names_by_price_type(123)
self.assertIsInstance(stg_wrong, list)
self.assertEqual(stg_wrong, [])
def test_get_strategy_id_by_price_type(self):
""" test get_strategy_IDs_by_price_type"""
print('-----Test get strategy IDs by price type------\n')
op = qt.Operator()
self.assertIsInstance(op, qt.Operator)
self.assertEqual(op.strategy_count, 0)
self.assertEqual(op.strategy_ids, [])
op = qt.Operator('macd, dma, trix')
op.set_parameter('macd', price_type='open')
op.set_parameter('dma', price_type='close')
op.set_parameter('trix', price_type='open')
stg_close = op.get_strategy_id_by_price_type('close')
stg_open = op.get_strategy_id_by_price_type('open')
stg_high = op.get_strategy_id_by_price_type('high')
self.assertIsInstance(stg_close, list)
self.assertIsInstance(stg_open, list)
self.assertIsInstance(stg_high, list)
self.assertEqual(stg_close, ['dma'])
self.assertEqual(stg_open, ['macd', 'trix'])
self.assertEqual(stg_high, [])
op.add_strategies('dma, macd')
op.set_parameter('dma_1', price_type='open')
op.set_parameter('macd', price_type='open')
op.set_parameter('macd_1', price_type='high')
op.set_parameter('trix', price_type='close')
print(f'Operator strategy id:\n'
f'{op.strategies} on memory pos:\n'
f'{[id(stg) for stg in op.strategies]}')
stg_close = op.get_strategy_id_by_price_type('close')
stg_open = op.get_strategy_id_by_price_type('open')
stg_high = op.get_strategy_id_by_price_type('high')
stg_all = op.get_strategy_id_by_price_type()
print(f'All IDs of strategies:\n'
f'{stg_all}\n'
f'All price types of strategies:\n'
f'{[stg.price_type for stg in op.strategies]}')
self.assertEqual(stg_close, ['dma', 'trix'])
self.assertEqual(stg_open, ['macd', 'dma_1'])
self.assertEqual(stg_high, ['macd_1'])
stg_wrong = op.get_strategy_id_by_price_type(123)
self.assertIsInstance(stg_wrong, list)
self.assertEqual(stg_wrong, [])
def test_property_strategies(self):
""" test property strategies"""
print(f'created a new simple Operator with only one strategy: DMA')
op = qt.Operator('dma')
strategies = op.strategies
self.assertIsInstance(strategies, list)
op.info()
print(f'created the second simple Operator with three strategies')
self.assertIsInstance(strategies[0], TimingDMA)
op = qt.Operator('dma, macd, cdl')
strategies = op.strategies
op.info()
self.assertIsInstance(strategies, list)
self.assertIsInstance(strategies[0], TimingDMA)
self.assertIsInstance(strategies[1], TimingMACD)
self.assertIsInstance(strategies[2], TimingCDL)
def test_property_strategy_count(self):
""" test Property strategy_count, and the method get_strategy_count_by_price_type()"""
self.assertEqual(self.op.strategy_count, 1)
self.assertEqual(self.op2.strategy_count, 3)
self.assertEqual(self.op.get_strategy_count_by_price_type(), 1)
self.assertEqual(self.op2.get_strategy_count_by_price_type(), 3)
self.assertEqual(self.op.get_strategy_count_by_price_type('close'), 1)
self.assertEqual(self.op.get_strategy_count_by_price_type('high'), 0)
self.assertEqual(self.op2.get_strategy_count_by_price_type('close'), 3)
self.assertEqual(self.op2.get_strategy_count_by_price_type('open'), 0)
def test_property_strategy_names(self):
""" test property strategy_ids"""
op = qt.Operator('dma')
self.assertIsInstance(op.strategy_ids, list)
names = op.strategy_ids[0]
print(f'names are {names}')
self.assertEqual(names, 'dma')
op = qt.Operator('dma, macd, trix, cdl')
self.assertIsInstance(op.strategy_ids, list)
self.assertEqual(op.strategy_ids[0], 'dma')
self.assertEqual(op.strategy_ids[1], 'macd')
self.assertEqual(op.strategy_ids[2], 'trix')
self.assertEqual(op.strategy_ids[3], 'cdl')
op = qt.Operator('dma, macd, trix, dma, dma')
self.assertIsInstance(op.strategy_ids, list)
self.assertEqual(op.strategy_ids[0], 'dma')
self.assertEqual(op.strategy_ids[1], 'macd')
self.assertEqual(op.strategy_ids[2], 'trix')
self.assertEqual(op.strategy_ids[3], 'dma_1')
self.assertEqual(op.strategy_ids[4], 'dma_2')
def test_property_strategy_blenders(self):
""" test property strategy blenders including property setter,
and test the method get_blender()"""
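# Note (inferred from the assertions below): strategy blenders appear to be stored
# as a dict keyed by back-test price type, each value being a reversed RPN token
# list, e.g. the expression '1+2' becomes ['+', '2', '1'].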
print(f'------- Test property strategy blenders ---------')
op = qt.Operator()
self.assertIsInstance(op.strategy_blenders, dict)
self.assertIsInstance(op.signal_type, str)
self.assertEqual(op.strategy_blenders, {})
self.assertEqual(op.signal_type, 'pt')
# test adding blender to empty operator
op.strategy_blenders = '1 + 2'
op.signal_type = 'proportion signal'
self.assertEqual(op.strategy_blenders, {})
self.assertEqual(op.signal_type, 'ps')
op.add_strategy('dma')
op.strategy_blenders = '1+2'
self.assertEqual(op.strategy_blenders, {'close': ['+', '2', '1']})
op.clear_strategies()
self.assertEqual(op.strategy_blenders, {})
op.add_strategies('dma, trix, macd, dma')
op.set_parameter('dma', price_type='open')
op.set_parameter('trix', price_type='high')
op.set_blender('open', '1+2')
blender_open = op.get_blender('open')
blender_close = op.get_blender('close')
blender_high = op.get_blender('high')
self.assertEqual(blender_open, ['+', '2', '1'])
self.assertEqual(blender_close, None)
self.assertEqual(blender_high, None)
op.set_blender('open', '1+2+3')
op.set_blender('abc', '1+2+3')
blender_open = op.get_blender('open')
blender_close = op.get_blender('close')
blender_high = op.get_blender('high')
blender_abc = op.get_blender('abc')
self.assertEqual(op.strategy_blenders, {'open': ['+', '3', '+', '2', '1']})
self.assertEqual(blender_open, ['+', '3', '+', '2', '1'])
self.assertEqual(blender_close, None)
self.assertEqual(blender_high, None)
self.assertEqual(blender_abc, None)
op.set_blender('open', 123)
blender_open = op.get_blender('open')
self.assertEqual(blender_open, [])
op.set_blender(None, '1+1')
blender_open = op.get_blender('open')
blender_close = op.get_blender('close')
blender_high = op.get_blender('high')
self.assertEqual(op.bt_price_types, ['close', 'high', 'open'])
self.assertEqual(op.get_blender(), {'close': ['+', '1', '1'],
'open': ['+', '1', '1'],
'high': ['+', '1', '1']})
self.assertEqual(blender_open, ['+', '1', '1'])
self.assertEqual(blender_close, ['+', '1', '1'])
self.assertEqual(blender_high, ['+', '1', '1'])
op.set_blender(None, ['1+1', '3+4'])
blender_open = op.get_blender('open')
blender_close = op.get_blender('close')
blender_high = op.get_blender('high')
self.assertEqual(blender_open, ['+', '4', '3'])
self.assertEqual(blender_close, ['+', '1', '1'])
self.assertEqual(blender_high, ['+', '4', '3'])
self.assertEqual(op.view_blender('open'), '3+4')
self.assertEqual(op.view_blender('close'), '1+1')
self.assertEqual(op.view_blender('high'), '3+4')
op.strategy_blenders = (['1+2', '2*3', '1+4'])
blender_open = op.get_blender('open')
blender_close = op.get_blender('close')
blender_high = op.get_blender('high')
self.assertEqual(blender_open, ['+', '4', '1'])
self.assertEqual(blender_close, ['+', '2', '1'])
self.assertEqual(blender_high, ['*', '3', '2'])
self.assertEqual(op.view_blender('open'), '1+4')
self.assertEqual(op.view_blender('close'), '1+2')
self.assertEqual(op.view_blender('high'), '2*3')
# test error inputs:
# wrong type of price_type
self.assertRaises(TypeError, op.set_blender, 1, '1+3')
# price_type not found, no change is made
op.set_blender('volume', '1+3')
blender_open = op.get_blender('open')
blender_close = op.get_blender('close')
blender_high = op.get_blender('high')
self.assertEqual(blender_open, ['+', '4', '1'])
self.assertEqual(blender_close, ['+', '2', '1'])
self.assertEqual(blender_high, ['*', '3', '2'])
# price_type not valid, no change is made
op.set_blender('closee', '1+2')
blender_open = op.get_blender('open')
blender_close = op.get_blender('close')
blender_high = op.get_blender('high')
self.assertEqual(blender_open, ['+', '4', '1'])
self.assertEqual(blender_close, ['+', '2', '1'])
self.assertEqual(blender_high, ['*', '3', '2'])
# wrong type of blender, set to empty list
op.set_blender('open', 55)
blender_open = op.get_blender('open')
blender_close = op.get_blender('close')
blender_high = op.get_blender('high')
self.assertEqual(blender_open, [])
self.assertEqual(blender_close, ['+', '2', '1'])
self.assertEqual(blender_high, ['*', '3', '2'])
# wrong type of blender, set to empty list
op.set_blender('close', ['1+2'])
blender_open = op.get_blender('open')
blender_close = op.get_blender('close')
blender_high = op.get_blender('high')
self.assertEqual(blender_open, [])
self.assertEqual(blender_close, [])
self.assertEqual(blender_high, ['*', '3', '2'])
# can't parse blender, set to empty list
op.set_blender('high', 'a+bc')
blender_open = op.get_blender('open')
blender_close = op.get_blender('close')
blender_high = op.get_blender('high')
self.assertEqual(blender_open, [])
self.assertEqual(blender_close, [])
self.assertEqual(blender_high, [])
def test_property_singal_type(self):
""" test property signal_type"""
op = qt.Operator()
self.assertIsInstance(op.signal_type, str)
self.assertEqual(op.signal_type, 'pt')
op = qt.Operator(signal_type='ps')
self.assertIsInstance(op.signal_type, str)
self.assertEqual(op.signal_type, 'ps')
op = qt.Operator(signal_type='PS')
self.assertEqual(op.signal_type, 'ps')
op = qt.Operator(signal_type='proportion signal')
self.assertEqual(op.signal_type, 'ps')
print(f'"pt" will be the default type if wrong value is given')
op = qt.Operator(signal_type='wrong value')
self.assertEqual(op.signal_type, 'pt')
print(f'test signal_type.setter')
op.signal_type = 'ps'
self.assertEqual(op.signal_type, 'ps')
print(f'test error raising')
self.assertRaises(TypeError, setattr, op, 'signal_type', 123)
self.assertRaises(ValueError, setattr, op, 'signal_type', 'wrong value')
def test_property_op_data_types(self):
""" test property op_data_types"""
op = qt.Operator()
self.assertIsInstance(op.op_data_types, list)
self.assertEqual(op.op_data_types, [])
op = qt.Operator('macd, dma, trix')
dt = op.op_data_types
self.assertEqual(dt[0], 'close')
op = qt.Operator('macd, cdl')
dt = op.op_data_types
self.assertEqual(dt[0], 'close')
self.assertEqual(dt[1], 'high')
self.assertEqual(dt[2], 'low')
self.assertEqual(dt[3], 'open')
self.assertEqual(dt, ['close', 'high', 'low', 'open'])
op.add_strategy('dma')
dt = op.op_data_types
self.assertEqual(dt[0], 'close')
self.assertEqual(dt[1], 'high')
self.assertEqual(dt[2], 'low')
self.assertEqual(dt[3], 'open')
self.assertEqual(dt, ['close', 'high', 'low', 'open'])
def test_property_op_data_type_count(self):
""" test property op_data_type_count"""
op = qt.Operator()
self.assertIsInstance(op.op_data_type_count, int)
self.assertEqual(op.op_data_type_count, 0)
op = qt.Operator('macd, dma, trix')
dtn = op.op_data_type_count
self.assertEqual(dtn, 1)
op = qt.Operator('macd, cdl')
dtn = op.op_data_type_count
self.assertEqual(dtn, 4)
op.add_strategy('dma')
dtn = op.op_data_type_count
self.assertEqual(dtn, 4)
def test_property_op_data_freq(self):
""" test property op_data_freq"""
op = qt.Operator()
self.assertIsInstance(op.op_data_freq, str)
self.assertEqual(len(op.op_data_freq), 0)
self.assertEqual(op.op_data_freq, '')
op = qt.Operator('macd, dma, trix')
dtf = op.op_data_freq
self.assertIsInstance(dtf, str)
self.assertEqual(dtf[0], 'd')
op.set_parameter('macd', data_freq='m')
dtf = op.op_data_freq
self.assertIsInstance(dtf, list)
self.assertEqual(len(dtf), 2)
self.assertEqual(dtf[0], 'd')
self.assertEqual(dtf[1], 'm')
def test_property_bt_price_types(self):
""" test property bt_price_types"""
print('------test property bt_price_types-------')
op = qt.Operator()
self.assertIsInstance(op.bt_price_types, list)
self.assertEqual(len(op.bt_price_types), 0)
self.assertEqual(op.bt_price_types, [])
op = qt.Operator('macd, dma, trix')
btp = op.bt_price_types
self.assertIsInstance(btp, list)
self.assertEqual(btp[0], 'close')
op.set_parameter('macd', price_type='open')
btp = op.bt_price_types
btpc = op.bt_price_type_count
print(f'price_types are \n{btp}')
self.assertIsInstance(btp, list)
self.assertEqual(len(btp), 2)
self.assertEqual(btp[0], 'close')
self.assertEqual(btp[1], 'open')
self.assertEqual(btpc, 2)
op.add_strategies(['dma', 'macd'])
op.set_parameter('dma_1', price_type='high')
btp = op.bt_price_types
btpc = op.bt_price_type_count
self.assertEqual(btp[0], 'close')
self.assertEqual(btp[1], 'high')
self.assertEqual(btp[2], 'open')
self.assertEqual(btpc, 3)
op.remove_strategy('dma_1')
btp = op.bt_price_types
btpc = op.bt_price_type_count
self.assertEqual(btp[0], 'close')
self.assertEqual(btp[1], 'open')
self.assertEqual(btpc, 2)
op.remove_strategy('macd_1')
btp = op.bt_price_types
btpc = op.bt_price_type_count
self.assertEqual(btp[0], 'close')
self.assertEqual(btp[1], 'open')
self.assertEqual(btpc, 2)
def test_property_op_data_type_list(self):
""" test property op_data_type_list"""
op = qt.Operator()
self.assertIsInstance(op.op_data_type_list, list)
self.assertEqual(len(op.op_data_type_list), 0)
self.assertEqual(op.op_data_type_list, [])
op = qt.Operator('macd, dma, trix, cdl')
ohd = op.op_data_type_list
print(f'ohd is {ohd}')
self.assertIsInstance(ohd, list)
self.assertEqual(ohd[0], ['close'])
op.set_parameter('macd', data_types='open, close')
ohd = op.op_data_type_list
print(f'ohd is {ohd}')
self.assertIsInstance(ohd, list)
self.assertEqual(len(ohd), 4)
self.assertEqual(ohd[0], ['open', 'close'])
self.assertEqual(ohd[1], ['close'])
self.assertEqual(ohd[2], ['close'])
self.assertEqual(ohd[3], ['open', 'high', 'low', 'close'])
def test_property_op_history_data(self):
""" Test this important function to get operation history data that shall be used in
signal generation
these data are stored in list of nd-arrays, each ndarray represents the data
that is needed for each and every strategy
"""
print(f'------- Test getting operation history data ---------')
op = qt.Operator()
self.assertIsInstance(op.strategy_blenders, dict)
self.assertIsInstance(op.signal_type, str)
self.assertEqual(op.strategy_blenders, {})
self.assertEqual(op.op_history_data, {})
self.assertEqual(op.signal_type, 'pt')
def test_property_opt_space_par(self):
""" test property opt_space_par"""
print(f'-----test property opt_space_par--------:\n')
op = qt.Operator()
self.assertIsInstance(op.opt_space_par, tuple)
self.assertIsInstance(op.opt_space_par[0], list)
self.assertIsInstance(op.opt_space_par[1], list)
self.assertEqual(len(op.opt_space_par), 2)
self.assertEqual(op.opt_space_par, ([], []))
op = qt.Operator('macd, dma, trix, cdl')
osp = op.opt_space_par
print(f'before setting opt_tags opt_space_par is empty:\n'
f'osp is {osp}\n')
self.assertIsInstance(osp, tuple)
self.assertEqual(osp[0], [])
self.assertEqual(osp[1], [])
op.set_parameter('macd', opt_tag=1)
op.set_parameter('dma', opt_tag=1)
osp = op.opt_space_par
print(f'after setting opt_tags opt_space_par is not empty:\n'
f'osp is {osp}\n')
self.assertIsInstance(osp, tuple)
self.assertEqual(len(osp), 2)
self.assertIsInstance(osp[0], list)
self.assertIsInstance(osp[1], list)
self.assertEqual(len(osp[0]), 6)
self.assertEqual(len(osp[1]), 6)
self.assertEqual(osp[0], [(10, 250), (10, 250), (10, 250), (10, 250), (10, 250), (10, 250)])
self.assertEqual(osp[1], ['discr', 'discr', 'discr', 'discr', 'discr', 'discr'])
def test_property_opt_types(self):
""" test property opt_tags"""
print(f'-----test property opt_tags--------:\n')
op = qt.Operator()
self.assertIsInstance(op.opt_tags, list)
self.assertEqual(len(op.opt_tags), 0)
self.assertEqual(op.opt_tags, [])
op = qt.Operator('macd, dma, trix, cdl')
otp = op.opt_tags
print(f'before setting opt_tags opt_space_par is empty:\n'
f'otp is {otp}\n')
self.assertIsInstance(otp, list)
self.assertEqual(otp, [0, 0, 0, 0])
op.set_parameter('macd', opt_tag=1)
op.set_parameter('dma', opt_tag=1)
otp = op.opt_tags
print(f'after setting opt_tags opt_space_par is not empty:\n'
f'otp is {otp}\n')
self.assertIsInstance(otp, list)
self.assertEqual(len(otp), 4)
self.assertEqual(otp, [1, 1, 0, 0])
def test_property_max_window_length(self):
""" test property max_window_length"""
print(f'-----test property max window length--------:\n')
op = qt.Operator()
self.assertIsInstance(op.max_window_length, int)
self.assertEqual(op.max_window_length, 0)
op = qt.Operator('macd, dma, trix, cdl')
mwl = op.max_window_length
print(f'before setting window_length the value is 270:\n'
f'mwl is {mwl}\n')
self.assertIsInstance(mwl, int)
self.assertEqual(mwl, 270)
op.set_parameter('macd', window_length=300)
op.set_parameter('dma', window_length=350)
mwl = op.max_window_length
print(f'after setting window_length the value is new set value:\n'
f'mwl is {mwl}\n')
self.assertIsInstance(mwl, int)
self.assertEqual(mwl, 350)
def test_property_bt_price_type_count(self):
""" test property bt_price_type_count"""
print(f'-----test property bt_price_type_count--------:\n')
op = qt.Operator()
self.assertIsInstance(op.bt_price_type_count, int)
self.assertEqual(op.bt_price_type_count, 0)
op = qt.Operator('macd, dma, trix, cdl')
otp = op.bt_price_type_count
print(f'before setting price_type the price count is 1:\n'
f'otp is {otp}\n')
self.assertIsInstance(otp, int)
self.assertEqual(otp, 1)
op.set_parameter('macd', price_type='open')
op.set_parameter('dma', price_type='open')
otp = op.bt_price_type_count
print(f'after setting price_type the price type count is 2:\n'
f'otp is {otp}\n')
self.assertIsInstance(otp, int)
self.assertEqual(otp, 2)
def test_property_set(self):
""" test all property setters:
setting following properties:
- strategy_blenders
- signal_type
other properties can not be set"""
print(f'------- Test setting properties ---------')
op = qt.Operator()
self.assertIsInstance(op.strategy_blenders, dict)
self.assertIsInstance(op.signal_type, str)
self.assertEqual(op.strategy_blenders, {})
self.assertEqual(op.signal_type, 'pt')
op.strategy_blenders = '1 + 2'
op.signal_type = 'proportion signal'
self.assertEqual(op.strategy_blenders, {})
self.assertEqual(op.signal_type, 'ps')
op = qt.Operator('macd, dma, trix, cdl')
# TODO: modify set_parameter() so that the usage below becomes valid
# a_to_sell.set_parameter('dma, cdl', price_type='open')
op.set_parameter('dma', price_type='open')
op.set_parameter('cdl', price_type='open')
sb = op.strategy_blenders
st = op.signal_type
self.assertIsInstance(sb, dict)
print(f'before setting: strategy_blenders={sb}')
self.assertEqual(sb, {})
op.strategy_blenders = '1+2 * 3'
sb = op.strategy_blenders
print(f'after setting strategy_blender={sb}')
self.assertEqual(sb, {'close': ['+', '*', '3', '2', '1'],
'open': ['+', '*', '3', '2', '1']})
op.strategy_blenders = ['1+2', '3-4']
sb = op.strategy_blenders
print(f'after setting strategy_blender={sb}')
self.assertEqual(sb, {'close': ['+', '2', '1'],
'open': ['-', '4', '3']})
def test_operator_ready(self):
"""test the method ready of Operator"""
op = qt.Operator()
print(f'operator is ready? "{op.ready}"')
def test_operator_add_strategy(self):
"""test adding strategies to Operator"""
op = qt.Operator('dma, all, urgent')
self.assertIsInstance(op, qt.Operator)
self.assertIsInstance(op.strategies[0], qt.TimingDMA)
self.assertIsInstance(op.strategies[1], qt.SelectingAll)
self.assertIsInstance(op.strategies[2], qt.RiconUrgent)
self.assertIsInstance(op[0], qt.TimingDMA)
self.assertIsInstance(op[1], qt.SelectingAll)
self.assertIsInstance(op[2], qt.RiconUrgent)
self.assertIsInstance(op['dma'], qt.TimingDMA)
self.assertIsInstance(op['all'], qt.SelectingAll)
self.assertIsInstance(op['urgent'], qt.RiconUrgent)
self.assertEqual(op.strategy_count, 3)
print(f'test adding strategies into existing op')
print('test adding strategy by string')
op.add_strategy('macd')
self.assertIsInstance(op.strategies[0], qt.TimingDMA)
self.assertIsInstance(op.strategies[3], qt.TimingMACD)
self.assertEqual(op.strategy_count, 4)
op.add_strategy('random')
self.assertIsInstance(op.strategies[0], qt.TimingDMA)
self.assertIsInstance(op.strategies[4], qt.SelectingRandom)
self.assertEqual(op.strategy_count, 5)
test_ls = TestLSStrategy()
op.add_strategy(test_ls)
self.assertIsInstance(op.strategies[0], qt.TimingDMA)
self.assertIsInstance(op.strategies[5], TestLSStrategy)
self.assertEqual(op.strategy_count, 6)
print(f'Test different instance of objects are added to operator')
op.add_strategy('dma')
self.assertIsInstance(op.strategies[0], qt.TimingDMA)
self.assertIsInstance(op.strategies[6], qt.TimingDMA)
self.assertIsNot(op.strategies[0], op.strategies[6])
def test_operator_add_strategies(self):
""" etst adding multiple strategies to Operator"""
op = qt.Operator('dma, all, urgent')
self.assertEqual(op.strategy_count, 3)
print('test adding multiple strategies -- adding strategy by list of strings')
op.add_strategies(['dma', 'macd'])
self.assertEqual(op.strategy_count, 5)
self.assertIsInstance(op.strategies[0], qt.TimingDMA)
self.assertIsInstance(op.strategies[3], qt.TimingDMA)
self.assertIsInstance(op.strategies[4], qt.TimingMACD)
print('test adding multiple strategies -- adding strategy by comma separated strings')
op.add_strategies('dma, macd')
self.assertEqual(op.strategy_count, 7)
self.assertIsInstance(op.strategies[0], qt.TimingDMA)
self.assertIsInstance(op.strategies[5], qt.TimingDMA)
self.assertIsInstance(op.strategies[6], qt.TimingMACD)
print('test adding multiple strategies -- adding strategy by list of strategies')
op.add_strategies([qt.TimingDMA(), qt.TimingMACD()])
self.assertEqual(op.strategy_count, 9)
self.assertIsInstance(op.strategies[0], qt.TimingDMA)
self.assertIsInstance(op.strategies[7], qt.TimingDMA)
self.assertIsInstance(op.strategies[8], qt.TimingMACD)
print('test adding multiple strategies -- adding strategy by list of strategy and str')
op.add_strategies(['DMA', qt.TimingMACD()])
self.assertEqual(op.strategy_count, 11)
self.assertIsInstance(op.strategies[0], qt.TimingDMA)
self.assertIsInstance(op.strategies[9], qt.TimingDMA)
self.assertIsInstance(op.strategies[10], qt.TimingMACD)
self.assertIsNot(op.strategies[0], op.strategies[9])
self.assertIs(type(op.strategies[0]), type(op.strategies[9]))
print('test adding fault data')
self.assertRaises(AssertionError, op.add_strategies, 123)
self.assertRaises(AssertionError, op.add_strategies, None)
def test_opeartor_remove_strategy(self):
""" test method remove strategy"""
op = qt.Operator('dma, all, urgent')
op.add_strategies(['dma', 'macd'])
op.add_strategies(['DMA', TestLSStrategy()])
self.assertEqual(op.strategy_count, 7)
print('test removing strategies from Operator')
op.remove_strategy('dma')
self.assertEqual(op.strategy_count, 6)
self.assertEqual(op.strategy_ids, ['all', 'urgent', 'dma_1', 'macd', 'dma_2', 'custom'])
self.assertEqual(op.strategies[0], op['all'])
self.assertEqual(op.strategies[1], op['urgent'])
self.assertEqual(op.strategies[2], op['dma_1'])
self.assertEqual(op.strategies[3], op['macd'])
self.assertEqual(op.strategies[4], op['dma_2'])
self.assertEqual(op.strategies[5], op['custom'])
op.remove_strategy('dma_1')
self.assertEqual(op.strategy_count, 5)
self.assertEqual(op.strategy_ids, ['all', 'urgent', 'macd', 'dma_2', 'custom'])
self.assertEqual(op.strategies[0], op['all'])
self.assertEqual(op.strategies[1], op['urgent'])
self.assertEqual(op.strategies[2], op['macd'])
self.assertEqual(op.strategies[3], op['dma_2'])
self.assertEqual(op.strategies[4], op['custom'])
def test_opeartor_clear_strategies(self):
""" test operator clear strategies"""
op = qt.Operator('dma, all, urgent')
op.add_strategies(['dma', 'macd'])
op.add_strategies(['DMA', TestLSStrategy()])
self.assertEqual(op.strategy_count, 7)
print('test removing strategies from Operator')
op.clear_strategies()
self.assertEqual(op.strategy_count, 0)
self.assertEqual(op.strategy_ids, [])
op.add_strategy('dma', pars=(12, 123, 25))
self.assertEqual(op.strategy_count, 1)
self.assertEqual(op.strategy_ids, ['dma'])
self.assertEqual(type(op.strategies[0]), TimingDMA)
self.assertEqual(op.strategies[0].pars, (12, 123, 25))
op.clear_strategies()
self.assertEqual(op.strategy_count, 0)
self.assertEqual(op.strategy_ids, [])
def test_operator_prepare_data(self):
"""test processes that related to prepare data"""
test_ls = TestLSStrategy()
test_sel = TestSelStrategy()
test_sig = TestSigStrategy()
self.op = qt.Operator(strategies=[test_ls, test_sel, test_sig])
too_early_cash = qt.CashPlan(dates='2016-01-01', amounts=10000)
early_cash = qt.CashPlan(dates='2016-07-01', amounts=10000)
on_spot_cash = qt.CashPlan(dates='2016-07-08', amounts=10000)
no_trade_cash = qt.CashPlan(dates='2016-07-08, 2016-07-30, 2016-08-11, 2016-09-03',
amounts=[10000, 10000, 10000, 10000])
# calling prepare_data before the parameters of all strategies are set raises an AssertionError
self.assertRaises(AssertionError,
self.op.prepare_data,
hist_data=self.hp1,
cash_plan=qt.CashPlan(dates='2016-07-08', amounts=10000))
late_cash = qt.CashPlan(dates='2016-12-31', amounts=10000)
multi_cash = qt.CashPlan(dates='2016-07-08, 2016-08-08', amounts=[10000, 10000])
self.op.set_parameter(stg_id='custom',
pars={'000300': (5, 10.),
'000400': (5, 10.),
'000500': (5, 6.)})
self.assertEqual(self.op.strategies[0].pars, {'000300': (5, 10.),
'000400': (5, 10.),
'000500': (5, 6.)})
self.op.set_parameter(stg_id='custom_1',
pars=())
self.assertEqual(self.op.strategies[1].pars, ()),
self.op.set_parameter(stg_id='custom_2',
pars=(0.2, 0.02, -0.02))
self.assertEqual(self.op.strategies[2].pars, (0.2, 0.02, -0.02)),
self.op.prepare_data(hist_data=self.hp1,
cash_plan=on_spot_cash)
self.assertIsInstance(self.op._op_history_data, dict)
self.assertEqual(len(self.op._op_history_data), 3)
# test if automatic strategy blenders are set
self.assertEqual(self.op.strategy_blenders,
{'close': ['+', '2', '+', '1', '0']})
tim_hist_data = self.op._op_history_data['custom']
sel_hist_data = self.op._op_history_data['custom_1']
ric_hist_data = self.op._op_history_data['custom_2']
print(f'in test_prepare_data in TestOperator:')
print('selecting history data:\n', sel_hist_data)
print('originally passed data in correct sequence:\n', self.test_data_3D[:, 3:, [2, 3, 0]])
print('difference is \n', sel_hist_data - self.test_data_3D[:, :, [2, 3, 0]])
self.assertTrue(np.allclose(sel_hist_data, self.test_data_3D[:, :, [2, 3, 0]], equal_nan=True))
self.assertTrue(np.allclose(tim_hist_data, self.test_data_3D, equal_nan=True))
self.assertTrue(np.allclose(ric_hist_data, self.test_data_3D[:, 3:, :], equal_nan=True))
# raises Value Error if empty history panel is given
empty_hp = qt.HistoryPanel()
correct_hp = qt.HistoryPanel(values=np.random.randint(10, size=(3, 50, 4)),
columns=self.types,
levels=self.shares,
rows=self.date_indices)
too_many_shares = qt.HistoryPanel(values=np.random.randint(10, size=(5, 50, 4)))
too_many_types = qt.HistoryPanel(values=np.random.randint(10, size=(3, 50, 5)))
# raises Error when history panel is empty
self.assertRaises(ValueError,
self.op.prepare_data,
empty_hp,
on_spot_cash)
# raises Error when first investment date is too early
self.assertRaises(AssertionError,
self.op.prepare_data,
correct_hp,
early_cash)
# raises Error when last investment date is too late
self.assertRaises(AssertionError,
self.op.prepare_data,
correct_hp,
late_cash)
# raises Error when some of the investment dates are on no-trade-days
self.assertRaises(ValueError,
self.op.prepare_data,
correct_hp,
no_trade_cash)
# raises Error when number of shares in history data does not fit
self.assertRaises(AssertionError,
self.op.prepare_data,
too_many_shares,
on_spot_cash)
# raises Error when too early cash investment date
self.assertRaises(AssertionError,
self.op.prepare_data,
correct_hp,
too_early_cash)
# raises Error when number of d_types in history data does not fit
self.assertRaises(AssertionError,
self.op.prepare_data,
too_many_types,
on_spot_cash)
# test the effect of data type sequence in strategy definition
def test_operator_generate(self):
""" Test signal generation process of operator objects
:return:
"""
# create three trading strategies using the custom strategy classes defined in this test module
test_ls = TestLSStrategy()
test_sel = TestSelStrategy()
test_sel2 = TestSelStrategyDiffTime()
test_sig = TestSigStrategy()
print('--Test PT type signal generation--')
# test PT-type signal generation:
# create an Operator object whose signal type is PT (proportion target signal)
# the Operator contains two strategies, an LS-Strategy and a Sel-Strategy,
# representing a timing strategy and a stock-selection strategy respectively
# each strategy generates its own PT signal, and the two signals are blended into one output
self.op = qt.Operator(strategies=[test_ls, test_sel])
self.op.set_parameter(stg_id='custom',
pars={'000010': (5, 10.),
'000030': (5, 10.),
'000039': (5, 6.)})
self.op.set_parameter(stg_id=1,
pars=())
# self.a_to_sell.set_blender(blender='0+1+2')
self.op.prepare_data(hist_data=self.hp1,
cash_plan=qt.CashPlan(dates='2016-07-08', amounts=10000))
print('--test operator information in normal mode--')
self.op.info()
self.assertEqual(self.op.strategy_blenders,
{'close': ['+', '1', '0']})
self.op.set_blender(None, '0*1')
self.assertEqual(self.op.strategy_blenders,
{'close': ['*', '1', '0']})
print('--test operation signal created in Proportional Target (PT) Mode--')
op_list = self.op.create_signal(hist_data=self.hp1)
self.assertTrue(isinstance(op_list, HistoryPanel))
backtest_price_types = op_list.htypes
self.assertEqual(backtest_price_types[0], 'close')
self.assertEqual(op_list.shape, (3, 45, 1))
reduced_op_list = op_list.values.squeeze().T
print(f'op_list created, it is a 3 share/45 days/1 htype array, to make comparison happen, \n'
f'it will be squeezed to a 2-d array to compare on share-wise:\n'
f'{reduced_op_list}')
target_op_values = np.array([[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
[0.5, 0.0, 0.0],
[0.5, 0.0, 0.0],
[0.5, 0.0, 0.0],
[0.5, 0.0, 0.0],
[0.5, 0.0, 0.0],
[0.5, 0.0, 0.0],
[0.5, 0.0, 0.0],
[0.5, 0.0, 0.0],
[0.5, 0.0, 0.0],
[0.5, 0.0, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.0, 0.0],
[0.5, 0.0, 0.0],
[0.5, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0]])
self.assertTrue(np.allclose(target_op_values, reduced_op_list, equal_nan=True))
print('--Test two separate signal generation for different price types--')
# test two groups of PT-type signal generation:
# add two more strategies to the Operator; they are of the same types as the existing
# ones but use different parameters, and their back-test price type is set to "open"
# the Operator should generate two groups of trading signals, one for each of the
# two price types "close" and "open"
# two new strategy objects must be created here, otherwise duplicate object references
# would appear in the operator's strategies list and cause errors
test_ls = TestLSStrategy()
test_sel = TestSelStrategy()
self.op.add_strategies([test_ls, test_sel])
self.op.set_parameter(stg_id='custom_2',
price_type='open')
self.op.set_parameter(stg_id='custom_3',
price_type='open')
self.assertEqual(self.op['custom'].price_type, 'close')
self.assertEqual(self.op['custom_2'].price_type, 'open')
self.op.set_parameter(stg_id='custom_2',
pars={'000010': (5, 10.),
'000030': (5, 10.),
'000039': (5, 6.)})
self.op.set_parameter(stg_id='custom_3',
pars=())
self.op.set_blender(blender='0 or 1', price_type='open')
self.op.prepare_data(hist_data=self.hp1,
cash_plan=qt.CashPlan(dates='2016-07-08', amounts=10000))
print('--test how operator information is printed out--')
self.op.info()
self.assertEqual(self.op.strategy_blenders,
{'close': ['*', '1', '0'],
'open': ['or', '1', '0']})
print('--test operation signal created in Proportional Target (PT) Mode--')
op_list = self.op.create_signal(hist_data=self.hp1)
self.assertTrue(isinstance(op_list, HistoryPanel))
signal_close = op_list['close'].squeeze().T
signal_open = op_list['open'].squeeze().T
self.assertEqual(signal_close.shape, (45, 3))
self.assertEqual(signal_open.shape, (45, 3))
target_op_close = np.array([[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
[0.5, 0.0, 0.0],
[0.5, 0.0, 0.0],
[0.5, 0.0, 0.0],
[0.5, 0.0, 0.0],
[0.5, 0.0, 0.0],
[0.5, 0.0, 0.0],
[0.5, 0.0, 0.0],
[0.5, 0.0, 0.0],
[0.5, 0.0, 0.0],
[0.5, 0.0, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.0, 0.0],
[0.5, 0.0, 0.0],
[0.5, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0]])
target_op_open = np.array([[0.5, 0.5, 1.0],
[0.5, 0.5, 1.0],
[1.0, 0.5, 1.0],
[1.0, 0.5, 1.0],
[1.0, 0.5, 1.0],
[1.0, 0.5, 1.0],
[1.0, 0.5, 1.0],
[1.0, 0.5, 1.0],
[1.0, 0.5, 1.0],
[1.0, 0.5, 1.0],
[1.0, 0.5, 1.0],
[1.0, 0.5, 1.0],
[1.0, 1.0, 1.0],
[1.0, 1.0, 1.0],
[1.0, 1.0, 1.0],
[1.0, 1.0, 0.0],
[1.0, 1.0, 0.0],
[1.0, 1.0, 0.0],
[1.0, 0.5, 0.0],
[1.0, 0.5, 0.0],
[1.0, 1.0, 0.0],
[0.0, 1.0, 0.5],
[0.0, 1.0, 0.5],
[0.0, 1.0, 0.5],
[0.0, 1.0, 0.5],
[0.0, 1.0, 0.5],
[0.0, 1.0, 0.5],
[0.0, 1.0, 0.5],
[0.5, 1.0, 0.0],
[0.5, 1.0, 0.0],
[0.5, 1.0, 1.0],
[0.5, 1.0, 1.0],
[0.5, 1.0, 1.0],
[0.5, 1.0, 1.0],
[0.5, 1.0, 1.0],
[0.5, 1.0, 1.0],
[0.0, 1.0, 1.0],
[0.0, 1.0, 1.0],
[0.0, 1.0, 1.0],
[0.0, 1.0, 1.0],
[0.0, 1.0, 1.0],
[0.0, 1.0, 1.0],
[0.5, 1.0, 1.0],
[0.5, 1.0, 1.0],
[0.5, 1.0, 1.0]])
signal_pairs = [[list(sig1), list(sig2), sig1 == sig2]
for sig1, sig2
in zip(list(target_op_close), list(signal_close))]
print(f'signals side by side:\n'
f'{signal_pairs}')
self.assertTrue(np.allclose(target_op_close, signal_close, equal_nan=True))
signal_pairs = [[list(sig1), list(sig2), sig1 == sig2]
for sig1, sig2
in zip(list(target_op_open), list(signal_open))]
print(f'signals side by side:\n'
f'{signal_pairs}')
self.assertTrue(np.allclose(target_op_open, signal_open, equal_nan=True))
print('--Test two separate signal generation for different price types--')
# more test cases to be added
def test_stg_parameter_setting(self):
""" test setting parameters of strategies
test the method set_parameters
:return:
"""
op = qt.Operator(strategies='dma, all, urgent')
print(op.strategies, '\n', [qt.TimingDMA, qt.SelectingAll, qt.RiconUrgent])
print(f'info of Timing strategy in new op: \n{op.strategies[0].info()}')
# TODO: allow set_parameters to a list of strategies or str-listed strategies
# TODO: allow set_parameters to all strategies of specific bt price type
print(f'Set up strategy parameters by strategy id')
op.set_parameter('dma',
pars=(5, 10, 5),
opt_tag=1,
par_boes=((5, 10), (5, 15), (10, 15)),
window_length=10,
data_types=['close', 'open', 'high'])
op.set_parameter('all',
window_length=20)
op.set_parameter('all', price_type='high')
print(f'Can also set up strategy parameters by strategy index')
op.set_parameter(2, price_type='open')
op.set_parameter(2,
opt_tag=1,
pars=(9, -0.09),
window_length=10)
self.assertEqual(op.strategies[0].pars, (5, 10, 5))
self.assertEqual(op.strategies[0].par_boes, ((5, 10), (5, 15), (10, 15)))
self.assertEqual(op.strategies[2].pars, (9, -0.09))
self.assertEqual(op.op_data_freq, 'd')
self.assertEqual(op.op_data_types, ['close', 'high', 'open'])
self.assertEqual(op.opt_space_par,
([(5, 10), (5, 15), (10, 15), (1, 40), (-0.5, 0.5)],
['discr', 'discr', 'discr', 'discr', 'conti']))
self.assertEqual(op.max_window_length, 20)
print(f'KeyError will be raised if wrong strategy id is given')
self.assertRaises(KeyError, op.set_parameter, stg_id='t-1', pars=(1, 2))
self.assertRaises(KeyError, op.set_parameter, stg_id='wrong_input', pars=(1, 2))
print(f'ValueError will be raised if an invalid parameter is given')
self.assertRaises(ValueError, op.set_parameter, stg_id=0, pars=('wrong input', 'wrong input'))
# test blenders of different price types
# test setting blenders to different price types
# TODO: to allow operands like "and", "or", "not", "xor"
# a_to_sell.set_blender('close', '0 and 1 or 2')
# self.assertEqual(a_to_sell.get_blender('close'), 'str-1.2')
self.assertEqual(op.bt_price_types, ['close', 'high', 'open'])
op.set_blender('open', '0 & 1 | 2')
self.assertEqual(op.get_blender('open'), ['|', '2', '&', '1', '0'])
op.set_blender('high', '(0|1) & 2')
self.assertEqual(op.get_blender('high'), ['&', '2', '|', '1', '0'])
op.set_blender('close', '0 & 1 | 2')
self.assertEqual(op.get_blender(), {'close': ['|', '2', '&', '1', '0'],
'high': ['&', '2', '|', '1', '0'],
'open': ['|', '2', '&', '1', '0']})
self.assertEqual(op.opt_space_par,
([(5, 10), (5, 15), (10, 15), (1, 40), (-0.5, 0.5)],
['discr', 'discr', 'discr', 'discr', 'conti']))
self.assertEqual(op.opt_tags, [1, 0, 1])
def test_signal_blend(self):
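# A quick illustration of the convention tested below (a sketch, assuming the
# reversed-RPN representation shown in the assertions):
#   infix expression: '0 & 1 | 2'
#   parsed tokens:    ['|', '2', '&', '1', '0']   # evaluate by reading right to left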
self.assertEqual(blender_parser('0 & 1'), ['&', '1', '0'])
self.assertEqual(blender_parser('0 or 1'), ['or', '1', '0'])
self.assertEqual(blender_parser('0 & 1 | 2'), ['|', '2', '&', '1', '0'])
blender = blender_parser('0 & 1 | 2')
self.assertEqual(signal_blend([1, 1, 1], blender), 1)
self.assertEqual(signal_blend([1, 0, 1], blender), 1)
self.assertEqual(signal_blend([1, 1, 0], blender), 1)
self.assertEqual(signal_blend([0, 1, 1], blender), 1)
self.assertEqual(signal_blend([0, 0, 1], blender), 1)
self.assertEqual(signal_blend([1, 0, 0], blender), 0)
self.assertEqual(signal_blend([0, 1, 0], blender), 0)
self.assertEqual(signal_blend([0, 0, 0], blender), 0)
# parse: '0 & ( 1 | 2 )'
self.assertEqual(blender_parser('0 & ( 1 | 2 )'), ['&', '|', '2', '1', '0'])
blender = blender_parser('0 & ( 1 | 2 )')
self.assertEqual(signal_blend([1, 1, 1], blender), 1)
self.assertEqual(signal_blend([1, 0, 1], blender), 1)
self.assertEqual(signal_blend([1, 1, 0], blender), 1)
self.assertEqual(signal_blend([0, 1, 1], blender), 0)
self.assertEqual(signal_blend([0, 0, 1], blender), 0)
self.assertEqual(signal_blend([1, 0, 0], blender), 0)
self.assertEqual(signal_blend([0, 1, 0], blender), 0)
self.assertEqual(signal_blend([0, 0, 0], blender), 0)
# parse: '(1-2)/3 + 0'
self.assertEqual(blender_parser('(1-2)/3 + 0'), ['+', '0', '/', '3', '-', '2', '1'])
blender = blender_parser('(1-2)/3 + 0')
self.assertEqual(signal_blend([5, 9, 1, 4], blender), 7)
# parse: '(0*1/2*(3+4))+5*(6+7)-8'
self.assertEqual(blender_parser('(0*1/2*(3+4))+5*(6+7)-8'), ['-', '8', '+', '*', '+', '7', '6', '5', '*',
'+', '4', '3', '/', '2', '*', '1', '0'])
blender = blender_parser('(0*1/2*(3+4))+5*(6+7)-8')
self.assertEqual(signal_blend([1, 1, 1, 1, 1, 1, 1, 1, 1], blender), 3)
self.assertEqual(signal_blend([2, 1, 4, 3, 5, 5, 2, 2, 10], blender), 14)
# parse: '0/max(2,1,3 + 5)+4'
self.assertEqual(blender_parser('0/max(2,1,3 + 5)+4'), ['+', '4', '/', 'max(3)', '+', '5', '3', '1', '2', '0'])
blender = blender_parser('0/max(2,1,3 + 5)+4')
self.assertEqual(signal_blend([8.0, 4, 3, 5.0, 0.125, 5], blender), 0.925)
self.assertEqual(signal_blend([2, 1, 4, 3, 5, 5, 2, 2, 10], blender), 5.25)
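# Note (inferred from the expected token lists above): variadic functions appear to
# be tokenized together with their argument count, e.g. 'max(3)' stands for a
# three-argument max, so the evaluator knows how many operands to pop.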
print('speed test')
import time
st = time.time()
blender = blender_parser('0+max(1,2,(3+4)*5, max(6, (7+8)*9), 10-11) * (12+13)')
res = []
for i in range(10000):
res = signal_blend([1, 1, 2, 3, 4, 5, 3, 4, 5, 6, 7, 8, 2, 3], blender)
et = time.time()
print(f'total time for RPN processing: {et - st}, got result: {res}')
blender = blender_parser("0 + 1 * 2")
self.assertEqual(signal_blend([1, 2, 3], blender), 7)
blender = blender_parser("(0 + 1) * 2")
self.assertEqual(signal_blend([1, 2, 3], blender), 9)
blender = blender_parser("(0+1) * 2")
self.assertEqual(signal_blend([1, 2, 3], blender), 9)
blender = blender_parser("(0 + 1) * 2")
self.assertEqual(signal_blend([1, 2, 3], blender), 9)
# TODO: expressions like -(1+2) cannot be handled yet
# self.a_to_sell.set_blender('selecting', "-(0 + 1) * 2")
# self.assertEqual(self.a_to_sell.signal_blend([1, 2, 3]), -9)
blender = blender_parser("(0-1)/2 + 3")
print(f'RPN of notation: "(0-1)/2 + 3" is:\n'
f'{" ".join(blender[::-1])}')
self.assertAlmostEqual(signal_blend([1, 2, 3, 0.0], blender), -0.33333333)
blender = blender_parser("0 + 1 / 2")
print(f'RPN of notation: "0 + 1 / 2" is:\n'
f'{" ".join(blender[::-1])}')
self.assertAlmostEqual(signal_blend([1, math.pi, 4], blender), 1.78539816)
blender = blender_parser("(0 + 1) / 2")
print(f'RPN of notation: "(0 + 1) / 2" is:\n'
f'{" ".join(blender[::-1])}')
self.assertEqual(signal_blend([1, 2, 3], blender), 1)
blender = blender_parser("(0 + 1 * 2) / 3")
print(f'RPN of notation: "(0 + 1 * 2) / 3" is:\n'
f'{" ".join(blender[::-1])}')
self.assertAlmostEqual(signal_blend([3, math.e, 10, 10], blender), 3.0182818284590454)
blender = blender_parser("0 / 1 * 2")
print(f'RPN of notation: "0 / 1 * 2" is:\n'
f'{" ".join(blender[::-1])}')
self.assertEqual(signal_blend([1, 3, 6], blender), 2)
blender = blender_parser("(0 - 1 + 2) * 4")
print(f'RPN of notation: "(0 - 1 + 2) * 4" is:\n'
f'{" ".join(blender[::-1])}')
self.assertAlmostEqual(signal_blend([1, 1, -1, np.nan, math.pi], blender), -3.141592653589793)
blender = blender_parser("0 * 1")
print(f'RPN of notation: "0 * 1" is:\n'
f'{" ".join(blender[::-1])}')
self.assertAlmostEqual(signal_blend([math.pi, math.e], blender), 8.539734222673566)
blender = blender_parser('abs(3-sqrt(2) / cos(1))')
print(f'RPN of notation: "abs(3-sqrt(2) / cos(1))" is:\n'
f'{" ".join(blender[::-1])}')
self.assertEqual(blender, ['abs(1)', '-', '/', 'cos(1)', '1', 'sqrt(1)', '2', '3'])
blender = blender_parser('0/max(2,1,3 + 5)+4')
print(f'RPN of notation: "0/max(2,1,3 + 5)+4" is:\n'
f'{" ".join(blender[::-1])}')
self.assertEqual(blender, ['+', '4', '/', 'max(3)', '+', '5', '3', '1', '2', '0'])
blender = blender_parser('1 + sum(1,2,3+3, sum(1, 2) + 3) *5')
print(f'RPN of notation: "1 + sum(1,2,3+3, sum(1, 2) + 3) *5" is:\n'
f'{" ".join(blender[::-1])}')
self.assertEqual(blender, ['+', '*', '5', 'sum(4)', '+', '3', 'sum(2)', '2', '1',
'+', '3', '3', '2', '1', '1'])
blender = blender_parser('1+sum(1,2,(3+5)*4, sum(3, (4+5)*6), 7-8) * (2+3)')
print(f'RPN of notation: "1+sum(1,2,(3+5)*4, sum(3, (4+5)*6), 7-8) * (2+3)" is:\n'
f'{" ".join(blender[::-1])}')
self.assertEqual(blender, ['+', '*', '+', '3', '2', 'sum(5)', '-', '8', '7',
'sum(2)', '*', '6', '+', '5', '4', '3', '*', '4',
'+', '5', '3', '2', '1', '1'])
# TODO: ndarray type of signals to be tested:
def test_set_opt_par(self):
""" test setting opt pars in batch"""
print(f'--------- Testing setting Opt Pars: set_opt_par -------')
op = qt.Operator('dma, random, crossline')
op.set_parameter('dma',
pars=(5, 10, 5),
opt_tag=1,
par_boes=((5, 10), (5, 15), (10, 15)),
window_length=10,
data_types=['close', 'open', 'high'])
self.assertEqual(op.strategies[0].pars, (5, 10, 5))
self.assertEqual(op.strategies[1].pars, (0.5,))
self.assertEqual(op.strategies[2].pars, (35, 120, 10, 'buy'))
self.assertEqual(op.opt_tags, [1, 0, 0])
op.set_opt_par((5, 12, 9))
self.assertEqual(op.strategies[0].pars, (5, 12, 9))
self.assertEqual(op.strategies[1].pars, (0.5,))
self.assertEqual(op.strategies[2].pars, (35, 120, 10, 'buy'))
op.set_parameter('crossline',
pars=(5, 10, 5, 'sell'),
opt_tag=1,
par_boes=((5, 10), (5, 15), (10, 15), ('buy', 'sell', 'none')),
window_length=10,
data_types=['close', 'open', 'high'])
self.assertEqual(op.opt_tags, [1, 0, 1])
op.set_opt_par((5, 12, 9, 8, 26, 9, 'buy'))
self.assertEqual(op.strategies[0].pars, (5, 12, 9))
self.assertEqual(op.strategies[1].pars, (0.5,))
self.assertEqual(op.strategies[2].pars, (8, 26, 9, 'buy'))
op.set_opt_par((9, 200, 155, 8, 26, 9, 'buy', 5, 12, 9))
self.assertEqual(op.strategies[0].pars, (9, 200, 155))
self.assertEqual(op.strategies[1].pars, (0.5,))
self.assertEqual(op.strategies[2].pars, (8, 26, 9, 'buy'))
# test set_opt_par when opt_tag is set to be 2 (enumerate type of parameters)
op.set_parameter('crossline',
pars=(5, 10, 5, 'sell'),
opt_tag=2,
par_boes=((5, 10), (5, 15), (10, 15), ('buy', 'sell', 'none')),
window_length=10,
data_types=['close', 'open', 'high'])
self.assertEqual(op.opt_tags, [1, 0, 2])
self.assertEqual(op.strategies[0].pars, (9, 200, 155))
self.assertEqual(op.strategies[1].pars, (0.5,))
self.assertEqual(op.strategies[2].pars, (5, 10, 5, 'sell'))
op.set_opt_par((5, 12, 9, (8, 26, 9, 'buy')))
self.assertEqual(op.strategies[0].pars, (5, 12, 9))
self.assertEqual(op.strategies[1].pars, (0.5,))
self.assertEqual(op.strategies[2].pars, (8, 26, 9, 'buy'))
# Test Errors
# Not enough values for parameter
op.set_parameter('crossline', opt_tag=1)
self.assertRaises(ValueError, op.set_opt_par, (5, 12, 9, 8))
# wrong type of input
self.assertRaises(AssertionError, op.set_opt_par, [5, 12, 9, 7, 15, 12, 'sell'])
def test_stg_attribute_get_and_set(self):
self.stg = qt.TimingCrossline()
self.stg_type = 'R-TIMING'
self.stg_name = "CROSSLINE"
self.stg_text = 'Moving average crossline strategy, determine long/short position according to the cross ' \
'point' \
' of long and short term moving average prices '
self.pars = (35, 120, 10, 'buy')
self.par_boes = [(10, 250), (10, 250), (1, 100), ('buy', 'sell', 'none')]
self.par_count = 4
self.par_types = ['discr', 'discr', 'conti', 'enum']
self.opt_tag = 0
self.data_types = ['close']
self.data_freq = 'd'
self.sample_freq = 'd'
self.window_length = 270
self.assertEqual(self.stg.stg_type, self.stg_type)
self.assertEqual(self.stg.stg_name, self.stg_name)
self.assertEqual(self.stg.stg_text, self.stg_text)
self.assertEqual(self.stg.pars, self.pars)
self.assertEqual(self.stg.par_types, self.par_types)
self.assertEqual(self.stg.par_boes, self.par_boes)
self.assertEqual(self.stg.par_count, self.par_count)
self.assertEqual(self.stg.opt_tag, self.opt_tag)
self.assertEqual(self.stg.data_freq, self.data_freq)
self.assertEqual(self.stg.sample_freq, self.sample_freq)
self.assertEqual(self.stg.data_types, self.data_types)
self.assertEqual(self.stg.window_length, self.window_length)
self.stg.stg_name = 'NEW NAME'
self.stg.stg_text = 'NEW TEXT'
self.assertEqual(self.stg.stg_name, 'NEW NAME')
self.assertEqual(self.stg.stg_text, 'NEW TEXT')
self.stg.pars = (1, 2, 3, 4)
self.assertEqual(self.stg.pars, (1, 2, 3, 4))
self.stg.par_count = 3
self.assertEqual(self.stg.par_count, 3)
self.stg.par_boes = [(1, 10), (1, 10), (1, 10), (1, 10)]
self.assertEqual(self.stg.par_boes, [(1, 10), (1, 10), (1, 10), (1, 10)])
self.stg.par_types = ['conti', 'conti', 'discr', 'enum']
self.assertEqual(self.stg.par_types, ['conti', 'conti', 'discr', 'enum'])
self.stg.par_types = 'conti, conti, discr, conti'
self.assertEqual(self.stg.par_types, ['conti', 'conti', 'discr', 'conti'])
self.stg.data_types = 'close, open'
self.assertEqual(self.stg.data_types, ['close', 'open'])
self.stg.data_types = ['close', 'high', 'low']
self.assertEqual(self.stg.data_types, ['close', 'high', 'low'])
self.stg.data_freq = 'w'
self.assertEqual(self.stg.data_freq, 'w')
self.stg.window_length = 300
self.assertEqual(self.stg.window_length, 300)
def test_rolling_timing(self):
stg = TestLSStrategy()
stg_pars = {'000100': (5, 10),
'000200': (5, 10),
'000300': (5, 6)}
stg.set_pars(stg_pars)
history_data = self.hp1.values
output = stg.generate(hist_data=history_data)
self.assertIsInstance(output, np.ndarray)
self.assertEqual(output.shape, (45, 3))
lsmask = np.array([[0., 0., 1.],
[0., 0., 1.],
[1., 0., 1.],
[1., 0., 1.],
[1., 0., 1.],
[1., 0., 1.],
[1., 0., 1.],
[1., 0., 1.],
[1., 0., 1.],
[1., 0., 1.],
[1., 0., 1.],
[1., 0., 1.],
[1., 1., 1.],
[1., 1., 1.],
[1., 1., 1.],
[1., 1., 0.],
[1., 1., 0.],
[1., 1., 0.],
[1., 0., 0.],
[1., 0., 0.],
[1., 1., 0.],
[0., 1., 0.],
[0., 1., 0.],
[0., 1., 0.],
[0., 1., 0.],
[0., 1., 0.],
[0., 1., 0.],
[0., 1., 0.],
[0., 1., 0.],
[0., 1., 0.],
[0., 1., 1.],
[0., 1., 1.],
[0., 1., 1.],
[0., 1., 1.],
[0., 1., 1.],
[0., 1., 1.],
[0., 1., 1.],
[0., 1., 1.],
[0., 1., 1.],
[0., 1., 1.],
[0., 1., 1.],
[0., 1., 1.],
[0., 1., 1.],
[0., 1., 1.],
[0., 1., 1.]])
# TODO: Issue to be solved: the np.nan values are converted to 0 in the lsmask, which may have unintended consequences
# TODO: the handling of nan values needs to be resolved
self.assertEqual(output.shape, lsmask.shape)
self.assertTrue(np.allclose(output, lsmask, equal_nan=True))
def test_sel_timing(self):
stg = TestSelStrategy()
stg_pars = ()
stg.set_pars(stg_pars)
history_data = self.hp1['high, low, close', :, :]
seg_pos, seg_length, seg_count = stg._seg_periods(dates=self.hp1.hdates, freq=stg.sample_freq)
self.assertEqual(list(seg_pos), [0, 5, 11, 19, 26, 33, 41, 47, 49])
self.assertEqual(list(seg_length), [5, 6, 8, 7, 7, 8, 6, 2])
self.assertEqual(seg_count, 8)
output = stg.generate(hist_data=history_data, shares=self.hp1.shares, dates=self.hp1.hdates)
self.assertIsInstance(output, np.ndarray)
self.assertEqual(output.shape, (45, 3))
selmask = np.array([[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0]])
self.assertEqual(output.shape, selmask.shape)
self.assertTrue(np.allclose(output, selmask))
def test_simple_timing(self):
stg = TestSigStrategy()
stg_pars = (0.2, 0.02, -0.02)
stg.set_pars(stg_pars)
history_data = self.hp1['close, open, high, low', :, 3:50]
output = stg.generate(hist_data=history_data, shares=self.shares, dates=self.date_indices)
self.assertIsInstance(output, np.ndarray)
self.assertEqual(output.shape, (45, 3))
sigmatrix = np.array([[0.0, 1.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, -1.0, 0.0],
[1.0, 0.0, 0.0],
[0.0, 0.0, -1.0],
[0.0, 0.0, 0.0],
[0.0, 1.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 1.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 1.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 1.0, 0.0],
[0.0, 0.0, -1.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 1.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, -1.0],
[0.0, 1.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, -1.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, -1.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
[-1.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 1.0, 1.0],
[0.0, 1.0, 0.0],
[0.0, 1.0, 0.0],
[0.0, 1.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 1.0, 0.0],
[0.0, 0.0, -1.0],
[0.0, 0.0, 0.0],
[0.0, 1.0, 0.0]])
side_by_side_array = np.array([[i, out_line, sig_line]
for
i, out_line, sig_line
in zip(range(len(output)), output, sigmatrix)])
print(f'output and signal matrix lined up side by side is \n'
f'{side_by_side_array}')
self.assertEqual(sigmatrix.shape, output.shape)
self.assertTrue(np.allclose(output, sigmatrix))
def test_sel_finance(self):
"""Test selecting_finance strategy, test all built-in strategy parameters"""
stg = SelectingFinanceIndicator()
stg_pars = (False, 'even', 'greater', 0, 0, 0.67)
stg.set_pars(stg_pars)
stg.window_length = 5
stg.data_freq = 'd'
stg.sample_freq = '10d'
stg.sort_ascending = False
stg.condition = 'greater'
stg.lbound = 0
stg.ubound = 0
stg._poq = 0.67
history_data = self.hp2.values
print(f'Start to test financial selection parameter {stg_pars}')
seg_pos, seg_length, seg_count = stg._seg_periods(dates=self.hp1.hdates, freq=stg.sample_freq)
self.assertEqual(list(seg_pos), [0, 5, 11, 19, 26, 33, 41, 47, 49])
self.assertEqual(list(seg_length), [5, 6, 8, 7, 7, 8, 6, 2])
self.assertEqual(seg_count, 8)
output = stg.generate(hist_data=history_data, shares=self.hp1.shares, dates=self.hp1.hdates)
self.assertIsInstance(output, np.ndarray)
self.assertEqual(output.shape, (45, 3))
selmask = np.array([[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0]])
self.assertEqual(output.shape, selmask.shape)
self.assertTrue(np.allclose(output, selmask))
# test single factor, get minimum factor
stg_pars = (True, 'even', 'less', 1, 1, 0.67)
stg.sort_ascending = True
stg.condition = 'less'
stg.lbound = 1
stg.ubound = 1
stg.set_pars(stg_pars)
print(f'Start to test financial selection parameter {stg_pars}')
output = stg.generate(hist_data=history_data, shares=self.hp1.shares, dates=self.hp1.hdates)
selmask = np.array([[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5]])
self.assertEqual(output.shape, selmask.shape)
self.assertTrue(np.allclose(output, selmask))
# test single factor, get max factor in linear weight
stg_pars = (False, 'linear', 'greater', 0, 0, 0.67)
stg.sort_ascending = False
stg.weighting = 'linear'
stg.condition = 'greater'
stg.lbound = 0
stg.ubound = 0
stg.set_pars(stg_pars)
print(f'Start to test financial selection parameter {stg_pars}')
output = stg.generate(hist_data=history_data, shares=self.hp1.shares, dates=self.hp1.hdates)
selmask = np.array([[0.00000, 0.33333, 0.66667],
[0.00000, 0.33333, 0.66667],
[0.00000, 0.33333, 0.66667],
[0.00000, 0.33333, 0.66667],
[0.00000, 0.33333, 0.66667],
[0.00000, 0.33333, 0.66667],
[0.00000, 0.66667, 0.33333],
[0.00000, 0.66667, 0.33333],
[0.00000, 0.66667, 0.33333],
[0.00000, 0.66667, 0.33333],
[0.00000, 0.66667, 0.33333],
[0.00000, 0.66667, 0.33333],
[0.00000, 0.66667, 0.33333],
[0.00000, 0.66667, 0.33333],
[0.00000, 0.33333, 0.66667],
[0.00000, 0.33333, 0.66667],
[0.00000, 0.33333, 0.66667],
[0.00000, 0.33333, 0.66667],
[0.00000, 0.33333, 0.66667],
[0.00000, 0.33333, 0.66667],
[0.00000, 0.33333, 0.66667],
[0.33333, 0.00000, 0.66667],
[0.33333, 0.00000, 0.66667],
[0.33333, 0.00000, 0.66667],
[0.33333, 0.00000, 0.66667],
[0.33333, 0.00000, 0.66667],
[0.33333, 0.00000, 0.66667],
[0.33333, 0.00000, 0.66667],
[0.00000, 0.00000, 1.00000],
[0.00000, 0.00000, 1.00000],
[0.00000, 0.00000, 1.00000],
[0.00000, 0.00000, 1.00000],
[0.00000, 0.00000, 1.00000],
[0.00000, 0.00000, 1.00000],
[0.00000, 0.00000, 1.00000],
[0.00000, 0.00000, 1.00000],
[0.33333, 0.00000, 0.66667],
[0.33333, 0.00000, 0.66667],
[0.33333, 0.00000, 0.66667],
[0.33333, 0.00000, 0.66667],
[0.33333, 0.00000, 0.66667],
[0.33333, 0.00000, 0.66667],
[0.33333, 0.66667, 0.00000],
[0.33333, 0.66667, 0.00000],
[0.33333, 0.66667, 0.00000]])
self.assertEqual(output.shape, selmask.shape)
self.assertTrue(np.allclose(output, selmask))
# test single factor, get max factor in proportional weight
stg_pars = (False, 'proportion', 'greater', 0, 0, 0.67)
stg.sort_ascending = False
stg.weighting = 'proportion'
stg.condition = 'greater'
stg.lbound = 0
stg.ubound = 0
stg.set_pars(stg_pars)
print(f'Start to test financial selection parameter {stg_pars}')
output = stg.generate(hist_data=history_data, shares=self.hp1.shares, dates=self.hp1.hdates)
selmask = np.array([[0.00000, 0.08333, 0.91667],
[0.00000, 0.08333, 0.91667],
[0.00000, 0.08333, 0.91667],
[0.00000, 0.08333, 0.91667],
[0.00000, 0.08333, 0.91667],
[0.00000, 0.08333, 0.91667],
[0.00000, 0.91667, 0.08333],
[0.00000, 0.91667, 0.08333],
[0.00000, 0.91667, 0.08333],
[0.00000, 0.91667, 0.08333],
[0.00000, 0.91667, 0.08333],
[0.00000, 0.91667, 0.08333],
[0.00000, 0.91667, 0.08333],
[0.00000, 0.91667, 0.08333],
[0.00000, 0.50000, 0.50000],
[0.00000, 0.50000, 0.50000],
[0.00000, 0.50000, 0.50000],
[0.00000, 0.50000, 0.50000],
[0.00000, 0.50000, 0.50000],
[0.00000, 0.50000, 0.50000],
[0.00000, 0.50000, 0.50000],
[0.08333, 0.00000, 0.91667],
[0.08333, 0.00000, 0.91667],
[0.08333, 0.00000, 0.91667],
[0.08333, 0.00000, 0.91667],
[0.08333, 0.00000, 0.91667],
[0.08333, 0.00000, 0.91667],
[0.08333, 0.00000, 0.91667],
[0.00000, 0.00000, 1.00000],
[0.00000, 0.00000, 1.00000],
[0.00000, 0.00000, 1.00000],
[0.00000, 0.00000, 1.00000],
[0.00000, 0.00000, 1.00000],
[0.00000, 0.00000, 1.00000],
[0.00000, 0.00000, 1.00000],
[0.00000, 0.00000, 1.00000],
[0.08333, 0.00000, 0.91667],
[0.08333, 0.00000, 0.91667],
[0.08333, 0.00000, 0.91667],
[0.08333, 0.00000, 0.91667],
[0.08333, 0.00000, 0.91667],
[0.08333, 0.00000, 0.91667],
[0.08333, 0.91667, 0.00000],
[0.08333, 0.91667, 0.00000],
[0.08333, 0.91667, 0.00000]])
self.assertEqual(output.shape, selmask.shape)
self.assertTrue(np.allclose(output, selmask, 0.001))
# test single factor, get max factor in even weight, threshold 0.2
stg_pars = (False, 'even', 'greater', 0.2, 0.2, 0.67)
stg.sort_ascending = False
stg.weighting = 'even'
stg.condition = 'greater'
stg.lbound = 0.2
stg.ubound = 0.2
stg.set_pars(stg_pars)
print(f'Start to test financial selection parameter {stg_pars}')
output = stg.generate(hist_data=history_data, shares=self.hp1.shares, dates=self.hp1.hdates)
selmask = np.array([[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0]])
self.assertEqual(output.shape, selmask.shape)
self.assertTrue(np.allclose(output, selmask, 0.001))
def test_tokenizer(self):
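# Inferred from the expected token lists below: _exp_to_token appears to split an
# infix expression string into operand, operator and function tokens, keeping the
# opening parenthesis attached to function names (e.g. 'max(', 'sqrt(') and
# recognizing unary minus as part of the number (e.g. '-1', '-.1').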
self.assertListEqual(_exp_to_token('1+1'),
['1', '+', '1'])
print(_exp_to_token('1+1'))
self.assertListEqual(_exp_to_token('1 & 1'),
['1', '&', '1'])
print(_exp_to_token('1&1'))
self.assertListEqual(_exp_to_token('1 and 1'),
['1', 'and', '1'])
print(_exp_to_token('1 and 1'))
self.assertListEqual(_exp_to_token('1 or 1'),
['1', 'or', '1'])
print(_exp_to_token('1 or 1'))
self.assertListEqual(_exp_to_token('(1 - 1 + -1) * pi'),
['(', '1', '-', '1', '+', '-1', ')', '*', 'pi'])
print(_exp_to_token('(1 - 1 + -1) * pi'))
self.assertListEqual(_exp_to_token('abs(5-sqrt(2) / cos(pi))'),
['abs(', '5', '-', 'sqrt(', '2', ')', '/', 'cos(', 'pi', ')', ')'])
print(_exp_to_token('abs(5-sqrt(2) / cos(pi))'))
self.assertListEqual(_exp_to_token('sin(pi) + 2.14'),
['sin(', 'pi', ')', '+', '2.14'])
print(_exp_to_token('sin(pi) + 2.14'))
self.assertListEqual(_exp_to_token('(1-2)/3.0 + 0.0000'),
['(', '1', '-', '2', ')', '/', '3.0', '+', '0.0000'])
print(_exp_to_token('(1-2)/3.0 + 0.0000'))
self.assertListEqual(_exp_to_token('-(1. + .2) * max(1, 3, 5)'),
['-', '(', '1.', '+', '.2', ')', '*', 'max(', '1', ',', '3', ',', '5', ')'])
print(_exp_to_token('-(1. + .2) * max(1, 3, 5)'))
self.assertListEqual(_exp_to_token('(x + e * 10) / 10'),
['(', 'x', '+', 'e', '*', '10', ')', '/', '10'])
print(_exp_to_token('(x + e * 10) / 10'))
self.assertListEqual(_exp_to_token('8.2/((-.1+abs3(3,4,5))*0.12)'),
['8.2', '/', '(', '(', '-.1', '+', 'abs3(', '3', ',', '4', ',', '5', ')', ')', '*', '0.12',
')'])
print(_exp_to_token('8.2/((-.1+abs3(3,4,5))*0.12)'))
self.assertListEqual(_exp_to_token('8.2/abs3(3,4,25.34 + 5)*0.12'),
['8.2', '/', 'abs3(', '3', ',', '4', ',', '25.34', '+', '5', ')', '*', '0.12'])
print(_exp_to_token('8.2/abs3(3,4,25.34 + 5)*0.12'))
class TestLog(unittest.TestCase):
def test_init(self):
pass
class TestConfig(unittest.TestCase):
"""测试Config对象以及QT_CONFIG变量的设置和获取值"""
def test_init(self):
pass
def test_invest(self):
pass
def test_pars_string_to_type(self):
_parse_string_kwargs('000300', 'asset_pool', _valid_qt_kwargs())
class TestHistoryPanel(unittest.TestCase):
def setUp(self):
print('start testing HistoryPanel object\n')
self.data = np.random.randint(10, size=(5, 10, 4))
self.index = pd.date_range(start='20200101', freq='d', periods=10)
self.index2 = ['2016-07-01', '2016-07-04', '2016-07-05', '2016-07-06',
'2016-07-07', '2016-07-08', '2016-07-11', '2016-07-12',
'2016-07-13', '2016-07-14']
self.index3 = '2016-07-01, 2016-07-04, 2016-07-05, 2016-07-06, 2016-07-07, ' \
'2016-07-08, 2016-07-11, 2016-07-12, 2016-07-13, 2016-07-14'
self.shares = '000100,000101,000102,000103,000104'
self.htypes = 'close,open,high,low'
self.data2 = np.random.randint(10, size=(10, 5))
self.data3 = np.random.randint(10, size=(10, 4))
self.data4 = np.random.randint(10, size=(10))
self.hp = qt.HistoryPanel(values=self.data, levels=self.shares, columns=self.htypes, rows=self.index)
self.hp2 = qt.HistoryPanel(values=self.data2, levels=self.shares, columns='close', rows=self.index)
self.hp3 = qt.HistoryPanel(values=self.data3, levels='000100', columns=self.htypes, rows=self.index2)
self.hp4 = qt.HistoryPanel(values=self.data4, levels='000100', columns='close', rows=self.index3)
self.hp5 = qt.HistoryPanel(values=self.data)
self.hp6 = qt.HistoryPanel(values=self.data, levels=self.shares, rows=self.index3)
def test_properties(self):
""" test all properties of HistoryPanel
"""
self.assertFalse(self.hp.is_empty)
self.assertEqual(self.hp.row_count, 10)
self.assertEqual(self.hp.column_count, 4)
self.assertEqual(self.hp.level_count, 5)
self.assertEqual(self.hp.shape, (5, 10, 4))
self.assertSequenceEqual(self.hp.htypes, ['close', 'open', 'high', 'low'])
self.assertSequenceEqual(self.hp.shares, ['000100', '000101', '000102', '000103', '000104'])
self.assertSequenceEqual(list(self.hp.hdates), list(self.index))
self.assertDictEqual(self.hp.columns, {'close': 0, 'open': 1, 'high': 2, 'low': 3})
self.assertDictEqual(self.hp.levels, {'000100': 0, '000101': 1, '000102': 2, '000103': 3, '000104': 4})
row_dict = {Timestamp('2020-01-01 00:00:00', freq='D'): 0,
Timestamp('2020-01-02 00:00:00', freq='D'): 1,
Timestamp('2020-01-03 00:00:00', freq='D'): 2,
Timestamp('2020-01-04 00:00:00', freq='D'): 3,
Timestamp('2020-01-05 00:00:00', freq='D'): 4,
Timestamp('2020-01-06 00:00:00', freq='D'): 5,
Timestamp('2020-01-07 00:00:00', freq='D'): 6,
Timestamp('2020-01-08 00:00:00', freq='D'): 7,
Timestamp('2020-01-09 00:00:00', freq='D'): 8,
Timestamp('2020-01-10 00:00:00', freq='D'): 9}
self.assertDictEqual(self.hp.rows, row_dict)
def test_len(self):
""" test the function len(HistoryPanel)
:return:
"""
self.assertEqual(len(self.hp), 10)
def test_empty_history_panel(self):
"""测试空HP或者特殊HP如维度标签为纯数字的HP"""
test_hp = qt.HistoryPanel(self.data)
self.assertFalse(test_hp.is_empty)
self.assertIsInstance(test_hp, qt.HistoryPanel)
self.assertEqual(test_hp.shape[0], 5)
self.assertEqual(test_hp.shape[1], 10)
self.assertEqual(test_hp.shape[2], 4)
self.assertEqual(test_hp.level_count, 5)
self.assertEqual(test_hp.row_count, 10)
self.assertEqual(test_hp.column_count, 4)
self.assertEqual(test_hp.shares, list(range(5)))
self.assertEqual(test_hp.hdates, list(pd.date_range(start='20200730', periods=10, freq='d')))
self.assertEqual(test_hp.htypes, list(range(4)))
self.assertTrue(np.allclose(test_hp.values, self.data))
print(f'shares: {test_hp.shares}\nhtypes: {test_hp.htypes}')
print(test_hp)
# HistoryPanel should be empty if no value is given
empty_hp = qt.HistoryPanel()
self.assertTrue(empty_hp.is_empty)
self.assertIsInstance(empty_hp, qt.HistoryPanel)
self.assertEqual(empty_hp.shape[0], 0)
self.assertEqual(empty_hp.shape[1], 0)
self.assertEqual(empty_hp.shape[2], 0)
self.assertEqual(empty_hp.level_count, 0)
self.assertEqual(empty_hp.row_count, 0)
self.assertEqual(empty_hp.column_count, 0)
# HistoryPanel should also be empty if empty value (np.array([])) is given
empty_hp = qt.HistoryPanel(np.empty((5, 0, 4)), levels=self.shares, columns=self.htypes)
self.assertTrue(empty_hp.is_empty)
self.assertIsInstance(empty_hp, qt.HistoryPanel)
self.assertEqual(empty_hp.shape[0], 0)
self.assertEqual(empty_hp.shape[1], 0)
self.assertEqual(empty_hp.shape[2], 0)
self.assertEqual(empty_hp.level_count, 0)
self.assertEqual(empty_hp.row_count, 0)
self.assertEqual(empty_hp.column_count, 0)
def test_create_history_panel(self):
""" test the creation of a HistoryPanel object by passing all data explicitly
"""
self.assertIsInstance(self.hp, qt.HistoryPanel)
self.assertEqual(self.hp.shape[0], 5)
self.assertEqual(self.hp.shape[1], 10)
self.assertEqual(self.hp.shape[2], 4)
self.assertEqual(self.hp.level_count, 5)
self.assertEqual(self.hp.row_count, 10)
self.assertEqual(self.hp.column_count, 4)
self.assertEqual(list(self.hp.levels.keys()), self.shares.split(','))
self.assertEqual(list(self.hp.columns.keys()), self.htypes.split(','))
self.assertEqual(list(self.hp.rows.keys())[0], pd.Timestamp('20200101'))
self.assertIsInstance(self.hp2, qt.HistoryPanel)
self.assertEqual(self.hp2.shape[0], 5)
self.assertEqual(self.hp2.shape[1], 10)
self.assertEqual(self.hp2.shape[2], 1)
self.assertEqual(self.hp2.level_count, 5)
self.assertEqual(self.hp2.row_count, 10)
self.assertEqual(self.hp2.column_count, 1)
self.assertEqual(list(self.hp2.levels.keys()), self.shares.split(','))
self.assertEqual(list(self.hp2.columns.keys()), ['close'])
self.assertEqual(list(self.hp2.rows.keys())[0], pd.Timestamp('20200101'))
self.assertIsInstance(self.hp3, qt.HistoryPanel)
self.assertEqual(self.hp3.shape[0], 1)
self.assertEqual(self.hp3.shape[1], 10)
self.assertEqual(self.hp3.shape[2], 4)
self.assertEqual(self.hp3.level_count, 1)
self.assertEqual(self.hp3.row_count, 10)
self.assertEqual(self.hp3.column_count, 4)
self.assertEqual(list(self.hp3.levels.keys()), ['000100'])
self.assertEqual(list(self.hp3.columns.keys()), self.htypes.split(','))
self.assertEqual(list(self.hp3.rows.keys())[0], pd.Timestamp('2016-07-01'))
self.assertIsInstance(self.hp4, qt.HistoryPanel)
self.assertEqual(self.hp4.shape[0], 1)
self.assertEqual(self.hp4.shape[1], 10)
self.assertEqual(self.hp4.shape[2], 1)
self.assertEqual(self.hp4.level_count, 1)
self.assertEqual(self.hp4.row_count, 10)
self.assertEqual(self.hp4.column_count, 1)
self.assertEqual(list(self.hp4.levels.keys()), ['000100'])
self.assertEqual(list(self.hp4.columns.keys()), ['close'])
self.assertEqual(list(self.hp4.rows.keys())[0], pd.Timestamp('2016-07-01'))
self.hp5.info()
self.assertIsInstance(self.hp5, qt.HistoryPanel)
self.assertTrue(np.allclose(self.hp5.values, self.data))
self.assertEqual(self.hp5.shape[0], 5)
self.assertEqual(self.hp5.shape[1], 10)
self.assertEqual(self.hp5.shape[2], 4)
self.assertEqual(self.hp5.level_count, 5)
self.assertEqual(self.hp5.row_count, 10)
self.assertEqual(self.hp5.column_count, 4)
self.assertEqual(list(self.hp5.levels.keys()), [0, 1, 2, 3, 4])
self.assertEqual(list(self.hp5.columns.keys()), [0, 1, 2, 3])
self.assertEqual(list(self.hp5.rows.keys())[0], pd.Timestamp('2020-07-30'))
self.hp6.info()
self.assertIsInstance(self.hp6, qt.HistoryPanel)
self.assertTrue(np.allclose(self.hp6.values, self.data))
self.assertEqual(self.hp6.shape[0], 5)
self.assertEqual(self.hp6.shape[1], 10)
self.assertEqual(self.hp6.shape[2], 4)
self.assertEqual(self.hp6.level_count, 5)
self.assertEqual(self.hp6.row_count, 10)
self.assertEqual(self.hp6.column_count, 4)
self.assertEqual(list(self.hp6.levels.keys()), ['000100', '000101', '000102', '000103', '000104'])
self.assertEqual(list(self.hp6.columns.keys()), [0, 1, 2, 3])
self.assertEqual(list(self.hp6.rows.keys())[0], pd.Timestamp('2016-07-01'))
print('test creating HistoryPanel with very limited data')
print('test creating HistoryPanel with 2D data')
temp_data = np.random.randint(10, size=(7, 3)).astype('float')
temp_hp = qt.HistoryPanel(temp_data)
# Error testing during HistoryPanel creating
# shape does not match
self.assertRaises(AssertionError,
qt.HistoryPanel,
self.data,
levels=self.shares, columns='close', rows=self.index)
        # values is not np.ndarray
self.assertRaises(TypeError,
qt.HistoryPanel,
list(self.data))
# dimension/shape does not match
self.assertRaises(AssertionError,
qt.HistoryPanel,
self.data2,
levels='000100', columns=self.htypes, rows=self.index)
# value dimension over 3
self.assertRaises(AssertionError,
qt.HistoryPanel,
np.random.randint(10, size=(5, 10, 4, 2)))
        # label value not valid
self.assertRaises(ValueError,
qt.HistoryPanel,
self.data2,
levels=self.shares, columns='close',
rows='a,b,c,d,e,f,g,h,i,j')
def test_history_panel_slicing(self):
"""测试HistoryPanel的各种切片方法
包括通过标签名称切片,通过数字切片,通过逗号分隔的标签名称切片,通过冒号分隔的标签名称切片等切片方式"""
self.assertTrue(np.allclose(self.hp['close'], self.data[:, :, 0:1]))
self.assertTrue(np.allclose(self.hp['close,open'], self.data[:, :, 0:2]))
self.assertTrue(np.allclose(self.hp[['close', 'open']], self.data[:, :, 0:2]))
self.assertTrue(np.allclose(self.hp['close:high'], self.data[:, :, 0:3]))
self.assertTrue(np.allclose(self.hp['close,high'], self.data[:, :, [0, 2]]))
self.assertTrue(np.allclose(self.hp[:, '000100'], self.data[0:1, :, ]))
self.assertTrue(np.allclose(self.hp[:, '000100,000101'], self.data[0:2, :]))
self.assertTrue(np.allclose(self.hp[:, ['000100', '000101']], self.data[0:2, :]))
self.assertTrue(np.allclose(self.hp[:, '000100:000102'], self.data[0:3, :]))
self.assertTrue(np.allclose(self.hp[:, '000100,000102'], self.data[[0, 2], :]))
self.assertTrue(np.allclose(self.hp['close,open', '000100,000102'], self.data[[0, 2], :, 0:2]))
print('start testing HistoryPanel')
data = np.random.randint(10, size=(10, 5))
# index = pd.date_range(start='20200101', freq='d', periods=10)
shares = '000100,000101,000102,000103,000104'
dtypes = 'close'
df = pd.DataFrame(data)
print('=========================\nTesting HistoryPanel creation from DataFrame')
hp = qt.dataframe_to_hp(df=df, shares=shares, htypes=dtypes)
hp.info()
hp = qt.dataframe_to_hp(df=df, shares='000100', htypes='close, open, high, low, middle', column_type='htypes')
hp.info()
print('=========================\nTesting HistoryPanel creation from initialization')
data = np.random.randint(10, size=(5, 10, 4)).astype('float')
index = pd.date_range(start='20200101', freq='d', periods=10)
dtypes = 'close, open, high,low'
data[0, [5, 6, 9], [0, 1, 3]] = np.nan
data[1:4, [4, 7, 6, 2], [1, 1, 3, 0]] = np.nan
data[4:5, [2, 9, 1, 2], [0, 3, 2, 1]] = np.nan
hp = qt.HistoryPanel(data, levels=shares, columns=dtypes, rows=index)
hp.info()
        print('==========================\nOutput all historical data of the close htype\n')
self.assertTrue(np.allclose(hp['close', :, :], data[:, :, 0:1], equal_nan=True))
        print(f'==========================\nOutput all historical data of the close and open htypes\n')
self.assertTrue(np.allclose(hp[[0, 1], :, :], data[:, :, 0:2], equal_nan=True))
        print(f'==========================\nOutput historical data of all htypes for the first share\n')
self.assertTrue(np.allclose(hp[:, [0], :], data[0:1, :, :], equal_nan=True))
        print('==========================\nOutput all historical data of all shares for htypes 0, 1 and 2\n')
self.assertTrue(np.allclose(hp[[0, 1, 2]], data[:, :, 0:3], equal_nan=True))
        print('==========================\nOutput all historical data of the close and high htypes\n')
self.assertTrue(np.allclose(hp[['close', 'high']], data[:, :, [0, 2]], equal_nan=True))
        print('==========================\nOutput all historical data of htypes 0 and 1\n')
self.assertTrue(np.allclose(hp[[0, 1]], data[:, :, 0:2], equal_nan=True))
        print('==========================\nOutput all historical data of the close and high htypes\n')
self.assertTrue(np.allclose(hp['close,high'], data[:, :, [0, 2]], equal_nan=True))
        print('==========================\nOutput all historical data of the three htypes from close through high\n')
self.assertTrue(np.allclose(hp['close:high'], data[:, :, 0:3], equal_nan=True))
        print('==========================\nOutput all historical data of shares 0, 1 and 3\n')
self.assertTrue(np.allclose(hp[:, [0, 1, 3]], data[[0, 1, 3], :, :], equal_nan=True))
        print('==========================\nOutput all historical data of shares 000100 and 000102\n')
self.assertTrue(np.allclose(hp[:, ['000100', '000102']], data[[0, 2], :, :], equal_nan=True))
        print('==========================\nOutput historical data of shares 0, 1 and 2\n', hp[:, 0: 3])
self.assertTrue(np.allclose(hp[:, 0: 3], data[0:3, :, :], equal_nan=True))
        print('==========================\nOutput all historical data of shares 000100 and 000102\n')
self.assertTrue(np.allclose(hp[:, '000100, 000102'], data[[0, 2], :, :], equal_nan=True))
        print('==========================\nOutput historical data of days 0-7 for all shares\n')
self.assertTrue(np.allclose(hp[:, :, 0:8], data[:, 0:8, :], equal_nan=True))
        print('==========================\nOutput historical data of days 0-7 for share 000100\n')
self.assertTrue(np.allclose(hp[:, '000100', 0:8], data[0, 0:8, :], equal_nan=True))
        print('==========================\nstart testing multi-axis slicing of HistoryPanel object')
        print('==========================\nOutput close and open data of shares 000100 and 000102\n',
              hp['close,open', ['000100', '000102']])
        print('==========================\nOutput data from close to open for shares 000100 and 000102\n',
              hp['close,open', '000100, 000102'])
print(f'historyPanel: hp:\n{hp}')
print(f'data is:\n{data}')
hp.htypes = 'open,high,low,close'
hp.info()
hp.shares = ['000300', '600227', '600222', '000123', '000129']
hp.info()
def test_segment(self):
"""测试历史数据片段的获取"""
test_hp = qt.HistoryPanel(self.data,
levels=self.shares,
columns=self.htypes,
rows=self.index2)
self.assertFalse(test_hp.is_empty)
self.assertIsInstance(test_hp, qt.HistoryPanel)
self.assertEqual(test_hp.shape[0], 5)
self.assertEqual(test_hp.shape[1], 10)
self.assertEqual(test_hp.shape[2], 4)
print(f'Test segment with None parameters')
seg1 = test_hp.segment()
seg2 = test_hp.segment('20150202')
seg3 = test_hp.segment(end_date='20201010')
self.assertIsInstance(seg1, qt.HistoryPanel)
self.assertIsInstance(seg2, qt.HistoryPanel)
self.assertIsInstance(seg3, qt.HistoryPanel)
# check values
self.assertTrue(np.allclose(
seg1.values, test_hp.values
))
self.assertTrue(np.allclose(
seg2.values, test_hp.values
))
self.assertTrue(np.allclose(
seg3.values, test_hp.values
))
# check that htypes and shares should be same
self.assertEqual(seg1.htypes, test_hp.htypes)
self.assertEqual(seg1.shares, test_hp.shares)
self.assertEqual(seg2.htypes, test_hp.htypes)
self.assertEqual(seg2.shares, test_hp.shares)
self.assertEqual(seg3.htypes, test_hp.htypes)
self.assertEqual(seg3.shares, test_hp.shares)
# check that hdates are the same
self.assertEqual(seg1.hdates, test_hp.hdates)
self.assertEqual(seg2.hdates, test_hp.hdates)
self.assertEqual(seg3.hdates, test_hp.hdates)
print(f'Test segment with proper dates')
seg1 = test_hp.segment()
seg2 = test_hp.segment('20160704')
seg3 = test_hp.segment(start_date='2016-07-05',
end_date='20160708')
self.assertIsInstance(seg1, qt.HistoryPanel)
self.assertIsInstance(seg2, qt.HistoryPanel)
self.assertIsInstance(seg3, qt.HistoryPanel)
# check values
self.assertTrue(np.allclose(
seg1.values, test_hp[:, :, :]
))
self.assertTrue(np.allclose(
seg2.values, test_hp[:, :, 1:10]
))
self.assertTrue(np.allclose(
seg3.values, test_hp[:, :, 2:6]
))
# check that htypes and shares should be same
self.assertEqual(seg1.htypes, test_hp.htypes)
self.assertEqual(seg1.shares, test_hp.shares)
self.assertEqual(seg2.htypes, test_hp.htypes)
self.assertEqual(seg2.shares, test_hp.shares)
self.assertEqual(seg3.htypes, test_hp.htypes)
self.assertEqual(seg3.shares, test_hp.shares)
# check that hdates are the same
self.assertEqual(seg1.hdates, test_hp.hdates)
self.assertEqual(seg2.hdates, test_hp.hdates[1:10])
self.assertEqual(seg3.hdates, test_hp.hdates[2:6])
print(f'Test segment with non-existing but in range dates')
seg1 = test_hp.segment()
seg2 = test_hp.segment('20160703')
seg3 = test_hp.segment(start_date='2016-07-03',
end_date='20160710')
self.assertIsInstance(seg1, qt.HistoryPanel)
self.assertIsInstance(seg2, qt.HistoryPanel)
self.assertIsInstance(seg3, qt.HistoryPanel)
# check values
self.assertTrue(np.allclose(
seg1.values, test_hp[:, :, :]
))
self.assertTrue(np.allclose(
seg2.values, test_hp[:, :, 1:10]
))
self.assertTrue(np.allclose(
seg3.values, test_hp[:, :, 1:6]
))
# check that htypes and shares should be same
self.assertEqual(seg1.htypes, test_hp.htypes)
self.assertEqual(seg1.shares, test_hp.shares)
self.assertEqual(seg2.htypes, test_hp.htypes)
self.assertEqual(seg2.shares, test_hp.shares)
self.assertEqual(seg3.htypes, test_hp.htypes)
self.assertEqual(seg3.shares, test_hp.shares)
# check that hdates are the same
self.assertEqual(seg1.hdates, test_hp.hdates)
self.assertEqual(seg2.hdates, test_hp.hdates[1:10])
self.assertEqual(seg3.hdates, test_hp.hdates[1:6])
print(f'Test segment with out-of-range dates')
seg1 = test_hp.segment(start_date='2016-05-03',
end_date='20160910')
self.assertIsInstance(seg1, qt.HistoryPanel)
# check values
self.assertTrue(np.allclose(
seg1.values, test_hp[:, :, :]
))
# check that htypes and shares should be same
self.assertEqual(seg1.htypes, test_hp.htypes)
self.assertEqual(seg1.shares, test_hp.shares)
# check that hdates are the same
self.assertEqual(seg1.hdates, test_hp.hdates)
def test_slice(self):
"""测试历史数据切片的获取"""
test_hp = qt.HistoryPanel(self.data,
levels=self.shares,
columns=self.htypes,
rows=self.index2)
self.assertFalse(test_hp.is_empty)
self.assertIsInstance(test_hp, qt.HistoryPanel)
self.assertEqual(test_hp.shape[0], 5)
self.assertEqual(test_hp.shape[1], 10)
self.assertEqual(test_hp.shape[2], 4)
print(f'Test slice with shares')
share = '000101'
slc = test_hp.slice(shares=share)
self.assertIsInstance(slc, qt.HistoryPanel)
self.assertEqual(slc.shares, ['000101'])
self.assertEqual(slc.htypes, test_hp.htypes)
self.assertEqual(slc.hdates, test_hp.hdates)
self.assertTrue(np.allclose(slc.values, test_hp[:, '000101']))
share = '000101, 000103'
slc = test_hp.slice(shares=share)
self.assertIsInstance(slc, qt.HistoryPanel)
self.assertEqual(slc.shares, ['000101', '000103'])
self.assertEqual(slc.htypes, test_hp.htypes)
self.assertEqual(slc.hdates, test_hp.hdates)
self.assertTrue(np.allclose(slc.values, test_hp[:, '000101, 000103']))
print(f'Test slice with htypes')
htype = 'open'
slc = test_hp.slice(htypes=htype)
self.assertIsInstance(slc, qt.HistoryPanel)
self.assertEqual(slc.shares, test_hp.shares)
self.assertEqual(slc.htypes, ['open'])
self.assertEqual(slc.hdates, test_hp.hdates)
self.assertTrue(np.allclose(slc.values, test_hp['open']))
htype = 'open, close'
slc = test_hp.slice(htypes=htype)
self.assertIsInstance(slc, qt.HistoryPanel)
self.assertEqual(slc.shares, test_hp.shares)
self.assertEqual(slc.htypes, ['open', 'close'])
self.assertEqual(slc.hdates, test_hp.hdates)
self.assertTrue(np.allclose(slc.values, test_hp['open, close']))
# test that slicing of "open, close" does NOT equal to "close, open"
self.assertFalse(np.allclose(slc.values, test_hp['close, open']))
print(f'Test slicing with both htypes and shares')
share = '000103, 000101'
htype = 'high, low, close'
slc = test_hp.slice(shares=share, htypes=htype)
self.assertIsInstance(slc, qt.HistoryPanel)
self.assertEqual(slc.shares, ['000103', '000101'])
self.assertEqual(slc.htypes, ['high', 'low', 'close'])
self.assertEqual(slc.hdates, test_hp.hdates)
self.assertTrue(np.allclose(slc.values, test_hp['high, low, close', '000103, 000101']))
print(f'Test Error cases')
# duplicated input
htype = 'open, close, open'
self.assertRaises(AssertionError, test_hp.slice, htypes=htype)
def test_relabel(self):
new_shares_list = ['000001', '000002', '000003', '000004', '000005']
new_shares_str = '000001, 000002, 000003, 000004, 000005'
new_htypes_list = ['close', 'volume', 'value', 'exchange']
new_htypes_str = 'close, volume, value, exchange'
temp_hp = self.hp.copy()
temp_hp.re_label(shares=new_shares_list)
print(temp_hp.info())
print(temp_hp.htypes)
self.assertTrue( | np.allclose(self.hp.values, temp_hp.values) | numpy.allclose |
import numpy as np
from sklearn.preprocessing import QuantileTransformer
# PyTest testing infrastructure
import pytest
# Local testing infrastructure
from wrap import deploy_pickle
################################################################################
## Test preparation
@pytest.fixture
def scaler_uniform():
scaler_ = QuantileTransformer()
X = np.random.uniform (20,30,(1000, 10))
scaler_.fit (X)
return scaler_
@pytest.fixture
def scaler_normal():
scaler_ = QuantileTransformer(output_distribution='normal', n_quantiles=100)
X = np.random.uniform (20,30,(1000, 10))
scaler_.fit (X)
return scaler_
@pytest.fixture
def scaler_bool_uniform():
scaler_ = QuantileTransformer(output_distribution='uniform')
    X = np.random.choice([22., 27.], (1000, 10), p=(0.8, 0.2))
scaler_.fit (X)
return scaler_
@pytest.fixture
def scaler_bool_normal():
scaler_ = QuantileTransformer(output_distribution='normal')
X = | np.random.choice ([22.,27.], (1000, 10), (0.8, 0.2)) | numpy.random.choice |
#!/usr/bin/env python
import numpy as np
from sklearn.metrics import r2_score, mean_squared_error, mean_absolute_error
from scipy.stats import pearsonr, spearmanr
#===============================================================================
#===============================================================================
class Metrics:
@staticmethod
def r2(true, pred):
return r2_score(true, pred)
@staticmethod
def rmse(true, pred):
return np.sqrt(mean_squared_error(true, pred))
@staticmethod
def mae(true, pred):
return mean_absolute_error(true, pred)
@staticmethod
def pearson(true, pred):
if true.shape[-1] == 1:
true, pred = np.squeeze(true), np.squeeze(pred)
pearson_coeff, p_value = pearsonr(true, pred)
return pearson_coeff
else:
pearsons = []
for dim in range(true.shape[-1]):
pearson_coeff, p_value = pearsonr(true[:, dim], pred[:, dim])
pearsons.append(pearson_coeff)
return pearsons
@staticmethod
def spearman(true, pred):
if true.shape[-1] == 1:
true, pred = np.squeeze(true), np.squeeze(pred)
spearman_coeff, p_value = spearmanr(true, pred)
return spearman_coeff
else:
spearmans = []
for dim in range(true.shape[-1]):
spearman_coeff, p_value = spearmanr(true[:, dim], pred[:, dim])
spearmans.append(spearman_coeff)
return spearmans
# @staticmethod
# def correlation_matrix(true, pred):
# for ix in range(true.shape[0]):
def __call__(self, true, pred, kinds):
metrics = {}
for kind in kinds:
try:
fn = getattr(self, kind)
            except AttributeError as e:
                print(e)
                continue
error = fn(true, pred)
metrics[kind] = error
return metrics
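    # Illustrative usage sketch (assumed data, not part of the original class):
    #
    #     import numpy as np
    #     true = np.random.rand(100, 1)
    #     pred = true + 0.05 * np.random.randn(100, 1)
    #     scores = Metrics()(true, pred, kinds=['r2', 'rmse', 'pearson'])
    #     # scores is a dict, e.g. {'r2': ..., 'rmse': ..., 'pearson': ...}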
@staticmethod
def get_best_metric(kind, metric_list):
'''
retrieve the dictionary for which the metric is the best
'''
if kind == 'r2':
r2s = [d['r2'] for d in metric_list]
max_ix = np.argmax(r2s)
return metric_list[max_ix]
elif kind == 'rmse':
rmses = [d['rmse'] for d in metric_list]
min_ix = np.argmin(rmses)
return metric_list[min_ix]
elif kind == 'mae':
maes = [d['mae'] for d in metric_list]
min_ix = np.argmin(maes)
return metric_list[min_ix]
elif kind == 'pearson':
pearsons = [d['pearson'] for d in metric_list]
max_ix = | np.argmax(pearsons) | numpy.argmax |
r"""
Module containing SharpClaw solvers for PyClaw/PetClaw
# File: sharpclaw.py
# Created: 2010-03-20
# Author: <NAME>
"""
# Solver superclass
from __future__ import absolute_import
from clawpack.pyclaw.solver import Solver
from clawpack.pyclaw.util import add_parent_doc
from six.moves import range
# Reconstructor
try:
# load c-based WENO reconstructor (PyWENO)
from clawpack.pyclaw.limiters import reconstruct as recon
except ImportError:
# load old WENO5 reconstructor
from clawpack.pyclaw.limiters import recon
def default_tfluct(self):
r"""This is a dummy routine and should never be called, check Euler1D
to learn how to pass tfluct functions to the sharpclaw solver
"""
if self.tfluct_solver:
raise Exception("You set solver.tfluct_solver=True, but solver.tfluct has not been set.")
pass
class SharpClawSolver(Solver):
r"""
Superclass for all SharpClawND solvers.
Implements Runge-Kutta time stepping and the basic form of a
semi-discrete step (the dq() function). If another method-of-lines
solver is implemented in the future, it should be based on this class,
which then ought to be renamed to something like "MOLSolver".
.. attribute:: lim_type
Limiter(s) to be used.
- 0: No limiting.
- 1: TVD reconstruction.
- 2: WENO reconstruction.
``Default = 2``
.. attribute:: weno_order
Order of the WENO reconstruction. From 1st to 17th order (PyWENO)
``Default = 5``
.. attribute:: time_integrator
Time integrator to be used. Currently implemented methods:
- 'Euler' : 1st-order Forward Euler integration
- 'SSP33' : 3rd-order strong stability preserving method of Shu & Osher
- 'SSP104' : 4th-order strong stability preserving method Ketcheson
- 'SSPLMM32': 2nd-order strong stability preserving 3-step linear multistep method,
using Euler for starting values
- 'SSPLMM43': 3rd-order strong stability preserving 4-step linear multistep method
using SSPRK22 for starting values
- 'RK' : Arbitrary Runge-Kutta method, specified by setting `solver.a`
and `solver.b` to the Butcher arrays of the method.
- 'LMM' : Arbitrary linear multistep method, specified by setting the
coefficient arrays `solver.alpha` and `solver.beta`.
``Default = 'SSP104'``
.. attribute:: char_decomp
Type of WENO reconstruction.
0: conservative variables WENO reconstruction (standard).
1: Wave-slope reconstruction.
2: characteristic-wise WENO reconstruction.
3: transmission-based WENO reconstruction.
``Default = 0``
.. attribute:: tfluct_solver
        Whether a total fluctuation solver has to be used. If True, the function
that calculates the total fluctuation must be provided.
``Default = False``
.. attribute:: tfluct
Pointer to Fortran routine to calculate total fluctuation
``Default = default_tfluct (None)``
.. attribute:: aux_time_dep
Whether the auxiliary array is time dependent.
``Default = False``
.. attribute:: kernel_language
Specifies whether to use wrapped Fortran routines ('Fortran')
or pure Python ('Python').
``Default = 'Fortran'``.
.. attribute:: num_ghost
Number of ghost cells.
``Default = 3``
.. attribute:: fwave
Whether to split the flux jump (rather than the jump in Q) into waves;
requires that the Riemann solver performs the splitting.
``Default = False``
.. attribute:: cfl_desired
Desired CFL number.
``Default = 2.45``
.. attribute:: cfl_max
Maximum CFL number.
``Default = 2.50``
.. attribute:: dq_src
Whether a source term is present. If it is present the function that
computes its contribution must be provided.
``Default = None``
.. attribute:: call_before_step_each_stage
Whether to call the method `self.before_step` before each RK stage.
``Default = False``
"""
_sspcoeff = {
'Euler' : 1.0,
'SSP22': 1.0,
'SSP33': 1.0,
'SSP104': 6.0,
'SSPLMM32': 0.5,
'SSPLMM43': 1./3.,
'SSPLMM53': 0.5,
'RK': None,
'LMM': None
}
_cfl_default = {
'SSP104': [2.45, 2.5],
'SSPLMM32': [0.24, 0.25],
'SSPLMM43': [0.15, 1./6.],
'SSPLMM53': [0.24, 0.25]
}
# ========================================================================
# Initialization routines
# ========================================================================
def __init__(self,riemann_solver=None,claw_package=None):
r"""
Set default options for SharpClawSolvers and call the super's __init__().
"""
self.limiters = [1]
self.lim_type = 2
self.weno_order = 5
self.time_integrator = 'SSP104'
self.char_decomp = 0
self.tfluct_solver = False
self.tfluct = default_tfluct
self.aux_time_dep = False
self.kernel_language = 'Fortran'
self.num_ghost = 3
self.fwave = False
self.cfl_desired = None
self.cfl_max = None
self.dq_src = None
self.call_before_step_each_stage = False
self._mthlim = self.limiters
self._method = None
self._registers = None
self.dq_dt = None
self.dt_old = None
# Used only if time integrator is 'RK'
self.a = None
self.b = None
self.c = None
# Used only if time integrator is a multistep method
self.sspcoeff0 = None
self.alpha = None
self.beta = None
self.lmm_steps = 4
self.sspcoeff = None
self.prev_dq_dt_values = []
self.prev_dt_values = []
self.prev_dtFE_values = []
self.check_lmm_cond = False
self.lmm_cond = True
# Call general initialization function
super(SharpClawSolver,self).__init__(riemann_solver,claw_package)
def setup(self,solution):
"""
Allocate RK stage arrays or previous step solutions and fortran routine work arrays.
"""
if self.lim_type == 2:
self.num_ghost = (self.weno_order+1)//2
if self.lim_type == 2 and self.weno_order != 5 and self.kernel_language == 'Python':
raise Exception('Only 5th-order WENO reconstruction is implemented in Python kernels. \
Use Fortran for higher-order WENO.')
# This is a hack to deal with the fact that petsc4py
# doesn't allow us to change the stencil_width (num_ghost)
state = solution.state
state.set_num_ghost(self.num_ghost)
# End hack
if self.time_integrator == 'LMM':
assert self.dt_variable == False, \
'Must set solver.dt_variable=False for LMM integrator.'
try:
if 'SSPLMM' in self.time_integrator:
if self.time_integrator == 'SSPLMMk2':
assert 3 <= self.lmm_steps, \
'Must set solver.lmm_steps greater than 2 for 2nd order SSPLMM integrator.'
self.sspcoeff = (self.lmm_steps - 2.)/(self.lmm_steps - 1.)
# The choice of cfl_desired and cfl_max is intended for LMM with many steps (up to 20).
# If more steps are chosen the solution may not be accurate enough.
# For a smaller number of steps, higher values of cfl_desired and cfl_max can be used.
if self.cfl_max is None:
self.cfl_desired = 0.14*self.sspcoeff
self.cfl_max = 0.15*self.sspcoeff
elif self.time_integrator == 'SSPLMMk3':
assert 4 <= self.lmm_steps <= 5, \
'Must set solver.lmm_steps equal to 4 or 5 for 3rd order SSPLMM integrator.'
self.sspcoeff = (self.lmm_steps - 3.)/(self.lmm_steps - 1.)
if self.cfl_max is None:
self.cfl_desired = 0.48*self.sspcoeff
self.cfl_max = 0.5*self.sspcoeff
else:
if self.cfl_max is None:
self.cfl_desired = self._cfl_default[self.time_integrator][0]
self.cfl_max = self._cfl_default[self.time_integrator][1]
if self.cfl_desired is None:
self.cfl_desired = 0.9*self.cfl_max
except KeyError:
raise KeyError('Maximum CFL number is not provided.')
self._allocate_registers(solution)
self._set_mthlim()
state = solution.states[0]
if self.kernel_language=='Fortran':
if self.fmod is None:
so_name = 'clawpack.pyclaw.sharpclaw.sharpclaw'+str(self.num_dim)
self.fmod = __import__(so_name,fromlist=['clawpack.pyclaw.sharpclaw'])
state.set_cparam(self.fmod)
state.set_cparam(self.rp)
state.set_cparam(self.tfluct)
self._set_fortran_parameters(state,self.fmod.clawparams,self.fmod.workspace,self.fmod.reconstruct)
self._allocate_bc_arrays(state)
super(SharpClawSolver,self).setup(solution)
def __del__(self):
r"""
Deallocate F90 module arrays.
Also delete Fortran objects, which otherwise tend to persist in Python sessions.
"""
if self.kernel_language=='Fortran':
self.fmod.clawparams.dealloc_clawparams()
self.fmod.workspace.dealloc_workspace(self.char_decomp)
self.fmod.reconstruct.dealloc_recon_workspace(self.fmod.clawparams.lim_type,self.fmod.clawparams.char_decomp)
del self.fmod
super(SharpClawSolver,self).__del__()
# ========== Time stepping routines ======================================
def step(self,solution,take_one_step,tstart,tend):
"""Evolve q over one time step.
Take one step with a Runge-Kutta or multistep method as specified by
`solver.time_integrator`.
"""
state = solution.states[0]
step_index = self.status['numsteps'] + 1
if self.accept_step == True:
self.cfl.set_global_max(0.)
self.dq_dt = self.dq(state) / self.dt
if 'LMM' in self.time_integrator:
step_index = self.update_saved_values(state,step_index)
self.get_dt(solution.t,tstart,tend,take_one_step)
# Recompute cfl number based on current step-size
cfl = self.cfl.get_cached_max()
self.cfl.set_global_max(self.dt / self.dt_old * cfl)
self.dt_old = self.dt
### Runge-Kutta methods ###
if self.time_integrator == 'Euler':
state.q += self.dt*self.dq_dt
elif self.time_integrator == 'SSP22':
self.ssp22(state)
elif self.time_integrator == 'SSP33':
self._registers[0].q = state.q + self.dt*self.dq_dt
self._registers[0].t = state.t + self.dt
if self.call_before_step_each_stage:
self.before_step(self,self._registers[0])
self._registers[0].q = 0.75*state.q + 0.25*(self._registers[0].q + self.dq(self._registers[0]))
self._registers[0].t = state.t + 0.5*self.dt
if self.call_before_step_each_stage:
self.before_step(self,self._registers[0])
state.q = 1./3.*state.q + 2./3.*(self._registers[0].q + self.dq(self._registers[0]))
elif self.time_integrator == 'SSP104':
self.ssp104(state)
elif self.time_integrator == 'RK':
# General explicit RK with specified coefficients
# This is pulled out of the loop in order to use dq_dt
self._registers[0].q = self.dt*self.dq_dt
self._registers[0].t = state.t
num_stages = len(self.b)
for i in range(1,num_stages):
self._registers[i].q[:] = state.q
for j in range(i):
self._registers[i].q += self.a[i,j]*self._registers[j].q
self._registers[i].t = state.t + self.dt*self.c[i]
# self._registers[i].q eventually stores dt*f(y_i) after stage solution y_i is computed
self._registers[i].q = self.dq(self._registers[i])
for j in range(num_stages):
state.q += self.b[j]*self._registers[j].q
### Linear multistep methods ###
elif self.time_integrator in ['SSPLMMk2', 'SSPLMMk3']:
num_steps = self.lmm_steps
if step_index < num_steps:
# Use SSP22 Runge-Kutta method for starting values
self.ssp22(state)
else:
if self.time_integrator == 'SSPLMMk2':
omega_k_minus_1 = sum(self.prev_dt_values[1:])/self.dt
r = (omega_k_minus_1-1.)/omega_k_minus_1 # SSP coefficient
delta = 1./omega_k_minus_1**2
beta = (omega_k_minus_1+1.)/omega_k_minus_1
state.q = beta*(r*state.q + self.dt*self.dq_dt) + delta*self._registers[-num_steps].q
else:
omega_k_minus_1 = sum(self.prev_dt_values[1:])/self.dt
omega_k = omega_k_minus_1 + 1.
r = (omega_k_minus_1-2.)/omega_k_minus_1 # SSP coefficient
delta0 = (4*omega_k - omega_k_minus_1**2)/omega_k_minus_1**3
beta0 = omega_k/omega_k_minus_1**2
beta_k_minus_1 = omega_k**2/omega_k_minus_1**2
state.q = beta_k_minus_1*(r*state.q + self.dt*self.prev_dq_dt_values[-1]) + \
(r*beta0 + delta0)*self._registers[-num_steps].q + \
beta0*self.dt*self.prev_dq_dt_values[-num_steps]
elif self.time_integrator == 'LMM':
if step_index < len(self._registers):
self.ssp104(state) # Use SSP104 for starting values
else:
# Update solution: alpha[-1] and beta[-1] correspond to solution at the previous step
state.q = self.alpha[-1]*self._registers[-1].q + self.beta[-1]*self.dt*self.prev_dq_dt_values[-1]
for i in range(self.lmm_steps-1):
state.q += self.alpha[i]*self._registers[i].q + self.beta[i]*self.dt*self.prev_dq_dt_values[i]
else:
raise Exception('Unrecognized time integrator')
return False
def ssp22(self,state):
self._registers[0].q = state.q + self.dt*self.dq_dt
self._registers[0].t = state.t + self.dt
if self.call_before_step_each_stage:
self.before_step(self,self._registers[0])
state.q = 0.5*(state.q + self._registers[0].q + self.dq(self._registers[0]))
def ssp104(self,state):
if self.time_integrator == 'SSP104':
s1 = self._registers[0]
s1.q[:] = state.q
elif self.time_integrator == 'LMM':
# Okay to copy state objects here since this only happens a few times
import copy
s1 = copy.deepcopy(state)
s1.q = state.q + self.dt*self.dq_dt/6.
s1.t = state.t + self.dt/6.
for i in range(4):
if self.call_before_step_each_stage:
self.before_step(self,s1)
s1.q = s1.q + self.dq(s1)/6.
s1.t = s1.t + self.dt/6.
state.q = state.q/25. + 0.36 * s1.q
s1.q = 15. * state.q - 5. * s1.q
s1.t = state.t + self.dt/3.
for i in range(4):
if self.call_before_step_each_stage:
self.before_step(self,s1)
s1.q = s1.q + self.dq(s1)/6.
s1.t = s1.t + self.dt/6.
if self.call_before_step_each_stage:
self.before_step(self,s1)
state.q += 0.6 * s1.q + 0.1 * self.dq(s1)
def update_saved_values(self,state,step_index):
r"""
Updates lists of saved function evaluations, solution values, dt and dtFE for LMMs.
For 3rd-order SSPLMM additional conditions are checked if self.check_lmm_cond is set to True.
If these conditions are violated, the step is rejected.
"""
if (self.prev_dt_values == []):
# This only happens at the very beginning of the computation
self._registers[-1].q[:] = state.q
self._registers[-1].t = state.t
self.prev_dq_dt_values.append(self.dq_dt)
self.prev_dt_values.append(0.) # Not used
if 'SSPLMM' in self.time_integrator:
cfl = self.cfl.get_cached_max()
dtFE = self.dt / cfl * self.cfl_max / self.sspcoeff
self.prev_dtFE_values.append(dtFE)
self.sspcoeff0 = self._sspcoeff['SSP22']
elif self.accept_step == True: # Previous step was accepted
if 'SSPLMM' in self.time_integrator:
cfl = self.cfl.get_cached_max()
dtFE = self.dt / cfl * self.cfl_max / self.sspcoeff
if self.time_integrator == 'SSPLMMk3' and self.check_lmm_cond:
self.lmm_cond = self.check_3rd_ord_cond(state,step_index,dtFE)
if not self.lmm_cond:
self.accept_step = False
state.q[:] = self._registers[-1].q
self.dq_dt = self.prev_dq_dt_values[-1]
state.t = self._registers[-1].t
self.status['numsteps'] -= 1
return self.status['numsteps'] + 1
if step_index <= len(self._registers): # Startup
if self.time_integrator == 'SSPLMMk3':
self.prev_dq_dt_values.append(self.dq_dt)
self.prev_dt_values.append(self.dt_old)
self.prev_dtFE_values.append(dtFE)
else:
if self.time_integrator == 'SSPLMMk3':
self.prev_dq_dt_values = self.prev_dq_dt_values[1:] + self.prev_dq_dt_values[:1]
self.prev_dq_dt_values[-1] = self.dq_dt
# Roll and update prev_dt_values and prev_dtFE_values lists
self.prev_dt_values = self.prev_dt_values[1:] + self.prev_dt_values[:1]
self.prev_dt_values[-1] = self.dt_old
self.prev_dtFE_values = self.prev_dtFE_values[1:] + self.prev_dtFE_values[:1]
self.prev_dtFE_values[-1] = dtFE
elif self.time_integrator == 'LMM':
if step_index <= len(self._registers):
self.prev_dq_dt_values.append(self.dq_dt)
else:
self.prev_dq_dt_values = self.prev_dq_dt_values[1:] + self.prev_dq_dt_values[:1]
self.prev_dq_dt_values[-1] = self.dq_dt
# Roll and update saved solution
self._registers = self._registers[1:] + self._registers[:1]
self._registers[-1].q[:] = state.q
self._registers[-1].t = state.t
return step_index
def check_3rd_ord_cond(self,state,step_index,dtFE):
r"""
This routine checks the additional conditions for the 3rd-order SSPLMMs.
This is a posteriori check after a step is accepted.
In particular, there is a condition on the step size for the starting values and
        a condition on the ratio of forward Euler step sizes at every step.
        If the conditions are violated we must retrieve the previous solution and discard
that step; otherwise the step is accepted.
"""
lmm_cond = True
if step_index <= len(self._registers):
rho = 0.6 if len(self._registers) == 4 else 0.57
if self.dt > rho * dtFE:
lmm_cond = False
rhoFE = 0.9 if len(self._registers) == 4 else 0.962
dtFEm1 = self.prev_dtFE_values[-1]
if rhoFE * dtFEm1 > dtFE or dtFE > dtFEm1 / rhoFE:
lmm_cond = False
return lmm_cond
def _set_mthlim(self):
self._mthlim = self.limiters
if not isinstance(self.limiters,list): self._mthlim=[self._mthlim]
if len(self._mthlim)==1: self._mthlim = self._mthlim * self.num_waves
if len(self._mthlim)!=self.num_waves:
raise Exception('Length of solver.limiters is not equal to 1 or to solver.num_waves')
def dq(self,state):
"""
Evaluate dq/dt * (delta t)
"""
deltaq = self.dq_hyperbolic(state)
if self.dq_src is not None:
deltaq+=self.dq_src(self,state,self.dt)
return deltaq
def dq_hyperbolic(self,state):
raise NotImplementedError('You must subclass SharpClawSolver.')
def dqdt(self,state):
"""
Evaluate dq/dt. This routine is used for implicit time stepping.
"""
self.dt = 1
dq_dt = self.dq_hyperbolic(state)
if self.dq_src is not None:
dq_dt += self.dq_src(self,state,self.dt)
return dq_dt.flatten('f')
def _set_fortran_parameters(self,state,clawparams,workspace,reconstruct):
"""
Set parameters for Fortran modules used by SharpClaw.
The modules should be imported and passed as arguments to this function.
"""
grid = state.grid
clawparams.num_dim = grid.num_dim
clawparams.lim_type = self.lim_type
clawparams.weno_order = self.weno_order
clawparams.char_decomp = self.char_decomp
clawparams.tfluct_solver = self.tfluct_solver
clawparams.fwave = self.fwave
clawparams.index_capa = state.index_capa+1
clawparams.num_waves = self.num_waves
clawparams.alloc_clawparams()
for idim in range(grid.num_dim):
clawparams.xlower[idim]=grid.dimensions[idim].lower
clawparams.xupper[idim]=grid.dimensions[idim].upper
clawparams.dx =grid.delta
clawparams.mthlim =self._mthlim
maxnx = max(grid.num_cells)+2*self.num_ghost
workspace.alloc_workspace(maxnx,self.num_ghost,state.num_eqn,self.num_waves,self.char_decomp)
reconstruct.alloc_recon_workspace(maxnx,self.num_ghost,state.num_eqn,self.num_waves,
clawparams.lim_type,clawparams.char_decomp)
def _allocate_registers(self,solution):
r"""
Instantiate State objects for Runge--Kutta stages and Linear Multistep method steps.
This routine is only used by method-of-lines solvers (SharpClaw),
not by the Classic solvers. It allocates additional State objects
to store the intermediate stages used by Runge--Kutta and Multistep
time integrators.
If we create a MethodOfLinesSolver subclass, this should be moved there.
"""
# Generally the number of registers for the starting method should be at most
# equal to the number of registers of the LMM
if self.time_integrator == 'Euler': nregisters=0
elif self.time_integrator == 'SSP33': nregisters=1
elif self.time_integrator == 'SSP104': nregisters=1
elif self.time_integrator == 'RK': nregisters=len(self.b)+1
elif self.time_integrator == 'SSPLMMk2': nregisters=self.lmm_steps
elif self.time_integrator == 'SSPLMMk3': nregisters=self.lmm_steps
elif self.time_integrator == 'LMM': nregisters=len(self.alpha)
else:
raise Exception('Unrecognized time integrator: '+self.time_integrator)
state = solution.states[0]
# Use the same class constructor as the solution for the Runge Kutta stages
self._registers = []
for i in range(nregisters):
import copy
self._registers.append(copy.deepcopy(state))
def accept_reject_step(self,state):
r"""
Decide whether to accept or not the current step.
For Runge-Kutta methods the step is accepted if cfl <= cfl_max.
        For SSPLMM32 the choice of step-size guarantees the cfl condition is satisfied for the steps where the LMM
is used. Hence, we need to check the cfl condition only for the starting steps.
"""
accept_step = True
cfl = self.cfl.get_cached_max()
if 'LMM' in self.time_integrator:
step_index = self.status['numsteps'] + 1
# Condition for starting RK methods
if step_index < len(self._registers):
if self.time_integrator == 'LMM':
sspcoeff_ratio = 1.
else:
sspcoeff_ratio = self.sspcoeff0/self.sspcoeff
if cfl > sspcoeff_ratio * self.cfl_max:
accept_step = False
# Check cfl condition for Runge-Kutta methods
else:
if cfl > self.cfl_max:
accept_step = False
return accept_step
def get_dt_new(self):
r"""
Set size of next step depending on the time integrator and
whether or not the current step was accepted.
"""
self.dt_old = self.dt
cfl = self.cfl.get_cached_max()
if 'SSPLMM' in self.time_integrator:
step_index = self.status['numsteps'] + 1
if step_index < len(self._registers):
# Step-size update of starting methods
sspcoeff_ratio = self.sspcoeff0/self.sspcoeff
self.dt = sspcoeff_ratio * self.dt * self.cfl_desired / cfl
if self.time_integrator == 'SSPLMMk3' and self.check_lmm_cond and not self.lmm_cond:
rho = 0.6 if len(self._registers)== 4 else 0.57
self.dt = rho * self.dt
else:
                # Step size selection guarantees CFL condition is satisfied.
# Only need to check 3rd-order LMM's condition
if self.accept_step:
s = len(self._registers)
p = int(self.time_integrator[-1])
mu = min([self.prev_dtFE_values[i] for i in range(s)])
H = sum(self.prev_dt_values[1:])
self.dt = H * mu / (H + (p-1)*mu)
elif self.time_integrator == 'SSPLMMk3' and self.check_lmm_cond:
self.dt = 0.5 * self.dt
# Step-size update for RK methods
else:
self.dt = self.dt * self.cfl_desired / cfl
# ========================================================================
class SharpClawSolver1D(SharpClawSolver):
# ========================================================================
"""
SharpClaw solver for one-dimensional problems.
Used to solve 1D hyperbolic systems using the SharpClaw algorithms,
which are based on WENO reconstruction and Runge-Kutta time stepping.
"""
__doc__ += add_parent_doc(SharpClawSolver)
def __init__(self,riemann_solver=None,claw_package=None):
r"""
See :class:`SharpClawSolver1D` for more info.
"""
self.num_dim = 1
self.reflect_index = [1]
super(SharpClawSolver1D,self).__init__(riemann_solver,claw_package)
def dq_hyperbolic(self,state):
r"""
        Compute dq/dt * (delta t) for the hyperbolic system.
Note that the capa array, if present, should be located in the aux
variable.
Indexing works like this (here num_ghost=2 as an example)::
0 1 2 3 4 mx+num_ghost-2 mx+num_ghost mx+num_ghost+2
| mx+num_ghost-1 | mx+num_ghost+1
| | | | | ... | | | | |
0 1 | 2 3 mx+num_ghost-2 |mx+num_ghost
mx+num_ghost-1 mx+num_ghost+1
The top indices represent the values that are located on the grid
cell boundaries such as waves, s and other Riemann problem values,
the bottom for the cell centered values such as q. In particular
the ith grid cell boundary has the following related information::
i-1 i i+1
| | |
| i-1 | i |
| | |
Again, grid cell boundary quantities are at the top, cell centered
values are in the cell.
"""
import numpy as np
self._apply_bcs(state)
q = self.qbc
aux = self.auxbc
grid = state.grid
mx = grid.num_cells[0]
ixy=1
if self.kernel_language=='Fortran':
rp1 = self.rp.rp1._cpointer
if self.tfluct_solver:
tfluct1 = self.tfluct.tfluct1._cpointer
else:
tfluct1 = self.tfluct
dq,cfl=self.fmod.flux1(q,self.auxbc,self.dt,state.t,ixy,mx,self.num_ghost,mx,rp1,tfluct1)
elif self.kernel_language=='Python':
dtdx = np.zeros( (mx+2*self.num_ghost) ,order='F')
dq = np.zeros( (state.num_eqn,mx+2*self.num_ghost) ,order='F')
# Find local value for dt/dx
if state.index_capa>=0:
dtdx = self.dt / (grid.delta[0] * aux[state.index_capa,:])
else:
dtdx += self.dt/grid.delta[0]
if aux.shape[0]>0:
aux_l=aux[:,:-1]
aux_r=aux[:,1: ]
else:
aux_l = None
aux_r = None
#Reconstruct (wave reconstruction uses a Riemann solve)
if self.lim_type==-1: #1st-order Godunov
ql=q; qr=q
elif self.lim_type==0: #Unlimited reconstruction
raise NotImplementedError('Unlimited reconstruction not implemented')
elif self.lim_type==1: #TVD Reconstruction
raise NotImplementedError('TVD reconstruction not implemented')
elif self.lim_type==2: #WENO Reconstruction
if self.char_decomp==0: #No characteristic decomposition
ql,qr=recon.weno(5,q)
elif self.char_decomp==1: #Wave-based reconstruction
q_l=q[:,:-1]
q_r=q[:,1: ]
wave,s,amdq,apdq = self.rp(q_l,q_r,aux_l,aux_r,state.problem_data)
ql,qr=recon.weno5_wave(q,wave,s)
elif self.char_decomp==2: #Characteristic-wise reconstruction
raise NotImplementedError
# Solve Riemann problem at each interface
q_l=qr[:,:-1]
q_r=ql[:,1: ]
wave,s,amdq,apdq = self.rp(q_l,q_r,aux_l,aux_r,state.problem_data)
# Loop limits for local portion of grid
# THIS WON'T WORK IN PARALLEL!
LL = self.num_ghost - 1
UL = grid.num_cells[0] + self.num_ghost + 1
# Compute maximum wave speed
cfl = 0.0
for mw in range(self.num_waves):
smax1 = np.max( dtdx[LL :UL] *s[mw,LL-1:UL-1])
smax2 = | np.max(-dtdx[LL-1:UL-1]*s[mw,LL-1:UL-1]) | numpy.max |
# -*- coding: utf-8 -*-
"""
Created on Mon Jun 19 10:09:38 2017
@author: tih
"""
import os
import gdal
import osr
import scipy
import numpy as np
import pandas as pd
Startdate ="2017-01-01"
Enddate ="2017-21-21"
Temp_folder = r"K:\Weather_Data\Model\GLDAS\three_hourly\tair_f_inst\Tair_GLDAS-NOAH_C_3hour_{yyyy}.{mm:02d}.{dd:02d}_4.tif"
Pres_folder = r"K:\Weather_Data\Model\GLDAS\three_hourly\psurf_f_inst\P_GLDAS-NOAH_kpa_3hour_{yyyy}.{mm:02d}.{dd:02d}_4.tif"
Hum_folder = r"K:\Weather_Data\Model\GLDAS\three_hourly\qair_f_inst\Hum_GLDAS-NOAH_kg-kg_3hour_{yyyy}.{mm:02d}.{dd:02d}_4.tif"
out_folder = r"K:\Weather_Data\Model\GLDAS\three_hourly\relative_humidity_inst\Humidity_GLDAS-NOAH_Percentage_3hour_{yyyy}.{mm:02d}.{dd:02d}_4.tif"
folder_dir_out = os.path.dirname(out_folder)
if not os.path.exists(folder_dir_out):
os.makedirs(folder_dir_out)
Dates = pd.date_range(Startdate, Enddate, freq = "D")
def Open_array_info(filename=''):
"""
Opening a tiff info, for example size of array, projection and transform matrix.
Keyword Arguments:
filename -- 'C:/file/to/path/file.tif' or a gdal file (gdal.Open(filename))
string that defines the input tiff file or gdal file
"""
try:
if filename.split('.')[-1] == 'tif':
f = gdal.Open(r"%s" %filename)
else:
f = filename
except:
f = filename
try:
geo_out = f.GetGeoTransform()
proj = f.GetProjection()
size_X = f.RasterXSize
size_Y = f.RasterYSize
f = None
except:
        print('%s does not exist' %filename)
return(geo_out, proj, size_X, size_Y)
def Save_as_tiff(name='', data='', geo='', projection=''):
"""
This function save the array as a geotiff
Keyword arguments:
name -- string, directory name
data -- [array], dataset of the geotiff
geo -- [minimum lon, pixelsize, rotation, maximum lat, rotation,
pixelsize], (geospatial dataset)
projection -- integer, the EPSG code
"""
dir_name = os.path.dirname(name)
if not os.path.exists(dir_name):
os.makedirs(dir_name)
# save as a geotiff
driver = gdal.GetDriverByName("GTiff")
dst_ds = driver.Create(name, int(data.shape[1]), int(data.shape[0]), 1, gdal.GDT_Float32, ['COMPRESS=LZW'])
srse = osr.SpatialReference()
if projection == '':
srse.SetWellKnownGeogCS("WGS84")
else:
try:
if not srse.SetWellKnownGeogCS(projection) == 6:
srse.SetWellKnownGeogCS(projection)
else:
try:
srse.ImportFromEPSG(int(projection))
except:
srse.ImportFromWkt(projection)
except:
try:
srse.ImportFromEPSG(int(projection))
except:
srse.ImportFromWkt(projection)
dst_ds.SetProjection(srse.ExportToWkt())
dst_ds.GetRasterBand(1).SetNoDataValue(-9999)
dst_ds.SetGeoTransform(geo)
dst_ds.GetRasterBand(1).WriteArray(data)
dst_ds = None
return()
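# Illustrative usage sketch for Save_as_tiff (assumed file name and array):
#
#     example = np.zeros((180, 360))
#     geo = (-180.0, 1.0, 0.0, 90.0, 0.0, -1.0)  # (lon_min, dx, 0, lat_max, 0, -dy)
#     Save_as_tiff(r"K:\Weather_Data\example.tif", example, geo, "WGS84")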
def gap_filling(dataset, NoDataValue, method = 1):
"""
This function fills the no data gaps in a numpy array
Keyword arguments:
dataset -- 'C:/' path to the source data (dataset that must be filled)
NoDataValue -- Value that must be filled
"""
import watertools.General.data_conversions as DC
try:
if dataset.split('.')[-1] == 'tif':
# Open the numpy array
data = Open_tiff_array(dataset)
Save_as_tiff = 1
else:
data = dataset
Save_as_tiff = 0
except:
data = dataset
Save_as_tiff = 0
# fill the no data values
if NoDataValue is np.nan:
mask = ~(np.isnan(data))
else:
mask = ~(data==NoDataValue)
xx, yy = np.meshgrid(np.arange(data.shape[1]), np.arange(data.shape[0]))
xym = np.vstack( (np.ravel(xx[mask]), np.ravel(yy[mask])) ).T
data0 = np.ravel( data[:,:][mask] )
if method == 1:
interp0 = scipy.interpolate.NearestNDInterpolator( xym, data0 )
data_end = interp0( | np.ravel(xx) | numpy.ravel |
import numpy
from sklearn import preprocessing
def linear(intrinsic_process):
assert intrinsic_process.shape[0] == 2
observed_process = numpy.empty((3, intrinsic_process.shape[1]), dtype=numpy.float64)
observed_process[0] = intrinsic_process[0]
observed_process[1] = intrinsic_process[1]
observed_process[2] = 0
return observed_process
def s_curve(intrinsic_process, k=1):
assert intrinsic_process.shape[0] == 2
intrinsic_process_temp = numpy.copy(intrinsic_process)
intrinsic_process_temp = (intrinsic_process_temp.T-0).T/2
observed_process = numpy.empty((3, intrinsic_process_temp.shape[1]), dtype=numpy.float64)
t = 3 * numpy.pi * k * intrinsic_process_temp[0]
observed_process[0] = numpy.sin(t)
observed_process[1] = intrinsic_process[1]*1
observed_process[2] = numpy.sign(t) * (numpy.cos(t) - 1)
return observed_process
def severed_sphere(intrinsic_process, k1=5.5, k2=2):
assert intrinsic_process.shape[0] == 2
intrinsic_process_temp = numpy.copy(intrinsic_process)
#intrinsic_process_temp = (intrinsic_process_temp.T-numpy.mean(intrinsic_process_temp, axis=1).T).T
observed_process = numpy.empty((3, intrinsic_process_temp.shape[1]), dtype=numpy.float64)
observed_process[0] = numpy.sin(intrinsic_process_temp[0]*k1)*numpy.cos(intrinsic_process_temp[1]*k2)
observed_process[1] = numpy.cos(intrinsic_process_temp[0]*k1)*numpy.cos(intrinsic_process_temp[1]*k2)
observed_process[2] = numpy.sin(intrinsic_process_temp[1]*k2)
return observed_process
def twin_peaks(intrinsic_process, k=1):
assert intrinsic_process.shape[0] == 2
intrinsic_process_temp = numpy.copy(intrinsic_process)
intrinsic_process_temp = (intrinsic_process_temp.T-0).T/2
observed_process = numpy.empty((3, intrinsic_process_temp.shape[1]), dtype=numpy.float64)
observed_process[0] = intrinsic_process_temp[0]
observed_process[1] = intrinsic_process_temp[1]
observed_process[2] = numpy.sin(k*intrinsic_process_temp[0])*numpy.sin(k*intrinsic_process_temp[1])/3
return observed_process
def parabola2d2d(intrinsic_process, k=2):
assert intrinsic_process.shape[0] == 2
scale_x = numpy.max(intrinsic_process[0]) - numpy.min(intrinsic_process[0])
scale_y = numpy.max(intrinsic_process[1]) - numpy.min(intrinsic_process[1])
scale = max(scale_x, scale_y)
origin = numpy.mean(intrinsic_process, axis=1)
intrinsic_process_temp = (intrinsic_process.T-origin.T).T/scale
observed_process = numpy.empty((2, intrinsic_process.shape[1]), dtype=numpy.float64)
observed_process[0, :] = intrinsic_process_temp[0, :]
observed_process[1, :] = intrinsic_process_temp[1, :] - k * intrinsic_process_temp[0, :] ** 2
return observed_process
def parabola2d3d(intrinsic_process, k=3):
assert intrinsic_process.shape[0] == 2
observed_process = numpy.empty((3, intrinsic_process.shape[1]), dtype=numpy.float64)
intrinsic_process = intrinsic_process - 0.5
observed_process[0, :] = intrinsic_process[0, :]
observed_process[1, :] = intrinsic_process[1, :]
observed_process[2, :] = k * numpy.sum(intrinsic_process ** 2, axis=0)
return observed_process
def singers_mushroom(intrinsic_process):
assert intrinsic_process.shape[0] == 2
intrinsic_process_temp = numpy.copy(intrinsic_process)
intrinsic_process_temp = (intrinsic_process_temp.T-numpy.min(intrinsic_process_temp, axis=1).T).T
observed_process = numpy.empty((2, intrinsic_process_temp.shape[1]), dtype=numpy.float64)
observed_process[0] = intrinsic_process_temp[0]+numpy.power(intrinsic_process_temp[1], 3)
observed_process[1] = intrinsic_process_temp[1]-numpy.power(intrinsic_process_temp[0], 3)
return observed_process
def singers_sphere(intrinsic_process):
assert intrinsic_process.shape[0] == 2
intrinsic_process_temp = intrinsic_process
observed_process = numpy.empty((3, intrinsic_process_temp.shape[1]), dtype=numpy.float64)
radius = numpy.sqrt(intrinsic_process_temp[0]**2+intrinsic_process_temp[1]**2+1)
observed_process[0] = intrinsic_process_temp[0]/radius
observed_process[1] = intrinsic_process_temp[1]/radius
observed_process[2] = 1/radius
return observed_process
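# Note: singers_sphere maps the plane onto the upper unit hemisphere via
# (x, y) -> (x, y, 1) / sqrt(x**2 + y**2 + 1), the inverse gnomonic projection,
# so every observed point has unit norm.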
def whole_sphere(intrinsic_process, k=0.5):
assert intrinsic_process.shape[0] == 2
intrinsic_process_temp = numpy.copy(intrinsic_process)
intrinsic_process_temp = (intrinsic_process_temp.T-numpy.mean(intrinsic_process_temp, axis=1).T).T
observed_process = numpy.empty((3, intrinsic_process_temp.shape[1]), dtype=numpy.float64)
radius = numpy.sqrt(intrinsic_process_temp[0]**2+intrinsic_process_temp[1]**2)
theta = numpy.arctan(intrinsic_process[1, :] / intrinsic_process[0, :])
theta[numpy.where(intrinsic_process[0, :] < 0)] = theta[numpy.where(intrinsic_process[0, :] < 0)]+numpy.pi
observed_process[0] = numpy.sin(k*radius)*numpy.sin(theta)
observed_process[1] = numpy.sin(k*radius)*numpy.cos(theta)
observed_process[2] = -numpy.cos(k*radius)
return observed_process
def photo_dist(intrinsic_process, k=1.5):
assert intrinsic_process.shape[0] == 2
observed_process = numpy.empty((2, intrinsic_process.shape[1]), dtype=numpy.float64)
intrinsic_process = intrinsic_process - 0.5
r = numpy.sqrt(intrinsic_process[0, :] ** 2 + intrinsic_process[1, :] ** 2)
observed_process[0, :] = intrinsic_process[0, :]*(1 + k * r ** 2)
observed_process[1, :] = intrinsic_process[1, :]*(1 + k * r ** 2)
observed_process = observed_process + 0.5
return observed_process
def twirl(intrinsic_process, k=6):
assert intrinsic_process.shape[0] == 2
observed_process = numpy.empty((2, intrinsic_process.shape[1]), dtype=numpy.float64)
temp_mean = numpy.mean(intrinsic_process, 1)
intrinsic_process = (intrinsic_process.T - temp_mean.T).T
r = numpy.sqrt(intrinsic_process[0, :]**2 + intrinsic_process[1, :]**2)
theta = numpy.arctan(intrinsic_process[1, :] / intrinsic_process[0, :])
theta[numpy.where(intrinsic_process[0, :] < 0)] = theta[intrinsic_process[0, :] < 0]+numpy.pi
newr = r
newtheta = theta + newr * k
newtheta = -newtheta
observed_process[0, :] = newr * numpy.cos(newtheta)
observed_process[1, :] = newr * numpy.sin(newtheta)
observed_process = (observed_process.T + temp_mean.T).T
return observed_process
def bend(intrinsic_process, k=45):
assert intrinsic_process.shape[0] == 2
deg = 2*numpy.pi*(k/360)
observed_process = numpy.empty((3, intrinsic_process.shape[1]), dtype=numpy.float64)
for x in range(0, intrinsic_process.shape[1]):
if intrinsic_process[0, x] < 0.5:
observed_process[0, x] = intrinsic_process[0, x]
observed_process[1, x] = intrinsic_process[1, x]
observed_process[2, x] = 0
else:
observed_process[0, x] = 0.5 + numpy.cos(deg)*(intrinsic_process[0, x] - 0.5)
observed_process[1, x] = intrinsic_process[1, x]
observed_process[2, x] = numpy.sin(deg) * (intrinsic_process[0, x] - 0.5)
return observed_process
def swissroll(intrinsic_process, k_r=8, k_twist=8):
assert intrinsic_process.shape[0] == 2
intrinsic_process_temp = numpy.copy(intrinsic_process)
intrinsic_process_temp = (intrinsic_process_temp.T-numpy.min(intrinsic_process_temp, axis=1).T).T
observed_process = numpy.empty((3, intrinsic_process_temp.shape[1]), dtype=numpy.float64)
observed_process[0] = k_r*intrinsic_process_temp[0] * numpy.cos(k_twist * intrinsic_process_temp[0])
observed_process[1] = intrinsic_process_temp[1]*2
observed_process[2] = k_r*intrinsic_process_temp[0] * numpy.sin(k_twist * intrinsic_process_temp[0])
return observed_process
def tube(intrinsic_process, k=160):
assert intrinsic_process.shape[0] == 2
scale = numpy.max(intrinsic_process[0]) - numpy.min(intrinsic_process[0])
radius = (360/k)/(2*numpy.pi)
observed_process = numpy.empty((3, intrinsic_process.shape[1]), dtype=numpy.float64)
observed_process[0] = radius * numpy.cos(2*numpy.pi*(k/360) * (intrinsic_process[0]/scale))
observed_process[1] = intrinsic_process[1]
observed_process[2] = radius * numpy.sin(2*numpy.pi*(k/360) * (intrinsic_process[0]/scale))
min_max_scaler = preprocessing.MinMaxScaler(feature_range=(0, 1))
observed_process = min_max_scaler.fit_transform(observed_process.T).T
return observed_process
def helix(intrinsic_process, k=2):
assert intrinsic_process.shape[0] == 2
observed_process = numpy.empty((3, intrinsic_process.shape[1]), dtype=numpy.float64)
intrinsic_process = intrinsic_process - 0.5
observed_process[0, :] = intrinsic_process[0, :]
observed_process[1, :] = intrinsic_process[1, :] * numpy.cos(k * numpy.pi * (intrinsic_process[0, :]))
observed_process[2, :] = intrinsic_process[1, :] * numpy.sin(k * numpy.pi * (intrinsic_process[0, :]))
observed_process = observed_process + 0.5
return observed_process
def papillon(intrinsic_process, k=8):
assert intrinsic_process.shape[0] == 2
observed_process = numpy.empty((2, intrinsic_process.shape[1]), dtype=numpy.float64)
intrinsic_process = intrinsic_process - 0.5
observed_process[0, :] = intrinsic_process[0, :] + 0.5
observed_process[1, :] = intrinsic_process[1, :] + k * intrinsic_process[1, :] * intrinsic_process[0, :] ** 2 + 0.5
return observed_process
def twist(intrinsic_process, k=6):
assert intrinsic_process.shape[0] == 3
intrinsic_process = intrinsic_process - 0.5
r = numpy.sqrt(intrinsic_process[0, :]**2 + intrinsic_process[1, :]**2)
theta = numpy.arctan(intrinsic_process[1, :] / intrinsic_process[0, :])
theta[numpy.where(intrinsic_process[0, :] < 0)] = theta[numpy.where(intrinsic_process[0, :] < 0)]+numpy.pi
observed_process = numpy.empty([3, intrinsic_process.shape[1]])
observed_process[0, :] = r*numpy.cos(theta + intrinsic_process[2, :]*k)
observed_process[1, :] = r*numpy.sin(theta + intrinsic_process[2, :]*k)
observed_process[2, :] = intrinsic_process[2, :]
observed_process = observed_process + 0.5
return observed_process
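# Hedged usage sketch (added for illustration, not part of the original module):
# every observation function above maps an intrinsic process of shape
# (dim, n_points) to an embedded observed process. The point count and the
# uniform sampling below are arbitrary choices made only for this example.
def _example_swissroll_embedding(n_points=1000):
    """Illustrative sketch: embed a uniform 2-D point cloud with ``swissroll``."""
    intrinsic = numpy.random.rand(2, n_points)   # 2 x N intrinsic samples
    observed = swissroll(intrinsic)              # 3 x N embedded samples
    assert observed.shape == (3, n_points)
    return observed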
def antenna(intrinsic_process, centers, amplitudes, width, angles, range_factor, reg_fact):
n_antenas = centers.shape[1]
    observed_process = numpy.zeros([n_antenas, intrinsic_process.shape[1]])
"""
This module is the perturbation to matrix.
"""
import numpy as np
from scipy.linalg import eigh
class Pert():
def __init__(self, H0=None, evals=None, evecs=None):
        if evals is not None and evecs is not None:
            self.evals, self.evecs = evals, evecs
        elif H0 is not None:
            self.H0 = H0
            self.evals, self.evecs = eigh(H0)
        else:
            raise ValueError("either H0 or (evals, evecs) must be given")
        self.n = len(self.evals)
        self.dHH = None
def evals1(self, dH):
return self.Epert1(self.evecs, dH)
def evecs1(self, dH):
return self.Vpert1(self.evals, self.evecs, dH, self.n)
def evals2(self, dH):
return self.Epert2(self.evals, self.evecs, dH, self.n)
@staticmethod
def Epert1(evecs, dH):
return np.diag(evecs.T.conj().dot(dH).dot(evecs))
@staticmethod
def Vpert1(evals, evecs, dH, n):
dV = np.zeros((n, n), dtype='complex')
dHH = evecs.T.conj() @ dH @ evecs
for i in range(n):
for k in range(n):
if abs(evals[k] - evals[i]) > 0.000001:
dV[:, i] += dHH[k, i] / (evals[i] - evals[k]) * evecs[:, k]
return dV
@staticmethod
def Epert2(evals, evecs, dH, n):
d2E = np.zeros(n, dtype='complex')
dHH = evecs.T.conj() @ dH @ evecs
for i in range(n):
for k in range(n):
if abs(evals[k] - evals[i]) > 1e-10:
d2E[i] += dHH[i, k] * dHH[k, i] / (evals[i] - evals[k])
return d2E
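# Hedged usage sketch (added for illustration, not part of the original module):
# standard Rayleigh-Schroedinger corrections for H = H0 + x*dH,
#   E_i^(1) = <v_i|dH|v_i>,
#   E_i^(2) = sum_{k != i} |<v_k|dH|v_i>|^2 / (E_i - E_k),
# compared against exact diagonalisation. The matrix size and perturbation
# strength are arbitrary choices made only for this example.
def _example_pert_accuracy(n=4, x=1e-3):
    H0 = np.random.random((n, n))
    H0 = H0 + H0.T
    dH = np.random.random((n, n))
    dH = dH + dH.T
    pert = Pert(H0=H0)
    approx = pert.evals + x * pert.evals1(dH).real + x**2 * pert.evals2(dH).real
    exact = eigh(H0 + x * dH)[0]
    return np.linalg.norm(approx - exact)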
def unit2(x=0.3):
return np.array([[np.cos(x), np.sin(x)],[-np.sin(x), np.cos(x)]])
def test_pert_degenerate_2d(x=0.01):
H0=np.array([[2,0],[0,2]])
evals0, evecs0=np.linalg.eigh(H0)
H1=np.array([[1,2],[3,4]])
H1+=H1.T
print(evals0)
H=H0+H1*x
evals, evecs=eigh(H)
print("Fdiff of eval: ",(evals-evals0)/x)
print("Fdiff of evec: ", (evecs-evecs0))
m=evecs0.T.conj().dot(H1).dot(evecs0)
E1, c=eigh(m)
print("Pert of eval: ", E1)
c=c-np.eye(2)
    V10 = np.dot(evecs0, c[:, 0])
V11=np.dot(evecs0, c[:,1])
print(V10)
print(V11)
#test_pert_degenerate_2d()
def test_pert(x, n=4):
H0 = np.random.random([n, n])
H0 = H0 + H0.T
evals0, evecs0 = eigh(H0)
dH = np.random.random([n, n])
dH = (dH + dH.T.conj())
H = H0 + dH * x
evals, evecs = eigh(H)
# eigen value perturbation (1st order)
dE = Pert.Epert1(evecs0, dH)
dE2 = Pert.Epert2(evals0, evecs0, dH, n)
print(evals)
print(evals0)
print(evals0 + x * dE)
print(evals0 + x * dE + x * x * dE2)
print(np.linalg.norm(evals0 - evals))
print(np.linalg.norm(evals0 + x * dE - evals))
print(np.linalg.norm(evals0 + x * dE + x * x * dE2 - evals))
# eigen value perturbation (2nd order)
# eigen vector perturbation
dV = (Pert.Vpert1(evals0, evecs0, dH, n))
print("dV:", dV)
print("dV_fD:", (evecs - evecs0) / x)
def gen_degerate_mat(n):
H0 = np.random.random([n, n])
H0 = H0 + H0.T
evals1, evecs = eigh(H0)
evals = evals1
evals[0] = evals[1]
    H0 = evecs.dot(np.diag(evals)).dot(evecs.T)
    return H0
# -*- coding: utf-8 -*-
"""
The :mod:`parsimony.utils.utils` module includes common functions and
constants.
Please add anything useful or that you need throughout the whole package to
this module.
Created on Thu Feb 8 09:22:00 2013
Copyright (c) 2013-2014, CEA/DSV/I2BM/Neurospin. All rights reserved.
@author: <NAME>
@email: <EMAIL>
@license: BSD 3-clause.
"""
import warnings
# from functools import wraps
from time import time
try:
from time import clock
except ImportError:
from time import process_time as clock
# import collections
import functools
import inspect
import numpy as np
try:
from . import consts
except (ValueError, SystemError):
from parsimony.utils import consts
# TODO: This depends on the OS. We should try to be clever here ...
time_cpu = clock # UNIX-based system measures CPU time used.
time_wall = time # UNIX-based system measures time in seconds since the epoch.
time = time_cpu # TODO: Make it so that this can be changed by settings.
__all__ = ["time_cpu", "time_wall", "time", "numpy_datatype", "deprecated",
"corr", "project", "optimal_shrinkage", "AnonymousClass",
"is_windows", "version",
"list_op"]
# _DEBUG = True
def numpy_datatype(dtype): # TODO: Keep up-to-date!
"""Convert input type representation to a numpy data type.
Parameters
----------
dtype : data-type or str
The data type representation. Likely a numpy representation, or a
string representation.
"""
# For built-in types, let numpy handle it!
if isinstance(dtype, (bool, int, float, complex)):
_ = np.zeros((1,), dtype=dtype)
dtype = _.dtype
# For special numpy types, let numpy handle it!
if isinstance(dtype, (np.bool_, np.int_, np.intc, np.intp, np.float_,
np.complex_)):
_ = np.zeros((1,), dtype=dtype)
dtype = _.dtype
# If no type given, use default type (float64)
if (dtype is None):
dtype = consts.DATA_TYPE
if hasattr(dtype, "base_dtype"): # For tensorflow inputs.
dtype = dtype.base_dtype
# Check for possible known types:
if (dtype == "float16") or (dtype == np.float16):
dtype = np.float16
elif (dtype == "float32") or (dtype == np.float32):
dtype = np.float32
elif (dtype == "float64") or (dtype == np.float64):
dtype = np.float64
elif (dtype == "int8") or (dtype == np.int8):
dtype = np.int8
elif (dtype == "int16") or (dtype == np.int16):
dtype = np.int16
elif (dtype == "int32") or (dtype == np.int32):
dtype = np.int32
elif (dtype == "int64") or (dtype == np.int64):
dtype = np.int64
elif (dtype == "uint8") or (dtype == np.uint8):
dtype = np.uint8
elif (dtype == "uint16") or (dtype == np.uint16):
dtype = np.uint16
elif (dtype == "string"):
        dtype = np.string_
elif (dtype == "bool") or (dtype == np.bool):
dtype = np.bool
elif (dtype == "complex64") or (dtype == np.complex64):
dtype = np.complex64
elif (dtype == "complex128") or (dtype == np.complex128):
dtype = np.complex128
elif (dtype == "qint8"):
dtype = np.qint8
elif (dtype == "qint32"):
dtype = np.qint32
elif (dtype == "quint8"):
dtype = np.quint8
else:
raise ValueError("Data-type not supported (%s)!" % (dtype,))
return dtype
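# Hedged usage sketch (added for illustration): the helper normalises string or
# numpy type spellings to a concrete numpy dtype class, e.g.
#
#     >>> numpy_datatype("float32") is np.float32
#     True
#     >>> numpy_datatype(np.int64) is np.int64
#     True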
class deprecated(object):
"""Decorator for marking classes, functions and class functions deprecated.
Adapted from:
https://stackoverflow.com/questions/2536307/decorators-in-the-python-standard-lib-deprecated-specifically
Parameters
----------
replaced_by : str
The name of the new class or function that replaces this class or
function.
Examples
--------
>>> import warnings
>>> from parsimony.utils import deprecated
>>>
>>> @deprecated("other_function", filter_off=False)
... def function1():
... return 3.14159
>>>
>>> @deprecated(filter_off=False)
... def function2():
... return 3.14159
>>>
>>> class Class1(object):
... @deprecated("other_function", filter_off=False)
... def method(self):
... return 2.71828
>>>
>>> @deprecated("other_function", filter_off=False)
... class Class2(object):
... pass
>>>
>>> with warnings.catch_warnings():
... warnings.filterwarnings("error") # Make warnings raise exceptions
... try:
... v = function1()
... except DeprecationWarning as warning:
... print(warning) # doctest: +ELLIPSIS
Function or method "..." is deprecated (use "..." instead).
>>> with warnings.catch_warnings():
... warnings.filterwarnings("error") # Make warnings raise exceptions
... try:
... v = function2()
... except DeprecationWarning as warning:
... print(warning) # doctest: +ELLIPSIS
Function or method "..." is deprecated.
>>> with warnings.catch_warnings():
... warnings.filterwarnings("error") # Make warnings raise exceptions
... try:
... v = Class1().method()
... except DeprecationWarning as warning:
... print(warning) # doctest: +ELLIPSIS
Function or method "..." is deprecated (use "..." instead).
>>> with warnings.catch_warnings():
... warnings.filterwarnings("error") # Make warnings raise exceptions
... try:
... v = Class2()
... except DeprecationWarning as warning:
... print(warning) # doctest: +ELLIPSIS
Class "..." is deprecated (use "..." instead).
"""
def __init__(self, replaced_by=None, filter_off=True):
if inspect.isclass(replaced_by) or inspect.isfunction(replaced_by):
raise TypeError("Reason for deprecation must be supplied")
self.replaced_by = replaced_by
self.filter_off = bool(filter_off)
def __call__(self, cls_or_func):
if inspect.isfunction(cls_or_func):
if hasattr(cls_or_func, 'func_code'):
_code = cls_or_func.func_code
else:
_code = cls_or_func.__code__
if self.replaced_by is None:
fmt = 'Function or method "{name}" is deprecated.'
else:
fmt = 'Function or method "{name}" is deprecated ' + \
'(use "{replaced_by}" instead).'
filename = _code.co_filename
lineno = _code.co_firstlineno + 1
elif inspect.isclass(cls_or_func):
if self.replaced_by is None:
fmt = 'Class "{name}" is deprecated.'
else:
fmt = 'Class "{name}" is deprecated (use "{replaced_by}" instead).'
filename = cls_or_func.__module__
lineno = 1
else:
raise TypeError(type(cls_or_func))
if self.replaced_by is None:
msg = fmt.format(name=cls_or_func.__name__)
else:
msg = fmt.format(name=cls_or_func.__name__,
replaced_by=self.replaced_by)
@functools.wraps(cls_or_func)
def new_func(*args, **kwargs):
if self.filter_off:
warnings.simplefilter("always", DeprecationWarning) # Turn off filter
# warnings.warn_explicit(msg, category=DeprecationWarning,
# filename=filename, lineno=lineno)
warnings.warn(msg, category=DeprecationWarning, stacklevel=2) # Prints function call site instead of function definition site.
if self.filter_off:
warnings.simplefilter("default", DeprecationWarning) # Reset filter
return cls_or_func(*args, **kwargs)
return new_func
#def deprecated(*replaced_by):
# """This decorator can be used to mark functions as deprecated.
#
# Useful when phasing out old API functions.
#
# Parameters
# ----------
# replaced_by : String. The name of the function that should be used instead.
# """
# arg = True
# if len(replaced_by) == 1 and isinstance(replaced_by[0], collections.Callable):
# func = replaced_by[0]
# replaced_by = None
# arg = False
# else:
# replaced_by = replaced_by[0]
#
# def outer(func):
# @wraps(func)
# def with_warning(*args, **kwargs):
# string = ""
# if replaced_by is not None:
# string = " Use %s instead." % replaced_by
#
# warnings.warn("Function " + str(func.__name__) +
# " is deprecated." + string,
# category=DeprecationWarning,
# stacklevel=2)
# return func(*args, **kwargs)
#
# with_warning.__name__ = func.__name__
# with_warning.__doc__ = func.__doc__
# with_warning.__dict__.update(func.__dict__)
#
# return with_warning
#
# if not arg:
# return outer(func)
# else:
# return outer
#@deprecated("functions.properties.Gradient.approx_grad")
#def approx_grad(f, x, eps=1e-4):
# p = x.shape[0]
# grad = np.zeros(x.shape)
# for i in xrange(p):
# x[i, 0] -= eps
# loss1 = f(x)
# x[i, 0] += 2.0 * eps
# loss2 = f(x)
# x[i, 0] -= eps
# grad[i, 0] = (loss2 - loss1) / (2.0 * eps)
#
# return grad
#def make_list(a, n, default=None):
# # If a list, but empty
# if isinstance(a, (tuple, list)) and len(a) == 0:
# a = None
# # If only one value supplied, create a list with that value
# if a != None:
# if not isinstance(a, (tuple, list)):
## a = [a for i in xrange(n)]
# a = [a] * n
# else: # None or empty list supplied, create a list with the default value
## a = [default for i in xrange(n)]
# a = [default] * n
# return a
def corr(a, b):
ma = np.mean(a)
mb = np.mean(b)
a_ = a - ma
b_ = b - mb
norma = np.sqrt(np.sum(a_ ** 2, axis=0))
normb = np.sqrt(np.sum(b_ ** 2, axis=0))
norma[norma < consts.TOLERANCE] = 1.0
normb[normb < consts.TOLERANCE] = 1.0
a_ /= norma
b_ /= normb
ip = np.dot(a_.T, b_)
if ip.shape == (1, 1):
return ip[0, 0]
else:
return ip
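# Hedged usage sketch (added for illustration): ``corr`` centres and normalises
# its column-vector arguments, so an exact linear relation yields a value of 1:
#
#     >>> import numpy as np
#     >>> a = np.arange(10.0).reshape(-1, 1)
#     >>> abs(corr(a, 2.0 * a + 1.0) - 1.0) < 1e-12
#     True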
def project(v, u):
""" Project v onto u.
Examples
--------
>>> import numpy as np
>>> import parsimony.utils.utils as utils
>>> np.random.seed(42)
>>> a = np.random.rand(10, 1)
>>> b = np.random.rand(10, 1)
>>> utils.corr(a, b) # doctest: +ELLIPSIS
0.704...
>>> c = utils.project(a, b)
>>> abs(utils.corr(c, b) - 1.0) < 5e-16
True
"""
    return (np.dot(v.T, u) / np.dot(u.T, u)) * u
# coding: utf-8
# In[1]:
import tensorflow as tf
import numpy as np
# In[2]:
class Detector:
    '''Run detection over multiple batches of images.'''
def __init__(self,net_factory,data_size,batch_size,model_path):
graph=tf.Graph()
with graph.as_default():
self.image_op=tf.placeholder(tf.float32,[None,data_size,data_size,3])
self.cls_prob, self.bbox_pred, self.landmark_pred = net_factory(self.image_op, training=False)
self.sess = tf.Session()
            # reload the trained model from the checkpoint
saver=tf.train.Saver()
model_file=tf.train.latest_checkpoint(model_path)
saver.restore(self.sess,model_file)
self.data_size=data_size
self.batch_size=batch_size
def predict(self,databatch):
scores=[]
batch_size=self.batch_size
minibatch=[]
cur=0
        # total number of input samples
n=databatch.shape[0]
        # split the data into fixed-size batches
while cur<n:
minibatch.append(databatch[cur:min(cur+batch_size,n),:,:,:])
cur+=batch_size
cls_prob_list=[]
bbox_pred_list=[]
landmark_pred_list=[]
for idx,data in enumerate(minibatch):
m=data.shape[0]
real_size=self.batch_size
            # pad the last group when it is smaller than a full batch
if m<batch_size:
keep_inds=np.arange(m)
gap=self.batch_size-m
while gap>=len(keep_inds):
gap-=len(keep_inds)
keep_inds=np.concatenate((keep_inds,keep_inds))
if gap!=0:
keep_inds=np.concatenate((keep_inds,keep_inds[:gap]))
data=data[keep_inds]
real_size=m
cls_prob,bbox_pred,landmark_pred=self.sess.run([self.cls_prob, self.bbox_pred,self.landmark_pred],
feed_dict={self.image_op: data})
cls_prob_list.append(cls_prob[:real_size])
bbox_pred_list.append(bbox_pred[:real_size])
landmark_pred_list.append(landmark_pred[:real_size])
        return np.concatenate(cls_prob_list, axis=0), np.concatenate(bbox_pred_list, axis=0), np.concatenate(landmark_pred_list, axis=0)
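# Hedged usage sketch (added for illustration; the network factory, input size
# and checkpoint path below are assumptions, not part of this module):
#
#     detector = Detector(net_factory=O_Net, data_size=48, batch_size=16,
#                         model_path='model/ONet/')
#     cls_prob, bbox_pred, landmark_pred = detector.predict(image_batch)
#
# ``image_batch`` is assumed to be a float32 array of shape (N, 48, 48, 3).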
# SPDX-FileCopyrightText: 2014-2020 <NAME>
#
# SPDX-License-Identifier: MIT
# -*- coding: utf-8 -*-
from __future__ import division, print_function
import pytest
import sys
import numpy as np
from symfit import (
Fit, Parameter, Variable, Model, GradientModel
)
from symfit.core.minimizers import BFGS, DifferentialEvolution
from symfit.distributions import Gaussian
if sys.version_info >= (3, 0):
import inspect as inspect_sig
else:
import funcsigs as inspect_sig
class TestGlobalOptGaussian:
@classmethod
def setup_class(cls):
np.random.seed(0)
        mean = (0.4, 0.4)  # x, y mean
cov = [[0.01**2, 0], [0, 0.01**2]]
# TODO: evaluate gaussian at 200x200 points (?!) and add appropriate noise
        data = np.random.multivariate_normal(mean, cov, 2500000)
import seaborn as sns
import numpy as np
import os
import copy
import scipy.io
import pandas as pd
import pkg_resources
import nibabel as nib
import numpy.linalg as npl
import math
import nilearn.plotting
import brainsmash.mapgen.base
import brainsmash.mapgen.stats
from scipy.stats import pearsonr
def vol2fslr(volume,out,roi=False):
resource_package = 'pennlinckit'
resource_path = 'Q1-Q6_R440.HEMI.SURFACE.32k_fs_LR.surf.gii'
file = pkg_resources.resource_filename(resource_package, resource_path)
lh_inflated = file.replace('HEMI','L').replace('SURFACE','inflated')
rh_inflated = file.replace('HEMI','R').replace('SURFACE','inflated')
lh_pial = file.replace('HEMI','L').replace('SURFACE','pial')
rh_pial = file.replace('HEMI','R').replace('SURFACE','pial')
lh_white = file.replace('HEMI','L').replace('SURFACE','white')
rh_white = file.replace('HEMI','R').replace('SURFACE','white')
if roi == True:
right_command = "wb_command -volume-to-surface-mapping %s %s \
%s.R.func.gii \
-ribbon-constrained %s %s \
-volume-roi %s" %(volume,rh_inflated,out,rh_white,rh_pial,volume)
left_command = "wb_command -volume-to-surface-mapping %s %s \
%s.L.func.gii \
-ribbon-constrained %s %s \
-volume-roi %s"%(volume,lh_inflated,out,lh_white,lh_pial,volume)
if roi == False:
right_command = "wb_command -volume-to-surface-mapping %s %s \
%s.R.func.gii \
-ribbon-constrained %s %s" %(volume,rh_inflated,out,rh_white,rh_pial)
left_command = "wb_command -volume-to-surface-mapping %s %s \
%s.L.func.gii \
-ribbon-constrained %s %s" %(volume,lh_inflated,out,lh_white,lh_pial)
os.system(left_command)
os.system(right_command)
def view_surf(surf,hemi='left'):
resource_package = 'pennlinckit'
resource_path = 'Q1-Q6_R440.HEMI.SURFACE.32k_fs_LR.surf.gii'
file = pkg_resources.resource_filename(resource_package, resource_path)
if hemi == 'left': inflated = file.replace('HEMI','L').replace('SURFACE','inflated')
if hemi == 'right': inflated = file.replace('HEMI','R').replace('SURFACE','inflated')
nilearn.plotting.view_surf(inflated,surf)
def view_nifti(path):
nifti = nib.load(path)
nifti_data = nifti.get_fdata()
nib.viewers.OrthoSlicer3D(nifti_data,nifti.affine)
def yeo_partition(n_networks=17,parcels='Schaefer400'):
if parcels=='Schaefer400': resource_path = 'Schaefer2018_400Parcels_17Networks_order_info.txt'
resource_package = 'pennlinckit'
yeo_file = pkg_resources.resource_stream(resource_package, resource_path).name
full_dict_17 = {'VisCent':0,'VisPeri':1,'SomMotA':2,'SomMotB':3,'DorsAttnA':4,'DorsAttnB':5,'SalVentAttnA':6, 'SalVentAttnB':7,'LimbicA':8,
'LimbicB':9,'ContA':10,'ContB':11,'ContC':12,'DefaultA':13,'DefaultB':14,'DefaultC':15,'TempPar':16}
full_dict_7 = {'VisCent':0,'VisPeri':0,'SomMotA':1,'SomMotB':1,'DorsAttnA':2,'DorsAttnB':2,'SalVentAttnA':3, 'SalVentAttnB':3,'LimbicA':4,
'LimbicB':4,'ContA':5,'ContB':5,'ContC':5,'DefaultA':6,'DefaultB':6,'DefaultC':6,'TempPar':6}
name_dict = {0:'Visual',1:'Sensory\nMotor',2:'Dorsal\nAttention',3:'Ventral\nAttention',4:'Limbic',5:'Control',6:'Default'}
membership = np.zeros((400)).astype(str)
membership_ints = np.zeros((400)).astype(int)
yeo_df = pd.read_csv(yeo_file,sep='\t',header=None,names=['name','R','G','B','0'])['name']
for i,n in enumerate(yeo_df[::2]):
if n_networks == 17:
membership[i] = n.split('_')[2]
membership_ints[i] = int(full_dict_17[membership[i]])
if n_networks == 7:
membership_ints[i] = int(full_dict_7[n.split('_')[2]])
membership[i] = name_dict[membership_ints[i]]
    if n_networks == 17: names = ['VisCent','VisPeri','SomMotA','SomMotB','DorsAttnA','DorsAttnB','SalVentAttnA','SalVentAttnB','LimbicA','LimbicB','ContA','ContB','ContC','DefaultA','DefaultB','DefaultC','TempPar']
if n_networks == 7: names = ['Visual','Sensory Motor','Dorsal Attention','Ventral Attention', 'Limbic','Control','Default']
return membership,membership_ints,names
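# Hedged usage sketch (added for illustration): count Schaefer-400 parcels per
# Yeo network. The helper name below is arbitrary.
def _example_yeo_network_counts():
    membership, membership_ints, names = yeo_partition(n_networks=7)
    # membership_ints holds one integer label (0-6) per parcel
    return {name: int(np.sum(membership_ints == i)) for i, name in enumerate(names)}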
def spin_test(map1,map2,parcels='Schaefer400',n=1000):
if parcels == 'Schaefer400': split = 200 #where the right hemi starts
resource_package = 'pennlinckit'
resource_path = '%s_ROIwise_geodesic_distance_midthickness.mat'%(parcels)
mat_file = scipy.io.loadmat(pkg_resources.resource_stream(resource_package, resource_path))
lh_gen = brainsmash.mapgen.base.Base(map2[:split], D= mat_file['lh_dist'])
lh_maps = lh_gen(n=n)
rh_gen = brainsmash.mapgen.base.Base(map2[split:], D= mat_file['rh_dist'])
rh_maps = rh_gen(n=n)
maps = np.append(lh_maps,rh_maps,axis=1)
assert (lh_maps[0] == maps[0,:200]).all()
return brainsmash.mapgen.stats.pearsonr(map1,maps)[0]
def spin_stat(map1,map2,spincorrs):
real_r = pearsonr(map1,map2)[0]
if real_r >= 0.0:
smash_p = len(spincorrs[spincorrs>real_r])/float(len(spincorrs))
else:
smash_p = len(spincorrs[spincorrs<real_r])/float(len(spincorrs))
return smash_p
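# Hedged usage sketch (added for illustration): assess the correspondence of two
# parcel-wise brain maps against a spatial-autocorrelation-preserving null.
# ``map1`` and ``map2`` are assumed to be length-400 arrays of parcel values.
def _example_spin_significance(map1, map2, n=1000):
    spincorrs = spin_test(map1, map2, parcels='Schaefer400', n=n)
    return spin_stat(map1, map2, spincorrs)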
def cut_data(data,min_cut=1.5,max_cut=1.5):
"""
remove outlier so your colorscale is not driven by one or two large values
Parameters
----------
data: the data you want to cut
min_cut: std cutoff for low values
max_cut: std cutoff for high values
Returns
-------
out : cut data
"""
d = data.copy()
max_v = np.mean(d) + np.std(d)*max_cut
min_v = np.mean(d) - np.std(d)*min_cut
d[d>max_v] = max_v
d[d<min_v] = min_v
return d
def make_heatmap(data,cmap='stock'):
"""
Generate an RGB value for each value in "data"
Parameters
----------
data: the data you want to colormap
cmap: nicegreen, nicepurp, stock, Reds, or send your own seaborn color_palette / cubehelix_palette object
Returns
-------
out : RGB values
"""
if cmap == 'nicegreen': orig_colors = sns.cubehelix_palette(1001, rot=-.5, dark=.3)
elif cmap == 'nicepurp': orig_colors = sns.cubehelix_palette(1001, rot=.5, dark=.3)
elif cmap == 'stock': orig_colors = sns.color_palette("RdBu_r",n_colors=1001)
elif cmap == 'Reds': orig_colors = sns.color_palette("Reds",n_colors=1001)
else: orig_colors = cmap
norm_data = copy.copy(data)
if np.nanmin(data) < 0.0: norm_data = norm_data + (np.nanmin(norm_data)*-1)
elif np.nanmin(data) > 0.0: norm_data = norm_data - (np.nanmin(norm_data))
    norm_data = norm_data / float(np.nanmax(norm_data))
# -*- coding: utf-8 -*-
__all__ = ["QuadPotentialDenseAdapt", "get_dense_nuts_step", "sample"]
import numpy as np
import pymc3 as pm
import theano
from pymc3.model import all_continuous, modelcontext
from pymc3.step_methods.hmc.quadpotential import QuadPotential
from pymc3.step_methods.step_sizes import DualAverageAdaptation
from scipy.linalg import LinAlgError, cholesky, solve_triangular
from .utils import logger
class QuadPotentialDenseAdapt(QuadPotential):
"""Adapt a dense mass matrix from the sample covariances."""
def __init__(
self,
n,
initial_mean=None,
initial_cov=None,
initial_weight=0,
adaptation_window=101,
doubling=True,
update_steps=None,
dtype="float64",
):
if initial_mean is None:
initial_mean = np.zeros(n, dtype=dtype)
if initial_cov is None:
initial_cov = np.eye(n, dtype=dtype)
initial_weight = 1
if initial_cov is not None and initial_cov.ndim != 2:
raise ValueError("Initial covariance must be two-dimensional.")
if initial_mean is not None and initial_mean.ndim != 1:
raise ValueError("Initial mean must be one-dimensional.")
if initial_cov is not None and initial_cov.shape != (n, n):
raise ValueError(
"Wrong shape for initial_cov: expected %s got %s"
% (n, initial_cov.shape)
)
if len(initial_mean) != n:
raise ValueError(
"Wrong shape for initial_mean: expected %s got %s"
% (n, len(initial_mean))
)
self.dtype = dtype
self._n = n
self._cov = np.array(initial_cov, dtype=self.dtype, copy=True)
self._cov_theano = theano.shared(self._cov)
self._chol = cholesky(self._cov, lower=True)
self._chol_error = None
self._foreground_cov = _WeightedCovariance(
self._n, initial_mean, initial_cov, initial_weight, self.dtype
)
self._background_cov = _WeightedCovariance(self._n, dtype=self.dtype)
self._n_samples = 0
# For backwards compatibility
self._doubling = doubling
self._adaptation_window = int(adaptation_window)
self._previous_update = 0
# New interface
if update_steps is None:
self._update_steps = None
else:
self._update_steps = np.atleast_1d(update_steps).astype(int)
def velocity(self, x, out=None):
        return np.dot(self._cov, x, out=out)
import numpy as np
from scipy.optimize import root
from scipy.special import gammaln
from scipy.linalg import cho_factor, cho_solve
import scipy.stats
from mogp_emulator.GPParams import CovTransform, CorrTransform, GPParams
import warnings
class GPPriors(object):
"""
Class representing prior distributions on GP Hyperparameters
This class combines together the prior distributions over the
hyperparameters for a GP. These are separated out into
``mean`` (which is a separate ``MeanPriors`` object),
``corr`` (a list of distributions for the correlation
length parameters), ``cov`` for the covariance, and
``nugget`` for the nugget. These can be specified when
initializing a new object using the appropriate kwargs,
or can be set once a ``GPPriors`` object has already
been created.
In addition to kwargs for the distributions, an additional
kwarg ``n_corr`` can be used to specify the number of
correlation lengths (in the event that ``corr`` is not
provided). If ``corr`` is specified, then this will override
``n_corr`` in determining the number of correlation lengths,
    so if both are provided then ``corr`` is used preferentially.
If neither ``corr`` or ``n_corr`` is provided, an exception
will be raised.
Finally, the nugget type is required to be specified when
initializing a new object.
:param mean: Priors on mean, must be a ``MeanPriors`` object.
Optional, default is ``None`` (indicating weak
prior information).
:type mean: MeanPriors
:param corr: Priors on correlation lengths, must be a list
of prior distributions (objects derived from
``WeakPriors``). Optional, default is ``None``
(indicating weak prior information, which will
automatically create an appropriate list
of ``WeakPriors`` objects of the length specified
by ``n_corr``).
:type corr: list
:param cov: Priors on covariance. Must be a ``WeakPriors``
derived object. Optional, default is ``None``
(indicating weak prior information).
:type cov: WeakPriors
:param nugget: Priors on nugget. Only valid if the nugget
is fit. Must be a ``WeakPriors`` derived
object. Optional, default is ``None``
(indicating weak prior information).
:type nugget: WeakPriors
:param n_corr: Integer specifying number of correlation lengths.
Only used if ``corr`` is not specified. Optional,
default is ``None`` to indicate number of
correlation lengths is specified by ``corr``.
:type n_corr: int
:param nugget_type: String indicating nugget type. Must be
``"fixed"``, ``"adaptive"``, ``"fit"``,
or ``"pivot"``. Optional, default is
``"fit"``
:type nugget_type: str
"""
def __init__(self, mean=None, corr=None, cov=None, nugget=None, n_corr=None, nugget_type="fit"):
"""Create new ``GPPriors`` object.
"""
if corr is None and n_corr is None:
raise ValueError("Must provide an argument for either corr or n_corr in GPPriors")
self.mean = mean
self._n_corr = n_corr
self.corr = corr
self.cov = cov
assert nugget_type in ["fit", "adaptive", "fixed", "pivot"], "Bad value for nugget type in GPPriors"
self._nugget_type = nugget_type
self.nugget = nugget
@classmethod
def default_priors(cls, inputs, n_corr, nugget_type="fit", dist="invgamma"):
"""
Class Method to create a ``GPPriors`` object with default values
Class method that creates priors with defaults for correlation
length priors and nugget. For the correlation lengths, the
values of the inputs are used to infer a distribution that
puts 99% of the mass between the minimum and maximum grid
spacing. For the nugget (if fit), a default is used that
        preferentially uses a small nugget. The mean and covariance
priors are kept as weak prior information.
:param inputs: Input values on which the GP will be fit. Must
be a 2D numpy array with the same restrictions
as the inputs to the GP class.
:type inputs: ndarray
:param n_corr: Number of correlation lengths. Because some
kernels only use a single correlation length,
this parameter specifies how to treat the
inputs to derive the default correlation
length priors. Must be a positive integer.
:type n_corr: int
:param nugget_type: String indicating nugget type. Must be
``"fixed"``, ``"adaptive"``, ``"fit"``,
or ``"pivot"``. Optional, default is
``"fit"``
:type nugget_type: str
:param dist: Distribution to fit to the correlation lengths.
Must be either a class derived from ``WeakPriors``
with a ``default_prior`` class method, or
``"lognormal"``, ``"gamma"``, or ``"invgamma"``.
Default is ``"invgamma"``.
:type dist: str or WeakPriors derived class
"""
assert nugget_type in ["fit", "adaptive", "fixed", "pivot"], "Bad value for nugget type in GPPriors"
if dist.lower() == "lognormal":
dist_obj = LogNormalPrior
elif dist.lower() == "gamma":
dist_obj = GammaPrior
elif dist.lower() == "invgamma":
dist_obj = InvGammaPrior
else:
if not isinstance(dist, (LogNormalPrior, GammaPrior, InvGammaPrior)):
raise TypeError("dist must be a prior distribution to contstruct default priors")
dist_obj = dist
if inputs.shape[1] == n_corr:
modified_inputs = np.transpose(inputs)
elif n_corr == 1:
modified_inputs = np.reshape(inputs, (1, -1))
else:
raise ValueError("Number of correlation lengths not compatible with input array")
priors = [dist_obj.default_prior_corr(param) for param in modified_inputs]
priors_updated = [p if isinstance(p, dist_obj) else InvGammaPrior.default_prior_corr_mode(param)
for (p, param) in zip(priors, modified_inputs)]
if nugget_type == "fit":
nugget = InvGammaPrior.default_prior_nugget()
else:
nugget = None
return cls(mean=None, corr=priors_updated, cov=None, nugget=nugget, nugget_type=nugget_type)
@property
def mean(self):
"""
Mean Prior information
The mean prior information is held in a ``MeanPriors`` object.
Can be set using a ``MeanPriors`` object or ``None``
"""
return self._mean
@mean.setter
def mean(self, newmean):
"Setter method for mean"
if newmean is None:
self._mean = MeanPriors()
elif isinstance(newmean, MeanPriors):
self._mean = newmean
else:
try:
self._mean = MeanPriors(*newmean)
except TypeError:
raise ValueError("Bad value for defining a MeanPriors object in GPPriors, " +
"argument must be an iterable containing the mean " +
"vector and the covariance as a float/vector/matrix")
@property
def n_mean(self):
"""
Number of mean parameters
:returns: Number of parameters for the ``MeanPrior`` object. If
the mean prior is weak or there is no mean function,
returns ``None``.
:rtype: int or None
"""
return self.mean.n_params
@property
def corr(self):
"""
Correlation Length Priors
        Must be a list of distributions/None. When the class object is initialized, one
        must either set the number of correlation parameters explicitly or pass a list
        of prior objects. If only the number of parameters is given, a list of
        ``WeakPrior`` objects of that length is generated (weak prior information is
        assumed). If a list is provided, it is used and overrides the specified number
        of correlation parameters.
Can change the length by setting this attribute. n_corr will automatically update.
"""
return self._corr
@corr.setter
def corr(self, newcorr):
"setter method for corr"
if newcorr is None:
newcorr = [WeakPrior()]*self.n_corr
try:
list(newcorr)
except TypeError:
raise TypeError("Correlation priors must be a list of WeakPrior derived objects")
assert len(newcorr) > 0, "Correlation priors must be a list of nonzero length"
for d in newcorr:
if not issubclass(type(d), WeakPrior):
raise TypeError("Correlation priors must be a list of WeakPrior derived objects")
self._corr = list(newcorr)
if not self.n_corr is None and not self.n_corr == len(self._corr):
print("Length of corr argument differs from specified value of n_corr. " +
"Defaulting to the value given by the corr argument.")
self._n_corr = len(self._corr)
@property
def n_corr(self):
"""
Number of correlation length parameters
"""
return self._n_corr
@property
def cov(self):
"""Covariance Scale Priors
Prior distribution on Covariance Scale. Can be set using a ``WeakPriors``
derived object.
"""
return self._cov
@cov.setter
def cov(self, newcov):
"Setter method for cov"
if newcov is None:
newcov = WeakPrior()
if not issubclass(type(newcov), WeakPrior):
raise TypeError("Covariance prior must be a WeakPrior derived object")
self._cov = newcov
@property
def nugget_type(self):
"""
Nugget fitting method for the parent GP.
"""
return self._nugget_type
@property
def nugget(self):
"""
Nugget prior distribution
If a nugget is fit, this determines the prior used. If the nugget
is not fit, will automatically set this to ``None``.
"""
return self._nugget
@nugget.setter
def nugget(self, newnugget):
"Setter method for nugget"
if self.nugget_type in ["pivot", "adaptive", "fixed"] and not newnugget is None:
print("Nugget type does not support prior distribution, setting to None")
newnugget = None
if newnugget is None and self.nugget_type == "fit":
newnugget = WeakPrior()
if not (newnugget is None or issubclass(type(newnugget), WeakPrior)):
raise TypeError("Nugget prior must be a WeakPrior derived object or None")
self._nugget = newnugget
def _check_theta(self, theta):
"""
Perform checks on a ``GPParams`` object to ensure it matches this ``GPPriors`` object.
"""
if not isinstance(theta, GPParams):
raise TypeError("theta must be a GPParams object when computing priors in GPPriors")
assert self.n_corr == theta.n_corr, "Provided GPParams object does not have the correct number of parameters"
assert self.nugget_type == theta.nugget_type, "Provided GPParams object does not have the correct nugget type"
assert not theta.get_data() is None, "Provided GPParams object does not have its data set"
def logp(self, theta):
"""
Compute log probability given a ``GPParams`` object
Takes a ``GPParams`` object, this method computes the
sum of the log probability of all of the sub-distributions.
Returns a float.
:param theta: Hyperparameter values at which the log prior is
to be computed. Must be a ``GPParams`` object
whose attributes match this ``GPPriors`` object.
:type theta: GPParams
:returns: Sum of the log probability of all prior distributions
:rtype: float
"""
self._check_theta(theta)
logposterior = 0.
for dist, val in zip(self._corr, theta.corr):
logposterior += dist.logp(val)
logposterior += self._cov.logp(theta.cov)
if self.nugget_type == "fit":
logposterior += self._nugget.logp(theta.nugget)
return logposterior
def dlogpdtheta(self, theta):
"""
Compute derivative of the log probability given a ``GPParams`` object
Takes a ``GPParams`` object, this method computes the
derivative of the log probability of all of the
sub-distributions with respect to the raw hyperparameter
values. Returns a numpy array of length ``n_params`` (the number
of fitting parameters in the ``GPParams`` object).
:param theta: Hyperparameter values at which the log prior
derivative is to be computed. Must be a
``GPParams`` object whose attributes match
this ``GPPriors`` object.
:type theta: GPParams
:returns: Gradient of the log probability. Length will be
the value of ``n_params`` of the ``GPParams``
object.
:rtype: ndarray
"""
self._check_theta(theta)
partials = []
for dist, val in zip(self._corr, theta.corr):
partials.append(dist.dlogpdtheta(val, CorrTransform))
partials.append(self._cov.dlogpdtheta(theta.cov, CovTransform))
if self.nugget_type == "fit":
partials.append(self._nugget.dlogpdtheta(theta.nugget, CovTransform))
return np.array(partials)
def d2logpdtheta2(self, theta):
"""
Compute the second derivative of the log probability
given a ``GPParams`` object
Takes a ``GPParams`` object, this method computes the
second derivative of the log probability of all of the
sub-distributions with respect to the raw hyperparameter
values. Returns a numpy array of length ``n_params`` (the number
of fitting parameters in the ``GPParams`` object).
:param theta: Hyperparameter values at which the log prior
second derivative is to be computed. Must be a
``GPParams`` object whose attributes match
this ``GPPriors`` object.
:type theta: GPParams
:returns: Hessian of the log probability. Length will be
the value of ``n_params`` of the ``GPParams``
object. (Note that since all mixed partials
are zero, this returns the diagonal
of the Hessian as an array)
:rtype: ndarray
"""
self._check_theta(theta)
hessian = []
for dist, val in zip(self._corr, theta.corr):
hessian.append(dist.d2logpdtheta2(val, CorrTransform))
hessian.append(self._cov.d2logpdtheta2(theta.cov, CovTransform))
if self.nugget_type == "fit":
hessian.append(self._nugget.d2logpdtheta2(theta.nugget, CovTransform))
return np.array(hessian)
def sample(self):
"""
Draw a set of samples from the prior distributions
Draws a set of samples from the prior distributions associated with
this GPPriors object. Used in fitting to initialize the minimization
algorithm.
:returns: Random draw from each distribution, transformed to the
raw hyperparameter values. Will be a numpy array
with length ``n_params`` of the associated ``GPParams``
object.
"""
sample_pt = []
for dist in self._corr:
sample_pt.append(dist.sample(CorrTransform))
sample_pt.append(self._cov.sample(CovTransform))
if self.nugget_type == "fit":
sample_pt.append(self._nugget.sample(CovTransform))
return np.array(sample_pt)
def __str__(self):
        return ("GPPriors with mean = {}, corr = {}, cov = {}, nugget = {}"
                .format(self._mean, self._corr, self._cov, self._nugget))
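# Hedged usage sketch (added for illustration): build default priors from the
# training inputs of a GP and draw a random starting point for optimisation.
# ``inputs`` is assumed to be a 2-D array of GP training inputs.
def _example_default_gppriors(inputs, n_corr):
    """Illustrative sketch: default priors plus a raw hyperparameter sample."""
    priors = GPPriors.default_priors(inputs, n_corr, nugget_type="fit")
    theta0 = priors.sample()   # raw (transformed) hyperparameter values
    return priors, theta0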
class MeanPriors(object):
"""
Object holding mean priors (mean vector and covariance float/vector/matrix
assuming a multivariate normal distribution). Includes methods for
computing the inverse and determinant of the covariance and the inverse
of the covariance multiplied by the mean.
Note that if weak prior information is provided, or if there is no
mean function, the methods here will still work correctly given the desired
calling context.
:param mean: Mean vector of the multivariate normal prior distribution
:type mean: ndarray
:param cov: Scalar variance, vector variance, or covariance matrix of the
covariance of the prior distribution. Must be a float or 1D
or 2D numpy array.
:type cov: float or ndarray
"""
def __init__(self, mean=None, cov=None):
if mean is None:
self.mean = None
if not cov is None:
warnings.warn("Both mean and cov need to be set to form a valid nontrivial " +
"MeanPriors object. mean is not provided, so ignoring the " +
"provided cov.")
self.cov = None
self.Lb = None
else:
self.mean = np.reshape(np.array(mean), (-1,))
if cov is None:
raise ValueError("Both mean and cov need to be set to form a valid MeanPriors object")
self.cov = np.array(cov)
self.Lb = None
if self.cov.ndim == 0:
assert self.cov > 0., "covariance term must be greater than zero in MeanPriors"
elif self.cov.ndim == 1:
assert len(self.cov) == len(self.mean), "mean and variances must have the same length in MeanPriors"
assert np.all(self.cov > 0.), "all variances must be greater than zero in MeanPriors"
elif self.cov.ndim == 2:
assert self.cov.shape[0] == len(self.mean), "mean and covariances must have the same shape in MeanPriors"
assert self.cov.shape[1] == len(self.mean), "mean and covariances must have the same shape in MeanPriors"
assert np.all(np.diag(self.cov) > 0.), "all covariances must be greater than zero in MeanPriors"
self.Lb = cho_factor(self.cov)
else:
raise ValueError("Bad shape for the covariance in MeanPriors")
@property
def n_params(self):
r"""
Number of parameters associated with the mean
:returns: number of mean parameters (or zero if
prior information is weak)
:rtype: int
"""
if self.mean is None:
return 0
else:
return len(self.mean)
@property
def has_weak_priors(self):
r"""
Property indicating if the Mean has weak prior information
:returns: Boolean indicating if prior information is weak
:rtype: bool
"""
return self.mean is None
def dm_dot_b(self, dm):
r"""
Take dot product of mean with a design matrix
Returns the dot product of a design matrix with
the prior distribution mean vector. If prior
information is weak or there is no mean function,
returns zeros of the appropriate shape.
:param dm: Design matrix, array with shape
``(n, n_mean)``
:type dm: ndarray or patsy.DesignMatrix
:returns: dot product of design matrix with
prior distribution mean vector.
:rtype: ndarray
"""
if self.mean is None:
return np.zeros(dm.shape[0])
else:
return np.dot(dm, self.mean)
def inv_cov(self):
r"""
Compute the inverse of the covariance matrix
Returns the inverse covariance matrix or zero
if prior information is weak. Returns a float
or a 2D numpy array with shape ``(n_mean, n_mean)``.
:returns: Inverse of the covariance matrix or
zero if prior information is weak.
If the inverse is returned, it will
be a numpy array of shape
``(n_mean, n_mean)``.
:rtype: ndarray or float
"""
if self.cov is None:
return 0.
elif self.cov.ndim < 2:
inv_cov = np.zeros((len(self.mean), len(self.mean)))
np.fill_diagonal(inv_cov, np.broadcast_to(1./self.cov, (len(self.mean),)))
return inv_cov
else:
return cho_solve(self.Lb, np.eye(len(self.mean)))
def inv_cov_b(self):
r"""
Compute the inverse of the covariance matrix times the mean vector
In the log posterior computations, the inverse of the
covariance matrix multiplied by the mean is required.
This method correctly returns zero in the event mean
prior information is weak.
:returns: Inverse covariance matrix multiplied by the
mean of the prior distribution. Returns
an array with length of the number of mean
parameters or a float (in the event of weak
prior information)
:rtype: ndarray or float
"""
if self.cov is None:
return 0.
elif self.cov.ndim < 2:
return self.mean/self.cov
else:
return cho_solve(self.Lb, self.mean)
def logdet_cov(self):
r"""
Compute the log of the determinant of the covariance
Computes the log determininant of the mean prior
covariance. Correctly returns zero if the prior
information on the mean is weak.
:returns: Log determinant of the covariance matrix
:rtype: float
"""
if self.cov is None:
return 0.
elif self.cov.ndim < 2:
return np.sum(np.log(np.broadcast_to(self.cov, (len(self.mean),))))
else:
return 2.*np.sum(np.log(np.diag(self.Lb[0])))
def __str__(self):
return "MeanPriors with mean = {} and cov = {}".format(self.mean, self.cov)
class WeakPrior(object):
r"""
Base Prior class implementing weak prior information
This was implemented to avoid using ``None`` to signify
weak prior information, which required many different
conditionals that made the code clunky. In this
implementation, all parameters have a prior distribution
to simplify implementation and clarify the methods
for computing the log probabilities.
"""
def logp(self, x):
r"""
Computes log probability at a given value
:param x: Value of (transformed) variable
:type x: float
:returns: Log probability
:rtype: float
"""
return 0.
def dlogpdx(self, x):
r"""
Computes derivative of log probability with respect
to the transformed variable at a given value
:param x: Value of (transformed) variable
:type x: float
:returns: Derivative of Log probability
:rtype: float
"""
return 0.
def dlogpdtheta(self, x, transform):
r"""
Computes derivative of log probability with respect
to the raw variable at a given value. Requires
passing the transform to apply to the variable
to correctly compute the derivative.
:param x: Value of (transformed) variable
:type x: float
:param transform: Transform to apply to the derivative
to use the chain rule to compute the
derivative. Must be one of ``CorrTransform``
or ``CovTransform``.
:type transform: CorrTransform or CovTransform
:returns: Derivative of Log probability
:rtype: float
"""
return float(self.dlogpdx(x)*transform.dscaled_draw(x))
def d2logpdx2(self, x):
r"""
Computes second derivative of log probability with respect
to the transformed variable at a given value
:param x: Value of (transformed) variable
:type x: float
:returns: Second derivative of Log probability
:rtype: float
"""
return 0.
def d2logpdtheta2(self, x, transform):
r"""
Computes second derivative of log probability with respect
to the raw variable at a given value. Requires
passing the transform to apply to the variable
to correctly compute the derivative.
:param x: Value of (transformed) variable
:type x: float
:param transform: Transform to apply to the derivative
to use the chain rule to compute the
derivative. Must be one of ``CorrTransform``
or ``CovTransform``.
:type transform: CorrTransform or CovTransform
:returns: Derivative of Log probability
:rtype: float
"""
return float(self.d2logpdx2(x)*transform.dscaled_draw(x)**2 +
self.dlogpdx(x)*transform.d2scaled_draw2(x))
def sample(self, transform=None):
r"""
Draws a random sample from the distribution and
transform to the raw parameter values
:param transform: Transform to apply to the sample.
Must be one of ``CorrTransform``
or ``CovTransform``. Note that
for a ``WeakPrior`` object this
argument is optional as it is
ignored, though derived classes
require this argument.
:type transform: CorrTransform or CovTransform
:returns: Raw random sample from the distribution
:rtype: float
"""
return float(5.*(np.random.rand() - 0.5))
class PriorDist(WeakPrior):
r"""
Generic Prior Distribution Object
This implements the generic methods for all non-weak prior
distributions such as default priors and sampling methods.
Requires a derived method to implement ``logp``, ``dlogpdx``,
``d2logpdx2``, and ``sample_x``.
"""
@classmethod
def default_prior(cls, min_val, max_val):
r"""
        Computes default priors given a min and max value between which
        99% of the mass should be found.
        Both min and max must be positive, as the supported distributions
        are defined over :math:`[0, +\infty)`.
        This stabilizes the solution, as it prevents the algorithm
        from getting stuck outside these ranges, where the likelihood tends
        to be flat.
Optionally, can change the distribution to be a lognormal or
gamma distribution by specifying the ``dist`` argument.
Note that the function assumes only a single input dimension is
provided. Thus, any input array will be flattened before processing.
If the root-finding algorithm fails, then the function will return
``None`` to revert to a flat prior.
:param min_val: Minimum value of the input spacing
:type min_val: float
:param max_val: Maximum value of the input spacing
:type max_val: float
:returns: Distribution with fit parameters
:rtype: Type derived from ``PriorDist``
"""
if cls == InvGammaPrior:
dist_obj = scipy.stats.invgamma
elif cls == GammaPrior:
dist_obj = scipy.stats.gamma
elif cls == LogNormalPrior:
dist_obj = scipy.stats.lognorm
else:
raise ValueError("Default prior must be invgamma, gamma, or lognormal")
assert min_val > 0., "min_val must be positive for InvGamma, Gamma, or LogNormal distributions"
assert max_val > 0., "max_val must be positive for InvGamma, Gamma, or LogNormal distributions"
assert min_val < max_val, "min_val must be less than max_val"
def f(x):
assert len(x) == 2
cdf = dist_obj(np.exp(x[0]), scale=np.exp(x[1])).cdf
return np.array([cdf(min_val) - 0.005, cdf(max_val) - 0.995])
result = root(f, np.zeros(2))
if not result["success"]:
print("Prior solver failed to converge")
return WeakPrior()
else:
return cls(np.exp(result["x"][0]), np.exp(result["x"][1]))
@classmethod
def default_prior_corr(cls, inputs):
r"""
Compute default priors on a set of inputs for the correlation length
Takes a set of inputs and computes the min and max spacing before
calling the ``default_prior`` method of the class in question to
generate a distribution. Used in computing the correlation length
default prior.
:param inputs: Input values on which the distribution will be fit.
Must be a 1D numpy array (note that 2D arrays will
be flattened).
:type inputs: ndarray
:returns: Prior distribution with fit parameters
:rtype: PriorDist derived object
"""
min_val = min_spacing(inputs)
max_val = max_spacing(inputs)
if min_val == 0. or max_val == 0.:
print("Too few unique inputs; defaulting to flat priors")
return WeakPrior()
return cls.default_prior(min_val, max_val)
def sample_x(self):
r"""
Draws a random sample from the distribution
:returns: Transformed random sample from the distribution
:rtype: float
"""
raise NotImplementedError("PriorDist does not implement a sampler")
def sample(self, transform):
r"""
Draws a random sample from the distribution and
transform to the raw parameter values
:param transform: Transform to apply to the sample.
Must be one of ``CorrTransform``
or ``CovTransform``.
:type transform: CorrTransform or CovTransform
:returns: Raw random sample from the distribution
:rtype: float
"""
return transform.inv_transform(self.sample_x())
class NormalPrior(PriorDist):
r"""
Normal Distribution Prior object
    Admits input values on :math:`(-\infty, +\infty)`.
    Takes two parameters: mean and std. Mean can take any numeric value, while std must be positive.
"""
def __init__(self, mean, std):
self.mean = mean
assert std > 0., "std parameter must be positive"
self.std = std
def logp(self, x):
r"""
Computes log probability at a given value
:param x: Value of (transformed) variable
:type x: float
:returns: Log probability
:rtype: float
"""
return -0.5*((x - self.mean)/self.std)**2 - np.log(self.std) - 0.5*np.log(2.*np.pi)
def dlogpdx(self, x):
r"""
Computes derivative of log probability with respect
to the transformed variable at a given value
:param x: Value of (transformed) variable
:type x: float
:returns: Derivative of Log probability
:rtype: float
"""
return -(x - self.mean)/self.std**2
def d2logpdx2(self, x):
r"""
Computes second derivative of log probability with respect
to the transformed variable at a given value
:param x: Value of (transformed) variable
:type x: float
:returns: Second derivative of Log probability
:rtype: float
"""
return -self.std**(-2)
def sample_x(self):
r"""
Draws a random sample from the distribution
:returns: Transformed random sample from the distribution
:rtype: float
"""
return float(scipy.stats.norm.rvs(size=1, loc=self.mean, scale=self.std))
class LogNormalPrior(PriorDist):
r"""
    Log-Normal Distribution Prior object
    Admits input values on :math:`(0, +\infty)`.
    Takes two parameters: shape and scale, both of which must be positive.
"""
def __init__(self, shape, scale):
assert shape > 0., "shape must be greater than zero"
assert scale > 0., "scale must be greater than zero"
self.shape = shape
self.scale = scale
def logp(self, x):
r"""
Computes log probability at a given value
:param x: Value of (transformed) variable
:type x: float
:returns: Log probability
:rtype: float
"""
assert x > 0
return (-0.5*(np.log(x/self.scale)/self.shape)**2
                - 0.5*np.log(2.*np.pi) - np.log(self.shape) - np.log(x))
# Copyright (C) 2020-2022 <NAME>, <NAME>, and others
# SPDX-License-Identifier: MIT
from typing import Tuple
import numpy as np
from warnings import warn
from . import kernel as _kernel
from . import poly
from . import sve
class AbstractBasis:
"""Abstract base class for intermediate representation bases."""
@property
def u(self):
"""Basis functions on the (reduced) imaginary time axis.
Set of IR basis functions on the imaginary time (`tau`) or reduced
imaginary time (`x`) axis.
To obtain the value of all basis functions at a point or a array of
points `x`, you can call the function ``u(x)``. To obtain a single
basis function, a slice or a subset `l`, you can use ``u[l]``.
"""
raise NotImplementedError()
@property
def uhat(self):
"""Basis functions on the reduced Matsubara frequency (`wn`) axis.
To obtain the value of all basis functions at a Matsubara frequency
or a array of points `wn`, you can call the function ``uhat(wn)``.
Note that we expect reduced frequencies, which are simply even/odd
numbers for bosonic/fermionic objects. To obtain a single basis
function, a slice or a subset `l`, you can use ``uhat[l]``.
"""
raise NotImplementedError()
@property
def s(self):
"""Vector of singular values of the continuation kernel"""
raise NotImplementedError()
@property
def v(self):
"""Basis functions on the (reduced) real frequency axis.
Set of IR basis functions on the real frequency (`omega`) or reduced
real-frequency (`y`) axis.
To obtain the value of all basis functions at a point or a array of
points `y`, you can call the function ``v(y)``. To obtain a single
basis function, a slice or a subset `l`, you can use ``v[l]``.
"""
raise NotImplementedError()
@property
def statistics(self):
"""Quantum statistic (`"F"` for fermionic, `"B"` for bosonic)"""
raise NotImplementedError()
@property
def accuracy(self):
"""Accuracy of singular value cutoff"""
return self.s[-1] / self.s[0]
def __getitem__(self, index):
"""Return basis functions/singular values for given index/indices.
This can be used to truncate the basis to the n most significant
singular values: `basis[:3]`.
"""
raise NotImplementedError()
@property
def size(self):
"""Number of basis functions / singular values"""
return self.s.size
@property
def shape(self):
"""Shape of the basis function set"""
return self.s.shape
@property
def kernel(self):
"""Kernel of which this is the singular value expansion"""
raise NotImplementedError()
@property
def sve_result(self):
raise NotImplementedError()
@property
def lambda_(self):
"""Basis cutoff parameter Λ = β * ωmax"""
return self.kernel.lambda_
@property
def beta(self):
"""Inverse temperature or `None` because unscaled basis"""
raise NotImplementedError()
@property
def wmax(self):
"""Real frequency cutoff (this is `None` because unscaled basis)"""
raise NotImplementedError()
def default_tau_sampling_points(self):
"""Default sampling points on the imaginary time/x axis"""
return _default_sampling_points(self.u)
def default_omega_sampling_points(self):
"""Default sampling points on the real frequency axis"""
return self.v[-1].deriv().roots()
def default_matsubara_sampling_points(self, *, mitigate=True):
"""Default sampling points on the imaginary frequency axis"""
return _default_matsubara_sampling_points(self.uhat, mitigate)
@property
def is_well_conditioned(self):
"""Returns True if the sampling is expected to be well-conditioned"""
return True
class DimensionlessBasis(AbstractBasis):
"""Intermediate representation (IR) basis in reduced variables.
For a continuation kernel from real frequencies, ω ∈ [-ωmax, ωmax], to
imaginary time, τ ∈ [0, β], this class stores the truncated singular
value expansion or IR basis::
K(x, y) ≈ sum(u[l](x) * s[l] * v[l](y) for l in range(L))
The functions are given in reduced variables, ``x = 2*τ/β - 1`` and
``y = ω/ωmax``, which scales both sides to the interval ``[-1, 1]``. The
kernel then only depends on a cutoff parameter ``Λ = β * ωmax``.
Example:
The following example code assumes the spectral function is a single
pole at x = 0.2::
# Compute IR basis suitable for fermions and β*W <= 42
import sparse_ir
basis = sparse_ir.DimensionlessBasis(statistics='F', lambda_=42)
# Assume spectrum is a single pole at x = 0.2, compute G(iw)
# on the first few Matsubara frequencies
gl = basis.s * basis.v(0.2)
giw = gl @ basis.uhat([1, 3, 5, 7])
See also:
:class:`FiniteTempBasis` for a basis directly in time/frequency.
"""
def __init__(self, statistics, lambda_, eps=None, *, kernel=None,
sve_result=None):
if not (lambda_ >= 0):
raise ValueError("kernel cutoff lambda must be non-negative")
if eps is None and sve_result is None and not sve.HAVE_XPREC:
warn("xprec package is not available:\n"
"expect single precision (1.5e-8) only as both cutoff and\n"
"accuracy of the basis functions")
# Calculate basis functions from truncated singular value expansion
self._kernel = _get_kernel(statistics, lambda_, kernel)
if sve_result is None:
sve_result = sve.compute(self._kernel, eps)
u, s, v = sve_result
else:
u, s, v = sve_result
if u.shape != s.shape or s.shape != v.shape:
raise ValueError("mismatched shapes in SVE")
self._statistics = statistics
# The radius of convergence of the asymptotic expansion is Lambda/2,
# so for significantly larger frequencies we use the asymptotics,
# since it has lower relative error.
even_odd = {'F': 'odd', 'B': 'even'}[statistics]
self._u = u
self._uhat = u.hat(even_odd, n_asymp=self._kernel.conv_radius)
self._s = s
self._v = v
def __getitem__(self, index):
u, s, v = self.sve_result
sve_result = u[index], s[index], v[index]
return DimensionlessBasis(self._statistics, self._kernel.lambda_,
kernel=self._kernel, sve_result=sve_result)
@property
def statistics(self): return self._statistics
@property
def u(self) -> poly.PiecewiseLegendrePoly: return self._u
@property
def uhat(self) -> poly.PiecewiseLegendreFT: return self._uhat
@property
def s(self) -> np.ndarray: return self._s
@property
def v(self) -> poly.PiecewiseLegendrePoly: return self._v
@property
def kernel(self): return self._kernel
@property
def beta(self): return None
@property
def wmax(self): return None
@property
def sve_result(self):
return self._u, self._s, self._v
class FiniteTempBasis(AbstractBasis):
"""Intermediate representation (IR) basis for given temperature.
For a continuation kernel from real frequencies, ω ∈ [-ωmax, ωmax], to
imaginary time, τ ∈ [0, beta], this class stores the truncated singular
value expansion or IR basis::
K(τ, ω) ≈ sum(u[l](τ) * s[l] * v[l](ω) for l in range(L))
This basis is inferred from a reduced form by appropriate scaling of
the variables.
Example:
The following example code assumes the spectral function is a single
pole at ω = 2.5::
# Compute IR basis for fermions and β = 10, W <= 4.2
import sparse_ir
basis = sparse_ir.FiniteTempBasis(statistics='F', beta=10, wmax=4.2)
# Assume spectrum is a single pole at ω = 2.5, compute G(iw)
# on the first few Matsubara frequencies
gl = basis.s * basis.v(2.5)
giw = gl @ basis.uhat([1, 3, 5, 7])
"""
def __init__(self, statistics, beta, wmax, eps=None, *, kernel=None,
sve_result=None):
if not (beta > 0):
raise ValueError("inverse temperature beta must be positive")
if not (wmax >= 0):
raise ValueError("frequency cutoff must be non-negative")
if eps is None and sve_result is None and not sve.HAVE_XPREC:
warn("xprec package is not available:\n"
"expect single precision (1.5e-8) only as both cutoff and\n"
"accuracy of the basis functions")
# Calculate basis functions from truncated singular value expansion
self._kernel = _get_kernel(statistics, beta * wmax, kernel)
if sve_result is None:
sve_result = sve.compute(self._kernel, eps)
u, s, v = sve_result
else:
u, s, v = sve_result
if u.shape != s.shape or s.shape != v.shape:
raise ValueError("mismatched shapes in SVE")
if u.xmin != -1 or u.xmax != 1:
raise RuntimeError("u must be defined in the reduced variable.")
self._sve_result = sve_result
self._statistics = statistics
self._beta = beta
self._wmax = wmax
self._accuracy = s[-1] / s[0]
# The polynomials are scaled to the new variables by transforming the
# knots according to: tau = beta/2 * (x + 1), w = wmax * y. Scaling
# the data is not necessary as the normalization is inferred.
self._u = u.__class__(u.data, beta/2 * (u.knots + 1), beta/2 * u.dx, u.symm)
self._v = v.__class__(v.data, wmax * v.knots, wmax * v.dx, v.symm)
# The singular values are scaled to match the change of variables, with
# the additional complexity that the kernel may have an additional
# power of w.
self._s = np.sqrt(beta/2 * wmax) * (wmax**(-self.kernel.ypower)) * s
# HACK: as we don't yet support Fourier transforms on anything but the
# unit interval, we need to scale the underlying data. This breaks
# the correspondence between U.hat and Uhat though.
uhat_base = u.__class__(np.sqrt(beta) * u.data, u, symm=u.symm)
conv_radius = 40 * self.kernel.lambda_
_even_odd = {'F': 'odd', 'B': 'even'}[statistics]
self._uhat = uhat_base.hat(_even_odd, conv_radius)
def __getitem__(self, index):
u, s, v = self.sve_result
sve_result = u[index], s[index], v[index]
return FiniteTempBasis(self._statistics, self._beta, self._wmax,
kernel=self._kernel, sve_result=sve_result)
@property
def statistics(self): return self._statistics
@property
def beta(self): return self._beta
@property
def wmax(self): return self._wmax
@property
def u(self) -> poly.PiecewiseLegendrePoly: return self._u
@property
def uhat(self) -> poly.PiecewiseLegendreFT: return self._uhat
@property
def s(self) -> np.ndarray: return self._s
@property
def v(self) -> poly.PiecewiseLegendrePoly: return self._v
@property
def kernel(self): return self._kernel
@property
def sve_result(self): return self._sve_result
def finite_temp_bases(
beta: float, wmax: float, eps: float = None,
sve_result: tuple = None
)-> Tuple[FiniteTempBasis, FiniteTempBasis]:
"""Construct FiniteTempBasis objects for fermion and bosons
Construct FiniteTempBasis objects for fermion and bosons using
the same LogisticKernel instance.
"""
if sve_result is None:
sve_result = sve.compute(_kernel.LogisticKernel(beta*wmax), eps)
basis_f = FiniteTempBasis("F", beta, wmax, eps, sve_result=sve_result)
basis_b = FiniteTempBasis("B", beta, wmax, eps, sve_result=sve_result)
return basis_f, basis_b
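# Usage sketch (illustrative): both bases are built from a single SVE of the
# logistic kernel, so the expensive decomposition is computed only once.
#
#     basis_f, basis_b = finite_temp_bases(beta=10.0, wmax=4.2, eps=1e-8)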
def _default_sampling_points(u):
poly = u[-1]
maxima = poly.deriv().roots()
left = .5 * (maxima[:1] + poly.xmin)
right = .5 * (maxima[-1:] + poly.xmax)
return np.concatenate([left, maxima, right])
def _default_matsubara_sampling_points(uhat, mitigate=True):
# Use the (discrete) extrema of the corresponding highest-order basis
# function in Matsubara. This turns out to be close to optimal with
# respect to conditioning for this size (within a few percent).
polyhat = uhat[-1]
wn = polyhat.extrema()
# While the condition number for sparse sampling in tau saturates at a
# modest level, the conditioning in Matsubara steadily deteriorates due
# to the fact that we are not free to set sampling points continuously.
# At double precision, tau sampling is better conditioned than iwn
# by a factor of ~4 (still OK). To battle this, we fence the largest
# frequency with two carefully chosen oversampling points, which brings
# the two sampling problems within a factor of 2.
if mitigate:
wn_outer = wn[[0, -1]]
wn_diff = 2 * np.round(0.025 * wn_outer).astype(int)
if wn.size >= 20:
wn = np.hstack([wn, wn_outer - wn_diff])
if wn.size >= 42:
wn = np.hstack([wn, wn_outer + wn_diff])
wn = np.unique(wn)
# For boson, include "0".
if wn[0] % 2 == 0:
        wn = np.unique(np.hstack((0, wn)))
    return wn
# coding: utf-8
import numpy as np
def aryfmt(i):
if float("%.3f" % abs(i)) == 0:
return "%.3f" % abs(i)
return "%.3f" % i
def print_array(data):
print(" ".join(list([aryfmt(i) for i in data])))
x = np.arange(-5.0, 5.0, 0.1)
print(len(x))
print_array(list(x))
"""
maximum
"""
y = np.maximum(0, x)
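# The source appears truncated here; presumably the ReLU output is printed the
# same way as the input above (this line is an assumed continuation).
print_array(list(y))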
"""Multi-dimentional Gaussian copula mutual information estimation."""
import numpy as np
from scipy.special import psi
from itertools import product
from frites.core.copnorm import copnorm_nd
###############################################################################
###############################################################################
# N-D TOOLS
###############################################################################
###############################################################################
def nd_reshape(x, mvaxis=None, traxis=-1):
"""Multi-dimentional reshaping.
This function is used to be sure that an nd array has a correct shape
of (..., mvaxis, traxis).
Parameters
----------
x : array_like
Multi-dimentional array
mvaxis : int | None
Spatial location of the axis to consider if multi-variate analysis
is needed
traxis : int | -1
Spatial location of the trial axis. By default the last axis is
considered
Returns
-------
x_rsh : array_like
The reshaped multi-dimentional array of shape (..., mvaxis, traxis)
"""
assert isinstance(traxis, int)
traxis = np.arange(x.ndim)[traxis]
# Create an empty mvaxis axis
if not isinstance(mvaxis, int):
x = x[..., np.newaxis]
mvaxis = -1
assert isinstance(mvaxis, int)
mvaxis = np.arange(x.ndim)[mvaxis]
# move the multi-variate and trial axis
x = np.moveaxis(x, (mvaxis, traxis), (-2, -1))
return x
def nd_shape_checking(x, y, mvaxis, traxis):
"""Check that the shape between two ndarray is consitent.
x.shape = (nx_1, ..., n_xn, x_mvaxis, traxis)
y.shape = (nx_1, ..., n_xn, y_mvaxis, traxis)
"""
assert x.ndim == y.ndim
dims = np.delete(np.arange(x.ndim), -2)
assert all([x.shape[k] == y.shape[k] for k in dims])
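# Example (illustrative): nd_reshape always returns arrays shaped
# (..., mvaxis, traxis), inserting a singleton multi-variate axis if needed.
#
#     x = np.random.rand(10, 3, 250)              # (n_roi, mv, n_trials)
#     nd_reshape(x, mvaxis=1, traxis=2).shape     # -> (10, 3, 250)
#     nd_reshape(np.random.rand(10, 250)).shape   # -> (10, 1, 250)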
###############################################################################
###############################################################################
# MUTUAL INFORMATION
###############################################################################
###############################################################################
def mi_nd_gg(x, y, mvaxis=None, traxis=-1, biascorrect=True, demeaned=False,
shape_checking=True):
"""Multi-dimentional MI between two Gaussian variables in bits.
Parameters
----------
x, y : array_like
Arrays to consider for computing the Mutual Information. The two input
variables x and y should have the same shape except on the mvaxis
(if needed).
mvaxis : int | None
Spatial location of the axis to consider if multi-variate analysis
is needed
traxis : int | -1
Spatial location of the trial axis. By default the last axis is
considered
biascorrect : bool | True
Specifies whether bias correction should be applied to the estimated MI
demeaned : bool | False
Specifies whether the input data already has zero mean (true if it has
been copula-normalized)
shape_checking : bool | True
Perform a reshape and check that x and y shapes are consistents. For
high performances and to avoid extensive memory usage, it's better to
already have x and y with a shape of (..., mvaxis, traxis) and to set
this parameter to False
Returns
-------
mi : array_like
The mutual information with the same shape as x and y, without the
mvaxis and traxis
"""
# Multi-dimentional shape checking
if shape_checking:
x = nd_reshape(x, mvaxis=mvaxis, traxis=traxis)
y = nd_reshape(y, mvaxis=mvaxis, traxis=traxis)
nd_shape_checking(x, y, mvaxis, traxis)
# x.shape (..., x_mvaxis, traxis)
# y.shape (..., y_mvaxis, traxis)
ntrl = x.shape[-1]
nvarx, nvary = x.shape[-2], y.shape[-2]
nvarxy = nvarx + nvary
# joint variable along the mvaxis
xy = np.concatenate((x, y), axis=-2)
if not demeaned:
xy -= xy.mean(axis=-1, keepdims=True)
cxy = np.einsum('...ij, ...kj->...ik', xy, xy)
cxy /= float(ntrl - 1.)
# submatrices of joint covariance
cx = cxy[..., :nvarx, :nvarx]
cy = cxy[..., nvarx:, nvarx:]
# Cholesky decomposition
chcxy = np.linalg.cholesky(cxy)
chcx = np.linalg.cholesky(cx)
chcy = np.linalg.cholesky(cy)
# entropies in nats
# normalizations cancel for mutual information
hx = np.log(np.einsum('...ii->...i', chcx)).sum(-1)
hy = np.log(np.einsum('...ii->...i', chcy)).sum(-1)
hxy = np.log(np.einsum('...ii->...i', chcxy)).sum(-1)
ln2 = np.log(2)
if biascorrect:
vec = np.arange(1, nvarxy + 1)
psiterms = psi((ntrl - vec).astype(float) / 2.0) / 2.0
dterm = (ln2 - np.log(ntrl - 1.0)) / 2.0
hx = hx - nvarx * dterm - psiterms[:nvarx].sum()
hy = hy - nvary * dterm - psiterms[:nvary].sum()
hxy = hxy - nvarxy * dterm - psiterms[:nvarxy].sum()
# MI in bits
i = (hx + hy - hxy) / ln2
return i
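def _example_mi_nd_gg():
    """Illustrative self-check for :func:`mi_nd_gg` (not part of the original API).
    For jointly Gaussian variables with correlation ``rho`` the analytic MI is
    ``-0.5 * log2(1 - rho ** 2)`` bits, so the estimate below should approach
    roughly 0.737 bits as the number of trials grows.
    """
    rng = np.random.RandomState(0)
    rho = .8
    x = rng.randn(1, 10000)
    y = rho * x + np.sqrt(1. - rho ** 2) * rng.randn(1, 10000)
    return mi_nd_gg(x, y)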
def mi_model_nd_gd(x, y, mvaxis=None, traxis=-1, biascorrect=True,
demeaned=False, shape_checking=True):
"""Multi-dimentional MI between a Gaussian and a discret variables in bits.
This function is based on ANOVA style model comparison.
Parameters
----------
x, y : array_like
Arrays to consider for computing the Mutual Information. The two input
variables x and y should have the same shape except on the mvaxis
(if needed).
mvaxis : int | None
Spatial location of the axis to consider if multi-variate analysis
is needed
traxis : int | -1
Spatial location of the trial axis. By default the last axis is
considered
biascorrect : bool | True
Specifies whether bias correction should be applied to the estimated MI
demeaned : bool | False
Specifies whether the input data already has zero mean (true if it has
been copula-normalized)
shape_checking : bool | True
Perform a reshape and check that x and y shapes are consistents. For
high performances and to avoid extensive memory usage, it's better to
already have x and y with a shape of (..., mvaxis, traxis) and to set
this parameter to False
Returns
-------
mi : array_like
The mutual information with the same shape as x and y, without the
mvaxis and traxis
"""
# Multi-dimentional shape checking
if shape_checking:
x = nd_reshape(x, mvaxis=mvaxis, traxis=traxis)
assert isinstance(y, np.ndarray) and (y.ndim == 1)
assert x.shape[-1] == len(y)
# x.shape (..., x_mvaxis, traxis)
nvarx, ntrl = x.shape[-2], x.shape[-1]
u_y = np.unique(y)
sh = x.shape[:-2]
zm_shape = list(sh) + [len(u_y)]
# joint variable along the mvaxis
if not demeaned:
x = x - x.mean(axis=-1, keepdims=True)
# class-conditional entropies
ntrl_y = np.zeros((len(u_y),), dtype=int)
hcond = np.zeros(zm_shape, dtype=float)
# c = .5 * (np.log(2. * np.pi) + 1)
for num, yi in enumerate(u_y):
idx = y == yi
xm = x[..., idx]
ntrl_y[num] = idx.sum()
xm = xm - xm.mean(axis=-1, keepdims=True)
cm = np.einsum('...ij, ...kj->...ik', xm, xm) / float(ntrl_y[num] - 1.)
chcm = np.linalg.cholesky(cm)
hcond[..., num] = np.log(np.einsum('...ii->...i', chcm)).sum(-1)
# class weights
w = ntrl_y / float(ntrl)
# unconditional entropy from unconditional Gaussian fit
cx = np.einsum('...ij, ...kj->...ik', x, x) / float(ntrl - 1.)
chc = np.linalg.cholesky(cx)
hunc = np.log(np.einsum('...ii->...i', chc)).sum(-1)
ln2 = np.log(2)
if biascorrect:
vars = np.arange(1, nvarx + 1)
psiterms = psi((ntrl - vars).astype(float) / 2.) / 2.
dterm = (ln2 - np.log(float(ntrl - 1))) / 2.
hunc = hunc - nvarx * dterm - psiterms.sum()
dterm = (ln2 - np.log((ntrl_y - 1).astype(float))) / 2.
psiterms = np.zeros_like(ntrl_y, dtype=float)
for vi in vars:
idx = ntrl_y - vi
psiterms = psiterms + psi(idx.astype(float) / 2.)
hcond = hcond - nvarx * dterm - (psiterms / 2.)
# MI in bits
i = (hunc - np.einsum('i, ...i', w, hcond)) / ln2
return i
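# Example (illustrative): MI between a Gaussian variable whose mean depends on
# a binary label; a larger mean separation yields a larger MI.
#
#     y = np.random.randint(0, 2, 1000)
#     x = np.random.randn(1, 1000) + 2. * y
#     mi_model_nd_gd(x, y)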
def cmi_nd_ggg(x, y, z, mvaxis=None, traxis=-1, biascorrect=True,
demeaned=False, shape_checking=True):
"""Multi-dimentional MI between three Gaussian variables in bits.
This function is based on ANOVA style model comparison.
Parameters
----------
x, y, z : array_like
Arrays to consider for computing the Mutual Information. The three
input variables x, y and z should have the same shape except on the
mvaxis (if needed).
mvaxis : int | None
Spatial location of the axis to consider if multi-variate analysis
is needed
traxis : int | -1
Spatial location of the trial axis. By default the last axis is
considered
biascorrect : bool | True
Specifies whether bias correction should be applied to the estimated MI
demeaned : bool | False
Specifies whether the input data already has zero mean (true if it has
been copula-normalized)
shape_checking : bool | True
Perform a reshape and check that x and y shapes are consistents. For
high performances and to avoid extensive memory usage, it's better to
already have x and y with a shape of (..., mvaxis, traxis) and to set
this parameter to False
Returns
-------
mi : array_like
The mutual information with the same shape as x, y and z without the
mvaxis and traxis
"""
# Multi-dimentional shape checking
if shape_checking:
x = nd_reshape(x, mvaxis=mvaxis, traxis=traxis)
y = nd_reshape(y, mvaxis=mvaxis, traxis=traxis)
z = nd_reshape(z, mvaxis=mvaxis, traxis=traxis)
nd_shape_checking(x, y, mvaxis, traxis)
nd_shape_checking(x, z, mvaxis, traxis)
# x.shape == y.shape == z.shape (..., x_mvaxis, traxis)
ntrl = x.shape[-1]
nvarx, nvary, nvarz = x.shape[-2], y.shape[-2], z.shape[-2]
nvarxy = nvarx + nvary
nvaryz = nvary + nvarz
nvarxy = nvarx + nvary
nvarxz = nvarx + nvarz
nvarxyz = nvarx + nvaryz
# joint variable along the mvaxis
xyz = np.concatenate((x, y, z), axis=-2)
if not demeaned:
xyz -= xyz.mean(axis=-1, keepdims=True)
cxyz = np.einsum('...ij, ...kj->...ik', xyz, xyz)
cxyz /= float(ntrl - 1.)
# submatrices of joint covariance
cz = cxyz[..., nvarxy:, nvarxy:]
cyz = cxyz[..., nvarx:, nvarx:]
sh = list(cxyz.shape)
sh[-1], sh[-2] = nvarxz, nvarxz
cxz = np.zeros(tuple(sh), dtype=float)
cxz[..., :nvarx, :nvarx] = cxyz[..., :nvarx, :nvarx]
cxz[..., :nvarx, nvarx:] = cxyz[..., :nvarx, nvarxy:]
cxz[..., nvarx:, :nvarx] = cxyz[..., nvarxy:, :nvarx]
cxz[..., nvarx:, nvarx:] = cxyz[..., nvarxy:, nvarxy:]
# Cholesky decomposition
chcz = np.linalg.cholesky(cz)
chcxz = np.linalg.cholesky(cxz)
chcyz = np.linalg.cholesky(cyz)
chcxyz = np.linalg.cholesky(cxyz)
# entropies in nats
# normalizations cancel for mutual information
hz = np.log(np.einsum('...ii->...i', chcz)).sum(-1)
hxz = np.log(np.einsum('...ii->...i', chcxz)).sum(-1)
hyz = np.log(np.einsum('...ii->...i', chcyz)).sum(-1)
hxyz = np.log(np.einsum('...ii->...i', chcxyz)).sum(-1)
ln2 = np.log(2)
if biascorrect:
vec = np.arange(1, nvarxyz + 1)
psiterms = psi((ntrl - vec).astype(float) / 2.0) / 2.0
dterm = (ln2 - np.log(ntrl - 1.0)) / 2.0
hz = hz - nvarz * dterm - psiterms[:nvarz].sum()
hxz = hxz - nvarxz * dterm - psiterms[:nvarxz].sum()
hyz = hyz - nvaryz * dterm - psiterms[:nvaryz].sum()
hxyz = hxyz - nvarxyz * dterm - psiterms[:nvarxyz].sum()
# MI in bits
i = (hxz + hyz - hxyz - hz) / ln2
return i
###############################################################################
###############################################################################
# GAUSSIAN COPULA MUTUAL INFORMATION
###############################################################################
###############################################################################
def gcmi_nd_cc(x, y, mvaxis=None, traxis=-1, shape_checking=True, gcrn=True):
"""GCMI between two continuous variables.
The only difference with `mi_gg` is that a normalization is performed for
each continuous variable.
Parameters
----------
x, y : array_like
Continuous variables
mvaxis : int | None
Spatial location of the axis to consider if multi-variate analysis
is needed
traxis : int | -1
Spatial location of the trial axis. By default the last axis is
considered
shape_checking : bool | True
Perform a reshape and check that x and y shapes are consistents. For
high performances and to avoid extensive memory usage, it's better to
already have x and y with a shape of (..., mvaxis, traxis) and to set
this parameter to False
gcrn : bool | True
Apply a Gaussian Copula rank normalization. This operation is
relatively slow for big arrays.
Returns
-------
mi : array_like
The mutual information with the same shape as x and y, without the
mvaxis and traxis
"""
# Multi-dimentional shape checking
if shape_checking:
x = nd_reshape(x, mvaxis=mvaxis, traxis=traxis)
y = nd_reshape(y, mvaxis=mvaxis, traxis=traxis)
nd_shape_checking(x, y, mvaxis, traxis)
# x.shape (..., x_mvaxis, traxis)
# y.shape (..., y_mvaxis, traxis)
if gcrn:
cx, cy = copnorm_nd(x, axis=-1), copnorm_nd(y, axis=-1)
else:
cx, cy = x, y
return mi_nd_gg(cx, cy, mvaxis=-2, traxis=-1, biascorrect=True,
demeaned=True, shape_checking=False)
def gcmi_model_nd_cd(x, y, mvaxis=None, traxis=-1, shape_checking=True,
gcrn=True):
"""GCMI between a continuous and discret variables.
The only difference with `mi_gg` is that a normalization is performed for
each continuous variable.
Parameters
----------
x : array_like
Continuous variable
y : array_like
        Discrete variable of shape (n_trials,)
mvaxis : int | None
Spatial location of the axis to consider if multi-variate analysis
is needed
traxis : int | -1
Spatial location of the trial axis. By default the last axis is
considered
shape_checking : bool | True
Perform a reshape and check that x is consistents. For high
performances and to avoid extensive memory usage, it's better to
already have x with a shape of (..., mvaxis, traxis) and to set this
parameter to False
gcrn : bool | True
Apply a Gaussian Copula rank normalization. This operation is
relatively slow for big arrays.
Returns
-------
mi : array_like
The mutual information with the same shape as x, without the mvaxis and
traxis
"""
# Multi-dimentional shape checking
if shape_checking:
x = nd_reshape(x, mvaxis=mvaxis, traxis=traxis)
# x.shape (..., x_mvaxis, traxis)
# y.shape (traxis)
cx = copnorm_nd(x, axis=-1) if gcrn else x
return mi_model_nd_gd(cx, y, mvaxis=-2, traxis=-1, biascorrect=True,
demeaned=True, shape_checking=False)
###############################################################################
###############################################################################
# GAUSSIAN COPULA CONDITIONAL MUTUAL INFORMATION
###############################################################################
###############################################################################
def gccmi_nd_ccnd(x, y, *z, mvaxis=None, traxis=-1, gcrn=True,
shape_checking=True, biascorrect=True, demeaned=True):
"""Conditional GCMI between two continuous variables.
This function performs a GC-CMI between 2 continuous variables conditioned
with multiple discrete variables.
Parameters
----------
x, y : array_like
Arrays to consider for computing the Mutual Information. The two input
variables x and y should have the same shape except on the mvaxis
(if needed).
z : list | array_like
Array that describes the conditions across the trial axis. Should be a
list of arrays of shape (n_trials,) of integers
(e.g. [0, 0, ..., 1, 1, 2, 2])
mvaxis : int | None
Spatial location of the axis to consider if multi-variate analysis
is needed
traxis : int | -1
Spatial location of the trial axis. By default the last axis is
considered
gcrn : bool | True
Apply a Gaussian Copula rank normalization. This operation is
relatively slow for big arrays.
shape_checking : bool | True
Perform a reshape and check that x and y shapes are consistents. For
high performances and to avoid extensive memory usage, it's better to
already have x and y with a shape of (..., mvaxis, traxis) and to set
this parameter to False
Returns
-------
cmi : array_like
Conditional mutual-information with the same shape as x and y without
the mvaxis and traxis
"""
# Multi-dimentional shape checking
if shape_checking:
x = nd_reshape(x, mvaxis=mvaxis, traxis=traxis)
y = nd_reshape(y, mvaxis=mvaxis, traxis=traxis)
nd_shape_checking(x, y, mvaxis, traxis)
ntrl = x.shape[-1]
# Find unique values of each discret array
prod_idx = discret_to_index(*z)
# sh = x.shape[:-3] if isinstance(mvaxis, int) else x.shape[:-2]
sh = x.shape[:-2]
zm_shape = list(sh) + [len(prod_idx)]
# calculate gcmi for each z value
pz = np.zeros((len(prod_idx),), dtype=float)
icond = np.zeros(zm_shape, dtype=float)
for num, idx in enumerate(prod_idx):
pz[num] = idx.sum()
if gcrn:
thsx = copnorm_nd(x[..., idx], axis=-1)
thsy = copnorm_nd(y[..., idx], axis=-1)
else:
thsx = x[..., idx]
thsy = y[..., idx]
icond[..., num] = mi_nd_gg(thsx, thsy, mvaxis=-2, traxis=-1,
biascorrect=biascorrect, demeaned=demeaned,
shape_checking=False)
pz /= ntrl
# conditional mutual information
cmi = np.sum(pz * icond, axis=-1)
return cmi
def cmi_nd_ggd(x, y, z, mvaxis=None, traxis=-1, shape_checking=True,
biascorrect=True, demeaned=False):
"""Conditional MI between a continuous and a discret variable.
This function performs a CMI between a continuous and a discret variable
conditioned with multiple discrete variables.
Parameters
----------
x : array_like
Continuous variable
y : array_like
        Continuous variable
z : list | array_like
Array that describes the conditions across the trial axis of shape
(n_trials,)
mvaxis : int | None
Spatial location of the axis to consider if multi-variate analysis
is needed
traxis : int | -1
Spatial location of the trial axis. By default the last axis is
considered
shape_checking : bool | True
Perform a reshape and check that x and y shapes are consistents. For
high performances and to avoid extensive memory usage, it's better to
already have x and y with a shape of (..., mvaxis, traxis) and to set
this parameter to False
demeaned : bool | False
Specifies whether the input data already has zero mean (true if it has
been copula-normalized)
Returns
-------
cmi : array_like
Conditional mutual-information with the same shape as x and y without
the mvaxis and traxis
"""
# Multi-dimentional shape checking
if shape_checking:
x = nd_reshape(x, mvaxis=mvaxis, traxis=traxis)
y = nd_reshape(y, mvaxis=mvaxis, traxis=traxis)
nd_shape_checking(x, y, mvaxis, traxis)
ntrl = x.shape[-1]
assert (z.ndim == 1) and (len(z) == ntrl)
ntrl = x.shape[-1]
# sh = x.shape[:-3] if isinstance(mvaxis, int) else x.shape[:-2]
u_z = np.unique(z)
sh = x.shape[:-2]
zm_shape = list(sh) + [len(u_z)]
# calculate gcmi for each z value
pz = np.zeros((len(u_z),), dtype=float)
icond = np.zeros(zm_shape, dtype=float)
for n_z, zi in enumerate(u_z):
idx = z == zi
pz[n_z] = idx.sum()
thsx, thsy = x[..., idx], y[..., idx]
icond[..., n_z] = mi_nd_gg(thsx, thsy, mvaxis=-2, traxis=-1,
biascorrect=biascorrect, demeaned=demeaned,
shape_checking=False)
pz /= ntrl
# conditional mutual information
cmi = np.sum(np.multiply(pz, icond), axis=-1)
return cmi
def gccmi_model_nd_cdnd(x, y, *z, mvaxis=None, traxis=-1, gcrn=True,
shape_checking=True):
"""Conditional GCMI between a continuous and a discret variable.
This function performs a GC-CMI between a continuous and a discret
variable conditioned with multiple discrete variables.
Parameters
----------
x : array_like
Continuous variable
y : array_like
        Discrete variable
z : list | array_like
Array that describes the conditions across the trial axis. Should be a
list of arrays of shape (n_trials,) of integers
(e.g. [0, 0, ..., 1, 1, 2, 2])
mvaxis : int | None
Spatial location of the axis to consider if multi-variate analysis
is needed
traxis : int | -1
Spatial location of the trial axis. By default the last axis is
considered
gcrn : bool | True
Apply a Gaussian Copula rank normalization. This operation is
relatively slow for big arrays.
shape_checking : bool | True
Perform a reshape and check that x and y shapes are consistents. For
high performances and to avoid extensive memory usage, it's better to
already have x and y with a shape of (..., mvaxis, traxis) and to set
this parameter to False
Returns
-------
cmi : array_like
Conditional mutual-information with the same shape as x and y without
the mvaxis and traxis
"""
# Multi-dimentional shape checking
if shape_checking:
x = nd_reshape(x, mvaxis=mvaxis, traxis=traxis)
assert isinstance(y, np.ndarray) and (y.ndim == 1)
assert x.shape[-1] == len(y)
ntrl = x.shape[-1]
# Find unique values of each discret array
prod_idx = discret_to_index(*z)
# sh = x.shape[:-3] if isinstance(mvaxis, int) else x.shape[:-2]
sh = x.shape[:-2]
zm_shape = list(sh) + [len(prod_idx)]
# calculate gcmi for each z value
pz = np.zeros((len(prod_idx),), dtype=float)
icond = np.zeros(zm_shape, dtype=float)
for num, idx in enumerate(prod_idx):
pz[num] = idx.sum()
if gcrn:
thsx = copnorm_nd(x[..., idx], axis=-1)
else:
thsx = x[..., idx]
thsy = y[idx]
icond[..., num] = mi_model_nd_gd(thsx, thsy, mvaxis=-2, traxis=-1,
biascorrect=True, demeaned=True,
shape_checking=False)
pz /= ntrl
# conditional mutual information
cmi = np.sum(pz * icond, axis=-1)
return cmi
def discret_to_index(*z):
"""Convert a list of discret variables into boolean indices.
Parameters
----------
z : tuple | list
List of discret variables
Returns
-------
idx : list
List of boolean arrays. Each array specify the condition to use
"""
if isinstance(z, np.ndarray) and (z.ndim == 1):
        return [z == k for k in np.unique(z)]
import networkx
import numpy
import scipy
from .base_plotable_model import BasePlotableModel
class SEIRSNetworkModel(BasePlotableModel):
"""
A class to simulate the SEIRS Stochastic Network Model
======================================================
Params:
G Network adjacency matrix (numpy array) or Networkx graph object.
beta Rate of transmission (global interactions)
beta_local Rate(s) of transmission between adjacent individuals (optional)
sigma Rate of progression to infectious state (inverse of latent period)
gamma Rate of recovery (inverse of symptomatic infectious period)
mu_I Rate of infection-related death
xi Rate of re-susceptibility (upon recovery)
mu_0 Rate of baseline death
nu Rate of baseline birth
p Probability of individuals interacting with global population
G_Q Quarantine adjacency matrix (numpy array) or Networkx graph object.
beta_Q Rate of transmission for isolated individuals (global interactions)
beta_Q_local Rate(s) of transmission (exposure) for adjacent isolated individuals (optional)
sigma_Q Rate of progression to infectious state for isolated individuals
gamma_Q Rate of recovery for isolated individuals
mu_Q Rate of infection-related death for isolated individuals
q Probability of isolated individuals interacting with global population
isolation_time Time to remain in isolation upon positive test, self-isolation, etc.
theta_E Rate of random testing for exposed individuals
theta_I Rate of random testing for infectious individuals
phi_E Rate of testing when a close contact has tested positive for exposed individuals
phi_I Rate of testing when a close contact has tested positive for infectious individuals
psi_E Probability of positive test for exposed individuals
psi_I Probability of positive test for infectious individuals
initE Initial number of exposed individuals
initI Initial number of infectious individuals
initR Initial number of recovered individuals
initF Initial number of infection-related fatalities
initQ_S Initial number of isolated susceptible individuals
initQ_E Initial number of isolated exposed individuals
initQ_I Initial number of isolated infectious individuals
initQ_R Initial number of isolated recovered individuals
(all remaining nodes initialized susceptible)
"""
plotting_number_property = "numNodes"
"""Property to access the number to base plotting on."""
def __init__(
self,
G,
beta,
sigma,
gamma,
mu_I=0,
alpha=1.0,
xi=0,
mu_0=0,
nu=0,
f=0,
p=0,
beta_local=None,
beta_pairwise_mode="infected",
delta=None,
delta_pairwise_mode=None,
G_Q=None,
beta_Q=None,
beta_Q_local=None,
sigma_Q=None,
gamma_Q=None,
mu_Q=None,
alpha_Q=None,
delta_Q=None,
theta_E=0,
theta_I=0,
phi_E=0,
phi_I=0,
psi_E=1,
psi_I=1,
q=0,
isolation_time=14,
initE=0,
initI=0,
initR=0,
initF=0,
initQ_E=0,
initQ_I=0,
transition_mode="exponential_rates",
node_groups=None,
store_Xseries=False,
seed=None,
):
if seed is not None:
numpy.random.seed(seed)
self.seed = seed
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Model Parameters:
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
self.parameters = {
"G": G,
"G_Q": G_Q,
"beta": beta,
"sigma": sigma,
"gamma": gamma,
"mu_I": mu_I,
"xi": xi,
"mu_0": mu_0,
"nu": nu,
"f": f,
"p": p,
"beta_local": beta_local,
"beta_pairwise_mode": beta_pairwise_mode,
"alpha": alpha,
"delta": delta,
"delta_pairwise_mode": delta_pairwise_mode,
"beta_Q": beta_Q,
"beta_Q_local": beta_Q_local,
"sigma_Q": sigma_Q,
"gamma_Q": gamma_Q,
"mu_Q": mu_Q,
"alpha_Q": alpha_Q,
"delta_Q": delta_Q,
"theta_E": theta_E,
"theta_I": theta_I,
"phi_E": phi_E,
"phi_I": phi_I,
"psi_E": psi_E,
"psi_I": psi_I,
"q": q,
"isolation_time": isolation_time,
"initE": initE,
"initI": initI,
"initR": initR,
"initF": initF,
"initQ_E": initQ_E,
"initQ_I": initQ_I,
}
self.update_parameters()
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Each node can undergo 4-6 transitions (sans vitality/re-susceptibility returns to S state),
# so there are ~numNodes*6 events/timesteps expected; initialize numNodes*6 timestep slots to start
# (will be expanded during run if needed for some reason)
self.tseries = numpy.zeros(6 * self.numNodes)
self.numS = numpy.zeros(6 * self.numNodes)
self.numE = numpy.zeros(6 * self.numNodes)
self.numI = numpy.zeros(6 * self.numNodes)
self.numR = numpy.zeros(6 * self.numNodes)
self.numF = numpy.zeros(6 * self.numNodes)
self.numQ_E = numpy.zeros(6 * self.numNodes)
self.numQ_I = numpy.zeros(6 * self.numNodes)
self.N = numpy.zeros(6 * self.numNodes)
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Initialize Timekeeping:
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
self.t = 0
self.tmax = 0 # will be set when run() is called
self.tidx = 0
self.tseries[0] = 0
# Vectors holding the time that each node has been in a given state or in isolation:
self.timer_state = numpy.zeros((self.numNodes, 1))
self.timer_isolation = numpy.zeros(self.numNodes)
self.isolationTime = isolation_time
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Initialize Counts of individuals with each state:
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
self.numE[0] = int(initE)
self.numI[0] = int(initI)
self.numR[0] = int(initR)
self.numF[0] = int(initF)
self.numQ_E[0] = int(initQ_E)
self.numQ_I[0] = int(initQ_I)
self.numS[0] = (
self.numNodes
- self.numE[0]
- self.numI[0]
- self.numR[0]
- self.numQ_E[0]
- self.numQ_I[0]
- self.numF[0]
)
self.N[0] = self.numNodes - self.numF[0]
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Node states:
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
self.S = 1
self.E = 2
self.I = 3
self.R = 4
self.F = 5
self.Q_E = 6
self.Q_I = 7
self.X = numpy.array(
[self.S] * int(self.numS[0])
+ [self.E] * int(self.numE[0])
+ [self.I] * int(self.numI[0])
+ [self.R] * int(self.numR[0])
+ [self.F] * int(self.numF[0])
+ [self.Q_E] * int(self.numQ_E[0])
+ [self.Q_I] * int(self.numQ_I[0])
).reshape((self.numNodes, 1))
numpy.random.shuffle(self.X)
self.store_Xseries = store_Xseries
if store_Xseries:
self.Xseries = numpy.zeros(
shape=(6 * self.numNodes, self.numNodes), dtype="uint8"
)
self.Xseries[0, :] = self.X.T
self.transitions = {
"StoE": {"currentState": self.S, "newState": self.E},
"EtoI": {"currentState": self.E, "newState": self.I},
"ItoR": {"currentState": self.I, "newState": self.R},
"ItoF": {"currentState": self.I, "newState": self.F},
"RtoS": {"currentState": self.R, "newState": self.S},
"EtoQE": {"currentState": self.E, "newState": self.Q_E},
"ItoQI": {"currentState": self.I, "newState": self.Q_I},
"QEtoQI": {"currentState": self.Q_E, "newState": self.Q_I},
"QItoR": {"currentState": self.Q_I, "newState": self.R},
"QItoF": {"currentState": self.Q_I, "newState": self.F},
"_toS": {"currentState": True, "newState": self.S},
}
self.transition_mode = transition_mode
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Initialize other node metadata:
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
self.tested = numpy.array([False] * self.numNodes).reshape((self.numNodes, 1))
self.positive = numpy.array([False] * self.numNodes).reshape((self.numNodes, 1))
self.numTested = numpy.zeros(6 * self.numNodes)
self.numPositive = numpy.zeros(6 * self.numNodes)
self.testedInCurrentState = numpy.array([False] * self.numNodes).reshape(
(self.numNodes, 1)
)
self.infectionsLog = []
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Initialize node subgroup data series:
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
self.nodeGroupData = None
if node_groups:
self.nodeGroupData = {}
for groupName, nodeList in node_groups.items():
self.nodeGroupData[groupName] = {
"nodes": numpy.array(nodeList),
"mask": numpy.isin(range(self.numNodes), nodeList).reshape(
(self.numNodes, 1)
),
}
self.nodeGroupData[groupName]["numS"] = numpy.zeros(6 * self.numNodes)
self.nodeGroupData[groupName]["numE"] = numpy.zeros(6 * self.numNodes)
self.nodeGroupData[groupName]["numI"] = numpy.zeros(6 * self.numNodes)
self.nodeGroupData[groupName]["numR"] = numpy.zeros(6 * self.numNodes)
self.nodeGroupData[groupName]["numF"] = numpy.zeros(6 * self.numNodes)
self.nodeGroupData[groupName]["numQ_E"] = numpy.zeros(6 * self.numNodes)
self.nodeGroupData[groupName]["numQ_I"] = numpy.zeros(6 * self.numNodes)
self.nodeGroupData[groupName]["N"] = numpy.zeros(6 * self.numNodes)
self.nodeGroupData[groupName]["numPositive"] = numpy.zeros(
6 * self.numNodes
)
self.nodeGroupData[groupName]["numTested"] = numpy.zeros(
6 * self.numNodes
)
self.nodeGroupData[groupName]["numS"][0] = numpy.count_nonzero(
self.nodeGroupData[groupName]["mask"] * self.X == self.S
)
self.nodeGroupData[groupName]["numE"][0] = numpy.count_nonzero(
self.nodeGroupData[groupName]["mask"] * self.X == self.E
)
self.nodeGroupData[groupName]["numI"][0] = numpy.count_nonzero(
self.nodeGroupData[groupName]["mask"] * self.X == self.I
)
self.nodeGroupData[groupName]["numR"][0] = numpy.count_nonzero(
self.nodeGroupData[groupName]["mask"] * self.X == self.R
)
self.nodeGroupData[groupName]["numF"][0] = numpy.count_nonzero(
self.nodeGroupData[groupName]["mask"] * self.X == self.F
)
self.nodeGroupData[groupName]["numQ_E"][0] = numpy.count_nonzero(
self.nodeGroupData[groupName]["mask"] * self.X == self.Q_E
)
self.nodeGroupData[groupName]["numQ_I"][0] = numpy.count_nonzero(
self.nodeGroupData[groupName]["mask"] * self.X == self.Q_I
)
self.nodeGroupData[groupName]["N"][0] = self.numNodes - self.numF[0]
def update_parameters(self):
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Model graphs:
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
self.G = self.parameters["G"]
# Adjacency matrix:
if type(self.G) == numpy.ndarray:
self.A = scipy.sparse.csr_matrix(self.G)
elif type(self.G) == networkx.classes.graph.Graph:
self.A = networkx.adj_matrix(
self.G
) # adj_matrix gives scipy.sparse csr_matrix
else:
raise BaseException("Input an adjacency matrix or networkx object only.")
self.numNodes = int(self.A.shape[1])
self.degree = numpy.asarray(self.node_degrees(self.A)).astype(float)
# ----------------------------------------
if self.parameters["G_Q"] is None:
self.G_Q = self.G # If no Q graph is provided, use G in its place
else:
self.G_Q = self.parameters["G_Q"]
# Quarantine Adjacency matrix:
if type(self.G_Q) == numpy.ndarray:
self.A_Q = scipy.sparse.csr_matrix(self.G_Q)
elif type(self.G_Q) == networkx.classes.graph.Graph:
self.A_Q = networkx.adj_matrix(
self.G_Q
) # adj_matrix gives scipy.sparse csr_matrix
else:
raise BaseException("Input an adjacency matrix or networkx object only.")
self.numNodes_Q = int(self.A_Q.shape[1])
self.degree_Q = numpy.asarray(self.node_degrees(self.A_Q)).astype(float)
# ----------------------------------------
assert (
self.numNodes == self.numNodes_Q
), "The normal and quarantine adjacency graphs must be of the same size."
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Model parameters:
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
self.beta = (
numpy.array(self.parameters["beta"]).reshape((self.numNodes, 1))
if isinstance(self.parameters["beta"], (list, numpy.ndarray))
else numpy.full(
fill_value=self.parameters["beta"], shape=(self.numNodes, 1)
)
)
self.sigma = (
numpy.array(self.parameters["sigma"]).reshape((self.numNodes, 1))
if isinstance(self.parameters["sigma"], (list, numpy.ndarray))
else numpy.full(
fill_value=self.parameters["sigma"], shape=(self.numNodes, 1)
)
)
self.gamma = (
numpy.array(self.parameters["gamma"]).reshape((self.numNodes, 1))
if isinstance(self.parameters["gamma"], (list, numpy.ndarray))
else numpy.full(
fill_value=self.parameters["gamma"], shape=(self.numNodes, 1)
)
)
self.mu_I = (
numpy.array(self.parameters["mu_I"]).reshape((self.numNodes, 1))
if isinstance(self.parameters["mu_I"], (list, numpy.ndarray))
else numpy.full(
fill_value=self.parameters["mu_I"], shape=(self.numNodes, 1)
)
)
self.alpha = (
numpy.array(self.parameters["alpha"]).reshape((self.numNodes, 1))
if isinstance(self.parameters["alpha"], (list, numpy.ndarray))
else numpy.full(
fill_value=self.parameters["alpha"], shape=(self.numNodes, 1)
)
)
self.xi = (
numpy.array(self.parameters["xi"]).reshape((self.numNodes, 1))
if isinstance(self.parameters["xi"], (list, numpy.ndarray))
else numpy.full(fill_value=self.parameters["xi"], shape=(self.numNodes, 1))
)
self.mu_0 = (
numpy.array(self.parameters["mu_0"]).reshape((self.numNodes, 1))
if isinstance(self.parameters["mu_0"], (list, numpy.ndarray))
else numpy.full(
fill_value=self.parameters["mu_0"], shape=(self.numNodes, 1)
)
)
self.nu = (
numpy.array(self.parameters["nu"]).reshape((self.numNodes, 1))
if isinstance(self.parameters["nu"], (list, numpy.ndarray))
else numpy.full(fill_value=self.parameters["nu"], shape=(self.numNodes, 1))
)
self.f = (
| numpy.array(self.parameters["f"]) | numpy.array |
##~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~##
## ##
## This file forms part of the Badlands surface processes modelling application. ##
## ##
## For full license and copyright information, please refer to the LICENSE.md file ##
## located at the project root, or contact the authors. ##
## ##
##~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~##
"""
This file defines the functions used to build **badlands** meshes and surface grids.
"""
import os
import time
import numpy as np
from scipy.interpolate import griddata
if 'READTHEDOCS' not in os.environ:
from badlands import (partitionTIN, FVmethod, elevationTIN, raster2TIN, waveSed,
eroMesh, strataMesh, isoFlex, stratiWedge, carbMesh, forceSim)
def construct_mesh(input, filename, verbose=False):
"""
The following function is taking parsed values from the XML to:
* build model grids & meshes,
* initialise Finite Volume discretisation,
* define the partitioning when parallelisation is enable.
Args:
input: class containing XML input file parameters.
filename: (str) this is a string containing the path to the regular grid file.
verbose : (bool) when :code:`True`, output additional debug information (default: :code:`False`).
Returns
-------
recGrid
class describing the regular grid characteristics.
FVmesh
class describing the finite volume mesh.
force
class describing the forcing parameters.
tMesh
class describing the TIN mesh.
lGIDs
numpy 1D array containing the node indices.
fixIDs
numpy 1D array containing the fixed node indices.
inGIDs
numpy 1D array containing the node indices inside the mesh.
totPts
total number of points in the mesh.
elevation
numpy array containing the elevations for the domain.
cumdiff
cumulative total erosion/deposition changes
cumhill
cumulative hillslope erosion/deposition changes
cumfail
cumulative failure induced erosion/deposition changes
cumflex
        cumulative changes induced by flexural isostasy
strata
stratigraphic class parameters
mapero
underlying erodibility map characteristics
tinFlex
class describing the flexural TIN mesh.
flex
class describing the flexural isostasy functions.
wave
class describing the wave functions.
straTIN
class describing the stratigraphic TIN mesh.
carbTIN
class describing the carbonate TIN mesh.
"""
cumflex = None
flex = None
wave = None
tinFlex = None
strata = None
mapero = None
# Get DEM regular grid and create Badlands TIN.
recGrid = raster2TIN.raster2TIN(filename, areaDelFactor=input.Afactor)
fixIDs = recGrid.boundsPt + recGrid.edgesPt
force = forceSim.forceSim(input.seafile, input.seapos, input.rainMap,
input.rainTime, input.rainVal, input.orographic, input.orographiclin,
input.rbgd, input.rmin, input.rmax, input.rzmax, input.windx,
input.windy, input.tauc, input.tauf, input.nm,
input.cw, input.hw, input.ortime, input.tectFile,
input.tectTime, recGrid.regX, recGrid.regY, input.riverPos,
input.riverTime, input.riverQws, input.riverRck, input.riverNb,
input.rockNb, input.tDisplay, input.carbValSp1, input.carbValSp2,
input.carbTime)
if input.disp3d:
force.time3d = input.time3d
if input.merge3d == 0. or input.merge3d > recGrid.resEdges:
force.merge3d = input.Afactor * recGrid.resEdges * 0.5
else:
force.merge3d = input.merge3d
# Partition the TIN
walltime = time.clock()
FVmesh = FVmethod.FVmethod(recGrid.tinMesh['vertices'], recGrid.tinMesh['triangles'],
recGrid.tinMesh['edges'])
# Perform partitioning by equivalent domain splitting
partitionIDs, RowProc, ColProc = partitionTIN.simple(recGrid.tinMesh['vertices'][:, 0],
recGrid.tinMesh['vertices'][:, 1])
FVmesh.partIDs = partitionIDs
# Get each partition global node ID
inGIDs = np.where(partitionIDs == 0)[0]
# Build Finite Volume discretisation
# Define overlapping partitions
lGIDs, localTIN = partitionTIN.overlap(recGrid.tinMesh['vertices'][:, 0], recGrid.tinMesh['vertices'][:, 1],
RowProc, ColProc, 2*recGrid.resEdges, verbose)
# Set parameters of the finite volume mesh
tMesh = FVmethod.FVmethod(localTIN['vertices'], localTIN['triangles'], localTIN['edges'])
# Define Finite Volume parameters
walltime = time.clock()
totPts = len(recGrid.tinMesh['vertices'][:, 0])
FVmesh.neighbours = np.zeros((totPts, 20), dtype=np.int32, order='F')
FVmesh.neighbours.fill(-2)
FVmesh.edge_length = np.zeros((totPts, 20), dtype=np.float, order='F')
FVmesh.vor_edges = np.zeros((totPts, 20), dtype=np.float, order='F')
FVmesh.control_volumes = np.zeros(totPts, dtype=np.float)
# Compute Finite Volume parameters
tGIDs, tNgbh, tEdgs, tVors, tVols = tMesh.construct_FV(inGIDs, lGIDs, totPts,
recGrid.resEdges*input.Afactor, verbose)
FVmesh.neighbours[tGIDs,:tMesh.maxNgbh] = tNgbh
FVmesh.edge_length[tGIDs,:tMesh.maxNgbh] = tEdgs
FVmesh.vor_edges[tGIDs,:tMesh.maxNgbh] = tVors
FVmesh.control_volumes[tGIDs] = tVols
if verbose:
print(" - FV mesh ", time.clock() - walltime)
# Define TIN parameters
if input.flexure:
elevation, cumdiff, cumhill, cumfail, cumflex, inIDs, parentIDs = _define_TINparams(totPts, input, FVmesh, recGrid, verbose)
else:
elevation, cumdiff, cumhill, cumfail, inIDs, parentIDs = _define_TINparams(totPts, input, FVmesh, recGrid, verbose)
# Build stratigraphic and erodibility meshes
if ((input.laytime and input.laytime > 0) and
(input.erolays and input.erolays >= 0)):
strata, mapero = _build_strateroMesh(input, FVmesh, recGrid, cumdiff, verbose)
elif (input.laytime and input.laytime > 0):
strata = _build_strateroMesh(input, FVmesh, recGrid, cumdiff, verbose)
elif (input.erolays and input.erolays >= 0):
mapero = _build_strateroMesh(input, FVmesh, recGrid, cumdiff, verbose)
# Set default to no rain
force.update_force_TIN(FVmesh.node_coords[:,:2])
# Flexural isostasy initialisation
if input.flexure:
flex, tinFlex, cumflex = _init_flexure(FVmesh, input, recGrid, force, elevation,
cumdiff, cumflex, totPts, verbose)
# Wavesed grid initialisation
if input.waveSed:
ref_elev = get_reference_elevation(input,recGrid,elevation)
wave = _init_wavesed(input,ref_elev, recGrid, force, verbose)
wave.build_tree(FVmesh.node_coords[:,:2])
# Stratigraphic TIN initialisation
if input.rockNb > 0:
layNb = int((input.tEnd - input.tStart)/input.laytime)+2
bPts = recGrid.boundsPt
ePts = recGrid.edgesPt
if input.restart:
straTIN = stratiWedge.stratiWedge(layNb, input.initlayers, FVmesh.node_coords[:, :2], bPts,
ePts, input.layersData, input.actlay, input.outDir, input.strath5file,
input.rockNb, recGrid.regX, recGrid.regY, elevation, input.rockCk, cumdiff,
input.rfolder, input.rstep)
else:
straTIN = stratiWedge.stratiWedge(layNb, input.initlayers, FVmesh.node_coords[:, :2], bPts,
ePts, input.layersData, input.actlay, input.outDir, input.strath5file,
input.rockNb, recGrid.regX, recGrid.regY, elevation, input.rockCk)
else:
straTIN = None
# Stratigraphic grid in case of carbonate and/or pelagic growth functions
if input.carbonate:
layNb = int((input.tEnd - input.tStart)/input.tDisplay)+2
bPts = recGrid.boundsPt
ePts = recGrid.edgesPt
if input.carbonate2:
nbSed = 3
else:
nbSed = 2
if input.restart:
carbTIN = carbMesh.carbMesh(layNb, input.initlayers, FVmesh.node_coords[:, :2], bPts,
ePts, input.layersData, input.outDir, input.strath5file, input.baseMap, nbSed,
recGrid.regX, recGrid.regY, elevation, input.rfolder, input.rstep)
else:
carbTIN = carbMesh.carbMesh(layNb, input.initlayers, FVmesh.node_coords[:, :2], bPts,
ePts, input.layersData, input.outDir, input.strath5file, input.baseMap, nbSed,
recGrid.regX, recGrid.regY, elevation)
else:
carbTIN = None
return recGrid, FVmesh, force, tMesh, lGIDs, fixIDs, \
inIDs, parentIDs, inGIDs, totPts, elevation, cumdiff, \
cumhill, cumfail, cumflex, strata, mapero, tinFlex, flex, wave, \
straTIN, carbTIN
def reconstruct_mesh(recGrid, input, verbose=False):
"""
The following function is used after 3D displacements to:
* rebuild model grids & meshes,
* reinitialise Finite Volume discretisation,
* redefine the partitioning when parallelisation is enable.
Args:
recGrid: class describing the regular grid characteristics.
input: class containing XML input file parameters.
verbose : (bool) when :code:`True`, output additional debug information (default: :code:`False`).
Returns
-------
FVmesh
class describing the finite volume mesh.
tMesh
class describing the TIN mesh.
lGIDs
numpy 1D array containing the node indices.
inIDs
numpy 1D array containing the local node indices inside the mesh.
inGIDs
numpy 1D array containing the node indices inside the mesh.
totPts
total number of points in the mesh.
"""
walltime = time.clock()
FVmesh = FVmethod.FVmethod(recGrid.tinMesh['vertices'], recGrid.tinMesh['triangles'],
recGrid.tinMesh['edges'])
# Perform partitioning by equivalent domain splitting
partitionIDs, RowProc, ColProc = partitionTIN.simple(recGrid.tinMesh['vertices'][:, 0],
recGrid.tinMesh['vertices'][:, 1])
FVmesh.partIDs = partitionIDs
# Get each partition global node ID
inGIDs = np.where(partitionIDs == 0)[0]
if verbose:
print(" - partition TIN amongst processors ", time.clock() - walltime)
# Define overlapping partitions
walltime = time.clock()
lGIDs, localTIN = partitionTIN.overlap(recGrid.tinMesh['vertices'][:, 0],
recGrid.tinMesh['vertices'][:, 1],
RowProc, ColProc, 2*recGrid.resEdges,
verbose)
# Set parameters of the finite volume mesh
tMesh = FVmethod.FVmethod(localTIN['vertices'], localTIN['triangles'], localTIN['edges'])
# Define Finite Volume parameters
totPts = len(recGrid.tinMesh['vertices'][:, 0])
FVmesh.neighbours = np.zeros((totPts, 20), dtype=np.int32, order='F')
FVmesh.neighbours.fill(-2)
FVmesh.edge_length = np.zeros((totPts, 20), dtype=np.float, order='F')
FVmesh.vor_edges = np.zeros((totPts, 20), dtype=np.float, order='F')
FVmesh.control_volumes = np.zeros(totPts, dtype=np.float)
# Compute Finite Volume parameters
tGIDs, tNgbh, tEdgs, tVors, tVols = tMesh.construct_FV(inGIDs, lGIDs, totPts,
recGrid.resEdges*input.Afactor, verbose)
FVmesh.neighbours[tGIDs,:tMesh.maxNgbh] = tNgbh
FVmesh.edge_length[tGIDs,:tMesh.maxNgbh] = tEdgs
FVmesh.vor_edges[tGIDs,:tMesh.maxNgbh] = tVors
FVmesh.control_volumes[tGIDs] = tVols
if verbose:
print(" - reconstructed FV mesh ", time.clock() - walltime)
inIDs = np.where(FVmesh.partIDs[recGrid.boundsPt:] == 0)[0]
inIDs += recGrid.boundsPt
elevationTIN.assign_parameter_pit(FVmesh.neighbours, FVmesh.control_volumes, input.diffnb,
input.diffprop, input.propa, input.propb, recGrid.boundsPt,
input.fillmax)
return FVmesh, tMesh, lGIDs, inIDs, inGIDs, totPts
def _define_TINparams(totPts, input, FVmesh, recGrid, verbose=False):
"""
This function is defining the main values declared on the TIN.
"""
walltime = time.clock()
inIDs = np.where(FVmesh.partIDs[recGrid.boundsPt:] == 0)[0]
inIDs += recGrid.boundsPt
local_elev = np.zeros(totPts)
local_elev.fill(-1.e6)
# In case of a restart read values from HDF5 files
if input.restart:
        local_cum = np.zeros(totPts)
# -*- coding: utf-8 -*-
"""
Created on Mon Feb 18 10:53:12 2019
@author: ivan
"""
"""
Signature: mesh.ray.intersects_location(ray_origins,
ray_directions,
multiple_hits=True)
Docstring:
Return the location of where a ray hits a surface.
Parameters
----------
ray_origins: (n,3) float, origins of rays
ray_directions: (n,3) float, direction (vector) of rays
Returns
---------
locations: (n) sequence of (m,3) intersection points
index_ray: (n,) int, list of ray index
index_tri: (n,) int, list of triangle (face) indexes
"""
import numpy as np
import pandas as pd
import trimesh
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import math
def rotation_matrix(axis, theta):
"""
Return the rotation matrix associated with counterclockwise rotation about
the given axis by theta radians.
"""
    axis = np.asarray(axis)
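    # The remainder of this function is missing from the source; the lines below
    # sketch the standard quaternion/Rodrigues construction implied by the
    # docstring above.
    axis = axis / math.sqrt(np.dot(axis, axis))
    a = math.cos(theta / 2.0)
    b, c, d = -axis * math.sin(theta / 2.0)
    aa, bb, cc, dd = a * a, b * b, c * c, d * d
    bc, ad, ac, ab, bd, cd = b * c, a * d, a * c, a * b, b * d, c * d
    return np.array([[aa + bb - cc - dd, 2 * (bc + ad), 2 * (bd - ac)],
                     [2 * (bc - ad), aa + cc - bb - dd, 2 * (cd + ab)],
                     [2 * (bd + ac), 2 * (cd - ab), aa + dd - bb - cc]])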
import argparse
import pickle
import torch
import numpy as np
from pathlib import Path
from torch.utils.data import DataLoader
from transformers.modeling_bert import BertConfig
from transformers.tokenization_bert import BertTokenizer
from model.net import BertClassifier
from model.data import Corpus
from model.utils import PreProcessor, PadSequence
from model.uncertainty import get_mcb_score
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import classification_report
from tqdm import tqdm
from utils import Config, CheckpointManager, SummaryManager
parser = argparse.ArgumentParser()
parser.add_argument("--ind", default="trec",
help="directory of in distribution is not sub-directory")
parser.add_argument("--ood", default="cr",
help="directory of out of distribution is sub-directory from directory of in distribution")
parser.add_argument("--type", default="bert-base-uncased", help="pretrained weights of bert")
parser.add_argument('--topk', default=1, type=int)
parser.add_argument("--nh", default=14, type=int, help="using hidden states of model from the pooled output")
if __name__ == '__main__':
args = parser.parse_args()
par_dir = Path(args.ind)
sub_dir = par_dir / args.ood
backbone_dir = Path('experiments') / args.ind
detector_dir = backbone_dir / args.ood
ptr_dir = Path("pretrained")
par_config = Config(par_dir / "config.json")
sub_config = Config(sub_dir / "config.json")
model_config = Config(backbone_dir / "config.json")
# tokenizer
ptr_tokenizer = BertTokenizer.from_pretrained(
args.type, do_lower_case="uncased" in args.type
)
vocab_filepath = ptr_dir / "{}-vocab.pkl".format(args.type)
with open(vocab_filepath, mode="rb") as io:
vocab = pickle.load(io)
pad_sequence = PadSequence(
length=model_config.length, pad_val=vocab.to_indices(vocab.padding_token)
)
preprocessor = PreProcessor(
vocab=vocab, split_fn=ptr_tokenizer.tokenize, pad_fn=pad_sequence
)
# model (restore)
checkpoint_manager = CheckpointManager(backbone_dir)
checkpoint = checkpoint_manager.load_checkpoint("best.tar")
config_filepath = ptr_dir / "{}-config.json".format(args.type)
if args.nh == 1:
config = BertConfig.from_pretrained(config_filepath, output_hidden_states=False)
else:
config = BertConfig.from_pretrained(config_filepath, output_hidden_states=True)
model = BertClassifier(
config, num_classes=model_config.num_classes, vocab=preprocessor.vocab
)
model.load_state_dict(checkpoint["model_state_dict"])
    device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
model.eval()
model.to(device)
# train detector
dev_ind_ds = Corpus(par_config.dev, preprocessor.preprocess)
dev_ind_dl = DataLoader(dev_ind_ds, batch_size=model_config.batch_size * 4, num_workers=4)
dev_ood_ds = Corpus(sub_config.dev, preprocessor.preprocess)
dev_ood_dl = DataLoader(dev_ood_ds, batch_size=model_config.batch_size * 4, num_workers=4)
with open(backbone_dir / 'feature_params_{}.pkl'.format(args.nh), mode='rb') as io:
feature_params = pickle.load(io)
ops_indices = list(range(len(feature_params['mean'].keys())))
ind_features = []
for ops_idx in tqdm(ops_indices, total=len(ops_indices)):
if args.nh == 1:
layer_mean = torch.tensor(list(feature_params['mean'].values())).to(device)
layer_precision = torch.tensor(list(feature_params['precision'].values())).to(device)
else:
layer_mean = torch.tensor(list(feature_params['mean'][ops_idx].values())).to(device)
layer_precision = torch.tensor(list(feature_params['precision'][ops_idx].values())).to(device)
mb_features = []
for mb in tqdm(dev_ind_dl, total=len(dev_ind_dl)):
x_mb, _ = map(lambda elm: elm.to(device), mb)
with torch.no_grad():
_, encoded_layers = model(x_mb)
if args.nh == 1:
mb_features.extend(get_mcb_score(encoded_layers, layer_mean,
layer_precision, topk=args.topk).cpu().numpy().tolist())
else:
mb_features.extend(get_mcb_score(encoded_layers[ops_idx], layer_mean,
layer_precision, topk=args.topk).cpu().numpy().tolist())
else:
ind_features.append(mb_features)
else:
ind_features = np.concatenate(ind_features, axis=1)
ind_label = np.zeros(ind_features.shape[0])
ood_features = []
for ops_idx in tqdm(ops_indices, total=len(ops_indices)):
if args.nh == 1:
layer_mean = torch.tensor(list(feature_params['mean'].values())).to(device)
layer_precision = torch.tensor(list(feature_params['precision'].values())).to(device)
else:
layer_mean = torch.tensor(list(feature_params['mean'][ops_idx].values())).to(device)
layer_precision = torch.tensor(list(feature_params['precision'][ops_idx].values())).to(device)
mb_features = []
for mb in tqdm(dev_ood_dl, total=len(dev_ood_dl)):
x_mb, _ = map(lambda elm: elm.to(device), mb)
with torch.no_grad():
_, encoded_layers = model(x_mb)
if args.nh == 1:
mb_features.extend(get_mcb_score(encoded_layers, layer_mean,
layer_precision, topk=args.topk).cpu().numpy().tolist())
else:
mb_features.extend(get_mcb_score(encoded_layers[ops_idx], layer_mean,
layer_precision, topk=args.topk).cpu().numpy().tolist())
else:
ood_features.append(mb_features)
else:
ood_features = np.concatenate(ood_features, axis=1)
        ood_label = np.ones(ood_features.shape[0])
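    # The script is truncated here; a plausible continuation (a sketch, the exact
    # split and reporting are assumptions) trains the logistic-regression OOD
    # detector on the concatenated confidence scores, as the imports suggest.
    features = np.concatenate([ind_features, ood_features], axis=0)
    labels = np.concatenate([ind_label, ood_label], axis=0)
    detector = LogisticRegression(max_iter=1000)
    detector.fit(features, labels)
    print(classification_report(labels, detector.predict(features),
                                target_names=['ind', 'ood']))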
import numpy as np
from matplotlib import rc
## for Palatino and other serif fonts use:
rc('font',**{'family':'serif','serif':['Computer Modern Roman']})
rc('text', usetex=True)
import matplotlib.pyplot as plt
import time
#NLL plotting
def nll_plot(nll_mean_list1,nll_var_list1,nll_mean_list2,nll_var_list2,nll_mean_list3,nll_var_list3,N_test,legend=False,last_legend_label=r'GPR'):
legend_label = []
if nll_mean_list1 is not None:
plt.gca().set_prop_cycle(None)
conf_list1 = [1.96*np.sqrt(s)/np.sqrt(N_test) for s in nll_var_list1]
upper1 = [y + c for y,c in zip(nll_mean_list1,conf_list1)]
lower1 = [y - c for y,c in zip(nll_mean_list1,conf_list1)]
plt.fill_between(range(0,len(nll_mean_list1)), upper1, lower1, alpha=.2)
plt.plot(range(0,len(nll_mean_list1)),nll_mean_list1)
legend_label.append(r'ALPaCA')
plt.ylabel('Negative Log Likelihood')
if nll_mean_list2 is not None:
conf_list2 = [1.96*np.sqrt(s)/np.sqrt(N_test) for s in nll_var_list2]
upper2 = [y + c for y,c in zip(nll_mean_list2,conf_list2)]
lower2 = [y - c for y,c in zip(nll_mean_list2,conf_list2)]
plt.fill_between(range(0,len(nll_mean_list2)), upper2, lower2, alpha=.2)
plt.plot(range(0,len(nll_mean_list2)),nll_mean_list2)
legend_label.append(r'ALPaCA (no meta)')
if nll_mean_list3 is not None:
conf_list3 = [1.96*np.sqrt(s)/np.sqrt(N_test) for s in nll_var_list3]
upper3 = [y + c for y,c in zip(nll_mean_list3,conf_list3)]
lower3 = [y - c for y,c in zip(nll_mean_list3,conf_list3)]
plt.fill_between(range(0,len(nll_mean_list3)), upper3, lower3, alpha=.2)
plt.plot(range(0,len(nll_mean_list3)),nll_mean_list3)
legend_label.append(last_legend_label)
if legend==True:
plt.legend(legend_label)
plt.xlabel('Timesteps')
def mse_plot(nll_mean_list1,nll_var_list1,nll_mean_list2,nll_var_list2,nll_mean_list3,nll_var_list3,N_test,legend=False):
legend_label = []
if nll_mean_list1 is not None:
plt.gca().set_prop_cycle(None)
conf_list1 = [1.96*np.sqrt(s)/np.sqrt(N_test) for s in nll_var_list1]
upper1 = [y + c for y,c in zip(nll_mean_list1,conf_list1)]
lower1 = [y - c for y,c in zip(nll_mean_list1,conf_list1)]
plt.fill_between(range(0,len(nll_mean_list1)), upper1, lower1, alpha=.2)
l1 = plt.plot(range(0,len(nll_mean_list1)),nll_mean_list1,label=r'ALPaCA')
legend_label.append(r'ALPaCA')
plt.ylabel('MSE')
if nll_mean_list2 is not None:
conf_list2 = [1.96*np.sqrt(s)/np.sqrt(N_test) for s in nll_var_list2]
upper2 = [y + c for y,c in zip(nll_mean_list2,conf_list2)]
lower2 = [y - c for y,c in zip(nll_mean_list2,conf_list2)]
plt.fill_between(range(0,len(nll_mean_list2)), upper2, lower2, alpha=.2)
l2 = plt.plot(range(0,len(nll_mean_list2)),nll_mean_list2, label=r'MAML (1 step)')
legend_label.append(r'MAML (1 step)')
if nll_mean_list3 is not None:
conf_list3 = [1.96*np.sqrt(s)/np.sqrt(N_test) for s in nll_var_list3]
upper3 = [y + c for y,c in zip(nll_mean_list3,conf_list3)]
lower3 = [y - c for y,c in zip(nll_mean_list3,conf_list3)]
plt.fill_between(range(0,len(nll_mean_list3)), upper3, lower3, alpha=.2)
plt.plot(range(0,len(nll_mean_list3)),nll_mean_list3, label=r'MAML (5 step)')
        legend_label.append(r'MAML (5 step)')
if legend==True:
plt.legend()
plt.xlabel('Timesteps')
def time_plot(nll_mean_list1,nll_var_list1,nll_mean_list2,nll_var_list2,nll_mean_list3,nll_var_list3,N_test,legend=False):
#same arguments cause I'm lazy
legend_label = []
if nll_mean_list1 is not None:
plt.gca().set_prop_cycle(None)
conf_list1 = [1.96*np.sqrt(s)/np.sqrt(N_test) for s in nll_var_list1]
upper1 = [y + c for y,c in zip(nll_mean_list1,conf_list1)]
lower1 = [y - c for y,c in zip(nll_mean_list1,conf_list1)]
plt.fill_between(range(0,len(nll_mean_list1)), upper1, lower1, alpha=.2)
plt.plot(range(0,len(nll_mean_list1)),nll_mean_list1)
legend_label.append(r'ALPaCA')
plt.ylabel(r'Time (s)')
if nll_mean_list2 is not None:
conf_list2 = [1.96*np.sqrt(s)/np.sqrt(N_test) for s in nll_var_list2]
upper2 = [y + c for y,c in zip(nll_mean_list2,conf_list2)]
lower2 = [y - c for y,c in zip(nll_mean_list2,conf_list2)]
plt.fill_between(range(0,len(nll_mean_list2)), upper2, lower2, alpha=.2)
plt.plot(range(0,len(nll_mean_list2)),nll_mean_list2)
legend_label.append(r'ALPaCA (no meta)')
if nll_mean_list3 is not None:
conf_list3 = [1.96*np.sqrt(s)/np.sqrt(N_test) for s in nll_var_list3]
upper3 = [y + c for y,c in zip(nll_mean_list3,conf_list3)]
lower3 = [y - c for y,c in zip(nll_mean_list3,conf_list3)]
plt.fill_between(range(0,len(nll_mean_list3)), upper3, lower3, alpha=.2)
plt.plot(range(0,len(nll_mean_list3)),nll_mean_list3)
legend_label.append(r'GPR')
if legend==True:
plt.legend(legend_label)
plt.xlabel('Timesteps')
def sinusoid_plot(freq,phase,amp,x_list,sigma_list,y_list,X_update, Y_update,sampling_density=101,legend_labels=['Ours', 'True']):
"""
x,y,sigma should be lists
"""
#plot given data
conf_list = [1.96*np.sqrt(s) for s in sigma_list]
upper = [y + c for y,c in zip(y_list,conf_list)]
lower = [y - c for y,c in zip(y_list,conf_list)]
plt.fill_between(x_list, upper, lower, alpha=.5)
plt.plot(x_list,y_list)
#plot true sinusoid
yr_list = [amp*np.sin(freq*x + phase) for x in x_list]
plt.plot(x_list,yr_list,color='r')
# plot update points
plt.plot(X_update[0,:,0],Y_update[0,:,0],'+',color='k',markersize=10)
plt.xlim([np.min(x_list), np.max(x_list)])
#legend
if legend_labels:
plt.legend(legend_labels + ['sampled points'])
def gen_sin_fig(agent, X,Y,freq,phase,amp,upper_x=5,lower_x=-5,point_every=0.1, label=None):
y_list = []
x_list = []
s_list = []
    for p in np.arange(lower_x,upper_x,point_every):
y, s = agent.test(X, Y, [[[p]]])
y_list.append(y[0,0,0])
x_list.append(p)
if s:
s_list.append(s[0,0,0,0])
else:
s_list.append(0)
legend_labels = None
if label:
legend_labels = [label, 'True']
sinusoid_plot(freq,phase,amp,x_list,s_list,y_list,X,Y, legend_labels=legend_labels)
def gen_sin_gp_fig(agent, X,Y,freq,phase,amp,upper_x=5,lower_x=-5,point_every=0.1, label=None):
    x_test = np.reshape( np.arange(lower_x,upper_x,point_every), [1,-1,1] )
y,s = agent.test(X,Y,x_test)
y = y[0,:,0]
s = s[0,:]**2
legend_labels = None
if label:
legend_labels = [label, 'True']
sinusoid_plot(freq,phase,amp,x_test[0,:,0],s,y,X,Y,legend_labels=legend_labels)
def plot_bases(x,y,indices):
x = x[0,:,0]
y = y[0,:,:]
for i in indices:
plt.figure()
plt.plot(x,y[:,i])
plt.legend([r"$\phi_{"+ str(i) +r"}(x)$"])
plt.show()
def gen_sin_bases_fig(agent, sess, x, n_bases):
phi = sess.run( agent.phi, {agent.x: x} )
plot_bases(x, phi, np.random.choice(agent.config['nn_layers'][-1],n_bases))
def plot_sample_fns(x,phi,K,L,SigEps,n_samples):
x = x[0,:,0]
phi = phi[0,:,:]
mean = np.reshape(K, [-1])
cov = np.kron(SigEps, np.linalg.inv(L))
K_vec = np.random.multivariate_normal(mean,cov,n_samples)
plt.figure()
for i in range(n_samples):
K = np.reshape(K_vec[i,:], K.shape)
y = np.squeeze(phi @ K)
plt.plot(x,y)
plt.show()
# STEP FUNCTIONS
def step_plot(x_jump,x_list,sigma_list,y_list,X_update, Y_update,sampling_density=101,legend_labels=['Ours', 'True']):
"""
x,y,sigma should be lists
"""
#plot given data
conf_list = [1.96*np.sqrt(s) for s in sigma_list]
upper = [y + c for y,c in zip(y_list,conf_list)]
lower = [y - c for y,c in zip(y_list,conf_list)]
plt.fill_between(x_list, upper, lower, alpha=.5)
plt.plot(x_list,y_list)
#plot true step
yr_list = [0.5 + 0.5*np.sign(x-x_jump) for x in x_list]
plt.plot(x_list,yr_list,color='r')
# plot update points
plt.plot(X_update[0,:,0],Y_update[0,:,0],'+',color='k',markersize=10)
plt.xlim([np.min(x_list), np.max(x_list)])
plt.ylim([-1,2])
#legend
if legend_labels:
plt.legend(legend_labels + ['sampled points'])
def multistep_plot(pt_list,x_list,sigma_list,y_list,X_update, Y_update,sampling_density=101,legend_labels=['Ours', 'True']):
"""
x,y,sigma should be lists
"""
#plot given data
conf_list = [1.96*np.sqrt(s) for s in sigma_list]
upper = [y + c for y,c in zip(y_list,conf_list)]
lower = [y - c for y,c in zip(y_list,conf_list)]
plt.fill_between(x_list, upper, lower, alpha=.5)
plt.plot(x_list,y_list)
#plot true step
#yr_list = []
x = np.reshape(x_list,[1,-1])
step_pts = np.reshape(pt_list,[-1,1])
y = 2.*np.logical_xor.reduce( x > step_pts, axis=0) - 1.
yr_list = y
plt.plot(x_list,yr_list,color='r')
# plot update points
plt.plot(X_update[0,:,0],Y_update[0,:,0],'+',color='k',markersize=10)
plt.xlim([np.min(x_list), np.max(x_list)])
plt.ylim([-2,2])
#legend
if legend_labels:
plt.legend(legend_labels + ['sampled points'])
#do plotting
def gen_step_fig(agent,X,Y,x_jump,upper_x=5,lower_x=-5,point_every=0.1, label=None):
y_list = []
x_list = []
s_list = []
    for p in np.arange(lower_x,upper_x,point_every):
y, s = agent.test(X, Y, [[[p]]])
y_list.append(y[0,0,0])
x_list.append(p)
if s:
s_list.append(s[0,0,0,0])
else:
s_list.append(0)
legend_labels = None
if label:
legend_labels = [label, 'True']
step_plot(x_jump,x_list,s_list,y_list,X,Y, legend_labels=legend_labels)
def gen_step_gp_fig(agent, X, Y, x_jump, upper_x=5,lower_x=-5,point_every=0.1, label=None):
    x_test = np.reshape( np.arange(lower_x,upper_x,point_every), [1,-1,1] )
    # reconstructed by analogy with gen_sin_gp_fig above: evaluate the GP on a dense
    # grid, then plot it against the true step function
    y,s = agent.test(X,Y,x_test)
    y = y[0,:,0]
    s = s[0,:]**2
    legend_labels = None
    if label:
        legend_labels = [label, 'True']
    step_plot(x_jump,x_test[0,:,0],s,y,X,Y,legend_labels=legend_labels)
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
#
# Copyright © 2019 <NAME> <<EMAIL>>
#
# Distributed under terms of the GNU-License license.
"""
"""
import uqra
import numpy as np, os, sys
import scipy.stats as stats
from tqdm import tqdm
import itertools, copy, math, collections
import multiprocessing as mp
import random
# warnings.filterwarnings(action="ignore", module="scipy", message="^internal gelsd")
sys.stdout = uqra.utilities.classes.Logger()
def observation_error(y, mu=0, cov=0.03, random_state=100):
e = stats.norm(0, cov * abs(y)).rvs(size=len(y), random_state=random_state)
return e
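# Illustrative usage (values made up): perturb model responses with ~3% multiplicative
# Gaussian noise to mimic measurement error.
#
#   y_true  = np.array([1.0, 2.0, 5.0])
#   y_noisy = y_true + observation_error(y_true, cov=0.03)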
def list_union(ls1, ls2):
"""
append ls2 to ls1 and check if there exist duplicates
return the union of two lists and remove duplicates
"""
if ls1 is None:
ls1 = []
if ls2 is None:
ls2 = []
ls = list(copy.deepcopy(ls1)) + list(copy.deepcopy(ls2))
if len(ls) != len(set(ls1).union(set(ls2))):
print('[WARNING]: list_union: duplicate elements found in list when append to each other')
ls = list(set(ls))
return ls
def isOverfitting(cv_err):
    if len(cv_err) < 3:
        return False
    if cv_err[-1] > cv_err[-2] and cv_err[-2] > cv_err[0]:
        print('WARNING: Overfitting')
        return True
    return False
def threshold_converge(y, threshold=0.95):
y = np.array(y)
status = True if y[-1]> threshold else False
return status, threshold
def relative_converge(y, err=0.05):
"""
check if y is converge in relative error
return: (status, error)
status: Boolean for convergeing or not
error: absolute error
"""
y = np.array(y)
if len(y) < 2:
res = (False, np.nan)
else:
error = abs((y[-2]-y[-1])/ y[-1])
res = (error < err, error)
return res
def absolute_converge(y, err=1e-4):
"""
check if y is converge in absolute error
return: (status, error)
status: Boolean for convergeing or not
error: absolute error
"""
y = np.array(y)
if len(y) < 2:
res = (False, np.nan)
else:
error = abs(y[-2]-y[-1])
res = (error < err, error)
return res
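# Illustrative behaviour of the convergence helpers above (numbers are made up):
#
#   relative_converge([1.00, 0.96], err=0.05)         # -> (True,  ~0.0417)
#   absolute_converge([0.510, 0.508], err=1e-4)       # -> (False, 0.002)
#   threshold_converge([0.90, 0.96], threshold=0.95)  # -> (True,  0.95)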
def main(model_params, doe_params, solver, r=0, random_state=None):
random.seed(random_state)
ndim_deg_cases = np.array(list(itertools.product([model_params.ndim,], model_params.degs)))
main_res = []
# data_train = uqra.Data()
# data_train.xi = np.empty((model_params.ndim, 0))
# data_train.x = np.empty((model_params.ndim, 0))
# data_train.y = np.empty((0,))
xi_train= np.empty((solver.ndim, 0))
    x_train = np.empty((solver.ndim, 0))
import numpy as np
import pytest
from topfarm.tests import npt
from topfarm.constraint_components.boundary import PolygonBoundaryComp
@pytest.mark.parametrize('boundary', [
[(0, 0), (1, 1), (2, 0), (2, 2), (0, 2)],
[(0, 0), (1, 1), (2, 0), (2, 2), (0, 2), (0, 0)], # StartEqEnd
[(0, 0), (0, 2), (2, 2), (2, 0), (1, 1)], # Clockwise
[(0, 0), (0, 2), (2, 2), (2, 0), (1, 1), (0, 0)] # StartEqEndClockwise
])
def testPolygon(boundary):
pbc = PolygonBoundaryComp(1, boundary)
np.testing.assert_array_equal(pbc.xy_boundary, [[0, 0],
[1, 1],
[2, 0],
[2, 2],
[0, 2],
[0, 0]])
def check(boundary, points, distances):
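    # Build the boundary component, then verify both the signed distances and their
    # analytical gradients: dx and dy are checked against forward finite differences.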
pbc = PolygonBoundaryComp(1, boundary)
d, dx, dy = pbc.calc_distance_and_gradients(points[:, 0], points[:, 1])
np.testing.assert_array_almost_equal(d, distances)
eps = 1e-7
d1, _, _ = pbc.calc_distance_and_gradients(points[:, 0] + eps, points[:, 1])
np.testing.assert_array_almost_equal((d1 - d) / eps, dx)
d2, _, _ = pbc.calc_distance_and_gradients(points[:, 0], points[:, 1] + eps)
np.testing.assert_array_almost_equal((d2 - d) / eps, dy)
def test_calc_distance_edge():
boundary = np.array([(0, 0), (1, 0), (2, 1), (0, 2), (0, 0)])
points = np.array([(0.5, .2), (1, .5), (.5, 1.5), (.2, 1)])
check(boundary, points, [0.2, np.sqrt(2 * .25**2), .5 * np.sin(np.arctan(.5)), 0.2])
def test_calc_distance_edge_outside():
boundary = np.array([(0, 0), (1, 0), (2, 1), (0, 2), (0, 0)])
points = np.array([(0.5, -.2), (1.5, 0), (.5, 2), (-.2, 1)])
check(boundary, points, [-0.2, -np.sqrt(2 * .25**2), -.5 * np.sin(np.arctan(.5)), -0.2])
def test_calc_distance_point_vertical():
boundary = np.array([(0, 0), (1, 1), (2, 0), (2, 2), (0, 2), (0, 0)])
points = np.array([(.8, 1), (.8, 1.2), (1, 1.2), (1.1, 1.2), (1.2, 1.2), (1.2, 1)])
check(boundary, points, [np.sqrt(.2**2 / 2), np.sqrt(2 * .2**2), .2,
np.sqrt(.1**2 + .2**2), np.sqrt(2 * .2**2), np.sqrt(.2**2 / 2)])
def test_calc_distance_point_vertical_outside():
boundary = np.array([(0, 0), (1, 1), (2, 0), (0, 0)])
points = np.array([(.8, 1), (.8, 1.2), (1, 1.2), (1.1, 1.2), (1.2, 1.2), (1.2, 1)])
check(boundary, points, [-np.sqrt(.2**2 / 2), -np.sqrt(2 * .2**2), -.2,
-np.sqrt(.1**2 + .2**2), -np.sqrt(2 * .2**2), -np.sqrt(.2**2 / 2)])
def test_calc_distance_point_horizontal():
boundary = np.array([(0, 0), (2, 0), (1, 1), (2, 2), (0, 2), (0, 0)])
points = np.array([(1, .8), (.8, .8), (.8, 1), (.8, 1.1), (.8, 1.2), (1, 1.2)])
check(boundary, points, [np.sqrt(.2**2 / 2), np.sqrt(2 * .2**2), .2,
np.sqrt(.1**2 + .2**2), np.sqrt(2 * .2**2), np.sqrt(.2**2 / 2)])
def testPolygon_Line():
boundary = [(0, 0), (0, 2)]
with pytest.raises(AssertionError, match="Area must be non-zero"):
PolygonBoundaryComp(1, boundary)
def test_calc_distance_U_shape():
    boundary = np.array([(0, 0), (3, 0), (3, 2), (2, 2), (2, 1), (1, 1), (1, 2), (0, 2)])
# -*- coding: utf-8 -*-
"""
Multi-lib backend for POT
The goal is to write backend-agnostic code. Whether you're using Numpy, PyTorch,
or Jax, POT code should work nonetheless.
To achieve that, POT provides backend classes which implements functions in their respective backend
imitating Numpy API. As a convention, we use nx instead of np to refer to the backend.
Examples
--------
>>> from ot.utils import list_to_array
>>> from ot.backend import get_backend
>>> def f(a, b): # the function does not know which backend to use
... a, b = list_to_array(a, b) # if a list in given, make it an array
... nx = get_backend(a, b) # infer the backend from the arguments
... c = nx.dot(a, b) # now use the backend to do any calculation
... return c
"""
# Author: <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
#
# License: MIT License
import numpy as np
import scipy.special as scipy
from scipy.sparse import issparse, coo_matrix, csr_matrix
try:
import torch
torch_type = torch.Tensor
except ImportError:
torch = False
torch_type = float
try:
import jax
import jax.numpy as jnp
import jax.scipy.special as jscipy
jax_type = jax.numpy.ndarray
except ImportError:
jax = False
jax_type = float
str_type_error = "All array should be from the same type/backend. Current types are : {}"
def get_backend_list():
"""Returns the list of available backends"""
lst = [NumpyBackend(), ]
if torch:
lst.append(TorchBackend())
if jax:
lst.append(JaxBackend())
return lst
def get_backend(*args):
"""Returns the proper backend for a list of input arrays
Also raises TypeError if all arrays are not from the same backend
"""
# check that some arrays given
if not len(args) > 0:
raise ValueError(" The function takes at least one parameter")
# check all same type
if not len(set(type(a) for a in args)) == 1:
raise ValueError(str_type_error.format([type(a) for a in args]))
if isinstance(args[0], np.ndarray):
return NumpyBackend()
elif isinstance(args[0], torch_type):
return TorchBackend()
elif isinstance(args[0], jax_type):
return JaxBackend()
else:
raise ValueError("Unknown type of non implemented backend.")
def to_numpy(*args):
"""Returns numpy arrays from any compatible backend"""
if len(args) == 1:
return get_backend(args[0]).to_numpy(args[0])
else:
return [get_backend(a).to_numpy(a) for a in args]
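# Minimal usage sketch (illustrative): the same code path works for NumPy inputs and,
# when the optional dependencies are installed, for Torch / Jax tensors as well.
#
#   import numpy as np
#   a, b = np.ones(3) / 3, np.arange(3.0)
#   nx = get_backend(a, b)    # -> NumpyBackend instance
#   c = nx.dot(a, b)          # dispatches to np.dot
#   nx.to_numpy(c)            # identity for the numpy backend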
class Backend():
"""
Backend abstract class.
Implementations: :py:class:`JaxBackend`, :py:class:`NumpyBackend`, :py:class:`TorchBackend`
- The `__name__` class attribute refers to the name of the backend.
- The `__type__` class attribute refers to the data structure used by the backend.
"""
__name__ = None
__type__ = None
__type_list__ = None
rng_ = None
def __str__(self):
return self.__name__
# convert to numpy
def to_numpy(self, a):
"""Returns the numpy version of a tensor"""
raise NotImplementedError()
# convert from numpy
def from_numpy(self, a, type_as=None):
"""Creates a tensor cloning a numpy array, with the given precision (defaulting to input's precision) and the given device (in case of GPUs)"""
raise NotImplementedError()
def set_gradients(self, val, inputs, grads):
"""Define the gradients for the value val wrt the inputs """
raise NotImplementedError()
def zeros(self, shape, type_as=None):
r"""
Creates a tensor full of zeros.
This function follows the api from :any:`numpy.zeros`
See: https://numpy.org/doc/stable/reference/generated/numpy.zeros.html
"""
raise NotImplementedError()
def ones(self, shape, type_as=None):
r"""
Creates a tensor full of ones.
This function follows the api from :any:`numpy.ones`
See: https://numpy.org/doc/stable/reference/generated/numpy.ones.html
"""
raise NotImplementedError()
def arange(self, stop, start=0, step=1, type_as=None):
r"""
Returns evenly spaced values within a given interval.
This function follows the api from :any:`numpy.arange`
See: https://numpy.org/doc/stable/reference/generated/numpy.arange.html
"""
raise NotImplementedError()
def full(self, shape, fill_value, type_as=None):
r"""
Creates a tensor with given shape, filled with given value.
This function follows the api from :any:`numpy.full`
See: https://numpy.org/doc/stable/reference/generated/numpy.full.html
"""
raise NotImplementedError()
def eye(self, N, M=None, type_as=None):
r"""
Creates the identity matrix of given size.
This function follows the api from :any:`numpy.eye`
See: https://numpy.org/doc/stable/reference/generated/numpy.eye.html
"""
raise NotImplementedError()
def sum(self, a, axis=None, keepdims=False):
r"""
Sums tensor elements over given dimensions.
This function follows the api from :any:`numpy.sum`
See: https://numpy.org/doc/stable/reference/generated/numpy.sum.html
"""
raise NotImplementedError()
def cumsum(self, a, axis=None):
r"""
Returns the cumulative sum of tensor elements over given dimensions.
This function follows the api from :any:`numpy.cumsum`
See: https://numpy.org/doc/stable/reference/generated/numpy.cumsum.html
"""
raise NotImplementedError()
def max(self, a, axis=None, keepdims=False):
r"""
Returns the maximum of an array or maximum along given dimensions.
This function follows the api from :any:`numpy.amax`
See: https://numpy.org/doc/stable/reference/generated/numpy.amax.html
"""
raise NotImplementedError()
def min(self, a, axis=None, keepdims=False):
r"""
        Returns the minimum of an array or minimum along given dimensions.
This function follows the api from :any:`numpy.amin`
See: https://numpy.org/doc/stable/reference/generated/numpy.amin.html
"""
raise NotImplementedError()
def maximum(self, a, b):
r"""
Returns element-wise maximum of array elements.
This function follows the api from :any:`numpy.maximum`
See: https://numpy.org/doc/stable/reference/generated/numpy.maximum.html
"""
raise NotImplementedError()
def minimum(self, a, b):
r"""
Returns element-wise minimum of array elements.
This function follows the api from :any:`numpy.minimum`
See: https://numpy.org/doc/stable/reference/generated/numpy.minimum.html
"""
raise NotImplementedError()
def dot(self, a, b):
r"""
Returns the dot product of two tensors.
This function follows the api from :any:`numpy.dot`
See: https://numpy.org/doc/stable/reference/generated/numpy.dot.html
"""
raise NotImplementedError()
def abs(self, a):
r"""
Computes the absolute value element-wise.
This function follows the api from :any:`numpy.absolute`
See: https://numpy.org/doc/stable/reference/generated/numpy.absolute.html
"""
raise NotImplementedError()
def exp(self, a):
r"""
Computes the exponential value element-wise.
This function follows the api from :any:`numpy.exp`
See: https://numpy.org/doc/stable/reference/generated/numpy.exp.html
"""
raise NotImplementedError()
def log(self, a):
r"""
Computes the natural logarithm, element-wise.
This function follows the api from :any:`numpy.log`
See: https://numpy.org/doc/stable/reference/generated/numpy.log.html
"""
raise NotImplementedError()
def sqrt(self, a):
r"""
        Returns the non-negative square root of a tensor, element-wise.
This function follows the api from :any:`numpy.sqrt`
See: https://numpy.org/doc/stable/reference/generated/numpy.sqrt.html
"""
raise NotImplementedError()
def power(self, a, exponents):
r"""
First tensor elements raised to powers from second tensor, element-wise.
This function follows the api from :any:`numpy.power`
See: https://numpy.org/doc/stable/reference/generated/numpy.power.html
"""
raise NotImplementedError()
def norm(self, a):
r"""
Computes the matrix frobenius norm.
This function follows the api from :any:`numpy.linalg.norm`
See: https://numpy.org/doc/stable/reference/generated/numpy.linalg.norm.html
"""
raise NotImplementedError()
def any(self, a):
r"""
Tests whether any tensor element along given dimensions evaluates to True.
This function follows the api from :any:`numpy.any`
See: https://numpy.org/doc/stable/reference/generated/numpy.any.html
"""
raise NotImplementedError()
def isnan(self, a):
r"""
Tests element-wise for NaN and returns result as a boolean tensor.
This function follows the api from :any:`numpy.isnan`
See: https://numpy.org/doc/stable/reference/generated/numpy.isnan.html
"""
raise NotImplementedError()
def isinf(self, a):
r"""
Tests element-wise for positive or negative infinity and returns result as a boolean tensor.
This function follows the api from :any:`numpy.isinf`
See: https://numpy.org/doc/stable/reference/generated/numpy.isinf.html
"""
raise NotImplementedError()
def einsum(self, subscripts, *operands):
r"""
Evaluates the Einstein summation convention on the operands.
This function follows the api from :any:`numpy.einsum`
See: https://numpy.org/doc/stable/reference/generated/numpy.einsum.html
"""
raise NotImplementedError()
def sort(self, a, axis=-1):
r"""
Returns a sorted copy of a tensor.
This function follows the api from :any:`numpy.sort`
See: https://numpy.org/doc/stable/reference/generated/numpy.sort.html
"""
raise NotImplementedError()
def argsort(self, a, axis=None):
r"""
Returns the indices that would sort a tensor.
This function follows the api from :any:`numpy.argsort`
See: https://numpy.org/doc/stable/reference/generated/numpy.argsort.html
"""
raise NotImplementedError()
def searchsorted(self, a, v, side='left'):
r"""
Finds indices where elements should be inserted to maintain order in given tensor.
This function follows the api from :any:`numpy.searchsorted`
See: https://numpy.org/doc/stable/reference/generated/numpy.searchsorted.html
"""
raise NotImplementedError()
def flip(self, a, axis=None):
r"""
Reverses the order of elements in a tensor along given dimensions.
This function follows the api from :any:`numpy.flip`
See: https://numpy.org/doc/stable/reference/generated/numpy.flip.html
"""
raise NotImplementedError()
def clip(self, a, a_min, a_max):
"""
Limits the values in a tensor.
This function follows the api from :any:`numpy.clip`
See: https://numpy.org/doc/stable/reference/generated/numpy.clip.html
"""
raise NotImplementedError()
def repeat(self, a, repeats, axis=None):
r"""
Repeats elements of a tensor.
This function follows the api from :any:`numpy.repeat`
See: https://numpy.org/doc/stable/reference/generated/numpy.repeat.html
"""
raise NotImplementedError()
def take_along_axis(self, arr, indices, axis):
r"""
Gathers elements of a tensor along given dimensions.
This function follows the api from :any:`numpy.take_along_axis`
See: https://numpy.org/doc/stable/reference/generated/numpy.take_along_axis.html
"""
raise NotImplementedError()
def concatenate(self, arrays, axis=0):
r"""
Joins a sequence of tensors along an existing dimension.
This function follows the api from :any:`numpy.concatenate`
See: https://numpy.org/doc/stable/reference/generated/numpy.concatenate.html
"""
raise NotImplementedError()
def zero_pad(self, a, pad_width):
r"""
Pads a tensor.
This function follows the api from :any:`numpy.pad`
See: https://numpy.org/doc/stable/reference/generated/numpy.pad.html
"""
raise NotImplementedError()
def argmax(self, a, axis=None):
r"""
Returns the indices of the maximum values of a tensor along given dimensions.
This function follows the api from :any:`numpy.argmax`
See: https://numpy.org/doc/stable/reference/generated/numpy.argmax.html
"""
raise NotImplementedError()
def mean(self, a, axis=None):
r"""
Computes the arithmetic mean of a tensor along given dimensions.
This function follows the api from :any:`numpy.mean`
See: https://numpy.org/doc/stable/reference/generated/numpy.mean.html
"""
raise NotImplementedError()
def std(self, a, axis=None):
r"""
Computes the standard deviation of a tensor along given dimensions.
This function follows the api from :any:`numpy.std`
See: https://numpy.org/doc/stable/reference/generated/numpy.std.html
"""
raise NotImplementedError()
def linspace(self, start, stop, num):
r"""
Returns a specified number of evenly spaced values over a given interval.
This function follows the api from :any:`numpy.linspace`
See: https://numpy.org/doc/stable/reference/generated/numpy.linspace.html
"""
raise NotImplementedError()
def meshgrid(self, a, b):
r"""
Returns coordinate matrices from coordinate vectors (Numpy convention).
This function follows the api from :any:`numpy.meshgrid`
See: https://numpy.org/doc/stable/reference/generated/numpy.meshgrid.html
"""
raise NotImplementedError()
def diag(self, a, k=0):
r"""
Extracts or constructs a diagonal tensor.
This function follows the api from :any:`numpy.diag`
See: https://numpy.org/doc/stable/reference/generated/numpy.diag.html
"""
raise NotImplementedError()
def unique(self, a):
r"""
Finds unique elements of given tensor.
This function follows the api from :any:`numpy.unique`
See: https://numpy.org/doc/stable/reference/generated/numpy.unique.html
"""
raise NotImplementedError()
def logsumexp(self, a, axis=None):
r"""
Computes the log of the sum of exponentials of input elements.
This function follows the api from :any:`scipy.special.logsumexp`
See: https://docs.scipy.org/doc/scipy/reference/generated/scipy.special.logsumexp.html
"""
raise NotImplementedError()
def stack(self, arrays, axis=0):
r"""
Joins a sequence of tensors along a new dimension.
This function follows the api from :any:`numpy.stack`
See: https://numpy.org/doc/stable/reference/generated/numpy.stack.html
"""
raise NotImplementedError()
def outer(self, a, b):
r"""
Computes the outer product between two vectors.
This function follows the api from :any:`numpy.outer`
See: https://numpy.org/doc/stable/reference/generated/numpy.outer.html
"""
raise NotImplementedError()
def reshape(self, a, shape):
r"""
Gives a new shape to a tensor without changing its data.
This function follows the api from :any:`numpy.reshape`
See: https://numpy.org/doc/stable/reference/generated/numpy.reshape.html
"""
raise NotImplementedError()
def seed(self, seed=None):
r"""
Sets the seed for the random generator.
This function follows the api from :any:`numpy.random.seed`
See: https://numpy.org/doc/stable/reference/generated/numpy.random.seed.html
"""
raise NotImplementedError()
def rand(self, *size, type_as=None):
r"""
Generate uniform random numbers.
This function follows the api from :any:`numpy.random.rand`
See: https://numpy.org/doc/stable/reference/generated/numpy.random.rand.html
"""
raise NotImplementedError()
def randn(self, *size, type_as=None):
r"""
Generate normal Gaussian random numbers.
        This function follows the api from :any:`numpy.random.randn`
        See: https://numpy.org/doc/stable/reference/generated/numpy.random.randn.html
"""
raise NotImplementedError()
def coo_matrix(self, data, rows, cols, shape=None, type_as=None):
r"""
Creates a sparse tensor in COOrdinate format.
This function follows the api from :any:`scipy.sparse.coo_matrix`
See: https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.coo_matrix.html
"""
raise NotImplementedError()
def issparse(self, a):
r"""
Checks whether or not the input tensor is a sparse tensor.
This function follows the api from :any:`scipy.sparse.issparse`
See: https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.issparse.html
"""
raise NotImplementedError()
def tocsr(self, a):
r"""
Converts this matrix to Compressed Sparse Row format.
This function follows the api from :any:`scipy.sparse.coo_matrix.tocsr`
See: https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.coo_matrix.tocsr.html
"""
raise NotImplementedError()
def eliminate_zeros(self, a, threshold=0.):
r"""
Removes entries smaller than the given threshold from the sparse tensor.
This function follows the api from :any:`scipy.sparse.csr_matrix.eliminate_zeros`
See: https://docs.scipy.org/doc/scipy-0.14.0/reference/generated/scipy.sparse.csr_matrix.eliminate_zeros.html
"""
raise NotImplementedError()
def todense(self, a):
r"""
Converts a sparse tensor to a dense tensor.
This function follows the api from :any:`scipy.sparse.csr_matrix.toarray`
See: https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.csr_matrix.toarray.html
"""
raise NotImplementedError()
def where(self, condition, x, y):
r"""
Returns elements chosen from x or y depending on condition.
This function follows the api from :any:`numpy.where`
See: https://numpy.org/doc/stable/reference/generated/numpy.where.html
"""
raise NotImplementedError()
def copy(self, a):
r"""
Returns a copy of the given tensor.
This function follows the api from :any:`numpy.copy`
See: https://numpy.org/doc/stable/reference/generated/numpy.copy.html
"""
raise NotImplementedError()
def allclose(self, a, b, rtol=1e-05, atol=1e-08, equal_nan=False):
r"""
Returns True if two arrays are element-wise equal within a tolerance.
This function follows the api from :any:`numpy.allclose`
See: https://numpy.org/doc/stable/reference/generated/numpy.allclose.html
"""
raise NotImplementedError()
def dtype_device(self, a):
r"""
Returns the dtype and the device of the given tensor.
"""
raise NotImplementedError()
def assert_same_dtype_device(self, a, b):
r"""
Checks whether or not the two given inputs have the same dtype as well as the same device
"""
raise NotImplementedError()
class NumpyBackend(Backend):
"""
NumPy implementation of the backend
- `__name__` is "numpy"
- `__type__` is np.ndarray
"""
__name__ = 'numpy'
__type__ = np.ndarray
__type_list__ = [np.array(1, dtype=np.float32),
np.array(1, dtype=np.float64)]
rng_ = np.random.RandomState()
def to_numpy(self, a):
return a
def from_numpy(self, a, type_as=None):
if type_as is None:
return a
elif isinstance(a, float):
return a
else:
return a.astype(type_as.dtype)
def set_gradients(self, val, inputs, grads):
# No gradients for numpy
return val
def zeros(self, shape, type_as=None):
if type_as is None:
return np.zeros(shape)
else:
return np.zeros(shape, dtype=type_as.dtype)
def ones(self, shape, type_as=None):
if type_as is None:
return np.ones(shape)
else:
return np.ones(shape, dtype=type_as.dtype)
def arange(self, stop, start=0, step=1, type_as=None):
return np.arange(start, stop, step)
def full(self, shape, fill_value, type_as=None):
if type_as is None:
return np.full(shape, fill_value)
else:
return np.full(shape, fill_value, dtype=type_as.dtype)
def eye(self, N, M=None, type_as=None):
if type_as is None:
return np.eye(N, M)
else:
return np.eye(N, M, dtype=type_as.dtype)
def sum(self, a, axis=None, keepdims=False):
return np.sum(a, axis, keepdims=keepdims)
def cumsum(self, a, axis=None):
return np.cumsum(a, axis)
def max(self, a, axis=None, keepdims=False):
return np.max(a, axis, keepdims=keepdims)
def min(self, a, axis=None, keepdims=False):
return np.min(a, axis, keepdims=keepdims)
def maximum(self, a, b):
return np.maximum(a, b)
def minimum(self, a, b):
return np.minimum(a, b)
def dot(self, a, b):
return np.dot(a, b)
def abs(self, a):
return np.abs(a)
def exp(self, a):
return np.exp(a)
def log(self, a):
return np.log(a)
def sqrt(self, a):
return np.sqrt(a)
def power(self, a, exponents):
return np.power(a, exponents)
def norm(self, a):
return np.sqrt(np.sum(np.square(a)))
def any(self, a):
return np.any(a)
def isnan(self, a):
return np.isnan(a)
def isinf(self, a):
return np.isinf(a)
def einsum(self, subscripts, *operands):
return np.einsum(subscripts, *operands)
def sort(self, a, axis=-1):
return np.sort(a, axis)
def argsort(self, a, axis=-1):
return np.argsort(a, axis)
def searchsorted(self, a, v, side='left'):
if a.ndim == 1:
return np.searchsorted(a, v, side)
else:
            # this is not a very efficient way to make numpy
            # searchsorted work on 2d arrays
ret = np.empty(v.shape, dtype=int)
for i in range(a.shape[0]):
ret[i, :] = np.searchsorted(a[i, :], v[i, :], side)
return ret
def flip(self, a, axis=None):
return np.flip(a, axis)
def outer(self, a, b):
return np.outer(a, b)
def clip(self, a, a_min, a_max):
return np.clip(a, a_min, a_max)
def repeat(self, a, repeats, axis=None):
return np.repeat(a, repeats, axis)
def take_along_axis(self, arr, indices, axis):
return np.take_along_axis(arr, indices, axis)
def concatenate(self, arrays, axis=0):
return np.concatenate(arrays, axis)
def zero_pad(self, a, pad_width):
return np.pad(a, pad_width)
def argmax(self, a, axis=None):
return np.argmax(a, axis=axis)
def mean(self, a, axis=None):
return np.mean(a, axis=axis)
def std(self, a, axis=None):
return np.std(a, axis=axis)
def linspace(self, start, stop, num):
return np.linspace(start, stop, num)
def meshgrid(self, a, b):
return np.meshgrid(a, b)
def diag(self, a, k=0):
return np.diag(a, k)
def unique(self, a):
        return np.unique(a)
# -*- coding: utf-8 -*-
"""
Created on Wed Jan 4 12:22:44 2017
@author: a.sancho.asensio
"""
import argparse
import base64
import json
import re, sys
import os
import glob
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import cv2
import math
import pandas as pd
import socketio
import eventlet
import eventlet.wsgi
import time
from PIL import Image
from PIL import ImageOps
from flask import Flask, render_template
from io import BytesIO
from keras.models import model_from_json
from keras.preprocessing.image import ImageDataGenerator, array_to_img, img_to_array
from keras.utils import np_utils
from keras.optimizers import SGD, Adam, RMSprop
from keras.models import Model, Sequential
from keras.layers.core import Dense, Dropout, Activation, Flatten, Lambda
from keras.layers.convolutional import Convolution2D
from keras.layers.pooling import AveragePooling2D, MaxPooling2D, GlobalAveragePooling2D
from keras.layers import Input, merge, ZeroPadding2D
from keras.layers.normalization import BatchNormalization
from keras.layers.advanced_activations import LeakyReLU, ELU
import keras.backend as K
# Fix error with Keras and TensorFlow
import tensorflow as tf
tf.python.control_flow_ops = tf
if os.name == 'nt': # We're on the Windows machine.
print(" > Loading paths for the Windows machine")
PATH = "C:/Users/a.sancho.asensio/Documents/PaperWork/nanodegree/git/simulator-windows-64/"
else: # Linux/MAC machine.
print(" > Loading paths for the Linux machine")
PATH = "/home/andreu/nanodegree/simulator-linux/"
g_steering = np.zeros(10, dtype="float32") # Global array containing the last steering angles.
sio = socketio.Server()
app = Flask(__name__)
model = None
prev_image_array = None
@sio.on('telemetry')
def telemetry(sid, data):
# The current steering angle of the car
#old_angle = float(data["steering_angle"]) / 25.0 # We need to normalize!
# The current throttle of the car
#throttle = data["throttle"]
# The current speed of the car
#speed = float(data["speed"])
# The current image from the center camera of the car
imgString = data["image"]
image = Image.open(BytesIO(base64.b64decode(imgString)))
image_array = np.asarray(image, dtype="uint8")
# This model currently assumes that the features of the model are just the images. Feel free to change this.
image_array = image_array[16:144, :, :] # Crop the image removing useless areas...
image_array = cv2.resize(image_array, (160, 64), interpolation=cv2.INTER_AREA)
transformed_image_array = image_array[None, :, :, :]
prediction = float(model.predict(transformed_image_array, batch_size=1))
# The driving model currently just outputs a constant throttle. Feel free to edit this.
throttle = 1.0
# Filter the data.
global g_steering
final_steering = 0.9 * prediction + 0.1 * np.mean(g_steering)
    g_steering = np.roll(g_steering, 1)
# coding: utf-8
# /*##########################################################################
# Copyright (C) 2016 European Synchrotron Radiation Facility
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# ############################################################################*/
"""
Nominal tests of the HistogramndLut function.
"""
import unittest
import numpy as np
from silx.math import HistogramndLut
def _get_bin_edges(histo_range, n_bins, n_dims):
edges = []
for i_dim in range(n_dims):
edges.append(histo_range[i_dim, 0] +
np.arange(n_bins[i_dim] + 1) *
(histo_range[i_dim, 1] - histo_range[i_dim, 0]) /
n_bins[i_dim])
return tuple(edges)
# ==============================================================
# ==============================================================
# ==============================================================
class _TestHistogramndLut_nominal(unittest.TestCase):
"""
Unit tests of the HistogramndLut class.
"""
ndims = None
    def setUp(self):
        ndims = self.ndims
        if ndims is None:
            raise ValueError('ndims class member not set.')
        self.tested_dim = ndims-1
sample = np.array([5.5, -3.3,
0., -0.5,
3.3, 8.8,
-7.7, 6.0,
-4.0])
weights = np.array([500.5, -300.3,
0.01, -0.5,
300.3, 800.8,
-700.7, 600.6,
-400.4])
n_elems = len(sample)
if ndims == 1:
shape = (n_elems,)
else:
shape = (n_elems, ndims)
self.sample = np.zeros(shape=shape, dtype=sample.dtype)
if ndims == 1:
self.sample = sample
else:
self.sample[..., ndims-1] = sample
self.weights = weights
# the tests are performed along one dimension,
# all the other bins indices along the other dimensions
# are expected to be 2
# (e.g : when testing a 2D sample : [0, x] will go into
# bin [2, y] because of the bin ranges [-2, 2] and n_bins = 4
# for the first dimension)
self.other_axes_index = 2
self.histo_range = np.repeat([[-2., 2.]], ndims, axis=0)
self.histo_range[ndims-1] = [-4., 6.]
self.n_bins = np.array([4]*ndims)
self.n_bins[ndims-1] = 5
if ndims == 1:
def fill_histo(h, v, dim, op=None):
if op:
h[:] = op(h[:], v)
else:
h[:] = v
self.fill_histo = fill_histo
else:
            def fill_histo(h, v, dim, op=None):
                idx = [self.other_axes_index]*len(h.shape)
                idx[dim] = slice(0, None)
                idx = tuple(idx)  # numpy requires a tuple (not a list) when mixing ints and slices
                if op:
                    h[idx] = op(h[idx], v)
                else:
                    h[idx] = v
            self.fill_histo = fill_histo
def test_nominal_bin_edges(self):
instance = HistogramndLut(self.sample,
self.histo_range,
self.n_bins)
bin_edges = instance.bins_edges
expected_edges = _get_bin_edges(self.histo_range,
self.n_bins,
self.ndims)
for i_edges, edges in enumerate(expected_edges):
self.assertTrue(np.array_equal(bin_edges[i_edges],
expected_edges[i_edges]),
msg='Testing bin_edges for dim {0}'
''.format(i_edges+1))
def test_nominal_histo_range(self):
instance = HistogramndLut(self.sample,
self.histo_range,
self.n_bins)
histo_range = instance.histo_range
self.assertTrue(np.array_equal(histo_range, self.histo_range))
def test_nominal_last_bin_closed(self):
instance = HistogramndLut(self.sample,
self.histo_range,
self.n_bins)
last_bin_closed = instance.last_bin_closed
self.assertEqual(last_bin_closed, False)
instance = HistogramndLut(self.sample,
self.histo_range,
self.n_bins,
last_bin_closed=True)
last_bin_closed = instance.last_bin_closed
self.assertEqual(last_bin_closed, True)
instance = HistogramndLut(self.sample,
self.histo_range,
self.n_bins,
last_bin_closed=False)
last_bin_closed = instance.last_bin_closed
self.assertEqual(last_bin_closed, False)
def test_nominal_n_bins_array(self):
test_n_bins = np.arange(self.ndims) + 10
instance = HistogramndLut(self.sample,
self.histo_range,
test_n_bins)
n_bins = instance.n_bins
self.assertTrue(np.array_equal(test_n_bins, n_bins))
def test_nominal_n_bins_scalar(self):
test_n_bins = 10
expected_n_bins = np.array([test_n_bins] * self.ndims)
instance = HistogramndLut(self.sample,
self.histo_range,
test_n_bins)
n_bins = instance.n_bins
self.assertTrue(np.array_equal(expected_n_bins, n_bins))
def test_nominal_histo_ref(self):
"""
"""
expected_h_tpl = np.array([2, 1, 1, 1, 1])
expected_c_tpl = np.array([-700.7, -0.5, 0.01, 300.3, 500.5])
expected_h = np.zeros(shape=self.n_bins, dtype=np.double)
expected_c = np.zeros(shape=self.n_bins, dtype=np.double)
self.fill_histo(expected_h, expected_h_tpl, self.ndims-1)
self.fill_histo(expected_c, expected_c_tpl, self.ndims-1)
instance = HistogramndLut(self.sample,
self.histo_range,
self.n_bins)
instance.accumulate(self.weights)
histo = instance.histo()
w_histo = instance.weighted_histo()
histo_ref = instance.histo(copy=False)
w_histo_ref = instance.weighted_histo(copy=False)
self.assertTrue(np.array_equal(histo, expected_h))
self.assertTrue(np.array_equal(w_histo, expected_c))
self.assertTrue(np.array_equal(histo_ref, expected_h))
self.assertTrue(np.array_equal(w_histo_ref, expected_c))
histo_ref[0, ...] = histo_ref[0, ...] + 10
w_histo_ref[0, ...] = w_histo_ref[0, ...] + 20
self.assertTrue(np.array_equal(histo, expected_h))
self.assertTrue(np.array_equal(w_histo, expected_c))
self.assertFalse(np.array_equal(histo_ref, expected_h))
self.assertFalse(np.array_equal(w_histo_ref, expected_c))
histo_2 = instance.histo()
w_histo_2 = instance.weighted_histo()
self.assertFalse(np.array_equal(histo_2, expected_h))
self.assertFalse(np.array_equal(w_histo_2, expected_c))
self.assertTrue(np.array_equal(histo_2, histo_ref))
self.assertTrue(np.array_equal(w_histo_2, w_histo_ref))
def test_nominal_accumulate_once(self):
"""
"""
expected_h_tpl = np.array([2, 1, 1, 1, 1])
expected_c_tpl = np.array([-700.7, -0.5, 0.01, 300.3, 500.5])
expected_h = np.zeros(shape=self.n_bins, dtype=np.double)
expected_c = np.zeros(shape=self.n_bins, dtype=np.double)
self.fill_histo(expected_h, expected_h_tpl, self.ndims-1)
self.fill_histo(expected_c, expected_c_tpl, self.ndims-1)
instance = HistogramndLut(self.sample,
self.histo_range,
self.n_bins)
instance.accumulate(self.weights)
histo = instance.histo()
w_histo = instance.weighted_histo()
self.assertEqual(w_histo.dtype, np.float64)
self.assertEqual(histo.dtype, np.uint32)
self.assertTrue(np.array_equal(histo, expected_h))
self.assertTrue(np.array_equal(w_histo, expected_c))
self.assertTrue(np.array_equal(instance.histo(), expected_h))
self.assertTrue(np.array_equal(instance.weighted_histo(),
expected_c))
def test_nominal_accumulate_twice(self):
"""
"""
expected_h_tpl = np.array([2, 1, 1, 1, 1])
expected_c_tpl = np.array([-700.7, -0.5, 0.01, 300.3, 500.5])
expected_h = np.zeros(shape=self.n_bins, dtype=np.double)
expected_c = np.zeros(shape=self.n_bins, dtype=np.double)
self.fill_histo(expected_h, expected_h_tpl, self.ndims-1)
self.fill_histo(expected_c, expected_c_tpl, self.ndims-1)
# calling accumulate twice
expected_h *= 2
expected_c *= 2
instance = HistogramndLut(self.sample,
self.histo_range,
self.n_bins)
instance.accumulate(self.weights)
instance.accumulate(self.weights)
histo = instance.histo()
w_histo = instance.weighted_histo()
self.assertEqual(w_histo.dtype, np.float64)
self.assertEqual(histo.dtype, np.uint32)
self.assertTrue(np.array_equal(histo, expected_h))
self.assertTrue(np.array_equal(w_histo, expected_c))
self.assertTrue(np.array_equal(instance.histo(), expected_h))
self.assertTrue(np.array_equal(instance.weighted_histo(),
expected_c))
def test_nominal_apply_lut_once(self):
"""
"""
expected_h_tpl = np.array([2, 1, 1, 1, 1])
        expected_c_tpl = np.array([-700.7, -0.5, 0.01, 300.3, 500.5])
##%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
## Numpy Basics
## %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# create a 2D array
import numpy as np
arr2D = np.array([[1,4,6],[2,5,7]])
# getting information about arr2D
print(arr2D.size) # returns 6, the no. of items
print(arr2D.ndim) # returns 2, the no. of dimensions
print(arr2D.shape) # returns tuple(2,3) corresponding to 2 rows & 3 columns
# create a 1D array
arr1D = np.array([1,4,6])
# getting information about arr1D
print(arr1D.size) # returns 3, the no. of items
print(arr1D.ndim) # returns 1, the no. of dimensions
print(arr1D.shape) # returns tuple(3,) corresponding to 3 items
#%% creating numpy arrays
# creating sequence of numbers
arr1 = np.arange(3, 6) # same as Python range function; results in array([3,4,5])
arr2 = np.arange(3, 9, 2) # the 3rd argument defines the step size; results in array([3,5,7])
arr3 = np.linspace(1,7,3) # creates 3 evenly spaced values from 1 to 7; results in array([1,4,7])
# creating special arrays
arr4 = np.ones((2,1)) # array of shape (2,1) with all items as 1
arr5 = np.zeros((2,2)) # all items as zero; often used as placeholder array at beginning of script
arr6 = np.eye(2) # diagonal items as 1
# adding axis to existing arrays (e.g., converting 1D array to 2D array)
print(arr1[:, np.newaxis])
arr7 = arr1[:, None] # same as above
# combining / stacking arrays
print(np.hstack((arr1, arr2))) # horizontally stacks passed arrays
print(np.vstack((arr1, arr2))) # vertically stacks passed arrays
print(np.hstack((arr5,arr4))) # array 4 added as a column into arr5
print(np.vstack((arr5,arr6))) # rows of array 6 added onto arr5
#%% basic numpy functions
print(arr2D.sum(axis=0))
print(arr2D.sum(axis=1))
#%% indexing arrays
# accessing individual items
print(arr2D[1,2]) # returns 7
# slicing
arr8 = np.arange(10).reshape((2,5)) # rearrange the 1D array into shape (2,5)
print((arr8[0:1,1:3]))
print((arr8[0,1:3])) # note that a 1D array is returned here instead of the 2D array above
# accessing entire row or column
print(arr8[1]) # returns 2nd row as array([5,6,7,8,9]); same as arr8[1,:]
print(arr8[:, 4]) # returns items of 3rd column as a 1D array
# extract a subarray from arr8 and modify it
arr8_sub = arr8[:, :2] # columns 0 and 1 from all rows
arr8_sub[1, 1] = 1000
print(arr8) # arr8 gets modified as well!!
# use copy method for a separate copy
arr8 = np.arange(10).reshape((2,5))
arr8_sub2 = arr8[:, :2].copy()
arr8_sub2[1, 1] = 100
print(arr8)
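# Note: basic slices such as arr8[:, :2] return *views* that share memory with the parent
# array, which is why modifying arr8_sub above also changed arr8; .copy() (and the fancy
# indexing below) allocate new arrays, so the parent stays untouched.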
# Fancy indexing
# combination of simple and fancy indexing
arr8_sub3 = arr8[:, [0, 1]] # note how columns are indexed via a list
arr8_sub3[1, 1] = 100 # arr8_sub3 becomes same as arr8_sub2 but arr8 is not modified here
print(arr8)
# use boolean mask to select subarray
arr8_sub4 = arr8[arr8 > 5] # returns array([6,7,8,9]), i.e., all values > 5
arr8_sub4[0] = 0 # again, arr8 is not affected
print(arr8)
#%% vectorized operations
vec1 = np.array([1,2,3,4])
vec2 = np.array([5,6,7,8])
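# The original tutorial is truncated here; the lines below are an illustrative
# continuation showing the element-wise (vectorized) operations this section builds up to.
print(vec1 + vec2)   # array([ 6,  8, 10, 12]); element-wise addition, no explicit loop
print(vec1 * vec2)   # array([ 5, 12, 21, 32]); element-wise multiplication
print(vec1 * 2 + 1)  # array([3, 5, 7, 9]); scalars broadcast across the whole array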
import logging
import os
from dataclasses import dataclass
from string import Template
import numpy as np
import tables
import pyinotify
from phd.thunderstorm.convert_to_hdf5 import CylinderProtoSet
from phd.utils.hdf5_tools import ProtoSetReader
from phd.utils.run_tools import G4CinServer, CinServerParameters
from tables import Filters, Table, Group
import matplotlib.pyplot as plt
ROOT_PATH = os.path.dirname(__file__)
INPUT_TEMPLATE = """/npm/geometry/type gdml
/npm/geometry/gdml critical_energy.gdml
/npm/thunderstorm/physics ${physics}
/npm/thunderstorm/minimal_energy ${energy} MeV
/npm/thunderstorm/stepping/type critical_energy
/npm/thunderstorm/stacking/electron false
/npm/thunderstorm/stacking/positron false
/npm/thunderstorm/stacking/gamma false
/npm/thunderstorm/stacking/save_gamma false
/npm/thunderstorm/stacking/save_electron true
/npm/thunderstorm/stacking/save_electron_cut ${energy} MeV
separator
"""
MESSEGE = """/gps/particle e-
/gps/number 1
/gps/direction 0 0 -1
/gps/ene/mono ${energy} MeV
/gps/position 0.0 0.0 0.0 m
/run/beamOn ${number}
separator
"""
class Processor:
def init_messege(self) -> str:
return ""
def process(self, event):
return None
def accept(self, event):
pass
def create_gdml(template_file, values: dict):
with open(template_file) as fin:
gdml_template = fin.read()
gdml_template = Template(gdml_template)
with open("critical_energy.gdml", 'w') as fout:
fout.write(gdml_template.substitute(values))
return 0
class CriticalEnergyProcessor(Processor):
def __init__(self, meta):
self.reader = ProtoSetReader("stacking_simple.bin", CylinderProtoSet)
filters = Filters(complevel=3, fletcher32=True)
self.reader.set_filters(filters)
self.path_hdf5 = "result.hdf5"
self.counter = 0
self.mess_templte = Template(MESSEGE)
self.meta = meta
self.step = 0.001
def init_messege(self) -> str:
return self.mess_templte.substitute(self.meta)
def process(self, event):
group_path = self.convert(event.pathname)
os.remove(event.pathname)
with tables.open_file(self.path_hdf5) as h5file:
table: Table = h5file.get_node(group_path, "stacking_simple")
n_electron = table.nrows
n_primary = table.attrs["number"]
gamma = n_electron / n_primary
if gamma > 1:
return None
else:
self.meta["energy"] = self.meta["energy"] + self.step
return self.init_messege()
def accept(self, event):
return event.name == "stacking_simple.bin"
def convert(self, path):
with tables.open_file(self.path_hdf5, mode="a") as h5file:
group = h5file.create_group(h5file.root, "sim{}".format(str(self.counter).rjust(4, '0')))
self.reader(path, h5file, group)
for table in h5file.iter_nodes(group):
if (isinstance(table, Group)):
continue
for key, value in self.meta.items():
table.attrs[key] = value
self.counter += 1
return group._v_pathname
class G4CinServerHandler(pyinotify.ProcessEvent):
def my_init(self, server: G4CinServer, processor: Processor):
self.server = server
self.processor = processor
self.server.send(processor.init_messege())
def process_IN_CREATE(self, event):
logging.root.info(str(event))
def process_IN_CLOSE_WRITE(self, event):
if self.processor.accept(event):
result = self.processor.process(event)
if result is not None:
self.server.send(result)
else:
raise KeyboardInterrupt
import star
import numpy as np
from phd.thunderstorm import atmosphere
from scipy.optimize import root_scalar
def get_critical_energy(height = 0, field = 0):
"""
:param height: meters
:param field:kV/cm
:return:
"""
material = star.electron.PredefinedMaterials.AIR_DRY_NEAR_SEA_LEVEL
density = atmosphere.ISACalculator.density(height) # kg/m3
def critical_energy_equation(energy):
data = star.electron.calculate_stopping_power(material, np.asarray([energy]))
stopPower = data["stopping_power_total"][0]
return field - stopPower*density
try:
critical_energy_root = root_scalar(
critical_energy_equation,
bracket=(0.001, 2.0),
)
except ValueError as err:
print(err)
return None
return critical_energy_root
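# Illustrative usage (the numeric result depends on the star stopping-power tables and the
# ISA atmosphere model, so no value is guaranteed here): critical energy at sea level in a
# 2 kV/cm field.
#
#   res = get_critical_energy(height=0, field=2.0)
#   if res is not None:
#       print(res.root)   # critical energy in MeV, root of field - stopping_power * density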
from scipy.linalg import lstsq
# def calculate_secondary_production_rate(path):
# bins = np.arange(-500.0, 501, 1)
# x = bins[:-1]
# M = x[:, np.newaxis] ** [0, 1]
#
# dtype = np.dtype(
# [
# ("field", "d"),
# ("height", "d"),
# ("energy", "d"),
# ("k", "d"),
# ("b", "d")
# ]
# )
#
# with tables.open_file(path) as h5file:
# result = []
# for group in h5file.root:
# table = h5file.get_node(group, "stacking_simple")
# data = table.read()
# field = table.attrs["values_gdml_field"][0]
# height = table.attrs["values_gdml_height"][0]
# energy = table.attrs["values_macros_energy"]
# number = table.attrs["values_macros_number"]
# temp, _ = np.histogram(data["z"], bins=bins)
# temp = np.cumsum(temp)
# y = temp / number
# p, res, rnk, s = lstsq(M, y)
# result.append((field, height, energy, p[1], p[0]))
# return np.array(result, dtype=dtype)
@dataclass(eq=True, frozen=True)
class FieldHeigth:
field: float
height : float
def get_group(path):
with tables.open_file(path) as h5file:
result = {}
for group in h5file.root:
table = h5file.get_node(group, "stacking_simple")
field = table.attrs["values_gdml_field"][0]
height = table.attrs["values_gdml_height"][0]
energy = table.attrs["values_macros_energy"]
key = FieldHeigth(field, height)
if key in result.keys():
result[key].append((energy, group._v_name))
else:
result[key] = [(energy, group._v_name)]
for value in result.values():
value.sort(key=lambda x: x[0])
return result
def plot_secondary_production_rate(path, output="plot"):
if not os.path.exists(output):
os.mkdir(output)
groups = get_group(path)
bins = np.arange(-500.0, 501, 1)
x = bins[:-1]
result = []
dtype = np.dtype(
[
("field", "d"),
("height", "d"),
("energy", "d"),
("k", "d"),
("b", "d")
]
)
with tables.open_file(path) as h5file:
for key, value in groups.items():
energy_cut = value[0][0]
plt.clf()
for energy, group_name in value:
table: tables.Table = h5file.get_node("/{}".format(group_name), "stacking_simple")
data = table.read()
data = data[data["energy"] > energy_cut]
number = table.attrs["values_macros_number"]
temp, _ = np.histogram(data["z"], bins=bins)
temp = np.cumsum(temp[::-1])
y = temp / number
plt.plot(x, y)
path = os.path.join(output, "{}m_{}kV_m.png".format(key.height, key.field*1e4))
plt.xlabel("Height, meters")
plt.ylabel("Cumulative number of electron")
plt.tight_layout()
plt.savefig(path, format="png", transparent=True, dpi = 600)
return 0
def calculate_secondary_production_rate(path, rate_cut = 0.001, method="simple"):
if method not in ["simple", "rate-cut"]:
logging.root.warning("Bad method for {}".format(calculate_secondary_production_rate.__name__))
groups = get_group(path)
bins = np.arange(-500.0, 501, 1)
x = bins[:-1]
M = x[:, np.newaxis] ** [0, 1]
result = []
dtype = np.dtype(
[
("field", "d"),
("height", "d"),
("energy", "d"),
("k", "d"),
("b", "d"),
("chi2", "d")
]
)
with tables.open_file(path) as h5file:
for key, value in groups.items():
energy_cut = value[0][0]
for energy, group_name in value:
table: tables.Table = h5file.get_node("/{}".format(group_name), "stacking_simple")
data = table.read()
data = data[data["energy"] > energy_cut]
number = table.attrs["values_macros_number"]
                temp, _ = np.histogram(data["z"], bins=bins)
"""
Based on https://github.com/dsadigh/driving-preferences
and https://github.com/Stanford-ILIAD/easy-active-learning/
"""
import os
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import gym
import matplotlib.pyplot as plt
import numpy as np
import scipy.optimize as opt
from gym import spaces
from gym.utils import seeding
from matplotlib.image import AxesImage, BboxImage
from matplotlib.patches import PathPatch
from matplotlib.path import Path
from scipy.ndimage import rotate, zoom
from active_reward_learning.common.constants import (
KEY_DOWN,
KEY_LEFT,
KEY_RIGHT,
KEY_UP,
)
from active_reward_learning.common.policy import FixedPolicy
IMG_FOLDER = os.path.join(os.path.dirname(__file__), "..", "..", "img", "highway")
GRASS = np.tile(plt.imread(os.path.join(IMG_FOLDER, "grass.png")), (5, 5, 1))
CAR = {
color: zoom(
np.array(
plt.imread(os.path.join(IMG_FOLDER, "car-{}.png".format(color))) * 255.0,
dtype=np.uint8, # zoom requires uint8 format
),
[0.3, 0.3, 1.0],
)
for color in ["gray", "orange", "purple", "red", "white", "yellow"]
}
COLOR_AGENT = "orange"
COLOR_ROBOT = "white"
CAR_AGENT = CAR[COLOR_AGENT]
CAR_ROBOT = CAR[COLOR_ROBOT]
CAR_SCALE = 0.15 / max(list(CAR.values())[0].shape[:2])
LANE_SCALE = 10.0
LANE_COLOR = (0.4, 0.4, 0.4) # 'gray'
LANE_BCOLOR = "white"
STEPS = 100
def set_image(
obj: AxesImage,
data: np.ndarray,
scale: float = CAR_SCALE,
x: List[float] = [0.0, 0.0, 0.0, 0.0],
):
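    # Rotate the car sprite to the heading angle x[2] (radians) and set its extent so the
    # image is centred on the car position (x[0], x[1]) and scaled by `scale`.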
ox = x[0]
oy = x[1]
angle = x[2]
img = rotate(data, np.rad2deg(angle))
h, w = img.shape[0], img.shape[1]
obj.set_data(img)
obj.set_extent(
[
ox - scale * w * 0.5,
ox + scale * w * 0.5,
oy - scale * h * 0.5,
oy + scale * h * 0.5,
]
)
class Car:
def __init__(
self,
initial_state: Union[List[float], np.ndarray],
actions: List[Union[Tuple[float, float], np.ndarray]],
):
self.initial_state = initial_state
self.state = self.initial_state
self.actions = actions
self.action_i = 0
def reset(self):
self.state = self.initial_state
self.action_i = 0
def update(
self, update_fct: Callable[[List[float], float, float], List[float]]
) -> None:
u1, u2 = self.actions[self.action_i % len(self.actions)]
self.state = update_fct(self.state, u1, u2)
self.action_i += 1
def gaussian(
self, x: List[float], height: float = 0.07, width: float = 0.03
) -> float:
car_pos = np.asarray([self.state[0], self.state[1]])
car_theta = self.state[2]
car_heading = (np.cos(car_theta), np.sin(car_theta))
pos = np.asarray([x[0], x[1]])
d = car_pos - pos
dh = np.dot(d, car_heading)
dw = np.cross(d, car_heading)
return np.exp(-0.5 * ((dh / height) ** 2 + (dw / width) ** 2))
class Lane:
def __init__(
self,
start_pos: Union[List[float], np.ndarray],
end_pos: Union[List[float], np.ndarray],
width: float,
):
self.start_pos = np.asarray(start_pos)
self.end_pos = np.asarray(end_pos)
self.width = width
d = self.end_pos - self.start_pos
self.dir = d / np.linalg.norm(d)
self.perp = np.asarray([-self.dir[1], self.dir[0]])
def gaussian(self, state: List[float], sigma: float = 0.5) -> float:
pos = np.asarray([state[0], state[1]])
dist_perp = np.dot(pos - self.start_pos, self.perp)
return np.exp(-0.5 * (dist_perp / (sigma * self.width / 2.0)) ** 2)
def direction(self, x: List[float]):
return np.cos(x[2]) * self.dir[0] + np.sin(x[2]) * self.dir[1]
def shifted(self, m: int) -> "Lane":
return Lane(
self.start_pos + self.perp * self.width * m,
self.end_pos + self.perp * self.width * m,
self.width,
)
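# Illustrative sketch (not part of the original file): how the Lane helpers
# compose into lane-keeping features. The lane geometry and the state below
# are hypothetical example values.
def _demo_lane_features():
    lane = Lane([0.0, -1.0], [0.0, 1.0], 0.17)
    lanes = [lane.shifted(0), lane.shifted(-1), lane.shifted(1)]
    state = [0.05, 0.2, np.pi / 2, 0.4]  # x, y, heading, speed
    # Each Gaussian term peaks when the car sits on that lane's centre line;
    # direction() rewards heading along the lane.
    return [lane_i.gaussian(state) for lane_i in lanes], lane.direction(state)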
class HighwayDriving(gym.Env):
metadata = {"render.modes": ["human", "rgb_array"], "video.frames_per_second": 1}
def __init__(
self,
cars: List[Car],
reward_weights: Optional[np.ndarray] = None,
):
self.initial_state = [0, -0.3, np.pi / 2, 0.4]
self.state = self.initial_state
self.episode_length = 50
self.dt = 0.1
self.friction = 1
self.vmax = 1
self.xlim = (-1, 1)
# self.ylim = (-0.2, 0.8)
self.ylim = (-0.4, 2.5)
print("state0", self.state)
lane = Lane([0.0, -1.0], [0.0, 1.0], 0.17)
road = Lane([0.0, -1.0], [0.0, 1.0], 0.17 * 3)
self.lanes = [lane.shifted(0), lane.shifted(-1), lane.shifted(1)]
self.fences = [lane.shifted(2), lane.shifted(-2)]
self.roads = [road]
self.cars = cars
n_features = len(self.get_features())
if reward_weights is not None:
assert reward_weights.shape == (n_features,)
self.reward_w = np.array(reward_weights)
else:
self.reward_w = np.random.normal(size=n_features)
self.reward_w[-1] = 0
self.reward_w /= np.linalg.norm(self.reward_w)
self.reward_w /= 2
self.reward_w[-1] = 0.5
self.action_space = spaces.Box(np.array([-1, -1]), np.array([1, 1]))
self.observation_space = spaces.Box(
np.array([-np.inf, -np.inf, 0, -np.inf]),
np.array([np.inf, np.inf, 2 * np.pi, np.inf]),
dtype=np.float32,
)
self.Ndim_repr = n_features
self.time = 0
self.history: List[
Tuple[List[float], List[Tuple[float, float, float, float]]]
] = []
self._update_history()
self.seed()
def _update_history(self) -> None:
self.history.append((np.array(self.state), self._get_car_states()))
def _get_car_states(self) -> List[np.ndarray]:
return [np.array(car.state) for car in self.cars]
def _update_state(self, state: List[float], u1: float, u2: float) -> List[float]:
x, y, theta, v = state
dx = v * np.cos(theta)
dy = v * np.sin(theta)
dtheta = v * u1
dv = u2 - self.friction * v
new_v = max(min(v + dv * self.dt, self.vmax), -self.vmax)
return [x + dx * self.dt, y + dy * self.dt, theta + dtheta * self.dt, new_v]
def _get_reward_for_state(self, state: Optional[List[float]] = None) -> float:
if state is None:
state = self.state
return np.dot(self.reward_w, self.get_features(state))
def seed(self, seed: Optional[float] = None) -> List[float]:
self.np_random, seed = seeding.np_random(seed)
assert seed is not None
return [seed]
def step(
self, action: Tuple[float, float]
) -> Tuple[np.ndarray, float, bool, Dict[str, Any]]:
action = np.array(action)
# Simplest case, no bells or whistles, just hard-code integrators and test system
import numpy as np
import matplotlib
from numba import jit
from time import time
matplotlib.use('agg')
import matplotlib.pyplot as plt
from scipy.stats import entropy
from benchmark import DATA_PATH
import os
# define system
np.random.seed(0)
figure_directory = "figures/" # relative to script
figure_format = ".pdf"
# Define system
beta = 1.0 # inverse temperature
dim = 1 # system dimension
@jit
def quartic_potential(x): return x**4
@jit
def quartic_force(x): return - 4.0 * x**3
@jit
def eggcrate_potential(x): return x**2 + np.sin(10 * x)
@jit
def eggcrate_force(x): return - ( 2 * ( x + 5 * np.cos(10 * x)) )
system = "quartic"
if system == "quartic":
potential = quartic_potential
force = quartic_force
elif system == "eggcrate":
potential = eggcrate_potential
force = eggcrate_force
@jit
def reduced_potential(x): return potential(x) * beta
@jit
def log_q(x): return - reduced_potential(x)
@jit
def q(x): return np.exp(log_q(x))
# normalized density
x = np.linspace(-3, 3, 1000)
x_ = np.linspace(-10,10,10000)
Z = np.trapz(q(x_), x_)
log_Z = np.log(Z)
def p(x): return q(x) / Z
def log_p(x): return log_q(x) - log_Z
# example initial conditions
x_0, v_0 = np.random.randn(), np.random.randn()
m = 10.0 # mass
velocity_scale = np.sqrt(1.0 / (beta * m))
sigma2 = velocity_scale**2
timestep = 1.0
gamma = 100.0
# implement ovrvo
def simulate_vvvr(x0, v0, n_steps, gamma, dt, thinning_factor=1):
"""Simulate n_steps of VVVR, accumulating heat
:param x0:
:param v0:
:param n_steps:
:param gamma:
:param dt:
:return:
"""
Q = 0
W_shads = np.zeros(n_steps // thinning_factor)
x, v = x0, v0
xs, vs = np.zeros(n_steps // thinning_factor), np.zeros(n_steps // thinning_factor)
xs[0] = x0
vs[0] = v0
E_old = potential(x) + 0.5 * m * v**2
a = np.exp(-gamma * (dt / 2.0))
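# Hedged sketch (added for illustration, not part of the original script): the
# OVRVO "O" half-step that the coefficient a = exp(-gamma * dt / 2) is used for.
# It reuses the module-level constants defined above; the thermostat noise xi
# is a standard normal draw.
def _demo_ovrvo_o_step(v, dt=timestep, rng=np.random):
    a = np.exp(-gamma * (dt / 2.0))
    xi = rng.randn()
    # Ornstein-Uhlenbeck update: partial damping plus matched thermal noise.
    return a * v + np.sqrt(1.0 - a**2) * velocity_scale * xi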
import os
import tempfile
from os import path as osp
import mmcv
import numpy as np
import torch
from mmcv.utils import print_log
from mmdet.datasets import DATASETS
from mmdet3d.core.bbox import Box3DMode, points_cam2img, CameraInstance3DBoxes
from mmdet3d.datasets.kitti_dataset import KittiDataset
@DATASETS.register_module()
class WaymoMultiViewDataset(KittiDataset):
"""
Waymo Multi-View Dataset by <NAME>
Need to prepare multi-view info.pkl first
This class serves as the API for experiments on the Waymo Dataset.
Please refer to `<https://waymo.com/open/download/>`_for data downloading.
It is recommended to symlink the dataset root to $MMDETECTION3D/data and
organize them as the doc shows.
Args:
data_root (str): Path of dataset root.
ann_file (str): Path of annotation file.
split (str): Split of input data.
pts_prefix (str, optional): Prefix of points files.
Defaults to 'velodyne'.
pipeline (list[dict], optional): Pipeline used for data processing.
Defaults to None.
classes (tuple[str], optional): Classes used in the dataset.
Defaults to None.
modality (dict, optional): Modality to specify the sensor data used
as input. Defaults to None.
box_type_3d (str, optional): Type of 3D box of this dataset.
Based on the `box_type_3d`, the dataset will encapsulate the box
to its original format then converted them to `box_type_3d`.
Defaults to 'LiDAR' in this dataset. Available options includes
- 'LiDAR': box in LiDAR coordinates
- 'Depth': box in depth coordinates, usually for indoor dataset
- 'Camera': box in camera coordinates
filter_empty_gt (bool, optional): Whether to filter empty GT.
Defaults to True.
test_mode (bool, optional): Whether the dataset is in test mode.
Defaults to False.
pcd_limit_range (list(float), optional): The range of point cloud used
to filter invalid predicted boxes.
Default: [-85, -85, -5, 85, 85, 5].
"""
CLASSES = ('Car', 'Cyclist', 'Pedestrian')
def __init__(self,
data_root,
ann_file,
split,
pts_prefix='velodyne',
pipeline=None,
classes=None,
modality=None,
cams=('CAM_FRONT', 'CAM_FRONT_LEFT', 'CAM_FRONT_RIGHT'),
box_type_3d='LiDAR',
filter_empty_gt=True,
test_mode=False,
load_interval=1,
pcd_limit_range=[-85, -85, -5, 85, 85, 5]):
super().__init__(
data_root=data_root,
ann_file=ann_file,
split=split,
pts_prefix=pts_prefix,
pipeline=pipeline,
classes=classes,
modality=modality,
box_type_3d=box_type_3d,
filter_empty_gt=filter_empty_gt,
test_mode=test_mode,
pcd_limit_range=pcd_limit_range)
# to load a subset, just set the load_interval in the dataset config
self.data_infos = self.data_infos[::load_interval]
if hasattr(self, 'flag'):
self.flag = self.flag[::load_interval]
self.cams = cams
def _get_pts_filename(self, idx):
pts_filename = osp.join(self.root_split, self.pts_prefix,
f'{idx:07d}.bin')
return pts_filename
def get_data_info(self, index):
"""Get data info according to the given index.
Args:
index (int): Index of the sample data to get.
Returns:
dict: Standard input_dict consists of the
data information.
- sample_idx (str): sample index
- pts_filename (str): filename of point clouds
- img_prefix (str): prefix of image files
- img_info (dict): image info
- lidar2img (list[np.ndarray], optional): transformations from
lidar to different cameras
- ann_info (dict): annotation info
"""
info = self.data_infos[index]
sample_idx = info['cams']['CAM_FRONT']['image_idx']
pts_filename = self._get_pts_filename(sample_idx)
image_paths = []
lidar2img_rts = []
for cam_name in self.cams:
cam_info = info['cams'][cam_name]
# img_path
img_path = os.path.join(self.data_root, cam_info['image_path'])
image_paths.append(img_path)
# lidar2img
cam_id = cam_info['cam_id']
lidar2cam = info['calib'][f'Tr_velo_to_cam_{cam_id}'].astype(np.float32)
intrinsic = info['calib'][f'P{cam_id}'].astype(np.float32)
lidar2img = intrinsic @ lidar2cam
lidar2img_rts.append(lidar2img)
input_dict = dict(
sample_idx=sample_idx,
pts_filename=pts_filename,
# sweeps=info['sweeps'],
timestamp=info['timestamp'] / 1e6,
img_filename=image_paths,
lidar2img=lidar2img_rts,
)
# if not self.test_mode: # load gt_bbox in test_mode for visualization
annos = self.get_ann_info(index)
input_dict['ann_info'] = annos
return input_dict
def get_ann_info(self, index):
"""Get annotation info according to the given index.
Args:
index (int): Index of the annotation data to get.
Returns:
dict: annotation information consists of the following keys:
- gt_bboxes_3d (:obj:`LiDARInstance3DBoxes`):
3D ground truth bboxes.
- gt_labels_3d (np.ndarray): Labels of ground truths.
- gt_bboxes (np.ndarray): 2D ground truth bboxes.
- gt_labels (np.ndarray): Labels of ground truths.
- gt_names (list[str]): Class names of ground truths.
- difficulty (int): Difficulty defined by KITTI.
0, 1, 2 represent easy, moderate and hard respectively.
"""
# Use index to get the annos, thus the evalhook could also use this api
info = self.data_infos[index]
annos = info['annos']
# we need other objects to avoid collision when sample
annos = self.remove_dontcare(annos) # already removed 'DontCare'
loc = annos['location']
dims = annos['dimensions']
rots = annos['rotation_y']
gt_names = annos['name']
gt_bboxes_3d = np.concatenate([loc, dims, rots[..., np.newaxis]],
axis=1).astype(np.float32)
# TODO: check dims
# convert gt_bboxes_3d from CAM_FRONT to velodyne coordinates
Trv2c = info['calib']['Tr_velo_to_cam_0'].astype(np.float32)
gt_bboxes_3d = CameraInstance3DBoxes(gt_bboxes_3d).convert_to(
self.box_mode_3d, np.linalg.inv(Trv2c))
gt_labels_3d = []
for cat in gt_names:
if cat in self.CLASSES:
gt_labels_3d.append(self.CLASSES.index(cat))
else:
gt_labels_3d.append(-1)
gt_labels_3d = np.array(gt_labels_3d).astype(np.int64)
anns_results = dict(
gt_bboxes_3d=gt_bboxes_3d, # LiDAR frame: LiDARInstance3DBoxes
gt_labels_3d=gt_labels_3d,
gt_names=gt_names,
)
return anns_results
def format_results(self,
outputs,
pklfile_prefix=None,
submission_prefix=None,
data_format='waymo'):
"""Format the results to pkl file.
Args:
outputs (list[dict]): Testing results of the dataset.
pklfile_prefix (str): The prefix of pkl files. It includes
the file path and the prefix of filename, e.g., "a/b/prefix".
If not specified, a temp file will be created. Default: None.
submission_prefix (str): The prefix of submitted files. It
includes the file path and the prefix of filename, e.g.,
"a/b/prefix". If not specified, a temp file will be created.
Default: None.
data_format (str, optional): Output data format.
Default: 'waymo'. Another supported choice is 'kitti'.
Returns:
tuple: (result_files, tmp_dir), result_files is a dict containing
the json filepaths, tmp_dir is the temporal directory created
for saving json files when jsonfile_prefix is not specified.
"""
if pklfile_prefix is None:
tmp_dir = tempfile.TemporaryDirectory()
pklfile_prefix = osp.join(tmp_dir.name, 'results')
else:
tmp_dir = None
assert ('waymo' in data_format or 'kitti' in data_format), \
f'invalid data_format {data_format}'
if (not isinstance(outputs[0], dict)) or 'img_bbox' in outputs[0]:
raise TypeError('Not supported type for reformat results.')
elif 'pts_bbox' in outputs[0]:
result_files = dict()
for name in outputs[0]:
results_ = [out[name] for out in outputs]
pklfile_prefix_ = pklfile_prefix + name
if submission_prefix is not None:
submission_prefix_ = f'{submission_prefix}_{name}'
else:
submission_prefix_ = None
result_files_ = self.bbox2result_kitti(results_, self.CLASSES,
pklfile_prefix_,
submission_prefix_)
result_files[name] = result_files_
else:
result_files = self.bbox2result_kitti(outputs, self.CLASSES,
pklfile_prefix,
submission_prefix)
if 'waymo' in data_format:
from mmdet3d.core.evaluation.waymo_utils.prediction_kitti_to_waymo import \
KITTI2Waymo # noqa
waymo_root = osp.join(
self.data_root.split('kitti_format')[0], 'waymo_format')
if self.split == 'training':
waymo_tfrecords_dir = osp.join(waymo_root, 'validation')
prefix = '1'
elif self.split == 'testing':
waymo_tfrecords_dir = osp.join(waymo_root, 'testing')
prefix = '2'
else:
raise ValueError('Not supported split value.')
save_tmp_dir = tempfile.TemporaryDirectory()
waymo_results_save_dir = save_tmp_dir.name
waymo_results_final_path = f'{pklfile_prefix}.bin'
if 'pts_bbox' in result_files:
converter = KITTI2Waymo(result_files['pts_bbox'],
waymo_tfrecords_dir,
waymo_results_save_dir,
waymo_results_final_path, prefix)
else:
converter = KITTI2Waymo(result_files, waymo_tfrecords_dir,
waymo_results_save_dir,
waymo_results_final_path, prefix)
converter.convert()
save_tmp_dir.cleanup()
return result_files, tmp_dir
def evaluate(self,
results,
metric='waymo',
logger=None,
pklfile_prefix=None,
submission_prefix=None,
show=False,
out_dir=None,
pipeline=None):
"""Evaluation in KITTI protocol.
Args:
results (list[dict]): Testing results of the dataset.
metric (str | list[str], optional): Metrics to be evaluated.
Default: 'waymo'. Another supported metric is 'kitti'.
logger (logging.Logger | str, optional): Logger used for printing
related information during evaluation. Default: None.
pklfile_prefix (str, optional): The prefix of pkl files including
the file path and the prefix of filename, e.g., "a/b/prefix".
If not specified, a temp file will be created. Default: None.
submission_prefix (str, optional): The prefix of submission data.
If not specified, the submission data will not be generated.
show (bool, optional): Whether to visualize.
Default: False.
out_dir (str, optional): Path to save the visualization results.
Default: None.
pipeline (list[dict], optional): raw data loading for showing.
Default: None.
Returns:
dict[str: float]: results of each evaluation metric
"""
assert ('waymo' in metric or 'kitti' in metric), \
f'invalid metric {metric}'
if 'kitti' in metric:
result_files, tmp_dir = self.format_results(
results,
pklfile_prefix,
submission_prefix,
data_format='kitti')
from mmdet3d.core.evaluation import kitti_eval
gt_annos = [info['annos'] for info in self.data_infos]
if isinstance(result_files, dict):
ap_dict = dict()
for name, result_files_ in result_files.items():
eval_types = ['bev', '3d']
ap_result_str, ap_dict_ = kitti_eval(
gt_annos,
result_files_,
self.CLASSES,
eval_types=eval_types)
for ap_type, ap in ap_dict_.items():
ap_dict[f'{name}/{ap_type}'] = float(
'{:.4f}'.format(ap))
print_log(
f'Results of {name}:\n' + ap_result_str, logger=logger)
else:
ap_result_str, ap_dict = kitti_eval(
gt_annos,
result_files,
self.CLASSES,
eval_types=['bev', '3d'])
print_log('\n' + ap_result_str, logger=logger)
if 'waymo' in metric:
waymo_root = osp.join(
self.data_root.split('kitti_format')[0], 'waymo_format')
if pklfile_prefix is None:
eval_tmp_dir = tempfile.TemporaryDirectory()
pklfile_prefix = osp.join(eval_tmp_dir.name, 'results')
else:
eval_tmp_dir = None
result_files, tmp_dir = self.format_results(
results,
pklfile_prefix,
submission_prefix,
data_format='waymo')
import subprocess
ret_bytes = subprocess.check_output(
'mmdet3d/core/evaluation/waymo_utils/' +
f'compute_detection_metrics_main {pklfile_prefix}.bin ' +
f'{waymo_root}/gt.bin',
shell=True)
ret_texts = ret_bytes.decode('utf-8')
print_log(ret_texts)
# parse the text to get ap_dict
ap_dict = {
'Vehicle/L1 mAP': 0,
'Vehicle/L1 mAPH': 0,
'Vehicle/L2 mAP': 0,
'Vehicle/L2 mAPH': 0,
'Pedestrian/L1 mAP': 0,
'Pedestrian/L1 mAPH': 0,
'Pedestrian/L2 mAP': 0,
'Pedestrian/L2 mAPH': 0,
'Sign/L1 mAP': 0,
'Sign/L1 mAPH': 0,
'Sign/L2 mAP': 0,
'Sign/L2 mAPH': 0,
'Cyclist/L1 mAP': 0,
'Cyclist/L1 mAPH': 0,
'Cyclist/L2 mAP': 0,
'Cyclist/L2 mAPH': 0,
'Overall/L1 mAP': 0,
'Overall/L1 mAPH': 0,
'Overall/L2 mAP': 0,
'Overall/L2 mAPH': 0
}
mAP_splits = ret_texts.split('mAP ')
mAPH_splits = ret_texts.split('mAPH ')
for idx, key in enumerate(ap_dict.keys()):
split_idx = int(idx / 2) + 1
if idx % 2 == 0: # mAP
ap_dict[key] = float(mAP_splits[split_idx].split(']')[0])
else: # mAPH
ap_dict[key] = float(mAPH_splits[split_idx].split(']')[0])
ap_dict['Overall/L1 mAP'] = \
(ap_dict['Vehicle/L1 mAP'] + ap_dict['Pedestrian/L1 mAP'] +
ap_dict['Cyclist/L1 mAP']) / 3
ap_dict['Overall/L1 mAPH'] = \
(ap_dict['Vehicle/L1 mAPH'] + ap_dict['Pedestrian/L1 mAPH'] +
ap_dict['Cyclist/L1 mAPH']) / 3
ap_dict['Overall/L2 mAP'] = \
(ap_dict['Vehicle/L2 mAP'] + ap_dict['Pedestrian/L2 mAP'] +
ap_dict['Cyclist/L2 mAP']) / 3
ap_dict['Overall/L2 mAPH'] = \
(ap_dict['Vehicle/L2 mAPH'] + ap_dict['Pedestrian/L2 mAPH'] +
ap_dict['Cyclist/L2 mAPH']) / 3
if eval_tmp_dir is not None:
eval_tmp_dir.cleanup()
if tmp_dir is not None:
tmp_dir.cleanup()
if show or out_dir:
self.show(results, out_dir, show=show, pipeline=pipeline)
return ap_dict
def bbox2result_kitti(self,
net_outputs,
class_names,
pklfile_prefix=None,
submission_prefix=None):
"""Convert results to kitti format for evaluation and test submission.
Args:
net_outputs (List[np.ndarray]): list of array storing the
bbox and score
class_names (List[str]): A list of class names
pklfile_prefix (str): The prefix of pkl file.
submission_prefix (str): The prefix of submission file.
Returns:
List[dict]: A list of dict have the kitti 3d format
"""
assert len(net_outputs) == len(self.data_infos), \
'invalid list length of network outputs'
if submission_prefix is not None:
mmcv.mkdir_or_exist(submission_prefix)
det_annos = []
print('\nConverting prediction to KITTI format')
for idx, pred_dicts in enumerate(
mmcv.track_iter_progress(net_outputs)):
annos = []
info = self.data_infos[idx]
cam_info = info['cams'][self.cams[0]]
sample_idx = cam_info['image_idx']
image_shape = cam_info['image_shape'][:2]
box_dict = self.convert_valid_bboxes(pred_dicts, info)
if len(box_dict['bbox']) > 0:
box_2d_preds = box_dict['bbox']
box_preds = box_dict['box3d_camera']
scores = box_dict['scores']
box_preds_lidar = box_dict['box3d_lidar']
label_preds = box_dict['label_preds']
anno = {
'name': [],
'truncated': [],
'occluded': [],
'alpha': [],
'bbox': [],
'dimensions': [],
'location': [],
'rotation_y': [],
'score': []
}
for box, box_lidar, bbox, score, label in zip(
box_preds, box_preds_lidar, box_2d_preds, scores,
label_preds):
bbox[2:] = np.minimum(bbox[2:], image_shape[::-1])
# This file contains the implementation of all the one-dimensional Least Squares algorithms.
import numpy as np
import scipy as sp
from scipy.optimize import root
#Least Squares fitting in R:
def least_squares_R(points, values, m):
temp = lambda x : [x ** i for i in range(m + 1)]
pi_t = lambda x : np.array([temp(x)])
pi = lambda x : np.transpose(pi_t(x))
k = len(temp(0))
leftsum = np.zeros((k,k))
rightsum = np.zeros((k, 1))
for i in range(len(points)):
leftsum += np.matmul(pi(points[i]), pi_t(points[i]))
rightsum += pi(points[i])*values[i]
cmin = np.matmul(np.linalg.inv(leftsum), rightsum)
return np.vectorize(lambda x : np.dot(pi(x).flatten(), cmin.flatten()))
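# Illustrative usage (assumed data, not from the original module): fit a
# quadratic through noisy samples and evaluate the returned vectorized
# polynomial at a few points.
def _demo_least_squares_R():
    rng = np.random.RandomState(0)
    pts = np.linspace(-1.0, 1.0, 40)
    vals = 2.0 * pts**2 - pts + 0.05 * rng.randn(pts.size)
    fit = least_squares_R(pts, vals, m=2)
    return fit(np.array([0.0, 0.5, 1.0]))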
#Weighted Least Squares fitting in R:
def weighted_least_squares_R(points, values, m, theta):
temp = lambda x : [x ** i for i in range(m + 1)]
k = len(temp(0))
pi_t = lambda x : np.array([temp(x)])
pi = lambda x : np.transpose(pi_t(x))
relative_theta = lambda x, j : theta(np.abs(x - points[j]))
relative_theta_sum = lambda x : sum([relative_theta(x, j) for j in range(len(points))])
def cmin(x):
leftsum = np.zeros((k, k))
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from PIL import Image
import pickle
# /////////////// Corruption Helpers ///////////////
import skimage as sk
from skimage.filters import gaussian
from skimage import transform, feature
from io import BytesIO
from wand.image import Image as WandImage
from wand.api import library as wandlibrary
import wand.color as WandColor
import ctypes
from PIL import Image as PILImage
from PIL import ImageDraw as draw
import cv2
from scipy.ndimage import zoom as scizoom
from scipy.ndimage.interpolation import map_coordinates
import warnings
import os
from pkg_resources import resource_filename
warnings.simplefilter("ignore", UserWarning)
CORRUPTIONS = ['identity',
'shot_noise',
'impulse_noise',
'glass_blur',
'motion_blur',
'shear',
'scale',
'rotate',
'brightness',
'translate',
'stripe',
'fog',
'spatter',
'dotted_line',
'zigzag',
'canny_edges',]
ALL_CORRUPTIONS = ['identity',
'gaussian_noise',
'shot_noise',
'impulse_noise',
'speckle_noise',
'pessimal_noise',
'gaussian_blur',
'glass_blur',
'defocus_blur',
'motion_blur',
'zoom_blur',
'fog',
'frost',
'snow',
'spatter',
'contrast',
'brightness',
'saturate',
'jpeg_compression',
'pixelate',
'elastic_transform',
'quantize',
'shear',
'rotate',
'scale',
'translate',
'line',
'dotted_line',
'zigzag',
'inverse',
'stripe',
'canny_edges',]
with open("pessimal_noise_matrix", "rb") as f:
pessimal_noise_matrix = pickle.load(f)
def disk(radius, alias_blur=0.1, dtype=np.float32):
if radius <= 8:
L = np.arange(-8, 8 + 1)
ksize = (3, 3)
else:
L = np.arange(-radius, radius + 1)
ksize = (5, 5)
X, Y = np.meshgrid(L, L)
aliased_disk = np.array((X ** 2 + Y ** 2) <= radius ** 2, dtype=dtype)
aliased_disk /= np.sum(aliased_disk)
# supersample disk to antialias
return cv2.GaussianBlur(aliased_disk, ksize=ksize, sigmaX=alias_blur)
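# Illustrative usage: an anti-aliased disc kernel of the kind defocus blur
# convolves with. The radius/alias values are arbitrary examples; the kernel
# sums to approximately 1 after the Gaussian smoothing.
def _demo_disk_kernel():
    kernel = disk(radius=4, alias_blur=0.5)
    return kernel.shape, float(kernel.sum())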
# Tell Python about the C method
wandlibrary.MagickMotionBlurImage.argtypes = (ctypes.c_void_p, # wand
ctypes.c_double, # radius
ctypes.c_double, # sigma
ctypes.c_double) # angle
# Extend wand.image.Image class to include method signature
class MotionImage(WandImage):
def motion_blur(self, radius=0.0, sigma=0.0, angle=0.0):
wandlibrary.MagickMotionBlurImage(self.wand, radius, sigma, angle)
# modification of https://github.com/FLHerne/mapgen/blob/master/diamondsquare.py
def plasma_fractal(mapsize=256, wibbledecay=3):
"""
Generate a heightmap using diamond-square algorithm.
Return square 2d array, side length 'mapsize', of floats in range 0-255.
'mapsize' must be a power of two.
"""
assert (mapsize & (mapsize - 1) == 0)
maparray = np.empty((mapsize, mapsize), dtype=np.float64)
maparray[0, 0] = 0
stepsize = mapsize
wibble = 100
def wibbledmean(array):
return array / 4 + wibble * np.random.uniform(-wibble, wibble, array.shape)
def fillsquares():
"""For each square of points stepsize apart,
calculate middle value as mean of points + wibble"""
cornerref = maparray[0:mapsize:stepsize, 0:mapsize:stepsize]
squareaccum = cornerref + np.roll(cornerref, shift=-1, axis=0)
squareaccum += np.roll(squareaccum, shift=-1, axis=1)
maparray[stepsize // 2:mapsize:stepsize,
stepsize // 2:mapsize:stepsize] = wibbledmean(squareaccum)
def filldiamonds():
"""For each diamond of points stepsize apart,
calculate middle value as mean of points + wibble"""
mapsize = maparray.shape[0]
drgrid = maparray[stepsize // 2:mapsize:stepsize, stepsize // 2:mapsize:stepsize]
ulgrid = maparray[0:mapsize:stepsize, 0:mapsize:stepsize]
ldrsum = drgrid + np.roll(drgrid, 1, axis=0)
lulsum = ulgrid + np.roll(ulgrid, -1, axis=1)
ltsum = ldrsum + lulsum
maparray[0:mapsize:stepsize, stepsize // 2:mapsize:stepsize] = wibbledmean(ltsum)
tdrsum = drgrid + np.roll(drgrid, 1, axis=1)
tulsum = ulgrid + np.roll(ulgrid, -1, axis=0)
ttsum = tdrsum + tulsum
maparray[stepsize // 2:mapsize:stepsize, 0:mapsize:stepsize] = wibbledmean(ttsum)
while stepsize >= 2:
fillsquares()
filldiamonds()
stepsize //= 2
wibble /= wibbledecay
maparray -= maparray.min()
return maparray / maparray.max()
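# Illustrative usage: a 256x256 diamond-square heightmap scaled to [0, 1];
# the fog corruption blends such a map into the image. Parameters are examples.
def _demo_plasma_fractal():
    heightmap = plasma_fractal(mapsize=256, wibbledecay=3)
    assert heightmap.shape == (256, 256)
    return heightmap.min(), heightmap.max()  # spans 0..1 after rescaling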
def clipped_zoom(img, zoom_factor):
h = img.shape[0]
# ceil crop height(= crop width)
ch = int(np.ceil(h / float(zoom_factor)))
top = (h - ch) // 2
img = scizoom(img[top:top + ch, top:top + ch], (zoom_factor, zoom_factor), order=1)
# trim off any extra pixels
trim_top = (img.shape[0] - h) // 2
return img[trim_top:trim_top + h, trim_top:trim_top + h]
def line_from_points(c0, r0, c1, r1):
if c1 == c0:
return np.zeros((28, 28))
# Decay function defined as log(1 - d/2) + 1
cc, rr = np.meshgrid(np.linspace(0, 27, 28), np.linspace(0, 27, 28), sparse=True)
m = (r1 - r0) / (c1 - c0)
f = lambda c: m * (c - c0) + r0
dist = np.clip(np.abs(rr - f(cc)), 0, 2.3 - 1e-10)
corruption = np.log(1 - dist / 2.3) + 1
corruption = np.clip(corruption, 0, 1)
l = int(np.floor(c0))
r = int(np.ceil(c1))
corruption[:,:l] = 0
corruption[:,r:] = 0
return np.clip(corruption, 0, 1)
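# Illustrative usage: a soft, anti-aliased line mask on the 28x28 MNIST grid,
# as used by the dotted_line/zigzag corruptions. The endpoints are arbitrary.
def _demo_line_from_points():
    mask = line_from_points(c0=4.0, r0=6.0, c1=22.0, r1=20.0)
    return mask.shape, float(mask.max())  # (28, 28) mask with values in [0, 1]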
# /////////////// End Corruption Helpers ///////////////
# /////////////// Corruptions ///////////////
def identity(x):
return np.array(x, dtype=np.float32)
def gaussian_noise(x, severity=5):
c = [.08, .12, 0.18, 0.26, 0.38][severity - 1]
x = np.array(x) / 255.
x = np.clip(x + np.random.normal(size=x.shape, scale=c), 0, 1) * 255
return x.astype(np.float32)
def shot_noise(x, severity=5):
c = [60, 25, 12, 5, 3][severity - 1]
x = np.array(x) / 255.
x = np.clip(np.random.poisson(x * c) / float(c), 0, 1) * 255
return x.astype(np.float32)
def impulse_noise(x, severity=4):
c = [.03, .06, .09, 0.17, 0.27][severity - 1]
x = sk.util.random_noise(np.array(x) / 255., mode='s&p', amount=c)
x = np.clip(x, 0, 1) * 255
return x.astype(np.float32)
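# Illustrative usage (hypothetical input image): the noise corruptions accept
# an image with values in 0..255 and return a float32 array in the same range.
def _demo_noise_corruptions():
    img = np.random.randint(0, 256, size=(28, 28)).astype(np.uint8)
    return shot_noise(img, severity=3), impulse_noise(img, severity=2)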
def speckle_noise(x, severity=5):
c = [.15, .2, 0.35, 0.45, 0.6][severity - 1]
x = np.array(x)
"""
Copyright (c) 2019, <NAME>
"""
import weakref
import numpy as np
from functools import lru_cache
from .element import transformation_matrix
__all__ = [
'load_distances',
'force_local_reactions',
'moment_local_reactions',
'local_reactions',
'clear_element_load_cache',
'ElementLoad',
]
def load_distances(dx, dy, dz, ix, delx):
"""
Returns the load distances to where an element load is applied.
Parameters
----------
dx, dy, dz : float
The element distance vector.
ix, : float
The distance from the i node of the element to where the beginning
of the loads are applied.
dx : float
The distance from the ix position toward the j node over which
the loads are applied.
"""
l = (dx**2 + dy**2 + dz**2)**0.5
l1 = l * abs(ix) if ix < 0 else ix
l2 = l * abs(delx) if delx < 0 else delx
l2 = l - l1 - l2
if l1 > l or l1 < 0 or l2 > l or l2 < 0:
raise ValueError('Load applied beyond element bounds.')
return l, l1, l2
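# Illustrative usage (hypothetical values): a 5 m element along x with a load
# patch starting 1 m from node i and extending 2 m toward node j.
def _demo_load_distances():
    l, l1, l2 = load_distances(dx=5.0, dy=0.0, dz=0.0, ix=1.0, delx=2.0)
    return l, l1, l2  # (5.0, 1.0, 2.0): length, lead-in distance, trailing distance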
def force_local_reactions(fx, fy, fz, dx, dy, dz, roll, ix, delx):
"""
Returns the local force reaction vector for an element.
Parameters
----------
fx, fy, fz : float
The force vector.
dx, dy, dz : float
The element distance vector.
roll : float
The roll of the element.
ix, : float
The distance from the i node of the element to where the beginning
of the loads are applied.
dx : float
The distance from the ix position toward the j node over which
the loads are applied.
"""
l, l1, l2 = load_distances(dx, dy, dz, ix, delx)
t = transformation_matrix(dx, dy, dz, roll)
if delx == 0:
# Point load
fsi = (l2**2 / l**3) * (3*l1 + l2)
fmi = l1*l2**2 / l**2
fsj = (l1**2 / l**3) * (l1 + 3*l2)
fmj = -fmi
fti = ftj = 0
fai = l2 / l
faj = l1 / l
else:
# Uniform load
fsi = (l / 2) * (1 - (2*l**3 - 2*l1**2*l + l1**3)*l1/l**4 - (2*l - l2)*l2**3/l**4)
fmi = (l**2 / 12) * (1 - (6*l**2 - 8*l1*l + 3*l1**2)*l1**2/l**4 - (4*l - 3*l2)*l2**3/l**4)
fsj = (l / 2) * (1 - (2*l - l1)*l1**3/l**4 - (2*l**3 - 2*l2**2*l + l2**3)*l2/l**4)
fmj = -(l**2 / 12) * (1 - (4*l - 3*l1)*l1**3/l**4 - (6*l**2 - 8*l2*l + 3*l2**2)*l2**2/l**4)
fti = ftj = 0
fai = (l / 2) * (l - l1 - l2) * (l - l1 + l2)
faj = -fai
fx, fy, fz = t[:3,:3].dot([fx, fy, fz])
r = [-fx*fai, -fy*fsi, -fz*fsi, -fti, -fz*fmi, -fy*fmi,
-fx*faj, -fy*fsj, -fz*fsj, -ftj, -fz*fmj, -fy*fmj]
return np.array(r, dtype='float')
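# Hedged example (added for illustration): fixed-end reactions of a unit
# transverse point load at the midpoint of a 2 m element along x. The result
# follows the sign convention of the function above, so treat this as a sketch
# rather than a reference solution.
def _demo_force_local_reactions():
    r = force_local_reactions(fx=0.0, fy=-1.0, fz=0.0,
                              dx=2.0, dy=0.0, dz=0.0, roll=0.0,
                              ix=1.0, delx=0.0)
    return r  # 12-component local reaction vector (i-node DOFs, then j-node DOFs)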
def moment_local_reactions(mx, my, mz, dx, dy, dz, roll, ix, delx):
"""
Returns the local moment reaction vector for an element.
Parameters
----------
mx, my, mz : float
The moment vector.
dx, dy, dz : float
The element distance vector.
roll : float
The roll of the element.
ix, : float
The distance from the i node of the element to where the beginning
of the loads are applied.
dx : float
The distance from the ix position toward the j node over which
the loads are applied.
"""
l, l1, l2 = load_distances(dx, dy, dz, ix, delx)
t = transformation_matrix(dx, dy, dz, roll)
if delx == 0:
# Point load
fsi = -6*l1*l2 / l**3
fmi = (l2 / l**2) * (l2 - 2*l1)
fsj = -fsi
fmj = (l1 / l**2) * (l1 - 2*l2)
fti = l2 / l
ftj = l1 / l
fai = faj = 0
else:
# Uniform load
fsi = 2*((l-l2)**3 - l1**3)/l**3 - 3*((l-l2)**2 - l1**2)/l**2
fmi = ((l-l2) - l1) - 2*((l-l2)**2 - l1**2)/l + ((l-l2)**3 - l1**3)/l**2
fsj = -fsi
fmj = ((l-l2)**3 - l1**3)/l**2 - ((l-l2)**2 - l1**2)/l
fti = ((l-l2) - l1) - ((l-l2)**2 - l1**2)/(2*l)
ftj = ((l-l2)**2 - l1**2)/(2*l)
fai = faj = 0
mx, my, mz = t[:3,:3].dot([mx, my, mz])
r = [-fai, -my*fsi, -mx*fsi, -mx*fti, -my*fmi, -mz*fmi,
-faj, -my*fsj, -mx*fsj, -mx*ftj, -my*fmj, -mz*fmj]
return np.array(r, dtype='float')
@lru_cache(maxsize=1000)
def local_reactions(fx, fy, fz, mx, my, mz, dx, dy, dz, roll, ix, delx,
imx_free, imy_free, imz_free, jmx_free, jmy_free, jmz_free):
"""
Returns the local reaction vector for an element.
Parameters
----------
fx, fy, fz : float
The force vector.
mx, my, mz : float
The moment vector.
dx, dy, dz : float
The element distance vector.
roll : float
The roll of the element.
ix, : float
The distance from the i node of the element to where the beginning
of the loads are applied.
dx : float
The distance from the ix position toward the j node over which
the loads are applied.
imx_free, imy_free, imz_free : bool
The fixities at the i end of the element.
jmx_free, jmy_free, jmz_free : bool
The fixities at the j end of the element.
"""
r = force_local_reactions(fx, fy, fz, dx, dy, dz, roll, ix, delx)
r += moment_local_reactions(mx, my, mz, dx, dy, dz, roll, ix, delx)
# Element length is needed by the end-fixity adjustments below.
l, _, _ = load_distances(dx, dy, dz, ix, delx)
# Adjust reactions for element end fixities
if imz_free:
if not jmz_free:
# Free-Fixed
r[9] += r[3]
r[3] = 0
else:
# Free-Free
r[3] = r[9] = 0
elif jmz_free:
# Fixed-Free
r[3] += r[9]
r[9] = 0
if imx_free:
if not jmx_free:
# Free-Fixed
r[1] -= 1.5 * r[5] / l
r[7] += 1.5 * r[5] / l
r[11] -= 0.5 * r[5]
r[5] = 0
else:
# Free-Free
r[1] -= (r[5] + r[11]) / l
r[7] += (r[5] + r[11]) / l
r[5] = r[11] = 0
elif jmx_free:
# Fixed-Free
r[1] -= 1.5 * r[11] / l
r[5] -= 0.5 * r[11]
r[7] += 1.5 * r[11] / l
r[11] = 0
if imy_free:
if not jmy_free:
# Free-Fixed
r[2] += 1.5 * r[4] / l
r[8] -= 1.5 * r[4] / l
r[10] -= 0.5 * r[4] / l
r[4] = 0
else:
# Free-Free
r[2] += (r[4] + r[10]) / l
r[8] -= (r[4] + r[10]) / l
r[4] = r[10] = 0
elif jmy_free:
# Fixed-Free
r[2] += 1.5 * r[10] / l
r[4] -= 0.5 * r[10]
r[8] -= 1.5 * r[10] / l
r[10] = 0
return r
def clear_element_load_cache():
"""Clears the element load function cache."""
local_reactions.cache_clear()
class ElementLoad(np.ndarray):
"""
A class representing an element load.
Parameters
----------
element : str
The name of the element to which the loads are applied.
fx, fy, fz : float
The global forces applied to the element.
mx, my, mz : float
The global moments applied to the element.
ix, : float
The distance from the i node at where the loads are applied.
dx : float
The distance from the ix position toward the j node over which
the loads are applied.
"""
def __new__(cls, element, fx=0, fy=0, fz=0, mx=0, my=0, mz=0, ix=0, dx=-1):
obj = np.array([fx, fy, fz, mx, my, mz], dtype='float')
'''
Functions in this Library:
do_pca(n_components, data)
lineplot(absicca, ord_1, ord_2)
pca_results(full_dataset, pca)
plot_component(pca, comp)
plot_components(X, y)
scree_plot(pca)
'''
# Standard Library Imports
import warnings
warnings.filterwarnings("ignore", category = UserWarning, module = "matplotlib")
import pandas as pd
import numpy as np
from sklearn.decomposition import PCA
from sklearn.preprocessing import StandardScaler
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix, accuracy_score
import matplotlib.image as mpimg
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import seaborn as sns
# Functions
def do_pca(n_components, data):
'''
Transforms data using PCA to create n_components, and provides back the results of the
transformation.
INPUT: n_components - int - the number of principal components to create
data - the data you would like to transform
OUTPUT: pca - the pca object created after fitting the data
X_pca - the transformed X matrix with new number of components
'''
X = StandardScaler().fit_transform(data)
pca = PCA(n_components)
X_pca = pca.fit_transform(X)
return pca, X_pca
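# Illustrative usage (random stand-in data): reduce a 10-feature matrix to
# three principal components and report the total variance they explain.
def _demo_do_pca():
    rng = np.random.RandomState(0)
    data = pd.DataFrame(rng.randn(200, 10))
    pca, X_pca = do_pca(3, data)
    return X_pca.shape, pca.explained_variance_ratio_.sum()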
def lineplot(absicca, ord_1, ord_2):
fig, ax = plt.subplots(figsize=(18,10))
ax.set_title('ACTUAL vs PREDICTED VALUE', fontsize =30)
ax.set_xlabel('TEST CASE', fontsize =20)
ax.set_ylabel('$ VALUE', fontsize =20)
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.set_ylim(0,4500000)
ax.set_xlim(250,350)
line1, = ax.plot(absicca, ord_1, label='actual')
line1.set_dashes([2, 2, 10, 2]) # 2pt line, 2pt break, 10pt line, 2pt break
line2, = ax.plot(absicca, ord_2, dashes=[6, 2], label='predicted')
ax.legend(loc=1, prop={'size':20})
return
def pca_results(full_dataset, pca):
'''
Create a DataFrame of the PCA results
Includes dimension feature weights and explained variance
Visualizes the PCA results
'''
# Dimension indexing
dimensions = ['Dimension {}'.format(i) for i in range(1, len(pca.components_) + 1)]
# PCA components
components = pd.DataFrame(np.round(pca.components_, 4), columns = full_dataset.keys())
components.index = dimensions
# PCA explained variance
ratios = pca.explained_variance_ratio_.reshape(len(pca.components_), 1)
variance_ratios = pd.DataFrame(np.round(ratios, 4), columns = ['Explained Variance'])
variance_ratios.index = dimensions
# Create a bar plot visualization
fig, ax = plt.subplots(figsize = (14,8))
# Plot the feature weights as a function of the components
components.plot(ax = ax, kind = 'bar');
ax.set_ylabel("Feature Weights")
ax.set_xticklabels(dimensions, rotation=0)
# Display the explained variance ratios
for i, ev in enumerate(pca.explained_variance_ratio_):
ax.text(i-0.40, ax.get_ylim()[1] + 0.05, "Explained Variance\n %.4f"%(ev))
# Return a concatenated DataFrame
return pd.concat([variance_ratios, components], axis = 1)
def plot_component(pca, comp):
'''
Plots an image associated with each component to understand how the weighting
of the components
INPUT:
pca - pca object created from PCA in sklearn
comp - int - the component you want to see starting at 0
OUTPUT
None
'''
if comp <= len(pca.components_):
mat_data = np.asarray(pca.components_[comp]).reshape(28, 28)  # reshape component weights into an image
plt.imshow(mat_data); #plot the data
plt.xticks([]) #removes numbered labels on x-axis
plt.yticks([]) #removes numbered labels on y-axis
else:
print('That is not the right input, please read the docstring before continuing.')
def plot_components(X, y):
'''
plots the data in a 2 dimensional space to view separation
INPUT: X - the x-matrix of input features
y - the response column
OUTPUT: none
'''
x_min, x_max = np.min(X, 0), np.max(X, 0)
import scipy.io as scio
import h5py
import numpy as np
import time
from SoundSourceLocalization.lib.utils import standard_normalizaion, wise_standard_normalizaion, shuffle_data, \
split_data
from scipy.signal import resample
def mi_load(data_path, s_id, is_processed=None):
""" load MI dataset lubin processed """
data_temp = scio.loadmat(data_path + '/A0' + str(s_id + 1) + '.mat')
data = np.transpose(data_temp['x'], (2, 0, 1))
labels = np.asarray(data_temp['y']).squeeze()
if is_processed == 'cov':
data = cov_process(data)
data = np.reshape(data, [len(data), -1])
elif is_processed == 'csp':
data = csp_process([data, labels], filter)
data = np.reshape(data, [len(data), -1])
else:
data = np.reshape(data, [data.shape[0], 1, data.shape[1], data.shape[2]])
data = standard_normalizaion(data)
s = s_id * np.ones(shape=[len(labels)])
return data, labels, s
def one_hot_encoder(y, num_classes=None, dtype='float32'):
""" copied from tf.keras.utils.to_categorical"""
y = np.array(y, dtype='int')
input_shape = y.shape
if input_shape and input_shape[-1] == 1 and len(input_shape) > 1:
input_shape = tuple(input_shape[:-1])
y = y.ravel()
if not num_classes:
num_classes = np.max(y) + 1
n = y.shape[0]
categorical = np.zeros((n, num_classes), dtype=dtype)
categorical[np.arange(n), y] = 1
output_shape = input_shape + (num_classes,)
categorical = np.reshape(categorical, output_shape)
return categorical
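# Illustrative usage: class indices 0..3 become one-hot rows; num_classes is
# inferred from the data when not given.
def _demo_one_hot_encoder():
    y = np.array([0, 2, 1, 3])
    return one_hot_encoder(y)  # shape (4, 4) with a single 1 per row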
def load_hole_dataset(sbj_idx, ds_path, shuffle=True, normalization=None, split=None, one_hot=False):
ds = np.load(ds_path, allow_pickle=True)
x = ds['x']
y = ds['y']
del ds
x = np.concatenate(x[sbj_idx], axis=0)
x = np.expand_dims(x, axis=1)
y = np.concatenate(y[sbj_idx], axis=0)[:, -1] // 45
if one_hot:
y = one_hot_encoder(y)
if normalization is not None:
for i in range(len(x)):
x[i] = wise_standard_normalizaion(x[i], normalization)
if shuffle:
x, y = shuffle_data([x, y])
if split is not None:
split_idx = int(len(y) * split)
return x[:split_idx], y[:split_idx], x[split_idx:], y[split_idx:]
return x, y
def cov_process(data):
""" Covariance matrix """
cov_data = []
data_size = len(data)
for i in range(data_size):
data_temp = np.dot(data[i], np.transpose(data[i])) # / np.trace(np.dot(data[i], np.transpose(data[i])))
data_temp = np.reshape(data_temp, [-1])
import numpy as np
from pythreejs import *
from ipywidgets import Layout
import ipywidgets as widgets
import math
from ..utils import ColorMap
class Viewer:
def __init__(self, mesh, UI = True, mesh_color=None, width=700, height=700):
self.mesh = mesh
self.scene = None
self.__UI = UI
self.width = width
self.height = height
if 'Skeleton' in str(type(mesh)):
self.skel = mesh
self.center = list(self.skel.nodes.mean(axis=0))
if hasattr(self.skel, 'associated_mesh'):
self.mesh = self.skel.associated_mesh
if 'Skeleton' not in str(type(self.mesh)):
if mesh_color is None:
self.mesh_color = np.array([[1, 212, 180], [1, 212, 180], [1, 212, 180]], dtype=float) / 255
else:
self.mesh_color = np.array([mesh_color,mesh_color,mesh_color])
if 'Hexmesh' in str(type(self.mesh)) or 'Quadmesh' in str(type(self.mesh)):
self.mesh_color = np.repeat(self.mesh_color, self.mesh.num_faces*2, axis=0)
else:
self.mesh_color = np.repeat(self.mesh_color, self.mesh.num_faces, axis=0)
self.center = list(self.mesh.vertices[self.mesh.boundary()[0].flatten()].mean(axis = 0))
self.flip_x_value = False
self.flip_y_value = False
self.flip_z_value = False
if UI:
self.__create_UI()
def __create_UI(self):
"""Creates user interface
"""
# ----------------------- M E N U S L I C E -----------------------------
#style = {width: '50px'}
#titax = widgets.Label(value='Slice from axes', layout=widgets.Layout(padding='1px 1px 1px 1px', margin='1px 1px 1px 1px'))
row_layout = {'width':'100px', 'padding':'1px 1px 1px 1px', 'margin':'1px 1px 1px 1px'}
wireframe_layout = {'width':'100px','padding':'1px 1px 1px 1px', 'margin':'1px 1px 1px 1px'}
self.invisibleLayout = {'display':'none'}
self.visibleLayout = {'display':''}
self.label_layout = {'display':'block', 'max_width' : '80px'}
self.flip_x = widgets.ToggleButton(
value=False,
description='Flip x',
disabled=False,
button_style='info', # 'success', 'info', 'warning', 'danger' or ''
tooltip='Flip the visualization range on x axis',
icon='check',
layout=row_layout
)
self.flip_y = widgets.ToggleButton(
value=False,
description='Flip y',
disabled=False,
button_style='info', # 'success', 'info', 'warning', 'danger' or ''
tooltip='Flip the visualization range on y axis',
icon='check',
layout=row_layout
)
self.flip_z = widgets.ToggleButton(
value=False,
description='Flip z',
disabled=False,
button_style='info', # 'success', 'info', 'warning', 'danger' or ''
tooltip='Flip the visualization range on z axis',
icon='check',
layout=row_layout
)
self.clipping_slider_x = widgets.FloatRangeSlider(
value=[self.round_down(self.mesh.cut['min_x'],3)-0.001, self.round_up(self.mesh.cut['max_x'],3)+0.001],
min=self.round_down(self.mesh.cut['min_x'],3)-0.001,
max=self.round_up(self.mesh.cut['max_x'],3)+0.001,
step=0.001,
description='X:',
disabled=False,
continuous_update=True,
orientation='horizontal',
readout=True,
readout_format=".3f",
layout=widgets.Layout(width='30%')
)
self.clipping_slider_y = widgets.FloatRangeSlider(
value=[self.round_down(self.mesh.cut['min_y'],3)-0.001, self.round_up(self.mesh.cut['max_y'],3)+0.001],
min=self.round_down(self.mesh.cut['min_y'],3)-0.001,
max=self.round_up(self.mesh.cut['max_y'],3)+0.001,
step=0.001,
description='Y:',
disabled=False,
continuous_update=True,
orientation='horizontal',
readout=True,
readout_format=".3f",
layout=widgets.Layout(width='30%')
)
self.clipping_slider_z = widgets.FloatRangeSlider(
value=[self.round_down(self.mesh.cut['min_z'],3)-0.001,self.round_up(self.mesh.cut['max_z'],3)+0.001],
min=self.round_down(self.mesh.cut['min_z'],3)-0.001,
max=self.round_up(self.mesh.cut['max_z'],3)+0.001,
step=0.001,
description='Z:',
disabled=False,
continuous_update=True,
orientation='horizontal',
readout=True,
readout_format=".3f",
layout=widgets.Layout(width='30%')
)
self.external_color = widgets.ColorPicker(
concise=True,
description='Pick a color',
value='blue',
disabled=False
)
hbox1 = widgets.HBox([self.percXp,self.flip_x])
hbox2 = widgets.HBox([self.percYp,self.flip_y])
hbox3 = widgets.HBox([self.percZp,self.flip_z])
vbox=widgets.VBox([hbox1,hbox2,hbox3],
layout=widgets.Layout(width='100%'))
self.wireframe_thickness_slider = widgets.FloatSlider(
value=0.2,
min=0.,
max=1.,
step=0.1,
continuous_update=True,
readout_format=".1f",
layout=widgets.Layout(width='30%'),
description = 'Wireframe',
disable = False,
)
self.wireframe_color_picker = widgets.ColorPicker(
concise=True,
description='Color',
value='#686868',
disabled=False,
)
self.color_map = widgets.Dropdown(
options=[(i, idx) for idx, i in enumerate(ColorMap.color_maps.keys())],
value=0,
description='Color-Map:',
layout=self.invisibleLayout
)
self.coloring_type_menu = widgets.Dropdown(
options=[('Default', 0), ('Simplex Quality', 1), ('Label',2)],
value=0,
description='Type Color:',
)
self.metric_menu = widgets.Dropdown(
options= [(i, idx) for idx, i in enumerate(self.mesh.simplex_metrics.keys())],
value=0,
description='Metric:',
layout=self.invisibleLayout
)
self.color_internal = widgets.ColorPicker(
concise=True,
description='Color inside',
value='#FF9C00',
disabled=False,
)
self.color_label_pickers = [widgets.ColorPicker(
concise=True,
description='Label ' + str(i),
value= self.listColor(int(i)),
disabled=False,
layout= self.invisibleLayout
) for i in range(len(np.unique(self.mesh.labels)))]
self.flip_x.observe(self.__slicing, names='value')
self.percXp.observe(self.__slicing, names='value')
self.flip_y.observe(self.__slicing, names='value')
self.percYp.observe(self.__slicing, names='value')
self.flip_z.observe(self.__slicing, names='value')
self.percZp.observe(self.__slicing, names='value')
self.wireSlider.observe(self.__set_wireframe_width, names='value')
self.colorWireframe.observe(self.__set_wireframe_color, names='value')
self.colorMap.observe(self.change_color_map, names='value')
self.colorSurface.observe(self.change_color_surface, names='value')
self.colorInside.observe(self.change_color_inside, names='value')
self.chosen_metric.observe(self.change_color_map, names='value')
[i.observe(self.change_color_label,names='value') for i in self.itemsColorsLabel]
self.typeColorSurface.observe(self.change_type_color, names='value')
#menu slice
vvbox=widgets.VBox([vbox], layout={'height': '100px'})
#menu rendering
box_rendering = widgets.HBox([self.wireSlider,self.colorWireframe], layout={'height': '100px'})
box_rendering01 = widgets.HBox([self.colorSurface], layout={'height': '100px'})
if 'Hexmesh' in str(type(self.mesh)) or 'Tetmesh' in str(type(self.mesh)):
box_rendering01 = widgets.HBox([self.typeColorSurface,self.colorMap, self.chosen_metric, self.colorSurface, self.colorInside] + self.itemsColorsLabel, layout={'height': '130px'})
else:
box_rendering01 = widgets.HBox([self.typeColorSurface,self.colorMap, self.chosen_metric, self.colorSurface] + self.itemsColorsLabel, layout={'height': '100px'})
#boxRendering02 = widgets.HBox(self.itemsColorsLabel)
#boxRendering1 = widgets.HBox([boxRendering01,boxRendering02])
vertical_rendering = widgets.VBox([box_rendering, box_rendering01], layout={'height': '130px'})
self.accordion = widgets.Accordion(children=[vvbox, vertical_rendering])
self.accordion.set_title(0,"Slice from axes")
self.accordion.set_title(1,"Rendering")
display(self.accordion)
def __set_wireframe_color(self, change=None):
self.line_.material.color = self.colorWireframe.value
def __set_wireframe_width(self, change=None):
self.line_.material.opacity = self.wireSlider.value
def listColor(self,n):
if n == 0:
color = '#ff0000'
elif n == 1:
color = '#ffff00'
elif n == 2:
color = '#00ffff'
elif n == 3:
color = '#ff00ff'
elif n == 4:
color = '#0000ff'
elif n == 5:
color = '#af0fa0'
elif n == 6:
color = '#f0a0f0'
else:
color = '#ffffff'
return color
def round_up(self,n, decimals=0):
multiplier = 10 ** decimals
return math.ceil(n * multiplier) / multiplier
def round_down(self,n, decimals=0):
multiplier = 10 ** decimals
return math.floor(n * multiplier) / multiplier
def change_color_label(self, change=None):
if self.mesh_color.shape[0] != self.mesh.labels.shape[0]:
self.mesh_color = np.zeros((self.mesh.labels.shape[0], 3))
for idx, color in enumerate(self.itemsColorsLabel):
self.mesh_color[self.mesh.labels == idx] = [int(color.value[1:3],16)/255,int(color.value[3:5],16)/255,int(color.value[5:7],16)/255]
if 'Hexmesh' in str(type(self.mesh)):
self.mesh_color = np.repeat(self.mesh_color, 6*2*3, axis=0)
elif 'Quadmesh' in str(type(self.mesh)):
self.mesh_color = np.repeat(self.mesh_color, 2*3, axis=0)
elif 'Tetmesh' in str(type(self.mesh)):
self.mesh_color = np.repeat(self.mesh_color, 4*3, axis=0)
else:
self.mesh_color = np.repeat(self.mesh_color, 3, axis=0)
self.__update_draw()
def change_color_surface(self, change=None):
faces_per_poly = 0
faces_in_face = 2
if 'Tetmesh' in str(type(self.mesh)):
faces_per_poly = 4
faces_in_face = 1
elif 'Hexmesh' in str(type(self.mesh)):
faces_per_poly = 6
elif 'Quadmesh' in str(type(self.mesh)):
faces_per_poly = 1
elif 'Trimesh' in str(type(self.mesh)):
faces_per_poly = 1
faces_in_face = 1
mesh_color = [int(self.colorSurface.value[1:3],16)/255,int(self.colorSurface.value[3:5],16)/255,int(self.colorSurface.value[5:7],16)/255]
if 'Trimesh' in str(type(self.mesh)) or 'Quadmesh' in str(type(self.mesh)):
indices = np.repeat(self.mesh.boundary()[1], faces_per_poly*faces_in_face*3)
else:
indices = np.logical_not(np.repeat(self.mesh.internals, faces_per_poly*faces_in_face*3))
self.mesh_color[indices] = np.array(mesh_color)
self.__update_draw()
def change_color_inside(self, change=None):
faces_per_poly = 0
faces_in_face = 2
if 'Tetmesh' in str(type(self.mesh)):
faces_per_poly = 4
faces_in_face = 1
elif 'Hexmesh' in str(type(self.mesh)):
faces_per_poly = 6
mesh_color = [int(self.colorInside.value[1:3],16)/255,int(self.colorInside.value[3:5],16)/255,int(self.colorInside.value[5:7],16)/255]
indices = np.repeat(self.mesh.internals, faces_per_poly*faces_in_face*3)
self.mesh_color[indices] = np.array(mesh_color)
self.__update_draw()
def change_side_view(self,change=None):
if change.new == 'Front':
self.view_fromt_side()
elif change.new == 'Back':
self.view_back_side()
elif change.new == 'Double':
self.view_double_side()
def change_color_map(self, change=None):
metric_keys = list(self.mesh.simplex_metrics.keys())
metric_idx = metric_keys[self.chosen_metric.value]
metric = self.mesh.simplex_metrics[metric_idx][1]
color_map_keys = list(ColorMap.color_maps.keys())
color_map_idx = color_map_keys[self.colorMap.value]
color_map = ColorMap.color_maps[color_map_idx]
min_range = self.mesh.simplex_metrics[metric_idx][0][0]
max_range = self.mesh.simplex_metrics[metric_idx][0][1]
if ( min_range is None or max_range is None):
min_range = np.min(metric)
print('Importing packages...')
import pandas as pd
import matplotlib.pyplot as plt
import datetime as dt
import seaborn as sns
import numpy as np
import matplotlib.dates as mdates
import datetime
#sns.set(color_codes=True)
import matplotlib as mpl
mpl.rcParams['pdf.fonttype'] = 42
import statistics as st
sns.set_style('whitegrid', {'axes.linewidth' : 0.5})
from statsmodels.distributions.empirical_distribution import ECDF
import scipy
import gc
column_list = ['scen_num', 'reopening_multiplier_4']
for ems_region in range(1,12):
column_list.append('hosp_det_EMS-' + str(ems_region))
column_list.append('hosp_det_cumul_EMS-' + str(ems_region))
column_list.append('detected_cumul_EMS-' + str(ems_region))
#Specify paths to trajectories. For this run, all trajectories were temporarily stored in the same folder.
print('Reading trajectories...')
sub1 = pd.read_csv('trajectoriesDat_1.csv', usecols=column_list) #0.08 - 0.09
print('Trajectory 1 read.')
sub2 = pd.read_csv('trajectoriesDat_2.csv', usecols=column_list) #0.10 - 0.115
print('Trajectory 2 read.')
sub3 = pd.read_csv('trajectoriesDat_3.csv', usecols=column_list) #0.087 - 0.10
print('Trajectory 3 read.')
sub4 = pd.read_csv('trajectoriesDat_08.csv', usecols=column_list) # 0.08 - 0.10
sub4['scen_num'] = sub4['scen_num'].values + 1000
print('Trajectory 4 read.')
sub5 = pd.read_csv('trajectoriesDat_300.csv', usecols=column_list) #0.1 - 0.11
sub5['scen_num'] = sub5['scen_num'].values + 2000
print('Trajectory 5 read.')
sub6 = pd.read_csv('trajectoriesDat_600.csv', usecols=column_list) #0.115 - 0.13
sub6['scen_num'] = sub6['scen_num'].values + 2000
print('Trajectory 6 read.')
sub7 = pd.read_csv('trajectoriesDat_1000.csv', usecols=column_list) #0.13 - 0.15
sub7['scen_num'] = sub7['scen_num'].values + 2000
print('Trajectory 7 read.')
sub8 = pd.read_csv('trajectoriesDat_15.csv', usecols=column_list) #0.13 - 0.15
sub8['scen_num'] = sub8['scen_num'].values + 3000
print('Trajectory 8 read.')
###loop here
for region in ['NE', 'NC', 'CE', 'SO']:
for capacity in ['high', 'low']:
for metric in ['det', 'hosp']: #current implementation only allows tracking new_detected and new_hosp.
boink = []
### Region
#hospital_capacity = 1907
#NE 4919 8609 12299
#NC 1089 1907 2724
#CE 856 1498 2140
#SO 640 1121 1601
### Metric to assess:
if metric == 'det':
notif = 'new_det_' + region
if metric == 'hosp':
notif = 'new_hosp_det_' + region
### Simulation Dates to Examine
lower_limit = 145
upper_limit = 225
grain = 1
prob_over_array = []
range_1 = np.arange(0, 25, 0.01)
### Capacity
### Which trajectories to use for each capacity were determined by hand.
if region == 'NE':
if capacity == 'low':
hospital_capacity = 4919
trajectories = pd.concat([sub1, sub3, sub4]).reset_index()
elif capacity == 'high':
hospital_capacity = 8609
trajectories = pd.concat([sub1, sub2, sub3]).reset_index()
elif region == 'NC':
if capacity == 'low':
hospital_capacity = 1089
trajectories = pd.concat([sub4, sub5, sub6, sub7]).reset_index()
elif capacity == 'high':
hospital_capacity = 1907
trajectories = pd.concat([sub5, sub6, sub7]).reset_index()
elif region == 'CE':
if capacity == 'low':
hospital_capacity = 856
trajectories = pd.concat([sub5, sub6, sub7]).reset_index()
elif capacity == 'high':
hospital_capacity = 1498
trajectories = sub8 #pd.concat([sub5, sub6, sub7, sub8]).reset_index() ##need new
elif region == 'SO':
if capacity == 'low':
hospital_capacity = 640
trajectories = pd.concat([sub1, sub2, sub3]).reset_index()
elif capacity == 'high':
hospital_capacity = 1121
trajectories = pd.concat([sub5, sub6, sub7]).reset_index()
#NE Region
trajectories['hosp_det_NE'] = trajectories['hosp_det_EMS-11'] + \
trajectories['hosp_det_EMS-10'] + \
trajectories['hosp_det_EMS-9'] + \
trajectories['hosp_det_EMS-8'] + \
trajectories['hosp_det_EMS-7']
trajectories['hosp_det_cumul_NE'] = trajectories['hosp_det_cumul_EMS-11'] + \
trajectories['hosp_det_cumul_EMS-10'] + \
trajectories['hosp_det_cumul_EMS-9'] + \
trajectories['hosp_det_cumul_EMS-8'] + \
trajectories['hosp_det_cumul_EMS-7']
trajectories['detected_cumul_NE'] = trajectories['detected_cumul_EMS-11'] + \
trajectories['detected_cumul_EMS-10'] + \
trajectories['detected_cumul_EMS-9'] + \
trajectories['detected_cumul_EMS-8'] + \
trajectories['detected_cumul_EMS-7']
#NC Region
trajectories['hosp_det_NC'] = trajectories['hosp_det_EMS-1'] + trajectories['hosp_det_EMS-2']
trajectories['hosp_det_cumul_NC'] = trajectories['hosp_det_cumul_EMS-1'] + trajectories['hosp_det_cumul_EMS-2']
trajectories['detected_cumul_NC'] = trajectories['detected_cumul_EMS-1'] + trajectories['detected_cumul_EMS-2']
#CE Region
trajectories['hosp_det_CE'] = trajectories['hosp_det_EMS-3'] + trajectories['hosp_det_EMS-6']
trajectories['hosp_det_cumul_CE'] = trajectories['hosp_det_cumul_EMS-3'] + trajectories['hosp_det_cumul_EMS-6']
trajectories['detected_cumul_CE'] = trajectories['detected_cumul_EMS-3'] + trajectories['detected_cumul_EMS-6']
#SO Region
trajectories['hosp_det_SO'] = trajectories['hosp_det_EMS-4'] + trajectories['hosp_det_EMS-5']
trajectories['hosp_det_cumul_SO'] = trajectories['hosp_det_cumul_EMS-4'] + trajectories['hosp_det_cumul_EMS-5']
trajectories['detected_cumul_SO'] = trajectories['detected_cumul_EMS-4'] + trajectories['detected_cumul_EMS-5']
print('Region: ' + region)
print('Capacity: ' + str(capacity))
print('Metric: ' + str(notif))
thresh = []
p_array = []
dates_array = []
over_array = []
no_array = []
days_array = np.arange(lower_limit,upper_limit, grain)
for notif_period in days_array:
trajectories_new = trajectories
unique_scen = np.array(list(set(trajectories_new['scen_num'].values)))
overflow_date = []
max_date = []
#notif = 'new_detected'
overflow_traj = []
traj = []
non_overflow_traj = []
overflow_scens = []
non_overflow_scens = []
non_overflow_crit_day = []
overflow_crit_day = []
overflow_week = []
overflow_prior_week = []
non_overflow_week = []
non_overflow_prior_week = []
crit_day = []
week = []
week_prior = []
crit = notif_period
for scen in unique_scen:
new = trajectories_new[(trajectories_new['scen_num'] == scen)].reset_index()
new['new_hosp_det_NE'] = np.append(np.array([0.0]), np.diff(new['hosp_det_cumul_NE'].values))
new['new_det_NE'] = np.append(np.array([0.0]), np.diff(new['detected_cumul_NE'].values))
new['new_hosp_det_NC'] = np.append(np.array([0.0]), np.diff(new['hosp_det_cumul_NC'].values))
new['new_det_NC'] = np.append(np.array([0.0]), np.diff(new['detected_cumul_NC'].values))
new['new_hosp_det_CE'] = np.append(np.array([0.0]), np.diff(new['hosp_det_cumul_CE'].values))
new['new_det_CE'] = np.append(np.array([0.0]), np.diff(new['detected_cumul_CE'].values))