| prompt | completion | api |
|---|---|---|
| string (length 15 to 655k) | string (length 3 to 32.4k) | string (length 8 to 52) |
'''
Project: https://github.com/fabro66/GAST-Net-3DPoseEstimation
'''
import numpy as np
h36m_coco_order = [9, 11, 14, 12, 15, 13, 16, 4, 1, 5, 2, 6, 3]
coco_order = [0, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]
spple_keypoints = [10, 8, 0, 7]
scores_h36m_toe_oeder = [1, 2, 3, 5, 6, 7, 11, 13, 14, 15, 16, 17, 18]
kpts_h36m_toe_order = [0, 1, 2, 3, 5, 6, 7, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18]
scores_coco_order = [12, 14, 16, 11, 13, 15, 0, 5, 7, 9, 6, 8, 10]
h36m_mpii_order = [3, 2, 1, 4, 5, 6, 0, 8, 9, 10, 16, 15, 14, 11, 12, 13]
mpii_order = [i for i in range(16)]
lr_hip_shouler = [2, 3, 12, 13]
def coco_h36m(keypoints):
temporal = keypoints.shape[0]
keypoints_h36m = np.zeros_like(keypoints, dtype=np.float32)
htps_keypoints = np.zeros((temporal, 4, 2), dtype=np.float32)
# htps_keypoints: head, thorax, pelvis, spine
htps_keypoints[:, 0, 0] = np.mean(keypoints[:, 1:5, 0], axis=1, dtype=np.float32)
htps_keypoints[:, 0, 1] = np.sum(keypoints[:, 1:3, 1], axis=1, dtype=np.float32) - keypoints[:, 0, 1]
htps_keypoints[:, 1, :] = np.mean(keypoints[:, 5:7, :], axis=1, dtype=np.float32)
htps_keypoints[:, 1, :] += (keypoints[:, 0, :] - htps_keypoints[:, 1, :]) / 3
htps_keypoints[:, 2, :] = np.mean(keypoints[:, 11:13, :], axis=1, dtype=np.float32)
htps_keypoints[:, 3, :] = np.mean(keypoints[:, [5, 6, 11, 12], :], axis=1, dtype=np.float32)
keypoints_h36m[:, spple_keypoints, :] = htps_keypoints
keypoints_h36m[:, h36m_coco_order, :] = keypoints[:, coco_order, :]
keypoints_h36m[:, 9, :] -= (keypoints_h36m[:, 9, :] - np.mean(keypoints[:, 5:7, :], axis=1, dtype=np.float32)) / 4
keypoints_h36m[:, 7, 0] += 2*(keypoints_h36m[:, 7, 0] - np.mean(keypoints_h36m[:, [0, 8], 0], axis=1, dtype=np.float32))
keypoints_h36m[:, 8, 1] -= (np.mean(keypoints[:, 1:3, 1], axis=1, dtype=np.float32) - keypoints[:, 0, 1])*2/3
# half body: the joint of ankle and knee equal to hip
# keypoints_h36m[:, [2, 3]] = keypoints_h36m[:, [1, 1]]
# keypoints_h36m[:, [5, 6]] = keypoints_h36m[:, [4, 4]]
valid_frames = np.where(np.sum(keypoints_h36m.reshape(-1, 34), axis=1) != 0)[0]
return keypoints_h36m, valid_frames
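# Usage sketch (hypothetical data, not part of the original project): convert a batch of
# COCO keypoints of shape (T, 17, 2) to the H36M joint layout implied by the index tables above.
_dummy_coco = np.random.rand(8, 17, 2).astype(np.float32)  # 8 frames, 17 COCO joints, (x, y)
_h36m_kpts, _valid_frames = coco_h36m(_dummy_coco)
assert _h36m_kpts.shape == (8, 17, 2)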
def mpii_h36m(keypoints):
temporal = keypoints.shape[0]
keypoints_h36m = np.zeros((temporal, 17, 2), dtype=np.float32)
keypoints_h36m[:, h36m_mpii_order] = keypoints
# keypoints_h36m[:, 7] = np.mean(keypoints[:, 6:8], axis=1, dtype=np.float32)
keypoints_h36m[:, 7] = np.mean(keypoints[:, lr_hip_shouler], axis=1, dtype=np.float32)
api: numpy.mean
---
import numpy as np
from time import time as t
from downscale.utils.utils_func import change_dtype_if_required
from downscale.utils.decorators import print_func_executed_decorator, timer_decorator, change_dtype_if_required_decorator
from downscale.utils.context_managers import timer_context
try:
from numba import jit, prange, float64, float32, int32, int64
_numba = True
except ModuleNotFoundError:
_numba = False
try:
import numexpr as ne
_numexpr = True
except ModuleNotFoundError:
_numexpr = False
class Wind_utils:
def __init__(self):
pass
@staticmethod
def apply_log_profile(z_in=None, z_out=None, wind_in=None, z0=None, library="numexpr",
verbose=True, z_in_verbose=None, z_out_verbose=None):
"""
Apply a logarithmic wind profile to a wind time series.
Parameters
----------
z_in : Input elevation
z_out : Output elevation
wind_in : Input wind
z0 : Roughness length for momentum
verbose: boolean
z_in_verbose: str
z_out_verbose: str
Returns
-------
wind_out: Output wind, after logarithmic profile
"""
print(f"Applied log profile: {z_in_verbose} => {z_out_verbose}") if verbose else None
if _numexpr and library == "numexpr":
return ne.evaluate("(wind_in * log(z_out / z0)) / log(z_in / z0)")
else:
return (wind_in * np.log(z_out / z0)) / np.log(z_in / z0)
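    # Example sketch (hypothetical values): extrapolating a 5 m/s wind measured at 10 m up to 50 m
    # over short grass (z0 ~= 0.03 m) with the numpy branch gives
    #   5 * log(50 / 0.03) / log(10 / 0.03) ~= 6.39 m/s
    #   Wind_utils.apply_log_profile(z_in=10., z_out=50., wind_in=5., z0=0.03, library="numpy")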
def _exposed_wind_speed_xarray(self, xarray_data, z_out=10):
dims = xarray_data.Wind.dims
return xarray_data.assign(exp_Wind=(dims,
self._exposed_wind_speed_num(wind_speed=xarray_data.Wind.values,
z_out=z_out,
z0=xarray_data.Z0.values,
z0_rel=xarray_data.Z0REL.values)))
def _exposed_wind_speed_num(self, wind_speed=None, z_out=None, z0=None, z0_rel=None, library="numexpr"):
if _numexpr and library == "numexpr":
acceleration_factor = ne.evaluate(
"log((z_out) / z0) * (z0 / (z0_rel+z0))**0.0706 / (log((z_out) / (z0_rel+z0)))")
acceleration_factor = ne.evaluate("where(acceleration_factor > 0, acceleration_factor, 1)")
exp_Wind = ne.evaluate("wind_speed * acceleration_factor")
else:
acceleration_factor = np.log(z_out / z0) * (z0 / (z0_rel + z0)) ** 0.0706 / (
np.log(z_out / (z0_rel + z0)))
acceleration_factor = np.where(acceleration_factor > 0, acceleration_factor, 1)
exp_Wind = wind_speed * acceleration_factor
return exp_Wind, acceleration_factor
@print_func_executed_decorator("expose wind speed", level_begin="\n__", level_end="__")
@timer_decorator("expose wind speed", unit="second", level=". . . . ")
def exposed_wind_speed(self, wind_speed=None, z_out=None, z0=None, z0_rel=None,
library="numexpr", xarray_data=None, level_verbose="____", verbose=True):
"""
Expose wind speed (remove the effect of the NWP subgrid roughness parameterization)
Parameters
----------
wind_speed : ndarray
Wind speed from NWP
z_out : ndarray
Elevation at which the exposure correction is performed
z0 : ndarray
Roughness length for momentum
z0_rel : ndarray
Roughness length for momentum associated with mean topography
xarray_data : xarray Dataset, optional
Dataset containing the Wind, Z0 and Z0REL variables; used when library == "xarray" (Default: None)
library: str
verbose: boolean
Returns
-------
exp_Wind: Exposed wind speed
acceleration_factor: Acceleration factor due to the exposure correction (usually >= 1)
"""
if library == "xarray":
return self._exposed_wind_speed_xarray(xarray_data, z_out=z_out)
else:
return self._exposed_wind_speed_num(wind_speed=wind_speed, z_out=z_out, z0=z0,
z0_rel=z0_rel, library=library)
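    # Example sketch (hypothetical values): with z_out=10 m, z0=0.05 m and z0_rel=0.5 m,
    #   acceleration_factor = log(10/0.05) * (0.05/0.55)**0.0706 / log(10/0.55) ~= 1.54,
    # so the exposed wind is roughly 54 % faster than the NWP wind at that point.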
@staticmethod
def _scale_wind_for_ideal_case(wind_speed, wind_dir, input_speed, input_dir, verbose=True):
"""
Specify specific wind speed and direction for ideal cases.
Parameters
----------
wind_speed : array
wind_dir : array
input_speed : constant
input_dir : constant
Returns
-------
wind_speed : array
Constant wind speed.
wind_direction : array
Constant wind direction.
"""
wind_speed = np.full(wind_speed.shape, input_speed)
wind_dir = np.full(wind_dir.shape, input_dir)
print("__Wind speed scaled for ideal cases") if verbose else None
return wind_speed, wind_dir
@print_func_executed_decorator("wind speed ratio (acceleration)", level_begin="\n__", level_end="__")
@timer_decorator("wind speed ratio (acceleration)", unit="second", level=". . . . ")
def wind_speed_ratio(self, num=None, den=None, library="numexpr", verbose=True):
if _numexpr and library == "numexpr":
print("____Library: Numexpr") if verbose else None
a1 = ne.evaluate("where(den > 0, num / den, 1)")
else:
print("____Library: Numpy") if verbose else None
a1 = np.where(den > 0, num / den, 1)
return a1
def _3D_wind_speed(self, U=None, V=None, W=None, out=None, library="numexpr", verbose_level="____", verbose=True):
"""
UVW = np.sqrt(U**2 + V**2 + W**2)
Parameters
----------
U : ndarray
Horizontal wind speed component U
V : ndarray
Horizontal wind speed component V
W : ndarray
Vertical wind speed component W
out : ndarray, optional
If specified, the output of the calculation is written directly into out,
which is best for memory consumption (Default: None)
Returns
-------
UVW : ndarray
Wind speed
"""
if verbose:
t0 = t()
if out is None:
if _numexpr and library == "numexpr":
print("____Library: Numexpr") if verbose else None
wind_speed = ne.evaluate("sqrt(U**2 + V**2 + W**2)")
else:
print("____Library: Numpy") if verbose else None
wind_speed = np.sqrt(U ** 2 + V ** 2 + W ** 2)
if verbose:
print(f"{verbose_level}Wind speed computed")
print(f". . . . Time to calculate wind speed: {np.round(t() - t0)} seconds")
return wind_speed
else:
if _numexpr and library == "numexpr":
ne.evaluate("sqrt(U**2 + V**2 + W**2)", out=out)
else:
np.sqrt(U ** 2 + V ** 2 + W ** 2, out=out)
if verbose:
print(f"{verbose_level}Wind speed computed")
print(f". . . . Time to calculate wind speed: {np.round(t()-t0)}")
def _2D_wind_speed(self, U=None, V=None, out=None, library="numexpr", verbose_level="____", verbose=True):
"""
UV = np.sqrt(U**2 + V**2)
Parameters
----------
U : ndarray
Horizontal wind speed component U
V : ndarray
Horizontal wind speed component V
out : ndarray, optional
If specified, the output of the calculation is written directly into out,
which is best for memory consumption (Default: None)
Returns
-------
UV : ndarray
Wind speed
"""
if verbose:
t0 = t()
if out is None:
if _numexpr and library == "numexpr":
return ne.evaluate("sqrt(U**2 + V**2)")
else:
return np.sqrt(U ** 2 + V ** 2)
else:
if _numexpr and library == "numexpr":
ne.evaluate("sqrt(U**2 + V**2)", out=out)
else:
np.sqrt(U ** 2 + V ** 2, out=out)
if verbose:
print(f"{verbose_level}Wind speed computed")
print(f". . . . Time to calculate wind speed: {np.round(t()-t0)}")
def _compute_wind_speed_num(self, U=None, V=None, W=None, verbose=True):
if W is None:
wind_speed = self._2D_wind_speed(U=U, V=V, verbose=verbose)
else:
wind_speed = self._3D_wind_speed(U=U, V=V, W=W, verbose=verbose)
return wind_speed
@change_dtype_if_required_decorator(np.float32)
def _compute_wind_speed_num_out(self, U=None, V=None, W=None, out=None, verbose=True):
if W is None:
self._2D_wind_speed(U=U, V=V, out=out, verbose=verbose)
else:
self._3D_wind_speed(U=U, V=V, W=W, out=out, verbose=verbose)
def compute_wind_speed_xarray(self, xarray_data, u_name="U", v_name="V", verbose=True):
dims = xarray_data[u_name].dims
return xarray_data.assign(Wind=(dims, self._compute_wind_speed_num(xarray_data[u_name].values,
xarray_data[v_name].values,
verbose=verbose)))
def compute_wind_speed(self, U=None, V=None, W=None,
library='num', out=None,
xarray_data=None, u_name="U", v_name="V",
verbose=True, time_level=". . . . ", name_to_print="compute wind speed", unit="second"):
"""
Calculates wind speed from its components.
First detects the number of wind components, then calculates the wind speed.
The calculation can be performed with numexpr, numpy or on an xarray Dataset.
Parameters
----------
U : ndarray
Horizontal wind speed component U
V : ndarray
Horizontal wind speed component V
W : ndarray, optional
Vertical wind speed component W
out : ndarray, optional
If specified, the output of the calculation is directly written in out,
which is best for memory consumption (Default: None)
library : str, optional
Library to use for the calculation. If 'num', numexpr is used when available, otherwise numpy.
If 'xarray', an xarray Dataset must be provided together with the names of the wind components.
(Default: 'num')
Returns
-------
UV : ndarray
Wind speed
"""
# Numexpr or numpy
with timer_context(name_to_print, level=time_level, unit=unit, verbose=verbose):
if library == 'num':
if out is None:
return self._compute_wind_speed_num(U=U, V=V, W=W, verbose=verbose)
else:
self._compute_wind_speed_num_out(U=U, V=V, W=W, out=out, verbose=verbose)
if library == 'xarray':
return self.compute_wind_speed_xarray(xarray_data, u_name=u_name, v_name=v_name, verbose=verbose)
def compute_speed_and_direction_xarray(self, xarray_data, u_name="U", v_name="V", verbose=True):
assert u_name in xarray_data
assert v_name in xarray_data
xarray_data = self.compute_wind_speed(library="xarray", xarray_data=xarray_data,
u_name=u_name, v_name=v_name, verbose=verbose)
xarray_data = self.direction_from_u_and_v(library="xarray", xarray_data=xarray_data,
u_name=u_name, v_name=v_name, verbose=verbose)
print("____Wind and Wind_DIR calculated on xarray") if verbose else None
return xarray_data
@print_func_executed_decorator("wind speed scaling", level_begin="\n__", level_end="__")
@timer_decorator("wind speed scaling", unit="second", level=". . . . ")
def wind_speed_scaling(self, scaling_wind, prediction, type_scaling="linear", library="numexpr", verbose=True):
"""
Linear: scaling_wind * prediction / 3
Decrease acceleration and deceleration:
Arctan_30_1: 30*np.arctan(scaling_wind/30) * prediction / 3
Decrease acceleration only:
Arctan_30_2: 30*np.arctan((scaling_wind* prediction / 3)/30)
Parameters
----------
scaling_wind : ndarray
Scaling wind (ex: NWP wind)
prediction : ndarray
CNN output
type_scaling : str, optional
Type of scaling, "linear" or one of the "Arctan_*" variants (Default: "linear")
Returns
-------
prediction : ndarray
Scaled wind
"""
scaling_wind = scaling_wind.astype(np.float32)
prediction = prediction.astype(np.float32)
thirty = np.float32(30)
three = np.float32(3)
if type_scaling == "linear":
if _numexpr and library == "numexpr":
print("____Library: Numexpr")
prediction = ne.evaluate("scaling_wind * prediction / 3")
else:
print("____Library: Numpy")
prediction = scaling_wind * prediction / 3
if type_scaling == "Arctan_30_1":
if _numexpr and library == "numexpr":
prediction = ne.evaluate("30*arctan(scaling_wind/30) * prediction / 3")
else:
prediction = 30*np.arctan(scaling_wind/30) * prediction / 3
if type_scaling == "Arctan_30_2":
if _numexpr and library == "numexpr":
print("____Library: Numexpr")
prediction = ne.evaluate("thirty*arctan((scaling_wind * prediction / three)/thirty)")
else:
print("____Library: Numpy")
prediction = 30*np.arctan((scaling_wind * prediction / 3)/30)
if type_scaling == "Arctan_10_2":
if _numexpr and library == "numexpr":
prediction = ne.evaluate("10*arctan((scaling_wind * prediction / 3)/10)")
else:
prediction = 10*np.arctan((scaling_wind * prediction / 3)/10)
if type_scaling == "Arctan_20_2":
if _numexpr and library == "numexpr":
prediction = ne.evaluate("30*arctan((scaling_wind * prediction / 3)/30)")
else:
prediction = 20*np.arctan((scaling_wind * prediction / 3)/20)
if type_scaling == "Arctan_38_2_2":
constant = np.float32(38.2)
scaling_wind = change_dtype_if_required(scaling_wind, np.float32)
prediction = change_dtype_if_required(prediction, np.float32)
if _numexpr and library == "numexpr":
prediction = ne.evaluate("constant*arctan((scaling_wind * prediction / three)/constant)")
else:
prediction = constant*np.arctan((scaling_wind * prediction / three)/constant)
if type_scaling == "Arctan_40_2":
if _numexpr and library == "numexpr":
prediction = ne.evaluate("30*arctan((scaling_wind * prediction / 3)/30)")
else:
prediction = 40*np.arctan((scaling_wind * prediction / 3)/40)
if type_scaling == "Arctan_50_2":
if _numexpr and library == "numexpr":
prediction = ne.evaluate("30*arctan((scaling_wind * prediction / 3)/30)")
else:
prediction = 50*np.arctan((scaling_wind * prediction / 3)/50)
prediction = change_dtype_if_required(prediction, np.float32)
print(f"____Type {type_scaling}") if verbose else None
return prediction
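    # Example sketch (hypothetical values): with scaling_wind=10 and prediction=6,
    #   "linear"      -> 10 * 6 / 3 = 20
    #   "Arctan_30_2" -> 30 * arctan(20 / 30) ~= 17.6
    # i.e. the arctan variants damp large accelerations while staying close to linear for small values.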
@change_dtype_if_required_decorator(np.float32)
@print_func_executed_decorator("computing angular deviation", level_begin="____", level_end="____")
@timer_decorator("computing angular deviation", unit="second", level=". . . . ")
def angular_deviation(self, U, V, unit_output="radian", library="numexpr", verbose=True):
"""
Angular deviation from incoming flow.
The incoming flow is assumed to come from the West, so that V = 0. If the flow is deviated, V != 0
and the angular deviation is np.arctan(V / U).
Parameters
----------
U : ndarray
Horizontal wind speed component U
V : ndarray
Horizontal wind speed component V
Returns
-------
alpha : ndarray
Angular deviation [rad]
"""
if _numexpr and library == "numexpr":
print("____Library: Numexpr") if verbose else None
alpha = ne.evaluate("where(U == 0, where(V == 0, 0, V/abs(V) * 3.14159 / 2), arctan(V / U))")
else:
print("____Library: Numpy") if verbose else None
alpha = np.where(U == 0,
np.where(V == 0, 0, np.sign(V) * np.pi / 2),
np.arctan(V / U))
if unit_output == "degree":
alpha = self._rad_to_deg_num(alpha)
return alpha
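    # Example sketch: U=1, V=1 gives alpha = arctan(1) ~= 0.785 rad (45 degrees);
    # for U=0 the code falls back to sign(V) * pi / 2, e.g. U=0, V=-2 gives -pi/2.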
@change_dtype_if_required_decorator(np.float32)
@print_func_executed_decorator("computing wind direction from angular deviation", level_begin="____", level_end="____")
@timer_decorator("computing wind direction from angular deviation", unit="second", level=". . . . ")
def direction_from_alpha(self, wind_dir, alpha, unit_direction ="degree", unit_alpha="radian", unit_output="radian",
library="numexpr", verbose=True):
"""
wind_dir - alpha
Wind direction modified by angular deviation due to wind/topography interaction.
Warning: this function might return a new wind direction in rotated coordinates.
Parameters
----------
wind_dir : ndarray
Initial NWP wind direction
alpha : ndarray
Angular deviation
unit_direction, unit_alpha, unit_output : str, optional
Units ("degree" or "radian") of the input direction, the angular deviation and the output (Defaults: "degree", "radian", "radian")
Returns
-------
UV_DIR : ndarray
Modified wind direction
"""
if unit_direction == "degree":
wind_dir = self._deg_to_rad_num(wind_dir, library="numexpr")
if unit_alpha == "degree":
alpha = self._deg_to_rad_num(alpha, library="numexpr")
if _numexpr and library == "numexpr":
print("____Library: Numexpr") if verbose else None
UV_DIR = ne.evaluate("wind_dir - alpha")
else:
print("____Library: Numpy") if verbose else None
UV_DIR = wind_dir - alpha
if unit_output == "degree":
UV_DIR = self._rad_to_deg_num(UV_DIR)
return UV_DIR
@change_dtype_if_required_decorator(np.float32)
def _u_zonal_component(self, UV=None, UV_DIR=None, library="numexpr", unit_direction="degree", verbose=True):
assert unit_direction == "radian"
if _numexpr and library == "numexpr":
print(f"____Library: {library}") if verbose else None
return ne.evaluate("-sin(UV_DIR) * UV")
else:
print("____Library: numpy") if verbose else None
return -np.sin(UV_DIR) * UV
@change_dtype_if_required_decorator(np.float32)
def _v_meridional_component(self, UV=None, UV_DIR=None, library="numexpr", unit_direction="degree", verbose=True):
assert unit_direction == "radian"
if _numexpr and library == "numexpr":
print(f"____Library: {library}") if verbose else None
return ne.evaluate("-cos(UV_DIR) * UV")
else:
print("____Library: numpy") if verbose else None
return -np.cos(UV_DIR) * UV
@print_func_executed_decorator("degree to radians", level_begin="________", level_end="________")
@timer_decorator("converting degrees to radians", unit="second", level=". . . . ")
@change_dtype_if_required_decorator(np.float32)
def _deg_to_rad_num(self, UV_DIR, library="numexpr"):
if _numexpr and library == "numexpr":
return ne.evaluate("3.14159 * UV_DIR/180")
else:
return np.deg2rad(UV_DIR)
@print_func_executed_decorator("radians to degree", level_begin="________", level_end="________")
@timer_decorator("converting radians to degrees", unit="second", level=". . . . ")
@change_dtype_if_required_decorator(np.float32)
def _rad_to_deg_num(self, UV_DIR, library="numexpr"):
if _numexpr and library == "numexpr":
return ne.evaluate("180 * UV_DIR/3.14159")
else:
return np.rad2deg(UV_DIR)
def _horizontal_wind_component_num(self, UV, UV_DIR, library="numexpr", unit_direction="degree", verbose=True):
if unit_direction == "degree":
UV_DIR = self._deg_to_rad_num(UV_DIR, library=library)
unit_direction = "radian"
U = self._u_zonal_component(UV=UV, UV_DIR=UV_DIR, library=library, unit_direction=unit_direction,
verbose=verbose)
V = self._v_meridional_component(UV=UV, UV_DIR=UV_DIR, library=library, unit_direction=unit_direction,
verbose=verbose)
return U, V
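    # Example sketch: UV=10 m/s and UV_DIR=270 degrees (wind blowing from the West) give
    #   U = -sin(270 deg) * 10 = +10 and V = -cos(270 deg) * 10 = 0,
    # i.e. a purely eastward (positive zonal) wind, consistent with the meteorological convention.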
def _horizontal_wind_component_xarray(self, xarray_data, wind_name="UV", wind_dir_name="UV_DIR",
unit_direction="degree", verbose=True):
if unit_direction == "degree":
xarray_data = xarray_data.assign(theta=lambda x: (np.pi / 180) * (x[wind_dir_name] % 360))
xarray_data = xarray_data.assign(U=lambda x: -x[wind_name] * np.sin(x["theta"]))
xarray_data = xarray_data.assign(V=lambda x: -x[wind_name] * np.cos(x["theta"]))
return xarray_data
@print_func_executed_decorator("computing horizontal wind components", level_begin="____", level_end="____")
@timer_decorator("computing horizontal wind components", unit="second", level=". . . . ")
def horizontal_wind_component(self, UV=None, UV_DIR=None,
library='numexpr', xarray_data=None, wind_name="Wind",
wind_dir_name="Wind_DIR", unit_direction="radian", verbose=True):
"""
U = -np.sin(UV_DIR) * UV
V = -np.cos(UV_DIR) * UV
Computes U and V component from wind direction and wind speed
Parameters
----------
UV : ndarray
Wind speed
UV_DIR : ndarray
Wind direction
library : str, optional
If "xarray", computes U and V on an xarray Dataset (provided with the names of the variables)
and returns the Dataset (Default: 'numexpr')
Returns
-------
U : ndarray
Horizontal wind speed component U
V : ndarray
Horizontal wind speed component V
References
----------
Liston, <NAME>., & <NAME>. (2006). A meteorological distribution system for high-resolution terrestrial modeling (MicroMet). Journal of Hydrometeorology, 7(2), 217-234.
"""
if library == "xarray":
print("____Library: xarray") if verbose else None
return self._horizontal_wind_component_xarray(xarray_data,
wind_name=wind_name,
wind_dir_name=wind_dir_name,
unit_direction=unit_direction,
verbose=False)
else:
return self._horizontal_wind_component_num(UV, UV_DIR, library=library,
unit_direction=unit_direction,
verbose=verbose)
@change_dtype_if_required_decorator(np.float32)
def _direction_from_u_and_v_num(self, U, V, library="numexpr", verbose=True):
if _numexpr and library == "numexpr":
print("____Library: Numexpr") if verbose else None
return ne.evaluate("(180 + (180/3.14159)*arctan2(U,V)) % 360")
else:
print("____Library: Numpy") if verbose else None
return np.mod(180 + np.rad2deg(np.arctan2(U, V)), 360)
api: numpy.arctan2
---
# coding: utf-8
# In[1]:
import numpy as np
import matplotlib.pyplot as plt
import numpy.fft as fft
import scipy.signal as sig
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import cm
from scipy import interpolate
import csv
import datetime
#design output
#v=0 critical current v stuff
#time series for quiet squid
#time series for d
# In[2]:
import time, sys
from IPython.display import clear_output
def update_progress(progress):
bar_length = 20
if isinstance(progress, int):
progress = float(progress)
if not isinstance(progress, float):
progress = 0
if progress < 0:
progress = 0
if progress >= 1:
progress = 1
block = int(round(bar_length * progress))
clear_output(wait = True)
text = "Progress: [{0}] {1:.1f}%".format( "#" * block + "-" * (bar_length - block), progress * 100)
print(text)
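# Example sketch: update_progress(0.37) clears the cell output and prints
#   Progress: [#######-------------] 37.0%
# (7 filled blocks out of a 20-character bar).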
# In[3]:
def noisyRK4(s,th,tau,derivsRK,par,vn10,vn20,vn11,vn21,vn12,vn22):
"""RK4 integrator modified to use noise
DEPENDENCIES
derivsRK - RHS of ODE, fn defined somewhere
INPUTS
s - state vector
th - time, theta
tau - time step size
derivsRK - RHS of ODE, fn defined somewhere
par - array
[alpha,betaL,eta,rho,i,phia,Gamma,betaC,kappa]
OUTPUTS
sout - new state vector new time
[delta_1,delta_2,ddelta_1,ddelta_2,d^2delta_1,d^2delta_2]"""
# parse out parameter array
alpha = par[0]; beta = par[1]; eta = par[2]
rho = par[3]; i = par[4]; phia = par[5]; Gamma=par[6]
betaC=par[7]; kappa=par[8]
# noisySQUIDrk(s,th,alpha,beta,eta,rho,i,phia,vn1,vn2)
half_tau = 0.5*tau
F1 = derivsRK(s,th,par,vn10,vn20) # use current voltage noise
th_half = th + half_tau
stemp = s + half_tau*F1
F2 = derivsRK(stemp,th_half,par,vn11,vn21) # use half-tau step voltage noise
stemp = s + half_tau*F2
F3 = derivsRK(stemp,th_half,par,vn11,vn21) # use half-tau step voltage noise
th_full = th + tau
stemp = s + tau*F3
F4 = derivsRK(stemp,th_full,par,vn12,vn22) # use full-tau step voltage noise
sout = s + tau/6.*(F1 + F4 + 2.*(F2 + F3))
return sout
# In[4]:
def noisySQUIDrk(s,th,par,vn1,vn2):
"""Returns RHS of ODE
DEPENDENCIES
numpy as np
INPUTS
s - state vector [del1(theta), del2(theta)]
th - time, theta
par - array
[alpha,betaL,eta,rho,i,phia,Gamma,betaC,kappa]
alpha - critical current symmetry parameter (0 to 1)
beta - inductance constant
eta - inductance symmetry parameter (0 to 1)
rho - resistance symmetry parameter (0 to 1)
i - dimensionless bias current
phia - dimensionless applied flux
Gamma - Johnson noise parameter
betaC - capacitance constant
kappa - capacitance symmetry parameter
nv1,nv2 - noise values at each junction
OUTPUTS
deriv - array
[ddel1/dth, ddel2/dth, d^2del1/dth^2, d^2del2/dth^2]"""
# parse out parameter array
alpha = par[0]; betaL = par[1]; eta = par[2]
rho = par[3]; i = par[4]; phia = par[5]; Gamma=par[6]
betaC=par[7]; kappa=par[8]
#del1 = s[0] # del_1(theta)
#del2 = s[1] # del_2(theta)
j = (s[0] - s[1] - 2*np.pi*phia)/(np.pi*betaL) - eta*i/2
dddel1 = (.5*i-j-(1-alpha)*np.sin(s[0])-(1-rho)*s[2])/((1-kappa)*betaC)
dddel2 = (.5*i+j-(1+alpha)*np.sin(s[1])-(1+rho)*s[3])/((1+kappa)*betaC)
ddel1 = (.5*i-j-(1-alpha)*np.sin(s[0])-(1-kappa)*betaC*dddel1)/(1-rho) + vn1 # ddel1/dth
ddel2 = (.5*i+j-(1+alpha)*np.sin(s[1])-(1+kappa)*betaC*dddel2)/(1+rho) + vn2 # ddel2/dth
deriv = np.array([ddel1,ddel2,dddel1,dddel2])
return(deriv)
# In[5]:
def noisySQUID(nStep,tau,s,par):
"""Handles RK4 solver, returns time series sim of SQUID
DEPENDENCIES
noisySQUIDrk - modified RK4 solver
numpy as np
INPUTS
nStep - number of steps
tau - time step size
s - initial state vector
par - array
[alpha,betaL,eta,rho,i,phia,Gamma,betaC,kappa]
alpha - critical current symmetry parameter (0 to 1)
beta - inductance constant
eta - inductance symmetry parameter (0 to 1)
rho - resistance symmetry parameter (0 to 1)
i - dimensionless bias current
phia - dimensionless applied flux
Gamma - Johnson noise parameter
betaC - capacitance constant
kappa - capacitance symmetry parameter
OUTPUTS
S - time series state vector
[theta,delta_1,delta_2,j,ddel1/dth,ddel2/dth,v]"""
#parse out the parameter vector
alpha=par[0]; betaL=par[1]; eta=par[2]; rho=par[3]
i=par[4]; phia=par[5]; Gamma=par[6]; betaC=par[7]
kappa=par[8]
# change state vector s to include all the derivs
# little s denotes a 1-d vector of, current values
# big S denotes the output array of all s, a 2-d array in time
## NOISE ##
# set an appropriate variance based on Gamma.
# variance is twice normal because freq of noise
# is twice that of the sampling freq so that rk4 has
# a noise value to use at each half tau step
var = 4*Gamma/tau
sd = var**.5
# make two time series of noise voltages
# lowercase designators are current values, uppercase are arrays in time
VN1 = np.zeros(2*nStep+1)
VN2 = np.zeros(2*nStep+1)
for ist in range(2*nStep+1):
VN1[ist] = np.random.normal(0,sd)
VN2[ist] = np.random.normal(0,sd)
# DATA STRUCTURE
# S = [theta,del1,del2,ddel1,ddel2,dddel1,dddel2,j,v]
S = np.zeros([8,nStep],float)
# set initial conditions
theta = 0.
S[0,0] = theta
S[1,0] = s[0] # del1
S[2,0] = s[1] # del2
j = (s[0] - s[1] - 2*np.pi*phia)/(np.pi*betaL) - eta*i/2
S[3,0] = (.5*i-j-(1-alpha)*np.sin(s[0])-(1-kappa)*betaC*s[4])/(1-rho) # ddel1
S[4,0] = (.5*i+j-(1+alpha)*np.sin(s[1])-(1+kappa)*betaC*s[5])/(1+rho) # ddel2
S[5,0] = (.5*i-j-(1-alpha)*np.sin(s[0])-(1-rho)*s[2])/((1-kappa)*betaC) # dddel1
S[6,0] = (.5*i+j-(1+alpha)*np.sin(s[1])-(1+rho)*s[3])/((1+kappa)*betaC) # dddel2
s = np.copy(S[1:5,0])
for iStep in range(1,nStep):
vn10 = VN1[2*iStep-2]
vn20 = VN2[2*iStep-2]
vn11 = VN1[2*iStep-1]
vn21 = VN2[2*iStep-1]
vn12 = VN1[2*iStep]
vn22 = VN2[2*iStep]
# noisyRK4(s,th,alpha,beta,eta,rho,i,phia,tau,derivsRK,vn10,vn20,vn11,vn21,vn12,vn22)
s = noisyRK4(s,theta,tau,noisySQUIDrk,par,vn10,vn20,vn11,vn21,vn12,vn22)
S[0,iStep] = theta # time theta
S[1,iStep] = s[0] # del1
S[2,iStep] = s[1] # del2
S[3,iStep] = s[2] # ddel1
S[4,iStep] = s[3] # ddel2
#S[5,iStep] = # dddel1
#S[6,iStep] = # dddel2
theta = theta + tau
# S[5,:] =
# S[6,:] =
S[6] = S[3]*(1+eta)/2 + S[4]*(1-eta)/2
return(S)
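# Usage sketch (hypothetical parameter values, not from the original notebook):
#   par = [0., 1., 0., 0., 2.0, 0.25, 0.05, 0.2, 0.]   # [alpha,betaL,eta,rho,i,phia,Gamma,betaC,kappa]
#   s0 = [0., 0., 0., 0., 0., 0.]                      # six entries: noisySQUID reads s[4] and s[5]
#   S = noisySQUID(20000, 0.01, s0, par)
#   v_avg = S[6, 2000:].mean()                         # average voltage after the transient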
# In[9]:
def vj_timeseries(nStep,tau,s,par):
"""Returns time series simulation of squid, figure and csv
DEPENDENCIES
noisySQUID()
numpy as np
matplotlib.pyplot as plt
INPUTS
nStep - number of steps to run in time series
tau - step size for time series
s - initial state vector [delta_1[theta=0],delta_2[theta=0]]
par - parameter vector
[alpha,betaL,eta,rho,i,phia,Gamma,betaC,kappa]
OUTPUTS
figure - plots of
voltage time series w average
circulating current time series w average
output to screen
png 'timeseriesdatetime.png' saved to parent directory
csv - time series csv file containing
theta,delta_1,delta_2,j,ddel1/dth,ddel2/dth,v
csv 'timeseriesdatetime.csv' saved to parent directory
"""
# run sim
S = noisySQUID(nStep,tau,s,par)
# chop off first 10% of time series to remove any transient
md = int(.1*len(S[0,:]))
# build figure title with parameters used
ti = r'$\alpha$=%s, $\beta_L$=%s, $\eta$=%s'% (str(round(par[0],3)),str(round(par[1],3)),str(round(par[2],3)))+'\n'+ r'$\rho$=%s, $i$=%s, $\phi_a$=%s' % (str(round(par[3],3)),str(round(par[4],3)),str(round(par[5],3)))+'\n'+ r'$\Gamma$=%s, $\beta_C$=%s, $\kappa$=%s' %(str(round(par[6],3)),str(round(par[7],3)),str(round(par[8],3)))
# plot
fig, ax = plt.subplots(2,1,figsize=(3,7))
fig.suptitle(ti)
ax1 = plt.subplot(2,1,1)
ax1.plot(S[0,md:],S[6,md:])
ax1.hlines((sum(S[6,md:])/len(S[6,md:])),S[0,md],S[0,-1],linestyle='dotted')
ax1.set(ylabel="Voltage, v",
xticklabels=([]))
ax2 = plt.subplot(2,1,2)
ax2.plot(S[0,md:],S[3,md:])
ax2.hlines((sum(S[3,md:])/len(S[3,md:])),S[0,md],S[0,-1],linestyle='dotted')
ax2.set(ylabel="Circ Current, j",
xlabel=r"Time,$\theta$")
# create output file metadata
meta1 = ['# alpha=%s'%par[0],'betaL=%s'%par[1],'eta=%s'%par[2],'rho=%s'%par[3],'i=%s'%par[4],'phia=%s'%par[5],'Gamma=%s'%par[6],'betaC=%s'%par[7],'kappa=%s'%par[8]]
meta2 = ['# nStep=%s'%nStep,'tau=%s'%tau]
header = ['theta','delta_1','delta_2','j','ddel1/dth','ddel2/dth','v']
csvtime = datetime.datetime.now()
timestr = [datetime.datetime.strftime(csvtime, '# %Y/%m/%d, %H:%M:%S')]
timeti = str(datetime.datetime.strftime(csvtime, '%Y%m%d%H%M%S'))
csvtitle='timeseries'+timeti+'.csv'
pngtitle='timeseries'+timeti+'.png'
Sf = np.matrix.transpose(S)
# create, write, output(close) csv file
with open(csvtitle, 'w') as csvFile:
filewr = csv.writer(csvFile,delimiter=',')
filewr.writerow(timestr)
filewr.writerow(meta1)
filewr.writerow(meta2)
filewr.writerow(header)
filewr.writerows(Sf)
csvFile.close()
# save figure
fig.savefig(pngtitle)
print('csv file written out:', csvtitle)
print('png file written out:', pngtitle)
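# Usage sketch (hypothetical values): write one noisy time series to csv/png.
#   par = [0., 1., 0., 0., 2.0, 0.25, 0.05, 0.2, 0.]
#   vj_timeseries(50000, 0.01, [0., 0., 0., 0., 0., 0.], par)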
# In[11]:
def iv_curve(nStep,tau,s,par,alpha=0,betaL=0,eta=0,rho=0,phia=0,Gamma=0,betaC=0,kappa=0):
"""Returns contour plot and data file for IV curves
DEPENDENCIES
noisySQUID()
update_progress()
numpy as np
matplotlib.pyplot as plt
INPUTS
nStep - number of steps to run in time series
tau - step size for time series
s - initial state vector [delta_1[theta=0],delta_2[theta=0]]
par - parameter vector
[alpha, beta_L, eta, rho, i, phia]
input parameter LIST - alpha, beta, eta, rho, phia
multiple values of input parameter as list
draws contour for each
if given, overwrites value in par
if not given, value from par is used for one contour
ONLY SUPPLY maximum of one input list here
OUTPUTS
plot - IV contours at levels given in input param array
output to screen
png 'IVdatetime.png' saved to parent directory
csv - IV contours at levels given
csv 'IVdatetime.csv' saved to parent directory
"""
# create currents to sweep
i = np.arange(0.,6.,.1)
ch = 0 # check for only one parameter sweeped.
k = 1 # set 0 axis dim to 1 at min
md = int(0.1*nStep) # cut off the first 10 percent of points in the time series
# check if an array was given for an input parameter
# k - length of input parameter array (number of contours)
# parj - build a list of parameters to pass at each array value of that parameter
# la, lc - plot label and csv header lable
# lb - rename parameter array to add in plot and header later
# ti - plot title
# meta1 - csv metadata
# ch - check value, check for only one input parameter array, or none for one contour
if alpha != 0:
alpha = np.array(alpha)
k = len(alpha)
parj = np.zeros([k,9])
la = r'$\alpha$'; lc = 'alpha'
lb = np.copy(alpha)
ti = r'$\beta_L$=%s, $\eta$=%s, $\rho$=%s, $\phi_a$=%s' % (str(round(par[1],3)),str(round(par[2],3)),str(round(par[3],3)),str(round(par[5],3)))+'\n'+ r'$\Gamma$=%s, $\beta_C$=%s, $\kappa$=%s' %(str(round(par[6],3)),str(round(par[7],3)),str(round(par[8],3)))
meta1 = ['betaL=%s'%par[1],'eta=%s'%par[2],'rho=%s'%par[3],'i=%s'%par[4],'phia=%s'%par[5],'Gamma=%s'%par[6],'betaC=%s'%par[7],'kappa=%s'%par[8]]
# add input array values to iteration parameters as appropriate
for j in range(k):
parj[j,:] = np.array([alpha[j],par[1],par[2],par[3],0.,par[5],par[6],par[7],par[8]])
ch = ch + 1
if betaL != 0:
betaL = np.array(betaL)
k = len(betaL)
parj = np.zeros([k,9])
la = r'$\beta_L$'; lc = 'betaL'
lb = np.copy(betaL)
ti = r'$\alpha$=%s, $\eta$=%s, $\rho$=%s, $\phi_a$=%s' % (str(round(par[0],3)),str(round(par[2],3)),str(round(par[3],3)),str(round(par[5],3)))+'\n'+ r'$\Gamma$=%s, $\beta_C$=%s, $\kappa$=%s' %(str(round(par[6],3)),str(round(par[7],3)),str(round(par[8],3)))
meta1 = ['# alpha=%s'%par[0],'eta=%s'%par[2],'rho=%s'%par[3],'i=%s'%par[4],'phia=%s'%par[5],'Gamma=%s'%par[6],'betaC=%s'%par[7],'kappa=%s'%par[8]]
for j in range(k):
parj[j,:] = np.array([par[0],betaL[j],par[2],par[3],0.,par[5],par[6],par[7],par[8]])
ch = ch + 1
if eta != 0:
eta = np.array(eta)
k = len(eta)
parj = np.zeros([k,9])
la = r'$\eta$'; lc = 'eta'
lb = np.copy(eta)
ti = r'$\alpha$=%s, $\beta_L$=%s, $\rho$=%s, $\phi_a$=%s' % (str(round(par[0],3)),str(round(par[1],3)),str(round(par[3],3)),str(round(par[5],3)))+'\n'+ r'$\Gamma$=%s, $\beta_C$=%s, $\kappa$=%s' %(str(round(par[6],3)),str(round(par[7],3)),str(round(par[8],3)))
meta1 = ['# alpha=%s'%par[0],'betaL=%s'%par[2],'rho=%s'%par[3],'i=%s'%par[4],'phia=%s'%par[5],'Gamma=%s'%par[6],'betaC=%s'%par[7],'kappa=%s'%par[8]]
for j in range(k):
parj[j,:] = np.array([par[0],par[1],eta[j],par[3],0.,par[5],par[6],par[7],par[8]])
ch = ch + 1
if rho != 0:
rho = np.array(rho)
k = len(rho)
parj = np.zeros([k,9])
la = r'$\rho$'; lc = 'rho'
lb = np.copy(rho)
ti = r'$\alpha$=%s, $\beta_L$=%s, $\eta$=%s, $\phi_a$=%s' % (str(round(par[0],3)),str(round(par[1],3)),str(round(par[2],3)),str(round(par[5],3)))+'\n'+ r'$\Gamma$=%s, $\beta_C$=%s, $\kappa$=%s' %(str(round(par[6],3)),str(round(par[7],3)),str(round(par[8],3)))
meta1 = ['# alpha=%s'%par[0],'betaL=%s'%par[1],'eta=%s'%par[2],'i=%s'%par[4],'phia=%s'%par[5],'Gamma=%s'%par[6],'betaC=%s'%par[7],'kappa=%s'%par[8]]
for j in range(k):
parj[j,:] = np.array([par[0],par[1],par[2],rho[j],0.,par[5],par[6],par[7],par[8]])
ch = ch + 1
if phia != 0:
phia = np.array(phia)
k = len(phia)
parj = np.zeros([k,9])
la = r'$\phi_a$'; lc = 'phi_a'
lb = np.copy(phia)
ti = r'$\alpha$=%s, $\beta_L$=%s, $\eta$=%s, $\rho$=%s' % (str(round(par[0],3)),str(round(par[1],3)),str(round(par[2],3)),str(round(par[3],3)))+'\n'+ r'$\Gamma$=%s, $\beta_C$=%s, $\kappa$=%s' %(str(round(par[6],3)),str(round(par[7],3)),str(round(par[8],3)))
meta1 = ['# alpha=%s'%par[0],'betaL=%s'%par[1],'eta=%s'%par[2],'rho=%s'%par[3],'i=%s'%par[4],'Gamma=%s'%par[6],'betaC=%s'%par[7],'kappa=%s'%par[8]]
for j in range(k):
parj[j,:] = np.array([par[0],par[1],par[2],par[3],0.,phia[j],par[6],par[7],par[8]])
ch = ch + 1
if Gamma != 0:
Gamma = np.array(Gamma)
k = len(Gamma)
parj = np.zeros([k,9])
la = r'$\Gamma$'; lc = 'Gamma'
lb = np.copy(Gamma)
ti = r'$\alpha$=%s, $\beta_L$=%s, $\eta$=%s, $\rho$=%s, $\phi_a$=%s, $\beta_C$=%s, $\kappa$=%s' % (par[0],par[1],par[2],par[3],par[5],par[7],par[8])
meta1 = ['# alpha=%s'%par[0],'betaL=%s'%par[1],'eta=%s'%par[2],'rho=%s'%par[3],'i=%s'%par[4],'phia=%s'%par[5],'betaC=%s'%par[7],'kappa=%s'%par[8]]
for j in range(k):
parj[j,:] = np.array([par[0],par[1],par[2],par[3],0.,par[5],Gamma[j],par[7],par[8]])
ch = ch + 1
if betaC != 0:
betaC = np.array(betaC)
k = len(betaC)
parj = np.zeros([k,9])
la = r'$\beta_C$'; lc = 'betaC'
lb = np.copy(betaC)
ti = r'$\alpha$=%s, $\beta_L$=%s, $\eta$=%s, $\rho$=%s' % (str(round(par[0],3)),str(round(par[1],3)),str(round(par[2],3)),str(round(par[3],3)))+'\n'+ r'$\phi_a$=%s, $\Gamma$=%s, $\kappa$=%s' %(str(round(par[5],3)),str(round(par[6],3)),str(round(par[8],3)))
meta1 = ['# alpha=%s'%par[0],'betaL=%s'%par[1],'eta=%s'%par[2],'rho=%s'%par[3],'i=%s'%par[4],'phia=%s'%par[5],'Gamma=%s'%par[6],'kappa=%s'%par[8]]
for j in range(k):
parj[j,:] = np.array([par[0],par[1],par[2],par[3],0.,par[5],par[6],betaC[j],par[8]])
ch = ch + 1
if kappa != 0:
kappa = np.array(kappa)
k = len(kappa)
parj = np.zeros([k,9])
la = r'$\kappa$'; lc = 'kappa'
lb = np.copy(kappa)
ti = r'$\alpha$=%s, $\beta_L$=%s, $\eta$=%s, $\rho$=%s' % (str(round(par[0],3)),str(round(par[1],3)),str(round(par[2],3)),str(round(par[3],3)))+'\n'+ r'$\phi_a$=%s, $\Gamma$=%s, $\beta_C$=%s' %(str(round(par[5],3)),str(round(par[6],3)),str(round(par[7],3)))
meta1 = ['# alpha=%s'%par[0],'betaL=%s'%par[1],'eta=%s'%par[2],'rho=%s'%par[3],'phia=%s'%par[5],'Gamma=%s'%par[6],'betaC=%s'%par[7]]
for j in range(k):
parj[j,:] = np.array([par[0],par[1],par[2],par[3],0.,par[5],par[6],par[7],kappa[j]])
ch = ch + 1
# if check value is more than one, too many input parameter arrays given
if ch > 1:
return('Please supply at most one parameter to sweep')
# if check value zero, assume plotting only one contour
if ch == 0:
parj = np.zeros([2,9])
parj[0,:] = par
parj[1,:] = par
ti = r'$\alpha$=%s, $\beta_L$=%s, $\eta$=%s, $\rho$=%s' % (str(round(par[0],3)),str(round(par[1],3)),str(round(par[2],3)),str(round(par[3],3)))+ '\n' + r'$\phi_a$=%s, $\Gamma$=%s, $\beta_C$=%s, $\kappa$=%s' % (str(round(par[5],3)),str(round(par[6],3)),str(round(par[7],3)),str(round(par[8],3)))
meta1 = ['# alpha=%s'%par[0],'betaL=%s'%par[1],'eta=%s'%par[2],'rho=%s'%par[3],'phia=%s'%par[5],'Gamma=%s'%par[6],'betaC=%s'%par[7],'kappa=%s'%par[8]]
# build sim output array of appropriate size
# needs as many rows as contours determined by input parameter array
if k > 1:
V = np.zeros([k,len(i)])
else:
V = np.zeros([2,len(i)])
# cp - check progress, total outputs in V
cp = k*len(i)
# loop over k rows and len(i) colums of V
# fill V with average voltage from time series for given params
# parjj - parameter array for this time series
# S - state array output from sim
for j in range(k):
parjj = parj[j,:]
for m in range(len(i)):
parjj[4] = i[m]
S = noisySQUID(nStep,tau,s,parjj)
V[j,m] = sum(S[6,md:])/len(S[6,md:])
# new progress bar current iter/total iters
update_progress((m + j*len(i))/cp)
# fill out progress bar
update_progress(1)
# build output for csv
# join i values and average Voltage matrix
Sf = np.concatenate((np.matrix(i),V),axis=0)
# flip independent axis, i, from horizontal to vertical
Sf = np.matrix.transpose(Sf)
# convert from matrix to array to ease csv output
Sf = np.array(Sf)
# make a figure
# header - csv header info, param input value for contour
fig,ax = plt.subplots()
# one contour, or
if k == 1:
ax.plot(V[0],i)
header = ['i','V']
# k contours
else:
header = ['i']*(k+1)
for j in range(k):
ax.plot(V[j],i,label= la + '=%s' % str(round(lb[j],3)))
header[j+1] = lc + '=%s' % str(round(lb[j],3))
# ic = 0 line for comparison
ax.plot(np.arange(0,2.6,.1),np.arange(0,5.2,.2),'--',
label=r"$i_c=0$")
ax.set(title=ti,
xlabel=r"Average voltage, $\bar{v}$",
ylabel="Bias current, i",
xlim=[0,2.5],ylim=[0,6.])
ax.legend()
fig.tight_layout()
# build rest of metadata needed for csv
meta2 = ['# nStep=%s'%nStep,'tau=%s'%tau]
csvtime = datetime.datetime.now()
timestr = [datetime.datetime.strftime(csvtime, '# %Y/%m/%d, %H:%M:%S')]
timeti = str(datetime.datetime.strftime(csvtime, '%Y%m%d%H%M%S'))
csvtitle='IV'+timeti+'.csv'
pngtitle='IV'+timeti+'.png'
# create, write, and save(close) csv
with open(csvtitle, 'w') as csvFile:
filewr = csv.writer(csvFile,delimiter=',')
filewr.writerow(timestr)
filewr.writerow(meta1)
filewr.writerow(meta2)
filewr.writerow(header)
filewr.writerows(Sf)
csvFile.close()
# save figure
fig.savefig(pngtitle)
print('csv file written out:', csvtitle)
print('png file written out:', pngtitle)
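# Usage sketch (hypothetical values): one IV contour per applied flux value.
#   par = [0., 1., 0., 0., 0., 0., 0.05, 0.2, 0.]
#   iv_curve(20000, 0.01, [0., 0., 0., 0., 0., 0.], par, phia=[0.0, 0.25, 0.5])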
# In[8]:
def vphi_curve(nStep,tau,s,par,alpha=0,betaL=0,eta=0,rho=0,i=0,Gamma=0,betaC=0,kappa=0):
"""Returns contour plot and data file for IV curves
DEPENDENCIES
noisySQUID()
update_progress()
numpy as np
matplotlib.pyplot as plt
INPUTS
nStep - number of steps to run in time series
tau - step size for time series
s - initial state vector [delta_1[theta=0],delta_2[theta=0]]
par - parameter vector
[alpha, beta_L, eta, rho, i, phia]
input parameter LIST - alpha, beta, eta, rho, phia
multiple values of input parameter as list
draws contour for each
if given, overwrites value in par
if not given, value from par is used for one contour
ONLY SUPPLY maximum of one input list here
OUTPUTS
plot - IV contours at levels given in input param array
output to screen
png 'IVdatetime.png' saved to parent directory
csv - IV contours at levels given
csv 'IVdatetime.csv' saved to parent directory
"""
# create applied flux values to sweep
phia = np.arange(0.,1.05,.05)
api: numpy.arange
---
import operator
import numpy as np
from knapsack.hyper.single import heurs1knpsck as single
cached_heuristics = None
def get_heuristics():
global cached_heuristics
if cached_heuristics is not None:
return cached_heuristics
single_heuristics = single.get_all_single_heuristics()
result = []
ksp_choice_functions = [least_weight_overall, most_weight_overall, least_cost_overall, most_cost_overall,
most_efficiency_overall, least_efficiency_overall, most_capacity, least_capacity,
most_efficiency_potentially, least_efficiency_potentially]
for single_heuristic in single_heuristics:
for ksp_choice_function in ksp_choice_functions:
def single_knapsack_with_extreme_property(current, tabooed_indexes,
ksp_choice_function=ksp_choice_function,
my_single_heuristic=single_heuristic[0], **kwargs):
current = list(current)
indexed_properties = ksp_choice_function(current, **kwargs)
modified_index = -1
while modified_index == -1 and len(indexed_properties) > 0:
ksp_index = indexed_properties.pop()[0]
single_ksp_kwargs = {"costs": kwargs["costs"], "weights": kwargs["weights"][ksp_index],
"size": kwargs["sizes"][ksp_index]}
# TODO should multiple include constraint be here?
multi_include_constraint = build_multi_include_constraint(current, ksp_index)
tabooed_indexes = list(set(tabooed_indexes).union(set(multi_include_constraint)))
new_included, modified_index = my_single_heuristic(current[ksp_index], tabooed_indexes,
**single_ksp_kwargs)
current[ksp_index] = new_included
return current, modified_index
result.append((single_knapsack_with_extreme_property, single_heuristic[1]))
result = normalize_probabilities(result)
cached_heuristics = result
return result
def normalize_probabilities(result):
sum_probabilities = np.sum([item[1] for item in result])
result = list(map(lambda x: (x[0], x[1] / sum_probabilities), result))
return result
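# Example sketch (hypothetical heuristic weights): raw weights 2 and 3 are rescaled so that
# the selection probabilities sum to one (0.4 and 0.6).
_example_probs = normalize_probabilities([("h1", 2.0), ("h2", 3.0)])
assert np.isclose(sum(p for _, p in _example_probs), 1.0)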
def least_weight_overall(included, **kwargs):
weights = np.asarray(kwargs["weights"])
indexed_properties = np.sum(np.asarray(included) * weights, axis=1)
indexed_properties = enumerate(indexed_properties)
indexed_properties = list(sorted(indexed_properties, key=operator.itemgetter(1), reverse=True))
return indexed_properties
def most_weight_overall(included, **kwargs):
weights = np.asarray(kwargs["weights"])
indexed_properties = np.sum(np.asarray(included) * weights, axis=1)
indexed_properties = enumerate(indexed_properties)
indexed_properties = list(sorted(indexed_properties, key=operator.itemgetter(1)))
return indexed_properties
def least_cost_overall(included, **kwargs):
costs = np.asarray(kwargs["costs"])
indexed_properties = np.sum(np.asarray(included) * costs, axis=1)
indexed_properties = enumerate(indexed_properties)
indexed_properties = list(sorted(indexed_properties, key=operator.itemgetter(1), reverse=True))
return indexed_properties
def most_cost_overall(included, **kwargs):
costs = np.asarray(kwargs["costs"])
indexed_properties = np.sum(np.asarray(included) * costs, axis=1)
indexed_properties = enumerate(indexed_properties)
indexed_properties = list(sorted(indexed_properties, key=operator.itemgetter(1)))
return indexed_properties
def most_efficiency_overall(included, **kwargs):
included = np.asarray(included)
costs = np.asarray(kwargs["costs"])
weights = np.asarray(kwargs["weights"])
indexed_properties = np.asarray([costs / weight for weight in weights])
indexed_properties = np.sum(included * indexed_properties, axis=1)
indexed_properties = enumerate(indexed_properties)
indexed_properties = list(sorted(indexed_properties, key=operator.itemgetter(1)))
return indexed_properties
def least_efficiency_overall(included, **kwargs):
included = np.asarray(included)
costs = np.asarray(kwargs["costs"])
weights = np.asarray(kwargs["weights"])
indexed_properties = np.asarray([costs / weight for weight in weights])
indexed_properties = np.sum(included * indexed_properties, axis=1)
indexed_properties = enumerate(indexed_properties)
indexed_properties = list(sorted(indexed_properties, key=operator.itemgetter(1), reverse=True))
return indexed_properties
def most_capacity(included, **kwargs):
weights = np.asarray(kwargs["weights"])
sizes = np.asarray(kwargs["sizes"])
indexed_properties = [weight / size for weight, size in zip(weights, sizes)]
indexed_properties = np.sum(indexed_properties, axis=1)
indexed_properties = enumerate(indexed_properties)
indexed_properties = list(sorted(indexed_properties, key=operator.itemgetter(1)))
return indexed_properties
def least_capacity(included, **kwargs):
weights = np.asarray(kwargs["weights"])
sizes = np.asarray(kwargs["sizes"])
indexed_properties = [weight / size for weight, size in zip(weights, sizes)]
indexed_properties = np.sum(indexed_properties, axis=1)
indexed_properties = enumerate(indexed_properties)
indexed_properties = list(sorted(indexed_properties, key=operator.itemgetter(1), reverse=True))
return indexed_properties
def most_efficiency_potentially(included, **kwargs):
costs = np.asarray(kwargs["costs"])
weights = np.asarray(kwargs["weights"])
indexed_properties = np.asarray([costs / weight for weight in weights])
indexed_properties = np.sum(indexed_properties, axis=1)
indexed_properties = enumerate(indexed_properties)
indexed_properties = list(sorted(indexed_properties, key=operator.itemgetter(1)))
return indexed_properties
def least_efficiency_potentially(included, **kwargs):
costs = np.asarray(kwargs["costs"])
weights = np.asarray(kwargs["weights"])
indexed_properties = np.asarray([costs / weight for weight in weights])
indexed_properties = np.sum(indexed_properties, axis=1)
indexed_properties = enumerate(indexed_properties)
indexed_properties = list(sorted(indexed_properties, key=operator.itemgetter(1), reverse=True))
return indexed_properties
def build_multi_include_constraint(current, ksp_index):
tabu = []
column_sums = np.sum(current, axis=0)
api: numpy.sum
---
# -*- coding: utf-8 -*-
"""
Created on Wed Mar 4 17:34:39 2020
@author: amarmore
"""
# A module containing some high-level scripts for decomposition and/or segmentation.
import soundfile as sf
import librosa.core
import librosa.feature
import tensorly as tl
import os
import numpy as np
import pathlib
import random
import nn_fac.ntd as NTD
import musicntd.autosimilarity_segmentation as as_seg
import musicntd.data_manipulation as dm
import musicntd.model.features as features
import musicntd.model.errors as err
from musicntd.model.current_plot import *
def load_RWC_dataset(music_folder_path, annotations_type = "MIREX10", desired_format = None, downbeats = None):
"""
Load the data of the RWC dataset, i.e. paths of songs and annotations.
The annotations can be either AIST or MIREX 10.
Parameters
----------
music_folder_path : String
Path of the folder to parse.
annotations_type : "AIST" [1] or "MIREX10" [2]
The type of annotations to load (both have a specific behavior and formatting)
The default is "MIREX10"
desired_format : DEPRECATED
downbeats : DEPRECATED
Raises
------
NotImplementedError
If the format is not taken in account.
Returns
-------
numpy array
list of list of paths, each sublist being of the form [song, annotations, downbeat(if specified)].
References
----------
[1] <NAME>. (2006, October). AIST Annotation for the RWC Music Database. In ISMIR (pp. 359-360).
[2] <NAME>., <NAME>., <NAME>., <NAME>., & <NAME>. (2014, January).
Semiotic description of music structure: An introduction to the Quaero/Metiss structural annotations.
"""
if downbeats is not None or desired_format is not None:
raise err.OutdatedBehaviorException("This version of loading is deprecated.")
# Load dataset paths at the format "song, annotations, downbeats"
paths = []
for file in os.listdir(music_folder_path):
if file[-4:] == ".wav":
file_number = "{:03d}".format(int(file[:-4]))
ann = dm.get_annotation_name_from_song(file_number, annotations_type)
paths.append([file, ann])
return np.array(paths)
api: numpy.array
---
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import warnings
import numpy as np
from pycochleagram import utils
def _identity(x):
"""Identity function. Return x.
"""
return x
def freq2lin(freq_hz):
"""Compatibility hack to allow for linearly spaced cosine filters
with `make_erb_cos_filters_nx`; intended to generalize the
functionality of `make_lin_cos_filters`.
"""
return _identity(freq_hz)
def lin2freq(n_lin):
"""Compatibility hack to allow for linearly spaced cosine filters
with `make_erb_cos_filters_nx`; intended to generalize the
functionality of `make_lin_cos_filters`.
"""
return _identity(n_lin)
def freq2erb(freq_hz):
"""Converts Hz to human-defined ERBs, using the formula of Glasberg
and Moore.
Args:
freq_hz (array_like): frequency to use for ERB.
Returns:
ndarray:
**n_erb** -- Human-defined ERB representation of input.
"""
return 9.265 * np.log(1 + freq_hz / (24.7 * 9.265))
def erb2freq(n_erb):
"""Converts human ERBs to Hz, using the formula of Glasberg and Moore.
Args:
n_erb (array_like): Human-defined ERB to convert to frequency.
Returns:
ndarray:
**freq_hz** -- Frequency representation of input.
"""
return 24.7 * 9.265 * (np.exp(n_erb / 9.265) - 1)
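# Quick sanity check (illustrative, not part of the original module): freq2erb and erb2freq
# are inverses of each other.
assert np.allclose(erb2freq(freq2erb(np.array([100., 1000., 8000.]))), [100., 1000., 8000.])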
def make_cosine_filter(freqs, l, h, convert_to_erb=True):
"""Generate a half-cosine filter. Represents one subband of the cochleagram.
A half-cosine filter is created using the values of freqs that are within the
interval [l, h]. The half-cosine filter is centered at the center of this
interval, i.e., (h + l) / 2. Values outside the valid interval [l, h] are
discarded. So, if freqs = [1, 2, 3, ... 10], l = 4.5, h = 8, the cosine filter
will only be defined on the domain [5, 6, 7] and the returned output will only
contain 3 elements.
Args:
freqs (array_like): Array containing the domain of the filter, in ERB space;
see the convert_to_erb parameter below. A single half-cosine
filter will be defined only on the valid section of these values;
specifically, the values between cutoffs ``l`` and ``h``. A half-cosine filter
centered at (h + l) / 2 is created on the interval [l, h].
l (float): The lower cutoff of the half-cosine filter in ERB space; see
convert_to_erb parameter below.
h (float): The upper cutoff of the half-cosine filter in ERB space; see
convert_to_erb parameter below.
convert_to_erb (bool, default=True): If this is True, the values in
input arguments ``freqs``, ``l``, and ``h`` will be transformed from Hz to ERB
space before creating the half-cosine filter. If this is False, the
input arguments are assumed to be in ERB space.
Returns:
ndarray:
**half_cos_filter** -- A half-cosine filter defined using elements of
freqs within [l, h].
"""
if convert_to_erb:
freqs_erb = freq2erb(freqs)
l_erb = freq2erb(l)
h_erb = freq2erb(h)
else:
freqs_erb = freqs
l_erb = l
h_erb = h
avg_in_erb = (l_erb + h_erb) / 2 # center of filter
rnge_in_erb = h_erb - l_erb # width of filter
# return np.cos((freq2erb(freqs[a_l_ind:a_h_ind+1]) - avg)/rnge * np.pi) # h_ind+1 to include endpoint
# return np.cos((freqs_erb[(freqs_erb >= l_erb) & (freqs_erb <= h_erb)]- avg_in_erb) / rnge_in_erb * np.pi) # map cutoffs to -pi/2, pi/2 interval
return np.cos((freqs_erb[(freqs_erb > l_erb) & (freqs_erb < h_erb)]- avg_in_erb) / rnge_in_erb * np.pi) # map cutoffs to -pi/2, pi/2 interval
def make_full_filter_set(filts, signal_length=None):
"""Create the full set of filters by extending the filterbank to negative FFT
frequencies.
Args:
filts (array_like): Array containing the cochlear filterbank in frequency space,
i.e., the output of make_erb_cos_filters_nx. Each row of ``filts`` is a
single filter, with columns indexing frequency.
signal_length (int, optional): Length of the signal to be filtered with this filterbank.
This should be equal to filter length * 2 - 1, i.e., 2*filts.shape[1] - 1, and if
signal_length is None, this value will be computed with the above formula.
This parameter might be deprecated later.
Returns:
ndarray:
**full_filter_set** -- Array containing the complete filterbank in
frequency space. This output can be directly applied to the frequency
representation of a signal.
"""
if signal_length is None:
signal_length = 2 * filts.shape[1] - 1
# note that filters are currently such that each ROW is a filter and COLUMN idxs freq
if np.remainder(signal_length, 2) == 0: # even -- don't take the DC & don't double sample nyquist
neg_filts = np.flipud(filts[1:filts.shape[0] - 1, :])
else: # odd -- don't take the DC
neg_filts = np.flipud(filts[1:filts.shape[0], :])
fft_filts = np.vstack((filts, neg_filts))
# we need to switch representation to apply filters to fft of the signal, not sure why, but do it here
return fft_filts.T
def make_ref_cos_filters_nx(signal_length, sr, n, low_lim, hi_lim, sample_factor, padding_size=None, full_filter=True, strict=True, ref_spacing_mode='erb', **kwargs):
"""Create ERB cosine filters, oversampled by a factor provided by "sample_factor"
Args:
signal_length (int): Length of signal to be filtered with the generated
filterbank. The signal length determines the length of the filters.
sr (int): Sampling rate associated with the signal waveform.
n (int): Number of filters (subbands) to be generated with standard
sampling (i.e., using a sampling factor of 1). Note, the actual number of
filters in the generated filterbank depends on the sampling factor, and
will also include lowpass and highpass filters that allow for
perfect reconstruction of the input signal (the exact number of lowpass
and highpass filters is determined by the sampling factor). The
number of filters in the generated filterbank is given below:
+---------------+---------------+-+------------+---+---------------------+
| sample factor | n_out |=| bandpass |\ +| highpass + lowpass |
+===============+===============+=+============+===+=====================+
| 1 | n+2 |=| n |\ +| 1 + 1 |
+---------------+---------------+-+------------+---+---------------------+
| 2 | 2*n+1+4 |=| 2*n+1 |\ +| 2 + 2 |
+---------------+---------------+-+------------+---+---------------------+
| 4 | 4*n+3+8 |=| 4*n+3 |\ +| 4 + 4 |
+---------------+---------------+-+------------+---+---------------------+
| s | s*(n+1)-1+2*s |=| s*(n+1)-1 |\ +| s + s |
+---------------+---------------+-+------------+---+---------------------+
low_lim (int): Lower limit of frequency range. Filters will not be defined
below this limit.
hi_lim (int): Upper limit of frequency range. Filters will not be defined
above this limit.
sample_factor (int): Positive integer that determines how densely ERB function
will be sampled to create bandpass filters. 1 represents standard sampling;
adjacent bandpass filters will overlap by 50%. 2 represents 2x overcomplete sampling;
adjacent bandpass filters will overlap by 75%. 4 represents 4x overcomplete sampling;
adjacent bandpass filters will overlap by 87.5%.
padding_size (int, optional): If None (default), the signal will not be padded
before filtering. Otherwise, the filters will be created assuming the
waveform signal will be padded to length padding_size*signal_length.
full_filter (bool, default=True): If True (default), the complete filter that
is ready to apply to the signal is returned. If False, only the first
half of the filter is returned (likely positive terms of FFT).
strict (bool, default=True): If True (default), will throw an error if
sample_factor is not a power of two. This facilitates comparison across
sample_factors. Also, if True, will throw an error if provided hi_lim
is greater than the Nyquist rate.
Returns:
tuple:
A tuple containing the output:
* **filts** (*array*)-- The filterbank consisting of filters have
cosine-shaped frequency responses, with center frequencies equally
spaced on an ERB scale from low_lim to hi_lim.
* **center_freqs** (*array*) -- Center frequencies of the generated filters.
* **freqs** (*array*) -- Frequency vector on which the filters are defined.
Raises:
ValueError: Various value errors for bad choices of sample_factor; see
description for strict parameter.
"""
ref_spacing_mode = ref_spacing_mode.lower()
if ref_spacing_mode == 'erb':
_freq2ref = freq2erb
_ref2freq = erb2freq
elif ref_spacing_mode in ('lin', 'linear'):
_freq2ref = freq2lin
_ref2freq = lin2freq
# elif callable(spacing_mode) # need fx and inv_fx for this
else:
raise NotImplementedError('unrecognized spacing mode: %s' % ref_spacing_mode)
if not isinstance(sample_factor, int):
raise ValueError('sample_factor must be an integer, not %s' % type(sample_factor))
if sample_factor <= 0:
raise ValueError('sample_factor must be positive')
if sample_factor != 1 and np.remainder(sample_factor, 2) != 0:
msg = 'sample_factor odd, and will change ERB filter widths. Use even sample factors for comparison.'
if strict:
raise ValueError(msg)
else:
warnings.warn(msg, RuntimeWarning, stacklevel=2)
if padding_size is not None and padding_size >= 1:
signal_length += padding_size
if np.remainder(signal_length, 2) == 0:  # even length
n_freqs = signal_length // 2  # does not include DC
max_freq = sr / 2  # go all the way to nyquist
else:  # odd length
n_freqs = (signal_length - 1) // 2
max_freq = sr * (signal_length - 1) / 2 / signal_length  # just under nyquist
# verify the high limit is allowed by the sampling rate
if hi_lim > sr / 2:
hi_lim = max_freq
msg = 'input arg "hi_lim" exceeds nyquist limit for max frequency; ignore with "strict=False"'
if strict:
raise ValueError(msg)
else:
warnings.warn(msg, RuntimeWarning, stacklevel=2)
# changing the sampling density without changing the filter locations
# (and, thereby changing their widths) requires that a certain number of filters
# be used.
n_filters = sample_factor * (n + 1) - 1
n_lp_hp = 2 * sample_factor
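# e.g. with sample_factor=2: n_filters = 2*n+1 bandpass filters plus 2 lowpass and
# 2 highpass filters, i.e. n_out = 2*n+5, matching the table in the docstring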
freqs = utils.matlab_arange(0, max_freq, n_freqs)
filts = np.zeros((n_freqs + 1, n_filters + n_lp_hp))  # n_freqs + 1 rows to match len(freqs) (includes the 0 Hz bin)
# cutoffs are evenly spaced on an "ref-spaced" scale -- interpolate linearly in "ref-space" then convert back
# get the actual spacing use to generate the sequence (in case numpy does something weird)
center_freqs, erb_spacing = np.linspace(_freq2ref(low_lim), _freq2ref(hi_lim), n_filters + 2, retstep=True) # +2 for bin endpoints
# we need to exclude the endpoints
center_freqs = center_freqs[1:-1]
freqs_erb = _freq2ref(freqs)
for i in range(n_filters):
i_offset = i + sample_factor
l = center_freqs[i] - sample_factor * erb_spacing
h = center_freqs[i] + sample_factor * erb_spacing
# the first sample_factor columns of filts are reserved for the lowpass filters, so bandpass filters start at column sample_factor
filts[(freqs_erb > l) & (freqs_erb < h), i_offset] = make_cosine_filter(freqs_erb, l, h, convert_to_erb=False) # not converting to ERB/ref means we can use freq2lin (and arbitrary "ref-spaced" transforms)
# add the lowpass and highpass filters (there will be sample_factor of each)
for i in range(sample_factor):
# account for the fact that the first sample_factor # of filts are lowpass
i_offset = i + sample_factor
lp_h_ind = max(np.where(freqs < _ref2freq(center_freqs[i]))[0]) # lowpass filter goes up to peak of first cos filter
lp_filt = np.sqrt(1 - np.power(filts[:lp_h_ind+1, i_offset], 2))
hp_l_ind = min(np.where(freqs > _ref2freq(center_freqs[-1-i]))[0]) # highpass filter goes down to peak of last cos filter
hp_filt = np.sqrt(1 - np.power(filts[hp_l_ind:, -1-i_offset], 2))
filts[:lp_h_ind+1, i] = lp_filt
filts[hp_l_ind:, -1-i] = hp_filt
# ensure that squared freq response adds to one
filts = filts / np.sqrt(sample_factor)
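# with sample_factor-times overcomplete sampling, sample_factor filters overlap at each
# frequency, so dividing by sqrt(sample_factor) keeps the summed squared response at one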
# get center freqs for lowpass and highpass filters
cfs_low =
|
np.copy(center_freqs[:sample_factor])
|
numpy.copy
|
import numpy as np
# Thanks goes to <NAME>, PhD, for giving me a huge head start ->
# https://www.pyimagesearch.com/2015/02/16/faster-non-maximum-suppression-python/
# Malisiewicz et al.
def non_max_suppression_fast(boxes, iou_thresh=0.5, max_annotations_per_object=2, prefer_highest_iou=True):
# if there are no boxes, return an empty list
if len(boxes) == 0:
return []
# if the bounding box coordinates are integers, convert them to floats --
# this is important since we'll be doing a bunch of divisions
if boxes.dtype.kind == "i":
boxes = boxes.astype("float")
# there is no point in running suppression if we are not keeping more than a single annotation per object
if max_annotations_per_object < 2:
return boxes
# initialize the list of picked indexes
pick = []
idxs = np.argsort(boxes[:, 3])
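# indices sorted by the bottom-right y-coordinate (boxes[:, 3]), as in the Malisiewicz NMS variant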
# keep looping while some indexes still remain in the indexes
while len(idxs) > 0:
# grab the last index in the indexes list and add the
# index value to the list of picked indexes
last = len(idxs) - 1
ii = idxs[last]
iou = intersection_over_union(boxes[idxs], last)
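# intersection_over_union drops the entry for `last` itself; since `last` is the final
# position, the remaining entries still line up index-for-index with idxs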
# find all indexes where iou score is greater than threshold
filtered_idxs = np.where(iou > iou_thresh)[0]
# now sort filtered scores (argsort will arrange in ascending order so we'll pick from the end)
filtered_idxs_sorted = filtered_idxs[np.argsort(iou[filtered_idxs])]
# use max_annotations_per_object to set a cap on how many boxes will be removed from consideration
num_annotations_to_remove = min(len(filtered_idxs_sorted), max_annotations_per_object - 1)
deletable_idx = len(filtered_idxs_sorted) - num_annotations_to_remove
filtered_idxs_trimmed = np.concatenate(([last], filtered_idxs_sorted[deletable_idx:]))
# If there are more than 2 competing annotations, choose the annotation
# with the highest iou when compared to all other annotations
if prefer_highest_iou and max_annotations_per_object > 2 and len(
filtered_idxs_trimmed) >= max_annotations_per_object:
filtered_boxes = boxes[idxs[filtered_idxs_trimmed]]
max_idx = 0
max_score = 0
for jj, _ in enumerate(filtered_boxes):
iou = intersection_over_union(filtered_boxes, jj)
new_max = np.amax(iou, 0)
if new_max > max_score:
max_score = new_max
max_idx = jj
pick.append(idxs[filtered_idxs_trimmed[max_idx]])
else:
pick.append(ii)
# delete all indexes from the index list that have been matched
idxs = np.delete(idxs, filtered_idxs_trimmed)
# return only the bounding boxes that were picked
return boxes[pick]
def intersection_over_union(boxes, comparable_index):
comparables = np.delete(boxes, comparable_index, 0)
xx1 = np.maximum(boxes[comparable_index, 0], comparables[:, 0])
yy1 =
|
np.maximum(boxes[comparable_index, 1], comparables[:, 1])
|
numpy.maximum
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Sep 19 11:05:23 2017
@author: zhangji
"""
from matplotlib import pyplot as plt
# plt.rcParams['figure.figsize'] = (18.5, 10.5)
# fontsize = 40
import os
# import glob
import numpy as np
from datetime import datetime
# import matplotlib
import re
from scanf import scanf
from scipy import interpolate, integrate
from mpl_toolkits.axes_grid1.inset_locator import inset_axes
from mpl_toolkits.mplot3d.art3d import Line3DCollection
from matplotlib.patches import FancyArrowPatch
from mpl_toolkits.mplot3d.proj3d import proj_transform
from mpl_toolkits.mplot3d.axes3d import Axes3D
from matplotlib.colors import Normalize
from matplotlib.ticker import Locator
from matplotlib.collections import LineCollection
from matplotlib.offsetbox import AnchoredOffsetbox, TextArea, HPacker, VPacker
# from scipy.optimize import curve_fit
# font = {'size': 20}
# matplotlib.rc('font', **font)
# np.set_printoptions(linewidth=90, precision=5)
markerstyle_list = ['^', 'v', 'o', 's', 'p', 'd', 'H',
'1', '2', '3', '4', '8', 'P', '*',
'h', '+', 'x', 'X', 'D', '|', '_', ]
color_list = ['#1f77b4', '#ff7f0e', '#2ca02c', '#d62728', '#9467bd', '#8c564b', '#e377c2',
'#7f7f7f', '#bcbd22', '#17becf']
def read_array(text_headle, FILE_DATA, array_length=6):
t_match = re.search(text_headle, FILE_DATA)
if t_match is not None:
t1 = t_match.end()
myformat = ('%f ' * array_length)[:-1]
temp1 = np.array(scanf(myformat, FILE_DATA[t1:]))
else:
temp1 = np.ones(array_length)
temp1[:] = np.nan
return temp1
class fullprint:
'context manager for printing full numpy arrays'
def __init__(self, **kwargs):
kwargs.setdefault('threshold', np.inf)
self.opt = kwargs
def __enter__(self):
self._opt = np.get_printoptions()
np.set_printoptions(**self.opt)
def __exit__(self, type, value, traceback):
np.set_printoptions(**self._opt)
def func_line(x, a0, a1):
y = a0 + a1 * x
return y
def fit_line(ax, x, y, x0, x1, ifprint=1, linestyle='-.', linewidth=1, extendline=False,
color='k', alpha=0.7):
idx = np.array(x >= x0) & np.array(x <= x1) & np.isfinite(x) & np.isfinite(y)
tx = x[idx]
ty = y[idx]
fit_para = np.polyfit(tx, ty, 1)
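# np.polyfit with deg=1 returns [slope, intercept], so fit_para[0] is the slope and fit_para[1] the intercept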
pol_y = np.poly1d(fit_para)
if extendline:
fit_x = np.linspace(x.min(), x.max(), 100)
else:
fit_x = np.linspace(max(x.min(), x0), min(x.max(), x1), 100)
if ax is not None:
ax.plot(fit_x, pol_y(fit_x), linestyle, linewidth=linewidth,
color=color, alpha=alpha)
if ifprint:
print('y = %f + %f * x' % (fit_para[1], fit_para[0]), 'in range',
(x[idx].min(), x[idx].max()))
return fit_para
def fit_power_law(ax, x, y, x0, x1, ifprint=1, linestyle='-.', linewidth=1, extendline=False,
color='k', alpha=0.7):
idx = np.array(x >= x0) & np.array(x <= x1) & np.isfinite((
|
np.log10(x)
|
numpy.log10
|
import numpy as np
from ..symmetrize import (
symmetrize_bmask,
symmetrize_weight)
def test_symmetrize_weight():
weight = np.ones((5, 5))
weight[:, 0] = 0
symmetrize_weight(weight=weight)
assert np.all(weight[:, 0] == 0)
assert np.all(weight[-1, :] == 0)
def test_symmetrize_weight_no_sym_mask():
weight = np.ones((5, 5))
weight[:, 0] = 0
mask = np.ones_like(weight).astype(bool)
mask[3:5, 0] = False
mask = ~mask
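# no_sym_mask is True only at [3:5, 0]; those weights are excluded from symmetrization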
symmetrize_weight(weight=weight, no_sym_mask=mask)
assert np.all(weight[:, 0] == 0)
assert np.all(weight[-1, :3] == 0)
assert np.all(weight[-1, 3:] == 1)
def test_symmetrize_weight_angle():
weight = np.ones((64, 64))
weight[:, 0] = 0
weight_orig = weight.copy()
weights = []
for angle in [45, 90, 135, 180, 225, 270, 315]:
weight_r = weight_orig.copy()
symmetrize_weight(weight=weight_r, angle=angle)
weights.append(weight_r)
msk = weight_r == 0
weight[msk] = 0
assert np.array_equal(weight, np.rot90(weight))
assert np.array_equal(weight, np.rot90(np.rot90(weight)))
assert np.array_equal(weight, np.rot90(np.rot90(np.rot90(weight))))
if False:
import matplotlib.pyplot as plt
fig, axs = plt.subplots(nrows=3, ncols=3)
axs = axs.ravel()
weights.append(weight_orig)
weights.append(weight)
for i, bm in enumerate(weights):
ax = axs[i]
ax.pcolormesh(bm)
ax.set_aspect(1)
import pdb
pdb.set_trace()
def test_symmetrize_weight_angle_no_sym_mask():
weight = np.ones((64, 64))
weight[:, 0] = 0
weight_orig = weight.copy()
mask = np.ones_like(weight).astype(bool)
mask[55:, 0] = False
mask = ~mask
for angle in [90, 180, 270]:
weight_r = weight_orig.copy()
symmetrize_weight(weight=weight_r, angle=angle, no_sym_mask=mask)
assert np.all(weight_r[:, 0] == 0)
if angle == 90:
assert np.all(weight_r[-1, :55] == 0)
assert np.all(weight_r[-1, 55:] == 1)
elif angle == 180:
assert np.all(weight_r[-55:, -1] == 0)
assert np.all(weight_r[:-55, -1] == 1)
elif angle == 270:
assert np.all(weight_r[0, -55:] == 0)
# this test overlaps with index [0, 0], which started out with weight zero
assert np.all(weight_r[0, 1:-55] == 1)
else:
assert False, "missed an assert!"
def test_symmetrize_bmask():
bmask = np.zeros((4, 4), dtype=np.int32)
bmask[:, 0] = 1
bmask[:, -2] = 2
symmetrize_bmask(bmask=bmask)
assert np.array_equal(
bmask,
[[1, 0, 2, 0],
[3, 2, 2, 2],
[1, 0, 2, 0],
[1, 1, 3, 1]])
def test_symmetrize_bmask_sym_flags():
bmask = np.zeros((4, 4), dtype=np.int32)
bmask[:, 0] = 1
bmask[:, -2] = 2
bmask[0, -1] = 2**9
symflags = (2**0) | (2**1)
symmetrize_bmask(bmask=bmask, sym_flags=symflags)
assert np.array_equal(
bmask,
[[1, 0, 2, 2**9],
[3, 2, 2, 2],
[1, 0, 2, 0],
[1, 1, 3, 1]])
bmask = np.zeros((4, 4), dtype=np.int32)
bmask[:, 0] = 1
bmask[:, -2] = 2
bmask[0, -1] = 2**9
symmetrize_bmask(bmask=bmask)
assert np.array_equal(
bmask,
[[1 | 2**9, 0, 2, 2**9],
[3, 2, 2, 2],
[1, 0, 2, 0],
[1, 1, 3, 1]])
def test_symmetrize_bmask_no_sym_mask():
bmask =
|
np.zeros((4, 4), dtype=np.int32)
|
numpy.zeros
|
import sys
import os.path
import numpy as np
import pandas
from Sloth.cluster import KMeans
from sklearn.cluster import KMeans as sk_kmeans
from tslearn.datasets import CachedDatasets
from d3m.primitive_interfaces.base import PrimitiveBase, CallResult
from d3m import container, utils
from d3m.container import DataFrame as d3m_DataFrame
from d3m.metadata import hyperparams, base as metadata_base, params
from common_primitives import utils as utils_cp, dataset_to_dataframe as DatasetToDataFrame, dataframe_utils, denormalize
__author__ = 'Distil'
__version__ = '2.0.5'
__contact__ = 'mailto:<EMAIL>'
Inputs = container.pandas.DataFrame
Outputs = container.pandas.DataFrame
class Params(params.Params):
pass
class Hyperparams(hyperparams.Hyperparams):
algorithm = hyperparams.Enumeration(default = 'TimeSeriesKMeans',
semantic_types = ['https://metadata.datadrivendiscovery.org/types/ControlParameter'],
values = ['GlobalAlignmentKernelKMeans', 'TimeSeriesKMeans'],
description = 'type of clustering algorithm to use')
nclusters = hyperparams.UniformInt(lower=1, upper=sys.maxsize, default=3, semantic_types=
['https://metadata.datadrivendiscovery.org/types/TuningParameter'], description = 'number of clusters \
to use in kernel kmeans algorithm')
n_init = hyperparams.UniformInt(lower=1, upper=sys.maxsize, default=10, semantic_types=
['https://metadata.datadrivendiscovery.org/types/TuningParameter'], description = 'Number of times the k-means algorithm \
will be run with different centroid seeds. Final result will be the best output on n_init consecutive runs in terms of inertia')
pass
class Storc(PrimitiveBase[Inputs, Outputs, Params, Hyperparams]):
"""
Primitive that applies kmeans clustering to time series data. Algorithm options are 'GlobalAlignmentKernelKMeans'
or 'TimeSeriesKMeans', both of which are bootstrapped from the base library tslearn.clustering. This is an unsupervised
clustering primitive, but it has been represented as a supervised classification problem to produce a compliant primitive.
Training inputs: D3M dataset with features and labels, and D3M indices
Outputs: D3M dataset with predicted labels and D3M indices
"""
metadata = metadata_base.PrimitiveMetadata({
# Simply an UUID generated once and fixed forever. Generated using "uuid.uuid4()".
'id': "77bf4b92-2faa-3e38-bb7e-804131243a7f",
'version': __version__,
'name': "Sloth",
# Keywords do not have a controlled vocabulary. Authors can put here whatever they find suitable.
'keywords': ['Time Series','Clustering'],
'source': {
'name': __author__,
'contact': __contact__,
'uris': [
# Unstructured URIs.
"https://github.com/NewKnowledge/D3M-Unsupervised",
],
},
# A list of dependencies in order. These can be Python packages, system packages, or Docker images.
# Of course Python packages can also have their own dependencies, but sometimes it is necessary to
# install a Python package first to be even able to run setup.py of another package. Or you have
# a dependency which is not on PyPi.
'installation': [
{
'type': metadata_base.PrimitiveInstallationType.PIP,
'package': 'cython',
'version': '0.29.7',
},
{
'type': metadata_base.PrimitiveInstallationType.PIP,
'package_uri': 'git+https://github.com/NewKnowledge/D3M-Unsupervised.git@{git_commit}#egg=D3MUnsupervised'.format(
git_commit=utils.current_git_commit(os.path.dirname(__file__)),)
}
],
# The same path the primitive is registered with entry points in setup.py.
'python_path': 'd3m.primitives.clustering.k_means.Sloth',
# Choose these from a controlled vocabulary in the schema. If anything is missing which would
# best describe the primitive, make a merge request.
'algorithm_types': [
metadata_base.PrimitiveAlgorithmType.K_MEANS_CLUSTERING,
],
'primitive_family': metadata_base.PrimitiveFamily.CLUSTERING,
})
def __init__(self, *, hyperparams: Hyperparams, random_seed: int = 0)-> None:
super().__init__(hyperparams=hyperparams, random_seed=random_seed)
self._X_train = None # training inputs
def fit(self, *, timeout: float = None, iterations: int = None) -> CallResult[None]:
'''
fits Kmeans clustering algorithm using training data from set_training_data and hyperparameters
'''
self._kmeans.fit(self._X_train)
return CallResult(None)
def get_params(self) -> Params:
return self._params
def set_params(self, *, params: Params) -> None:
self._params = params
def set_training_data(self, *, inputs: Inputs, outputs: Outputs) -> None:
'''
Sets primitive's training data
Parameters
----------
inputs: d3m dataset containing training time series
'''
#hyperparams_class = DatasetToDataFrame.DatasetToDataFramePrimitive.metadata.query()['primitive_code']['class_type_arguments']['Hyperparams']
#ds2df_client = DatasetToDataFrame.DatasetToDataFramePrimitive(hyperparams = hyperparams_class.defaults().replace({"dataframe_resource":"learningData"}))
#metadata_inputs = ds2df_client.produce(inputs = inputs).value
#formatted_inputs = ds2df_client.produce(inputs = inputs).value
# store information on target, index variable
targets = inputs.metadata.get_columns_with_semantic_type('https://metadata.datadrivendiscovery.org/types/TrueTarget')
if not len(targets):
targets = inputs.metadata.get_columns_with_semantic_type('https://metadata.datadrivendiscovery.org/types/SuggestedTarget')
target_names = [list(inputs)[t] for t in targets]
index = inputs.metadata.get_columns_with_semantic_type('https://metadata.datadrivendiscovery.org/types/PrimaryKey')
series = inputs[target_names] != ''
self.clustering = 0
if not series.any().any():
self.clustering = 1
# load and reshape training data
n_ts = len(inputs.d3mIndex.unique())
if n_ts == inputs.shape[0]:
self._kmeans = sk_kmeans(n_clusters = self.hyperparams['nclusters'], n_init = self.hyperparams['n_init'], random_state=self.random_seed)
self._X_train_all_data = inputs.drop(columns = list(inputs)[index[0]])
self._X_train = self._X_train_all_data.drop(columns = target_names).values
else:
self._kmeans = KMeans(self.hyperparams['nclusters'], self.hyperparams['algorithm'])
ts_sz = int(inputs.shape[0] / n_ts)
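# reshape the long-format series into (n_series, series_length, 1), the layout expected by the tslearn-backed KMeans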
self._X_train = np.array(inputs.value).reshape(n_ts, ts_sz, 1)
def produce(self, *, inputs: Inputs, timeout: float = None, iterations: int = None) -> CallResult[container.pandas.DataFrame]:
"""
Parameters
----------
inputs : D3M dataframe with associated metadata.
Returns
-------
Outputs
For unsupervised problems: The output is a dataframe containing a single column where each entry is the associated series' cluster number.
For semi-supervised problems: The output is the input df containing an additional feature - cluster_label
"""
#hyperparams_class = DatasetToDataFrame.DatasetToDataFramePrimitive.metadata.query()['primitive_code']['class_type_arguments']['Hyperparams']
#ds2df_client = DatasetToDataFrame.DatasetToDataFramePrimitive(hyperparams = hyperparams_class.defaults().replace({"dataframe_resource":"learningData"}))
#metadata_inputs = ds2df_client.produce(inputs = inputs).value
#formatted_inputs = ds2df_client.produce(inputs = inputs).value
# store information on target, index variable
targets = inputs.metadata.get_columns_with_semantic_type('https://metadata.datadrivendiscovery.org/types/TrueTarget')
if not len(targets):
targets = inputs.metadata.get_columns_with_semantic_type('https://metadata.datadrivendiscovery.org/types/SuggestedTarget')
target_names = [list(inputs)[t] for t in targets]
index = inputs.metadata.get_columns_with_semantic_type('https://metadata.datadrivendiscovery.org/types/PrimaryKey')
index_names = [list(inputs)[i] for i in index]
# load and reshape training data
n_ts = len(inputs.d3mIndex.unique())
if n_ts == inputs.shape[0]:
X_test = inputs.drop(columns = list(inputs)[index[0]])
X_test = X_test.drop(columns = target_names).values
else:
ts_sz = int(inputs.shape[0] / n_ts)
X_test =
|
np.array(inputs.value)
|
numpy.array
|
import numpy as np
import numpy.linalg as la
import matplotlib.pyplot as plt
import scipy.signal as scisig
from func import *
np.set_printoptions(linewidth=150)
p = 1; n = 2**p
x = randomvec(n)
hvec = randomvec(n,seedNumber=p)
T = vectorToToeplitz(hvec)
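# multiplying the Toeplitz matrix built from hvec by x is equivalent to convolving hvec with x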
toeplitzConv =
|
np.dot(T,x)
|
numpy.dot
|
# -*- coding: utf-8 -*-
# Copyright (c) 2016-2020 by University of Kassel and Fraunhofer Institute for Energy Economics
# and Energy System Technology (IEE), Kassel. All rights reserved.
import copy
import numpy as np
import math
import pandapower.auxiliary as aux
import warnings
from pandapower.build_branch import _initialize_branch_lookup,\
_build_branch_ppc, _switch_branches, _branches_with_oos_buses,\
_calc_tap_from_dataframe,_calc_nominal_ratio_from_dataframe,\
_transformer_correction_factor
from pandapower.build_bus import _build_bus_ppc, _calc_pq_elements_and_add_on_ppc, \
_calc_shunts_and_add_on_ppc, _add_gen_impedances_ppc, _add_motor_impedances_ppc
from pandapower.build_gen import _build_gen_ppc, _check_voltage_setpoints_at_same_bus, \
_check_voltage_angles_at_same_bus, _check_for_reference_bus
from pandapower.opf.make_objective import _make_objective
from pandapower.pypower.idx_area import PRICE_REF_BUS
from pandapower.pypower.idx_brch import F_BUS, T_BUS, BR_STATUS, branch_cols, \
TAP, SHIFT, BR_R, BR_X, BR_B
from pandapower.pypower.idx_bus import NONE, BUS_I, BUS_TYPE, BASE_KV, GS, BS
from pandapower.pypower.idx_gen import GEN_BUS, GEN_STATUS
from pandapower.pypower.run_userfcn import run_userfcn
def _pd2ppc(net, sequence=None):
"""
Converter Flow:
1. Create an empty pypower data structure
2. Calculate loads and write the bus matrix
3. Build the gen (Infeeder)- Matrix
4. Calculate the line parameter and the transformer parameter,
and fill it in the branch matrix.
Order: 1st: Line values, 2nd: Trafo values
5. if opf: make opf objective (gencost)
6. convert to internal ppci format for pypower powerflow /
opf without out of service elements and with rearranged buses
INPUT:
**net** - The pandapower format network
**sequence** - Used for three phase analysis
( 0 - Zero Sequence
1 - Positive Sequence
2 - Negative Sequence
)
OUTPUT:
**ppc** - The simple matpower format network. Which consists of:
ppc = {
"baseMVA": 1., *float*
"version": 2, *int*
"bus": np.array([], dtype=float),
"branch": np.array([], dtype=np.complex128),
"gen": np.array([], dtype=float),
"gencost" = np.array([], dtype=float), only for OPF
"internal": {
"Ybus": np.array([], dtype=np.complex128)
, "Yf": np.array([], dtype=np.complex128)
, "Yt": np.array([], dtype=np.complex128)
, "branch_is": np.array([], dtype=bool)
, "gen_is": np.array([], dtype=bool)
}
**ppci** - The "internal" pypower format network for PF calculations
"""
# select elements in service (time consuming, so we do it once)
net["_is_elements"] = aux._select_is_elements_numba(net, sequence=sequence)
# Gets network configurations
mode = net["_options"]["mode"]
check_connectivity = net["_options"]["check_connectivity"]
calculate_voltage_angles = net["_options"]["calculate_voltage_angles"]
ppc = _init_ppc(net, mode=mode, sequence=sequence)
# generate ppc['bus'] and the bus lookup
_build_bus_ppc(net, ppc)
if sequence == 0:
# Adds external grid impedance for 3ph and sc calculations in ppc0
_add_ext_grid_sc_impedance_zero(net, ppc)
# Calculates ppc0 branch impedances from branch elements
_build_branch_ppc_zero(net, ppc)
else:
# Calculates ppc1/ppc2 branch impedances from branch elements
_build_branch_ppc(net, ppc)
# Adds P and Q for loads / sgens in ppc['bus'] (PQ nodes)
if mode == "sc":
_add_gen_impedances_ppc(net, ppc)
_add_motor_impedances_ppc(net, ppc)
else:
_calc_pq_elements_and_add_on_ppc(net, ppc, sequence=sequence)
# adds P and Q for shunts, wards and xwards (to PQ nodes)
_calc_shunts_and_add_on_ppc(net, ppc)
# adds auxiliary buses for open switches at branches
_switch_branches(net, ppc)
# Adds auxiliary buses for in-service lines with out-of-service buses.
# Also deactivates lines if they are connected to two out of service buses
_branches_with_oos_buses(net, ppc)
if check_connectivity:
if sequence in [None, 1, 2]:
# sets islands (multiple isolated nodes) out of service
if "opf" in mode:
net["_isolated_buses"], _, _ = aux._check_connectivity_opf(ppc)
else:
net["_isolated_buses"], _, _ = aux._check_connectivity(ppc)
net["_is_elements_final"] = aux._select_is_elements_numba(net,
net._isolated_buses, sequence)
else:
ppc["bus"][net._isolated_buses, BUS_TYPE] = NONE
net["_is_elements"] = net["_is_elements_final"]
else:
# sets buses out of service, which aren't connected to branches / REF buses
aux._set_isolated_buses_out_of_service(net, ppc)
_build_gen_ppc(net, ppc)
if "pf" in mode:
_check_for_reference_bus(ppc)
aux._replace_nans_with_default_limits(net, ppc)
# generates "internal" ppci format (for powerflow calc)
# from "external" ppc format and updates the bus lookup
# Note: Also reorders buses and gens in ppc
ppci = _ppc2ppci(ppc, net)
if mode == "pf":
# check if any generators connected to the same bus have different voltage setpoints
_check_voltage_setpoints_at_same_bus(ppc)
if calculate_voltage_angles:
_check_voltage_angles_at_same_bus(net, ppci)
if mode == "opf":
# make opf objective
ppci = _make_objective(ppci, net)
return ppc, ppci
def _init_ppc(net, mode="pf", sequence=None):
# init empty ppc
ppc = {"baseMVA": net.sn_mva
, "version": 2
, "bus": np.array([], dtype=float)
, "branch": np.array([], dtype=np.complex128)
, "gen": np.array([], dtype=float)
, "internal": {
"Ybus": np.array([], dtype=np.complex128)
, "Yf": np.array([], dtype=np.complex128)
, "Yt": np.array([], dtype=np.complex128)
, "branch_is": np.array([], dtype=bool)
, "gen_is": np.array([], dtype=bool)
, "DLF": np.array([], dtype=np.complex128)
, "buses_ord_bfs_nets": np.array([], dtype=float)
}
}
if mode == "opf":
# additional fields in ppc
ppc["gencost"] = np.array([], dtype=float)
net["_ppc"] = ppc
if sequence is None:
net["_ppc"] = ppc
else:
ppc["sequence"] = int(sequence)
net["_ppc%s" % sequence] = ppc
return ppc
def _ppc2ppci(ppc, net, ppci=None):
"""
Creates the ppci which is used to run the power flow / OPF...
The ppci is similar to the ppc except that:
1. it contains no out of service elements
2. buses are sorted
Parameters
----------
ppc - the ppc
net - the pandapower net
Returns
-------
ppci - the "internal" ppc
"""
# get empty ppci
if ppci is None:
ppci = _init_ppc(net, mode=net["_options"]["mode"])
# BUS Sorting and lookups
# get bus_lookup
bus_lookup = net["_pd2ppc_lookups"]["bus"]
# get OOS busses and place them at the end of the bus array
# (there are no OOS busses in the ppci)
oos_busses = ppc['bus'][:, BUS_TYPE] == NONE
ppci['bus'] = ppc['bus'][~oos_busses]
# in ppc the OOS busses are included and at the end of the array
ppc['bus'] =
|
np.vstack([ppc['bus'][~oos_busses], ppc['bus'][oos_busses]])
|
numpy.vstack
|
#
# Created by: <NAME>, September 2002
#
import sys
import subprocess
import time
from functools import reduce
from numpy.testing import (assert_equal, assert_array_almost_equal, assert_,
assert_allclose, assert_almost_equal,
assert_array_equal)
import pytest
from pytest import raises as assert_raises
import numpy as np
from numpy import (eye, ones, zeros, zeros_like, triu, tril, tril_indices,
triu_indices)
from numpy.random import rand, randint, seed
from scipy.linalg import (_flapack as flapack, lapack, inv, svd, cholesky,
solve, ldl, norm, block_diag, qr, eigh)
from scipy.linalg.lapack import _compute_lwork
from scipy.stats import ortho_group, unitary_group
import scipy.sparse as sps
try:
from scipy.linalg import _clapack as clapack
except ImportError:
clapack = None
from scipy.linalg.lapack import get_lapack_funcs
from scipy.linalg.blas import get_blas_funcs
REAL_DTYPES = [np.float32, np.float64]
COMPLEX_DTYPES = [np.complex64, np.complex128]
DTYPES = REAL_DTYPES + COMPLEX_DTYPES
def generate_random_dtype_array(shape, dtype):
# generates a random matrix of the desired shape and data type
if dtype in COMPLEX_DTYPES:
return (np.random.rand(*shape)
+ np.random.rand(*shape)*1.0j).astype(dtype)
return np.random.rand(*shape).astype(dtype)
def test_lapack_documented():
"""Test that all entries are in the doc."""
if lapack.__doc__ is None: # just in case there is a python -OO
pytest.skip('lapack.__doc__ is None')
names = set(lapack.__doc__.split())
ignore_list = set([
'absolute_import', 'clapack', 'division', 'find_best_lapack_type',
'flapack', 'print_function', 'HAS_ILP64',
])
missing = list()
for name in dir(lapack):
if (not name.startswith('_') and name not in ignore_list and
name not in names):
missing.append(name)
assert missing == [], 'Name(s) missing from lapack.__doc__ or ignore_list'
class TestFlapackSimple(object):
def test_gebal(self):
a = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
a1 = [[1, 0, 0, 3e-4],
[4, 0, 0, 2e-3],
[7, 1, 0, 0],
[0, 1, 0, 0]]
for p in 'sdzc':
f = getattr(flapack, p+'gebal', None)
if f is None:
continue
ba, lo, hi, pivscale, info = f(a)
assert_(not info, repr(info))
assert_array_almost_equal(ba, a)
assert_equal((lo, hi), (0, len(a[0])-1))
assert_array_almost_equal(pivscale, np.ones(len(a)))
ba, lo, hi, pivscale, info = f(a1, permute=1, scale=1)
assert_(not info, repr(info))
# print(a1)
# print(ba, lo, hi, pivscale)
def test_gehrd(self):
a = [[-149, -50, -154],
[537, 180, 546],
[-27, -9, -25]]
for p in 'd':
f = getattr(flapack, p+'gehrd', None)
if f is None:
continue
ht, tau, info = f(a)
assert_(not info, repr(info))
def test_trsyl(self):
a = np.array([[1, 2], [0, 4]])
b = np.array([[5, 6], [0, 8]])
c = np.array([[9, 10], [11, 12]])
trans = 'T'
# Test single and double implementations, including most
# of the options
for dtype in 'fdFD':
a1, b1, c1 = a.astype(dtype), b.astype(dtype), c.astype(dtype)
trsyl, = get_lapack_funcs(('trsyl',), (a1,))
if dtype.isupper(): # is complex dtype
a1[0] += 1j
trans = 'C'
x, scale, info = trsyl(a1, b1, c1)
assert_array_almost_equal(np.dot(a1, x) + np.dot(x, b1),
scale * c1)
x, scale, info = trsyl(a1, b1, c1, trana=trans, tranb=trans)
assert_array_almost_equal(
np.dot(a1.conjugate().T, x) + np.dot(x, b1.conjugate().T),
scale * c1, decimal=4)
x, scale, info = trsyl(a1, b1, c1, isgn=-1)
assert_array_almost_equal(np.dot(a1, x) - np.dot(x, b1),
scale * c1, decimal=4)
def test_lange(self):
a = np.array([
[-149, -50, -154],
[537, 180, 546],
[-27, -9, -25]])
for dtype in 'fdFD':
for norm_str in 'Mm1OoIiFfEe':
a1 = a.astype(dtype)
if dtype.isupper():
# is complex dtype
a1[0, 0] += 1j
lange, = get_lapack_funcs(('lange',), (a1,))
value = lange(norm_str, a1)
if norm_str in 'FfEe':
if dtype in 'Ff':
decimal = 3
else:
decimal = 7
ref = np.sqrt(np.sum(np.square(np.abs(a1))))
assert_almost_equal(value, ref, decimal)
else:
if norm_str in 'Mm':
ref = np.max(np.abs(a1))
elif norm_str in '1Oo':
ref = np.max(np.sum(np.abs(a1), axis=0))
elif norm_str in 'Ii':
ref = np.max(np.sum(np.abs(a1), axis=1))
assert_equal(value, ref)
class TestLapack(object):
def test_flapack(self):
if hasattr(flapack, 'empty_module'):
# flapack module is empty
pass
def test_clapack(self):
if hasattr(clapack, 'empty_module'):
# clapack module is empty
pass
class TestLeastSquaresSolvers(object):
def test_gels(self):
seed(1234)
# Test fat/tall matrix argument handling - gh-issue #8329
for ind, dtype in enumerate(DTYPES):
m = 10
n = 20
nrhs = 1
a1 = rand(m, n).astype(dtype)
b1 = rand(n).astype(dtype)
gls, glslw = get_lapack_funcs(('gels', 'gels_lwork'), dtype=dtype)
# Request of sizes
lwork = _compute_lwork(glslw, m, n, nrhs)
_, _, info = gls(a1, b1, lwork=lwork)
assert_(info >= 0)
_, _, info = gls(a1, b1, trans='TTCC'[ind], lwork=lwork)
assert_(info >= 0)
for dtype in REAL_DTYPES:
a1 = np.array([[1.0, 2.0],
[4.0, 5.0],
[7.0, 8.0]], dtype=dtype)
b1 = np.array([16.0, 17.0, 20.0], dtype=dtype)
gels, gels_lwork, geqrf = get_lapack_funcs(
('gels', 'gels_lwork', 'geqrf'), (a1, b1))
m, n = a1.shape
if len(b1.shape) == 2:
nrhs = b1.shape[1]
else:
nrhs = 1
# Request of sizes
lwork = _compute_lwork(gels_lwork, m, n, nrhs)
lqr, x, info = gels(a1, b1, lwork=lwork)
assert_allclose(x[:-1], np.array([-14.333333333333323,
14.999999999999991],
dtype=dtype),
rtol=25*np.finfo(dtype).eps)
lqr_truth, _, _, _ = geqrf(a1)
assert_array_equal(lqr, lqr_truth)
for dtype in COMPLEX_DTYPES:
a1 = np.array([[1.0+4.0j, 2.0],
[4.0+0.5j, 5.0-3.0j],
[7.0-2.0j, 8.0+0.7j]], dtype=dtype)
b1 = np.array([16.0, 17.0+2.0j, 20.0-4.0j], dtype=dtype)
gels, gels_lwork, geqrf = get_lapack_funcs(
('gels', 'gels_lwork', 'geqrf'), (a1, b1))
m, n = a1.shape
if len(b1.shape) == 2:
nrhs = b1.shape[1]
else:
nrhs = 1
# Request of sizes
lwork = _compute_lwork(gels_lwork, m, n, nrhs)
lqr, x, info = gels(a1, b1, lwork=lwork)
assert_allclose(x[:-1],
np.array([1.161753632288328-1.901075709391912j,
1.735882340522193+1.521240901196909j],
dtype=dtype), rtol=25*np.finfo(dtype).eps)
lqr_truth, _, _, _ = geqrf(a1)
assert_array_equal(lqr, lqr_truth)
def test_gelsd(self):
for dtype in REAL_DTYPES:
a1 = np.array([[1.0, 2.0],
[4.0, 5.0],
[7.0, 8.0]], dtype=dtype)
b1 = np.array([16.0, 17.0, 20.0], dtype=dtype)
gelsd, gelsd_lwork = get_lapack_funcs(('gelsd', 'gelsd_lwork'),
(a1, b1))
m, n = a1.shape
if len(b1.shape) == 2:
nrhs = b1.shape[1]
else:
nrhs = 1
# Request of sizes
work, iwork, info = gelsd_lwork(m, n, nrhs, -1)
lwork = int(np.real(work))
iwork_size = iwork
x, s, rank, info = gelsd(a1, b1, lwork, iwork_size,
-1, False, False)
assert_allclose(x[:-1], np.array([-14.333333333333323,
14.999999999999991],
dtype=dtype),
rtol=25*np.finfo(dtype).eps)
assert_allclose(s, np.array([12.596017180511966,
0.583396253199685], dtype=dtype),
rtol=25*np.finfo(dtype).eps)
for dtype in COMPLEX_DTYPES:
a1 = np.array([[1.0+4.0j, 2.0],
[4.0+0.5j, 5.0-3.0j],
[7.0-2.0j, 8.0+0.7j]], dtype=dtype)
b1 = np.array([16.0, 17.0+2.0j, 20.0-4.0j], dtype=dtype)
gelsd, gelsd_lwork = get_lapack_funcs(('gelsd', 'gelsd_lwork'),
(a1, b1))
m, n = a1.shape
if len(b1.shape) == 2:
nrhs = b1.shape[1]
else:
nrhs = 1
# Request of sizes
work, rwork, iwork, info = gelsd_lwork(m, n, nrhs, -1)
lwork = int(np.real(work))
rwork_size = int(rwork)
iwork_size = iwork
x, s, rank, info = gelsd(a1, b1, lwork, rwork_size, iwork_size,
-1, False, False)
assert_allclose(x[:-1],
np.array([1.161753632288328-1.901075709391912j,
1.735882340522193+1.521240901196909j],
dtype=dtype), rtol=25*np.finfo(dtype).eps)
assert_allclose(s,
np.array([13.035514762572043, 4.337666985231382],
dtype=dtype), rtol=25*np.finfo(dtype).eps)
def test_gelss(self):
for dtype in REAL_DTYPES:
a1 = np.array([[1.0, 2.0],
[4.0, 5.0],
[7.0, 8.0]], dtype=dtype)
b1 = np.array([16.0, 17.0, 20.0], dtype=dtype)
gelss, gelss_lwork = get_lapack_funcs(('gelss', 'gelss_lwork'),
(a1, b1))
m, n = a1.shape
if len(b1.shape) == 2:
nrhs = b1.shape[1]
else:
nrhs = 1
# Request of sizes
work, info = gelss_lwork(m, n, nrhs, -1)
lwork = int(np.real(work))
v, x, s, rank, work, info = gelss(a1, b1, -1, lwork, False, False)
assert_allclose(x[:-1], np.array([-14.333333333333323,
14.999999999999991],
dtype=dtype),
rtol=25*np.finfo(dtype).eps)
assert_allclose(s, np.array([12.596017180511966,
0.583396253199685], dtype=dtype),
rtol=25*np.finfo(dtype).eps)
for dtype in COMPLEX_DTYPES:
a1 = np.array([[1.0+4.0j, 2.0],
[4.0+0.5j, 5.0-3.0j],
[7.0-2.0j, 8.0+0.7j]], dtype=dtype)
b1 = np.array([16.0, 17.0+2.0j, 20.0-4.0j], dtype=dtype)
gelss, gelss_lwork = get_lapack_funcs(('gelss', 'gelss_lwork'),
(a1, b1))
m, n = a1.shape
if len(b1.shape) == 2:
nrhs = b1.shape[1]
else:
nrhs = 1
# Request of sizes
work, info = gelss_lwork(m, n, nrhs, -1)
lwork = int(np.real(work))
v, x, s, rank, work, info = gelss(a1, b1, -1, lwork, False, False)
assert_allclose(x[:-1],
np.array([1.161753632288328-1.901075709391912j,
1.735882340522193+1.521240901196909j],
dtype=dtype),
rtol=25*np.finfo(dtype).eps)
assert_allclose(s, np.array([13.035514762572043,
4.337666985231382], dtype=dtype),
rtol=25*np.finfo(dtype).eps)
def test_gelsy(self):
for dtype in REAL_DTYPES:
a1 = np.array([[1.0, 2.0],
[4.0, 5.0],
[7.0, 8.0]], dtype=dtype)
b1 = np.array([16.0, 17.0, 20.0], dtype=dtype)
gelsy, gelsy_lwork = get_lapack_funcs(('gelsy', 'gelss_lwork'),
(a1, b1))
m, n = a1.shape
if len(b1.shape) == 2:
nrhs = b1.shape[1]
else:
nrhs = 1
# Request of sizes
work, info = gelsy_lwork(m, n, nrhs, 10*np.finfo(dtype).eps)
lwork = int(np.real(work))
jptv = np.zeros((a1.shape[1], 1), dtype=np.int32)
v, x, j, rank, info = gelsy(a1, b1, jptv, np.finfo(dtype).eps,
lwork, False, False)
assert_allclose(x[:-1], np.array([-14.333333333333323,
14.999999999999991],
dtype=dtype),
rtol=25*np.finfo(dtype).eps)
for dtype in COMPLEX_DTYPES:
a1 = np.array([[1.0+4.0j, 2.0],
[4.0+0.5j, 5.0-3.0j],
[7.0-2.0j, 8.0+0.7j]], dtype=dtype)
b1 = np.array([16.0, 17.0+2.0j, 20.0-4.0j], dtype=dtype)
gelsy, gelsy_lwork = get_lapack_funcs(('gelsy', 'gelss_lwork'),
(a1, b1))
m, n = a1.shape
if len(b1.shape) == 2:
nrhs = b1.shape[1]
else:
nrhs = 1
# Request of sizes
work, info = gelsy_lwork(m, n, nrhs, 10*np.finfo(dtype).eps)
lwork = int(np.real(work))
jptv = np.zeros((a1.shape[1], 1), dtype=np.int32)
v, x, j, rank, info = gelsy(a1, b1, jptv, np.finfo(dtype).eps,
lwork, False, False)
assert_allclose(x[:-1],
np.array([1.161753632288328-1.901075709391912j,
1.735882340522193+1.521240901196909j],
dtype=dtype),
rtol=25*np.finfo(dtype).eps)
@pytest.mark.parametrize('dtype', DTYPES)
@pytest.mark.parametrize('shape', [(3, 4), (5, 2), (2**18, 2**18)])
def test_geqrf_lwork(dtype, shape):
geqrf_lwork = get_lapack_funcs(('geqrf_lwork'), dtype=dtype)
m, n = shape
lwork, info = geqrf_lwork(m=m, n=n)
assert_equal(info, 0)
class TestRegression(object):
def test_ticket_1645(self):
# Check that RQ routines have correct lwork
for dtype in DTYPES:
a = np.zeros((300, 2), dtype=dtype)
gerqf, = get_lapack_funcs(['gerqf'], [a])
assert_raises(Exception, gerqf, a, lwork=2)
rq, tau, work, info = gerqf(a)
if dtype in REAL_DTYPES:
orgrq, = get_lapack_funcs(['orgrq'], [a])
assert_raises(Exception, orgrq, rq[-2:], tau, lwork=1)
orgrq(rq[-2:], tau, lwork=2)
elif dtype in COMPLEX_DTYPES:
ungrq, = get_lapack_funcs(['ungrq'], [a])
assert_raises(Exception, ungrq, rq[-2:], tau, lwork=1)
ungrq(rq[-2:], tau, lwork=2)
class TestDpotr(object):
def test_gh_2691(self):
# 'lower' argument of dportf/dpotri
for lower in [True, False]:
for clean in [True, False]:
np.random.seed(42)
x = np.random.normal(size=(3, 3))
a = x.dot(x.T)
dpotrf, dpotri = get_lapack_funcs(("potrf", "potri"), (a, ))
c, info = dpotrf(a, lower, clean=clean)
dpt = dpotri(c, lower)[0]
if lower:
assert_allclose(np.tril(dpt), np.tril(inv(a)))
else:
assert_allclose(np.triu(dpt), np.triu(inv(a)))
class TestDlasd4(object):
def test_sing_val_update(self):
sigmas = np.array([4., 3., 2., 0])
m_vec = np.array([3.12, 5.7, -4.8, -2.2])
M = np.hstack((np.vstack((np.diag(sigmas[0:-1]),
np.zeros((1, len(m_vec) - 1)))),
m_vec[:, np.newaxis]))
SM = svd(M, full_matrices=False, compute_uv=False, overwrite_a=False,
check_finite=False)
it_len = len(sigmas)
sgm = np.concatenate((sigmas[::-1], [sigmas[0] + it_len*norm(m_vec)]))
mvc = np.concatenate((m_vec[::-1], (0,)))
lasd4 = get_lapack_funcs('lasd4', (sigmas,))
roots = []
for i in range(0, it_len):
res = lasd4(i, sgm, mvc)
roots.append(res[1])
assert_((res[3] <= 0), "LAPACK root finding dlasd4 failed to find \
the singular value %i" % i)
roots = np.array(roots)[::-1]
assert_(not np.any(np.isnan(roots)), "There are NaN roots")
assert_allclose(SM, roots, atol=100*np.finfo(np.float64).eps,
rtol=100*np.finfo(np.float64).eps)
class TestTbtrs(object):
@pytest.mark.parametrize('dtype', DTYPES)
def test_nag_example_f07vef_f07vsf(self, dtype):
"""Test real (f07vef) and complex (f07vsf) examples from NAG
Examples available from:
* https://www.nag.com/numeric/fl/nagdoc_latest/html/f07/f07vef.html
* https://www.nag.com/numeric/fl/nagdoc_latest/html/f07/f07vsf.html
"""
if dtype in REAL_DTYPES:
ab = np.array([[-4.16, 4.78, 6.32, 0.16],
[-2.25, 5.86, -4.82, 0]],
dtype=dtype)
b = np.array([[-16.64, -4.16],
[-13.78, -16.59],
[13.10, -4.94],
[-14.14, -9.96]],
dtype=dtype)
x_out = np.array([[4, 1],
[-1, -3],
[3, 2],
[2, -2]],
dtype=dtype)
elif dtype in COMPLEX_DTYPES:
ab = np.array([[-1.94+4.43j, 4.12-4.27j, 0.43-2.66j, 0.44+0.1j],
[-3.39+3.44j, -1.84+5.52j, 1.74 - 0.04j, 0],
[1.62+3.68j, -2.77-1.93j, 0, 0]],
dtype=dtype)
b = np.array([[-8.86 - 3.88j, -24.09 - 5.27j],
[-15.57 - 23.41j, -57.97 + 8.14j],
[-7.63 + 22.78j, 19.09 - 29.51j],
[-14.74 - 2.40j, 19.17 + 21.33j]],
dtype=dtype)
x_out = np.array([[2j, 1 + 5j],
[1 - 3j, -7 - 2j],
[-4.001887 - 4.988417j, 3.026830 + 4.003182j],
[1.996158 - 1.045105j, -6.103357 - 8.986653j]],
dtype=dtype)
else:
raise ValueError(f"Datatype {dtype} not understood.")
tbtrs = get_lapack_funcs(('tbtrs'), dtype=dtype)
x, info = tbtrs(ab=ab, b=b, uplo='L')
assert_equal(info, 0)
assert_allclose(x, x_out, rtol=0, atol=1e-5)
@pytest.mark.parametrize('dtype,trans',
[(dtype, trans)
for dtype in DTYPES for trans in ['N', 'T', 'C']
if not (trans == 'C' and dtype in REAL_DTYPES)])
@pytest.mark.parametrize('uplo', ['U', 'L'])
@pytest.mark.parametrize('diag', ['N', 'U'])
def test_random_matrices(self, dtype, trans, uplo, diag):
seed(1724)
# n, nrhs, kd are used to specify A and b.
# A is of shape n x n with kd super/sub-diagonals
# b is an n x nrhs matrix
n, nrhs, kd = 4, 3, 2
tbtrs = get_lapack_funcs('tbtrs', dtype=dtype)
is_upper = (uplo == 'U')
ku = kd * is_upper
kl = kd - ku
# Construct the diagonal and kd super/sub diagonals of A with
# the corresponding offsets.
band_offsets = range(ku, -kl - 1, -1)
band_widths = [n - abs(x) for x in band_offsets]
bands = [generate_random_dtype_array((width,), dtype)
for width in band_widths]
if diag == 'U': # A must be unit triangular
bands[ku] = np.ones(n, dtype=dtype)
# Construct the diagonal banded matrix A from the bands and offsets.
a = sps.diags(bands, band_offsets, format='dia')
# Convert A into banded storage form
ab = np.zeros((kd + 1, n), dtype)
for row, k in enumerate(band_offsets):
ab[row, max(k, 0):min(n+k, n)] = a.diagonal(k)
# The RHS values.
b = generate_random_dtype_array((n, nrhs), dtype)
x, info = tbtrs(ab=ab, b=b, uplo=uplo, trans=trans, diag=diag)
assert_equal(info, 0)
if trans == 'N':
assert_allclose(a @ x, b, rtol=5e-5)
elif trans == 'T':
assert_allclose(a.T @ x, b, rtol=5e-5)
elif trans == 'C':
assert_allclose(a.H @ x, b, rtol=5e-5)
else:
raise ValueError('Invalid trans argument')
@pytest.mark.parametrize('uplo,trans,diag',
[['U', 'N', 'Invalid'],
['U', 'Invalid', 'N'],
['Invalid', 'N', 'N']])
def test_invalid_argument_raises_exception(self, uplo, trans, diag):
"""Test if invalid values of uplo, trans and diag raise exceptions"""
# Argument checks occur independently of the datatype used.
# This means we do not need to parameterize over all available datatypes.
tbtrs = get_lapack_funcs('tbtrs', dtype=np.float64)
ab = rand(4, 2)
b = rand(2, 4)
assert_raises(Exception, tbtrs, ab, b, uplo, trans, diag)
def test_zero_element_in_diagonal(self):
"""Test if a matrix with a zero diagonal element is singular
If the i-th diagonal of A is zero, ?tbtrs should return `i` in `info`
indicating the provided matrix is singular.
Note that ?tbtrs requires the matrix A to be stored in banded form.
In this form the diagonal corresponds to the last row."""
ab = np.ones((3, 4), dtype=float)
b = np.ones(4, dtype=float)
tbtrs = get_lapack_funcs('tbtrs', dtype=float)
ab[-1, 3] = 0
_, info = tbtrs(ab=ab, b=b, uplo='U')
assert_equal(info, 4)
@pytest.mark.parametrize('ldab,n,ldb,nrhs', [
(5, 5, 0, 5),
(5, 5, 3, 5)
])
def test_invalid_matrix_shapes(self, ldab, n, ldb, nrhs):
"""Test ?tbtrs fails correctly if shapes are invalid."""
ab = np.ones((ldab, n), dtype=float)
b = np.ones((ldb, nrhs), dtype=float)
tbtrs = get_lapack_funcs('tbtrs', dtype=float)
assert_raises(Exception, tbtrs, ab, b)
def test_lartg():
for dtype in 'fdFD':
lartg = get_lapack_funcs('lartg', dtype=dtype)
f = np.array(3, dtype)
g = np.array(4, dtype)
if np.iscomplexobj(g):
g *= 1j
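# for f=3, g=4 (or 4j in the complex case) the Givens rotation gives cs=3/5, |sn|=4/5 and r=5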
cs, sn, r = lartg(f, g)
assert_allclose(cs, 3.0/5.0)
assert_allclose(r, 5.0)
if np.iscomplexobj(g):
assert_allclose(sn, -4.0j/5.0)
assert_(type(r) == complex)
assert_(type(cs) == float)
else:
assert_allclose(sn, 4.0/5.0)
def test_rot():
# srot, drot from blas and crot and zrot from lapack.
for dtype in 'fdFD':
c = 0.6
s = 0.8
u = np.full(4, 3, dtype)
v = np.full(4, 4, dtype)
atol = 10**-(np.finfo(dtype).precision-1)
if dtype in 'fd':
rot = get_blas_funcs('rot', dtype=dtype)
f = 4
else:
rot = get_lapack_funcs('rot', dtype=dtype)
s *= -1j
v *= 1j
f = 4j
assert_allclose(rot(u, v, c, s), [[5, 5, 5, 5],
[0, 0, 0, 0]], atol=atol)
assert_allclose(rot(u, v, c, s, n=2), [[5, 5, 3, 3],
[0, 0, f, f]], atol=atol)
assert_allclose(rot(u, v, c, s, offx=2, offy=2),
[[3, 3, 5, 5], [f, f, 0, 0]], atol=atol)
assert_allclose(rot(u, v, c, s, incx=2, offy=2, n=2),
[[5, 3, 5, 3], [f, f, 0, 0]], atol=atol)
assert_allclose(rot(u, v, c, s, offx=2, incy=2, n=2),
[[3, 3, 5, 5], [0, f, 0, f]], atol=atol)
assert_allclose(rot(u, v, c, s, offx=2, incx=2, offy=2, incy=2, n=1),
[[3, 3, 5, 3], [f, f, 0, f]], atol=atol)
assert_allclose(rot(u, v, c, s, incx=-2, incy=-2, n=2),
[[5, 3, 5, 3], [0, f, 0, f]], atol=atol)
a, b = rot(u, v, c, s, overwrite_x=1, overwrite_y=1)
assert_(a is u)
assert_(b is v)
assert_allclose(a, [5, 5, 5, 5], atol=atol)
assert_allclose(b, [0, 0, 0, 0], atol=atol)
def test_larfg_larf():
np.random.seed(1234)
a0 = np.random.random((4, 4))
a0 = a0.T.dot(a0)
a0j = np.random.random((4, 4)) + 1j*np.random.random((4, 4))
a0j = a0j.T.conj().dot(a0j)
# our test here will be to do one step of reducing a Hermitian matrix to
# tridiagonal form using Householder transforms.
for dtype in 'fdFD':
larfg, larf = get_lapack_funcs(['larfg', 'larf'], dtype=dtype)
if dtype in 'FD':
a = a0j.copy()
else:
a = a0.copy()
# generate a householder transform to clear a[2:,0]
alpha, x, tau = larfg(a.shape[0]-1, a[1, 0], a[2:, 0])
# create expected output
expected = np.zeros_like(a[:, 0])
expected[0] = a[0, 0]
expected[1] = alpha
# assemble householder vector
v = np.zeros_like(a[1:, 0])
v[0] = 1.0
v[1:] = x
# apply transform from the left
a[1:, :] = larf(v, tau.conjugate(), a[1:, :], np.zeros(a.shape[1]))
# apply transform from the right
a[:, 1:] = larf(v, tau, a[:, 1:], np.zeros(a.shape[0]), side='R')
assert_allclose(a[:, 0], expected, atol=1e-5)
assert_allclose(a[0, :], expected, atol=1e-5)
@pytest.mark.xslow
def test_sgesdd_lwork_bug_workaround():
# Test that SGESDD lwork is sufficiently large for LAPACK.
#
# This checks that workaround around an apparent LAPACK bug
# actually works. cf. gh-5401
#
# xslow: requires 1GB+ of memory
p = subprocess.Popen([sys.executable, '-c',
'import numpy as np; '
'from scipy.linalg import svd; '
'a = np.zeros([9537, 9537], dtype=np.float32); '
'svd(a)'],
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
# Check if an error occurred within 5 sec; the computation can
# take substantially longer, and we will not wait for it to finish
for j in range(50):
time.sleep(0.1)
if p.poll() is not None:
returncode = p.returncode
break
else:
# Didn't exit in time -- probably entered computation. The
# error is raised before entering computation, so things are
# probably OK.
returncode = 0
p.terminate()
assert_equal(returncode, 0,
"Code apparently failed: " + p.stdout.read().decode())
class TestSytrd(object):
@pytest.mark.parametrize('dtype', REAL_DTYPES)
def test_sytrd_with_zero_dim_array(self, dtype):
# Assert that a 0x0 matrix raises an error
A = np.zeros((0, 0), dtype=dtype)
sytrd = get_lapack_funcs('sytrd', (A,))
assert_raises(ValueError, sytrd, A)
@pytest.mark.parametrize('dtype', REAL_DTYPES)
@pytest.mark.parametrize('n', (1, 3))
def test_sytrd(self, dtype, n):
A = np.zeros((n, n), dtype=dtype)
sytrd, sytrd_lwork = \
get_lapack_funcs(('sytrd', 'sytrd_lwork'), (A,))
# some upper triangular array
A[np.triu_indices_from(A)] = \
np.arange(1, n*(n+1)//2+1, dtype=dtype)
# query lwork
lwork, info = sytrd_lwork(n)
assert_equal(info, 0)
# check lower=1 behavior (shouldn't do much since the matrix is
# upper triangular)
data, d, e, tau, info = sytrd(A, lower=1, lwork=lwork)
assert_equal(info, 0)
assert_allclose(data, A, atol=5*np.finfo(dtype).eps, rtol=1.0)
assert_allclose(d, np.diag(A))
assert_allclose(e, 0.0)
assert_allclose(tau, 0.0)
# and now for the proper test (lower=0 is the default)
data, d, e, tau, info = sytrd(A, lwork=lwork)
assert_equal(info, 0)
# assert Q^T*A*Q = tridiag(e, d, e)
# build tridiagonal matrix
T = np.zeros_like(A, dtype=dtype)
k = np.arange(A.shape[0])
T[k, k] = d
k2 = np.arange(A.shape[0]-1)
T[k2+1, k2] = e
T[k2, k2+1] = e
# build Q
Q = np.eye(n, n, dtype=dtype)
for i in range(n-1):
v = np.zeros(n, dtype=dtype)
v[:i] = data[:i, i+1]
v[i] = 1.0
H = np.eye(n, n, dtype=dtype) - tau[i] * np.outer(v, v)
Q = np.dot(H, Q)
# Make matrix fully symmetric
i_lower = np.tril_indices(n, -1)
A[i_lower] = A.T[i_lower]
QTAQ = np.dot(Q.T, np.dot(A, Q))
# disable rtol here since some values in QTAQ and T are very close
# to 0.
assert_allclose(QTAQ, T, atol=5*np.finfo(dtype).eps, rtol=1.0)
class TestHetrd(object):
@pytest.mark.parametrize('complex_dtype', COMPLEX_DTYPES)
def test_hetrd_with_zero_dim_array(self, complex_dtype):
# Assert that a 0x0 matrix raises an error
A = np.zeros((0, 0), dtype=complex_dtype)
hetrd = get_lapack_funcs('hetrd', (A,))
assert_raises(ValueError, hetrd, A)
@pytest.mark.parametrize('real_dtype,complex_dtype',
zip(REAL_DTYPES, COMPLEX_DTYPES))
@pytest.mark.parametrize('n', (1, 3))
def test_hetrd(self, n, real_dtype, complex_dtype):
A = np.zeros((n, n), dtype=complex_dtype)
hetrd, hetrd_lwork = \
get_lapack_funcs(('hetrd', 'hetrd_lwork'), (A,))
# some upper triangular array
A[np.triu_indices_from(A)] = (
np.arange(1, n*(n+1)//2+1, dtype=real_dtype)
+ 1j * np.arange(1, n*(n+1)//2+1, dtype=real_dtype)
)
np.fill_diagonal(A, np.real(np.diag(A)))
# test query lwork
for x in [0, 1]:
_, info = hetrd_lwork(n, lower=x)
assert_equal(info, 0)
# lwork returns complex which segfaults hetrd call (gh-10388)
# use the safe and recommended option
lwork = _compute_lwork(hetrd_lwork, n)
# check lower=1 behavior (shouldn't do much since the matrix is
# upper triangular)
data, d, e, tau, info = hetrd(A, lower=1, lwork=lwork)
assert_equal(info, 0)
assert_allclose(data, A, atol=5*np.finfo(real_dtype).eps, rtol=1.0)
assert_allclose(d, np.real(np.diag(A)))
assert_allclose(e, 0.0)
assert_allclose(tau, 0.0)
# and now for the proper test (lower=0 is the default)
data, d, e, tau, info = hetrd(A, lwork=lwork)
assert_equal(info, 0)
# assert Q^T*A*Q = tridiag(e, d, e)
# build tridiagonal matrix
T = np.zeros_like(A, dtype=real_dtype)
k = np.arange(A.shape[0], dtype=int)
T[k, k] = d
k2 = np.arange(A.shape[0]-1, dtype=int)
T[k2+1, k2] = e
T[k2, k2+1] = e
# build Q
Q = np.eye(n, n, dtype=complex_dtype)
for i in range(n-1):
v = np.zeros(n, dtype=complex_dtype)
v[:i] = data[:i, i+1]
v[i] = 1.0
H = np.eye(n, n, dtype=complex_dtype) \
- tau[i] * np.outer(v, np.conj(v))
Q = np.dot(H, Q)
# Make matrix fully Hermitian
i_lower = np.tril_indices(n, -1)
A[i_lower] = np.conj(A.T[i_lower])
QHAQ = np.dot(np.conj(Q.T), np.dot(A, Q))
# disable rtol here since some values in QHAQ and T are very close
# to 0.
assert_allclose(
QHAQ, T, atol=10*np.finfo(real_dtype).eps, rtol=1.0
)
def test_gglse():
# Example data taken from NAG manual
for ind, dtype in enumerate(DTYPES):
# DTYPES = <s,d,c,z> gglse
func, func_lwork = get_lapack_funcs(('gglse', 'gglse_lwork'),
dtype=dtype)
lwork = _compute_lwork(func_lwork, m=6, n=4, p=2)
# For <s,d>gglse
if ind < 2:
a = np.array([[-0.57, -1.28, -0.39, 0.25],
[-1.93, 1.08, -0.31, -2.14],
[2.30, 0.24, 0.40, -0.35],
[-1.93, 0.64, -0.66, 0.08],
[0.15, 0.30, 0.15, -2.13],
[-0.02, 1.03, -1.43, 0.50]], dtype=dtype)
c = np.array([-1.50, -2.14, 1.23, -0.54, -1.68, 0.82], dtype=dtype)
d = np.array([0., 0.], dtype=dtype)
# For <s,d>gglse
else:
a = np.array([[0.96-0.81j, -0.03+0.96j, -0.91+2.06j, -0.05+0.41j],
[-0.98+1.98j, -1.20+0.19j, -0.66+0.42j, -0.81+0.56j],
[0.62-0.46j, 1.01+0.02j, 0.63-0.17j, -1.11+0.60j],
[0.37+0.38j, 0.19-0.54j, -0.98-0.36j, 0.22-0.20j],
[0.83+0.51j, 0.20+0.01j, -0.17-0.46j, 1.47+1.59j],
[1.08-0.28j, 0.20-0.12j, -0.07+1.23j, 0.26+0.26j]])
c = np.array([[-2.54+0.09j],
[1.65-2.26j],
[-2.11-3.96j],
[1.82+3.30j],
[-6.41+3.77j],
[2.07+0.66j]])
d = np.zeros(2, dtype=dtype)
b = np.array([[1., 0., -1., 0.], [0., 1., 0., -1.]], dtype=dtype)
_, _, _, result, _ = func(a, b, c, d, lwork=lwork)
if ind < 2:
expected = np.array([0.48904455,
0.99754786,
0.48904455,
0.99754786])
else:
expected = np.array([1.08742917-1.96205783j,
-0.74093902+3.72973919j,
1.08742917-1.96205759j,
-0.74093896+3.72973895j])
assert_array_almost_equal(result, expected, decimal=4)
def test_sycon_hecon():
seed(1234)
for ind, dtype in enumerate(DTYPES+COMPLEX_DTYPES):
# DTYPES + COMPLEX DTYPES = <s,d,c,z> sycon + <c,z>hecon
n = 10
# For <s,d,c,z>sycon
if ind < 4:
func_lwork = get_lapack_funcs('sytrf_lwork', dtype=dtype)
funcon, functrf = get_lapack_funcs(('sycon', 'sytrf'), dtype=dtype)
A = (rand(n, n)).astype(dtype)
# For <c,z>hecon
else:
func_lwork = get_lapack_funcs('hetrf_lwork', dtype=dtype)
funcon, functrf = get_lapack_funcs(('hecon', 'hetrf'), dtype=dtype)
A = (rand(n, n) + rand(n, n)*1j).astype(dtype)
# Since sycon only refers to upper/lower part, conj() is safe here.
A = (A + A.conj().T)/2 + 2*np.eye(n, dtype=dtype)
anorm = norm(A, 1)
lwork = _compute_lwork(func_lwork, n)
ldu, ipiv, _ = functrf(A, lwork=lwork, lower=1)
rcond, _ = funcon(a=ldu, ipiv=ipiv, anorm=anorm, lower=1)
# The error is at most 1-fold
assert_(abs(1/rcond - np.linalg.cond(A, p=1))*rcond < 1)
def test_sygst():
seed(1234)
for ind, dtype in enumerate(REAL_DTYPES):
# DTYPES = <s,d> sygst
n = 10
potrf, sygst, syevd, sygvd = get_lapack_funcs(('potrf', 'sygst',
'syevd', 'sygvd'),
dtype=dtype)
A = rand(n, n).astype(dtype)
A = (A + A.T)/2
# B must be positive definite
B = rand(n, n).astype(dtype)
B = (B + B.T)/2 + 2 * np.eye(n, dtype=dtype)
# Perform eig (sygvd)
eig_gvd, _, info = sygvd(A, B)
assert_(info == 0)
# Convert to std problem potrf
b, info = potrf(B)
assert_(info == 0)
a, info = sygst(A, b)
assert_(info == 0)
eig, _, info = syevd(a)
assert_(info == 0)
assert_allclose(eig, eig_gvd, rtol=1e-4)
def test_hegst():
seed(1234)
for ind, dtype in enumerate(COMPLEX_DTYPES):
# DTYPES = <c,z> hegst
n = 10
potrf, hegst, heevd, hegvd = get_lapack_funcs(('potrf', 'hegst',
'heevd', 'hegvd'),
dtype=dtype)
A = rand(n, n).astype(dtype) + 1j * rand(n, n).astype(dtype)
A = (A + A.conj().T)/2
# B must be positive definite
B = rand(n, n).astype(dtype) + 1j * rand(n, n).astype(dtype)
B = (B + B.conj().T)/2 + 2 * np.eye(n, dtype=dtype)
# Perform eig (hegvd)
eig_gvd, _, info = hegvd(A, B)
assert_(info == 0)
# Convert to std problem potrf
b, info = potrf(B)
assert_(info == 0)
a, info = hegst(A, b)
assert_(info == 0)
eig, _, info = heevd(a)
assert_(info == 0)
assert_allclose(eig, eig_gvd, rtol=1e-4)
def test_tzrzf():
"""
This test performs an RZ decomposition in which an m x n upper trapezoidal
array M (m <= n) is factorized as M = [R 0] * Z where R is upper triangular
and Z is unitary.
"""
seed(1234)
m, n = 10, 15
for ind, dtype in enumerate(DTYPES):
tzrzf, tzrzf_lw = get_lapack_funcs(('tzrzf', 'tzrzf_lwork'),
dtype=dtype)
lwork = _compute_lwork(tzrzf_lw, m, n)
if ind < 2:
A = triu(rand(m, n).astype(dtype))
else:
A = triu((rand(m, n) + rand(m, n)*1j).astype(dtype))
# assert wrong shape arg, f2py returns generic error
assert_raises(Exception, tzrzf, A.T)
rz, tau, info = tzrzf(A, lwork=lwork)
# Check success
assert_(info == 0)
# Get Z manually for comparison
R = np.hstack((rz[:, :m], np.zeros((m, n-m), dtype=dtype)))
V = np.hstack((np.eye(m, dtype=dtype), rz[:, m:]))
Id = np.eye(n, dtype=dtype)
ref = [Id-tau[x]*V[[x], :].T.dot(V[[x], :].conj()) for x in range(m)]
Z = reduce(np.dot, ref)
assert_allclose(R.dot(Z) - A, zeros_like(A, dtype=dtype),
atol=10*np.spacing(dtype(1.0).real), rtol=0.)
def test_tfsm():
"""
Test for solving a linear system in which the coefficient matrix is a
triangular array stored in Full Packed (RFP) format.
"""
seed(1234)
for ind, dtype in enumerate(DTYPES):
n = 20
if ind > 1:
A = triu(rand(n, n) + rand(n, n)*1j + eye(n)).astype(dtype)
trans = 'C'
else:
A = triu(rand(n, n) + eye(n)).astype(dtype)
trans = 'T'
trttf, tfttr, tfsm = get_lapack_funcs(('trttf', 'tfttr', 'tfsm'),
dtype=dtype)
Afp, _ = trttf(A)
B = rand(n, 2).astype(dtype)
soln = tfsm(-1, Afp, B)
assert_array_almost_equal(soln, solve(-A, B),
decimal=4 if ind % 2 == 0 else 6)
soln = tfsm(-1, Afp, B, trans=trans)
assert_array_almost_equal(soln, solve(-A.conj().T, B),
decimal=4 if ind % 2 == 0 else 6)
# Make A unit diagonal
A[np.arange(n), np.arange(n)] = dtype(1.)
soln = tfsm(-1, Afp, B, trans=trans, diag='U')
assert_array_almost_equal(soln, solve(-A.conj().T, B),
decimal=4 if ind % 2 == 0 else 6)
# Change side
B2 = rand(3, n).astype(dtype)
soln = tfsm(-1, Afp, B2, trans=trans, diag='U', side='R')
assert_array_almost_equal(soln, solve(-A, B2.T).conj().T,
decimal=4 if ind % 2 == 0 else 6)
def test_ormrz_unmrz():
"""
This test performs a matrix multiplication with an arbitrary m x n matrix C
and a unitary matrix Q without explicitly forming the array. The array data
is encoded in the rectangular part of A which is obtained from ?TZRZF. Q
size is inferred by m, n, side keywords.
"""
seed(1234)
qm, qn, cn = 10, 15, 15
for ind, dtype in enumerate(DTYPES):
tzrzf, tzrzf_lw = get_lapack_funcs(('tzrzf', 'tzrzf_lwork'),
dtype=dtype)
lwork_rz = _compute_lwork(tzrzf_lw, qm, qn)
if ind < 2:
A = triu(rand(qm, qn).astype(dtype))
C = rand(cn, cn).astype(dtype)
orun_mrz, orun_mrz_lw = get_lapack_funcs(('ormrz', 'ormrz_lwork'),
dtype=dtype)
else:
A = triu((rand(qm, qn) + rand(qm, qn)*1j).astype(dtype))
C = (rand(cn, cn) + rand(cn, cn)*1j).astype(dtype)
orun_mrz, orun_mrz_lw = get_lapack_funcs(('unmrz', 'unmrz_lwork'),
dtype=dtype)
lwork_mrz = _compute_lwork(orun_mrz_lw, cn, cn)
rz, tau, info = tzrzf(A, lwork=lwork_rz)
# Get Q manually for comparison
V = np.hstack((np.eye(qm, dtype=dtype), rz[:, qm:]))
Id = np.eye(qn, dtype=dtype)
ref = [Id-tau[x]*V[[x], :].T.dot(V[[x], :].conj()) for x in range(qm)]
Q = reduce(np.dot, ref)
# Now that we have Q, we can test whether lapack results agree with
# each case of CQ, CQ^H, QC, and QC^H
trans = 'T' if ind < 2 else 'C'
tol = 10*np.spacing(dtype(1.0).real)
cq, info = orun_mrz(rz, tau, C, lwork=lwork_mrz)
assert_(info == 0)
assert_allclose(cq - Q.dot(C), zeros_like(C), atol=tol, rtol=0.)
cq, info = orun_mrz(rz, tau, C, trans=trans, lwork=lwork_mrz)
assert_(info == 0)
assert_allclose(cq - Q.conj().T.dot(C), zeros_like(C), atol=tol,
rtol=0.)
cq, info = orun_mrz(rz, tau, C, side='R', lwork=lwork_mrz)
assert_(info == 0)
assert_allclose(cq - C.dot(Q), zeros_like(C), atol=tol, rtol=0.)
cq, info = orun_mrz(rz, tau, C, side='R', trans=trans, lwork=lwork_mrz)
assert_(info == 0)
assert_allclose(cq - C.dot(Q.conj().T), zeros_like(C), atol=tol,
rtol=0.)
def test_tfttr_trttf():
"""
Test conversion routines between the Rectangular Full Packed (RFP) format
and the Standard Triangular Array (TR) format.
"""
seed(1234)
for ind, dtype in enumerate(DTYPES):
n = 20
if ind > 1:
A_full = (rand(n, n) + rand(n, n)*1j).astype(dtype)
transr = 'C'
else:
A_full = (rand(n, n)).astype(dtype)
transr = 'T'
trttf, tfttr = get_lapack_funcs(('trttf', 'tfttr'), dtype=dtype)
A_tf_U, info = trttf(A_full)
assert_(info == 0)
A_tf_L, info = trttf(A_full, uplo='L')
assert_(info == 0)
A_tf_U_T, info = trttf(A_full, transr=transr, uplo='U')
assert_(info == 0)
A_tf_L_T, info = trttf(A_full, transr=transr, uplo='L')
assert_(info == 0)
# Create the RFP array manually (n is even!)
A_tf_U_m = zeros((n+1, n//2), dtype=dtype)
A_tf_U_m[:-1, :] = triu(A_full)[:, n//2:]
A_tf_U_m[n//2+1:, :] += triu(A_full)[:n//2, :n//2].conj().T
A_tf_L_m = zeros((n+1, n//2), dtype=dtype)
A_tf_L_m[1:, :] = tril(A_full)[:, :n//2]
A_tf_L_m[:n//2, :] += tril(A_full)
# api: numpy.tril
"""
The MIT License (MIT)
Copyright (c) 2017 <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE
OR OTHER DEALINGS IN THE SOFTWARE.
"""
import logging
import pickle
import numpy as np
from sklearn.neighbors import KNeighborsClassifier
from secdata.Query import StockQuery
from secdata.StockData import StockData
from secdata.sentiment_analyser import text_processing as tp
from secdata.sentiment_analyser import utils
from secdata.sentiment_analyser.NewsAnalyser import NewsAnalyser
from secdata.sentiment_analyser.sentiment_models.GeneralInquirer import GeneralInquirer
from secdata.sentiment_analyser.sentiment_models.WordGraph import WordGraph
logging.getLogger(__name__)
logging.basicConfig(format="[%(name)s]%(levelname)s: %(message)s", level=logging.INFO)
# Word-Graph test settings
retrain_feature_extractor = False # If true retrains the graph model for sentiment (about 30 to 60 seconds)
retrain_classifier = False
regenerate_train_data = False
regenerate_test_data = False
# General inquirer test settings
test_general_inquirer = False
def custom_norm(array):
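"""Damp outliers: each value's deviation from the mean is divided by
1.4 * (number of whole standard deviations it lies from the mean + 1),
so points far from the mean are pulled in more strongly.
Modifies the input in place and returns it."""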
std = np.std(array)
mean = np.mean(array)
for i in range(len(array)):
devs_away = (np.floor(np.abs(array[i] - mean) / std) + 1.0)
array[i] = (array[i] - mean) / (1.4 * devs_away) + mean
return array
queries_train = [StockQuery(ticker="amd"),
StockQuery(ticker="amzn"),
StockQuery(ticker="intc"),
StockQuery(ticker="jpm"),
StockQuery(ticker="mmm"),
StockQuery(ticker="googl"),
StockQuery(ticker="nflx"),
StockQuery(ticker="nvda"),
StockQuery(ticker="tsla"),
StockQuery(ticker="vrx"),
StockQuery(ticker="wmt")]
queries_test = [StockQuery(ticker="aapl"),
StockQuery(ticker="msft")]
change_threshold_p = 0.011 # % increase threshold for day to be positive
change_threshold_n = 0.011  # % decrease threshold for day to be negative
news_analyser = NewsAnalyser()
print("-----------------------------------------------------------------")
print(" Test classification performance of the graph-based model")
print("-----------------------------------------------------------------")
if retrain_feature_extractor:
print("Training feature extractor")
news_analyser.clear_resources() # Empty the resources dir, else words may be counted twice
for query in queries_train:
print("\tTraining with {}".format(query.ticker))
stock_data = StockData(query, "./new_database")
news = stock_data.get_all_news(pair_with_cols=["relative_intraday"])
for date, contents in news:
collapsed_news = utils.collapse_daily_news(contents['news'])
if contents['relative_intraday'] >= change_threshold_p:
label = WordGraph.POSITIVE
elif contents['relative_intraday'] <= -change_threshold_n:
label = WordGraph.NEGATIVE
else:
label = WordGraph.UNCERTAIN
news_analyser.train_iter(collapsed_news, label)
news_analyser.save()
if regenerate_train_data:
trainX = []
trainY = []
counter_1 = 0
print("Generating training data")
for query in queries_train:
print("\tGenerating for {}".format(query.ticker))
stock_data = StockData(query, "./new_database")
news = stock_data.get_all_news(pair_with_cols=["relative_intraday"])
for _, contents in news:
collapsed_news = utils.collapse_daily_news(contents['news'])
counter_1 += 1
if contents['relative_intraday'] >= change_threshold_p:
label = WordGraph.POSITIVE
elif contents['relative_intraday'] <= -change_threshold_n:
label = WordGraph.NEGATIVE
else:
label = WordGraph.UNCERTAIN
score = news_analyser.analyse(collapsed_news)
trainX.append(score)
trainY.append(label)
pickle.dump((trainX, trainY), open("train_data.pickle", "wb"))
trainX, trainY = pickle.load(open("train_data.pickle", "rb"))
trainX = custom_norm(trainX)
if retrain_classifier:
print("Training classifier")
classifier = KNeighborsClassifier(n_neighbors=1)
# classifier = SVC(gamma=2, C=1)
# classifier = RandomForestClassifier(max_depth=5)
classifier.fit(trainX, trainY)
pickle.dump(classifier, open("classifier.pickle", "wb"))
classifier = pickle.load(open("classifier.pickle", "rb"))
if regenerate_test_data:
testX = []
testY = []
print("Generating test data")
for query in queries_test:
print("\tGenerating for {}".format(query.ticker))
stock_data = StockData(query, "./new_database")
news = stock_data.get_all_news(pair_with_cols=["relative_intraday"])
for _, contents in news:
collapsed_news = utils.collapse_daily_news(contents['news'])
score = news_analyser.analyse(collapsed_news)
testX.append(score)
if contents['relative_intraday'] >= change_threshold_p:
label = WordGraph.POSITIVE
elif contents['relative_intraday'] <= -change_threshold_n:
label = WordGraph.NEGATIVE
else:
label = WordGraph.UNCERTAIN
testY.append(label)
pickle.dump((testX, testY), open("test_data.pickle", "wb"))
testX, testY = pickle.load(open("test_data.pickle", "rb"))
testX = custom_norm(testX)
classifications = classifier.predict(testX)
bad_classifications = (np.array(testY) - classifications)
bad_classifications[bad_classifications < 0] = 0 # Remove those -1 where classification did not coincide with testY
accuracies = 1.0 - (np.sum(bad_classifications, axis=0) / np.sum(testY, axis=0))
print("\nPrecision of graph model")
print(" {:3.2f}% of positive classified correctly".format(100 * accuracies[0]))
print(" {:3.2f}% of negative classified correctly".format(100 * accuracies[1]))
print(" {:3.2f}% of uncertain classified correctly".format(100 * accuracies[2]))
print(" {:3.2f}% overall accuracy".format(100 * np.average(accuracies)))
if test_general_inquirer:
print("\n------------------------------------------------------------------")
print(" Test classification performance of the General Inquirer model")
print("------------------------------------------------------------------")
queries_test = [StockQuery(ticker="aapl"),
StockQuery(ticker="msft")]
trainX = []
trainY = []
testX = []
testY = []
print("Generating training data")
gi_model = GeneralInquirer()
for query in queries_train:
print("\tGenerating for {}".format(query.ticker))
stock_data = StockData(query, "./new_database")
news = stock_data.get_all_news(pair_with_cols=["relative_intraday"])
for _, contents in news:
collapsed_news = utils.collapse_daily_news(contents['news'])
score = gi_model.find_score(tp.tokenize(collapsed_news))
trainX.append(score)
if contents['relative_intraday'] >= change_threshold_p:
label = WordGraph.POSITIVE
elif contents['relative_intraday'] <= -change_threshold_n:
label = WordGraph.NEGATIVE
else:
label = WordGraph.UNCERTAIN
trainY.append(label)
print("Generating test data")
gi_model = GeneralInquirer()
for query in queries_test:
print("\tGenerating for {}".format(query.ticker))
stock_data = StockData(query, "./new_database")
news = stock_data.get_all_news(pair_with_cols=["relative_intraday"])
for _, contents in news:
collapsed_news = utils.collapse_daily_news(contents['news'])
score = gi_model.find_score(tp.tokenize(collapsed_news))
testX.append(score)
if contents['relative_intraday'] >= change_threshold_p:
label = WordGraph.POSITIVE
elif contents['relative_intraday'] <= -change_threshold_n:
label = WordGraph.NEGATIVE
else:
label = WordGraph.UNCERTAIN
testY.append(label)
# pickle.dump((testX, testY), open("test_data.pickle", "wb"))
trainX = custom_norm(trainX)
testX = custom_norm(testX)
classifier = KNeighborsClassifier(n_neighbors=1)
classifier.fit(trainX, trainY)
classifications = classifier.predict(testX)
bad_classifications = (np.array(testY) - classifications)
bad_classifications[bad_classifications < 0] = 0 # Remove those -1 where classification did not coincide with testY
accuracies = 1.0 - (np.sum(bad_classifications, axis=0) / np.sum(testY, axis=0))
# api: numpy.sum
# This code is part of Qiskit.
#
# (C) Copyright IBM 2019, 2021.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
# =============================================================================
""" Test Quantum Gradient Framework """
import unittest
from test.python.opflow import QiskitOpflowTestCase
from itertools import product
import numpy as np
from ddt import ddt, data, idata, unpack
from sympy import Symbol, cos
try:
import jax.numpy as jnp
_HAS_JAX = True
except ImportError:
_HAS_JAX = False
from qiskit import QuantumCircuit, QuantumRegister, BasicAer
from qiskit.test import slow_test
from qiskit.utils import QuantumInstance
from qiskit.exceptions import MissingOptionalLibraryError
from qiskit.utils import algorithm_globals
from qiskit.algorithms import VQE
from qiskit.algorithms.optimizers import CG
from qiskit.opflow import I, X, Y, Z, StateFn, CircuitStateFn, ListOp, CircuitSampler
from qiskit.opflow.gradients import Gradient, NaturalGradient, Hessian
from qiskit.opflow.gradients.qfi import QFI
from qiskit.opflow.gradients.circuit_qfis import LinCombFull, OverlapBlockDiag, OverlapDiag
from qiskit.circuit import Parameter, ParameterExpression
from qiskit.circuit import ParameterVector
from qiskit.circuit.library import RealAmplitudes
@ddt
class TestGradients(QiskitOpflowTestCase):
""" Test Qiskit Gradient Framework """
def setUp(self):
super().setUp()
algorithm_globals.random_seed = 50
@data('lin_comb', 'param_shift', 'fin_diff')
def test_gradient_p(self, method):
"""Test the state gradient for p
|psi> = 1/sqrt(2)[[1, exp(ia)]]
Tr(|psi><psi|Z) = 0
Tr(|psi><psi|X) = cos(a)
d<H>/da = - 0.5 sin(a)
"""
ham = 0.5 * X - 1 * Z
a = Parameter('a')
params = a
q = QuantumRegister(1)
qc = QuantumCircuit(q)
qc.h(q)
qc.p(a, q[0])
op = ~StateFn(ham) @ CircuitStateFn(primitive=qc, coeff=1.)
state_grad = Gradient(grad_method=method).convert(operator=op, params=params)
values_dict = [{a: np.pi / 4}, {a: 0}, {a: np.pi / 2}]
correct_values = [-0.5 / np.sqrt(2), 0, -0.5]
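# Worked check: at a = pi/4 the analytic gradient is -0.5*sin(pi/4) = -0.5/sqrt(2)
# (~ -0.354), the first entry of correct_values; at a = 0 it vanishes, and at
# a = pi/2 it is -0.5.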
for i, value_dict in enumerate(values_dict):
np.testing.assert_array_almost_equal(state_grad.assign_parameters(value_dict).eval(),
correct_values[i], decimal=1)
@data('lin_comb', 'param_shift', 'fin_diff')
def test_gradient_u(self, method):
"""Test the state gradient for U
Tr(|psi><psi|Z) = - 0.5 sin(a)cos(c)
Tr(|psi><psi|X) = cos^2(a/2) cos(b+c) - sin^2(a/2) cos(b-c)
"""
ham = 0.5 * X - 1 * Z
a = Parameter('a')
b = Parameter('b')
c = Parameter('c')
q = QuantumRegister(1)
qc = QuantumCircuit(q)
qc.h(q)
qc.u(a, b, c, q[0])
op = ~StateFn(ham) @ CircuitStateFn(primitive=qc, coeff=1.)
params = [a, b, c]
state_grad = Gradient(grad_method=method).convert(operator=op, params=params)
values_dict = [{a: np.pi / 4, b: 0, c: 0}, {a: np.pi / 4, b: np.pi / 4, c: np.pi / 4}]
correct_values = [[0.3536, 0, 0], [0.3232, -0.42678, -0.92678]]
for i, value_dict in enumerate(values_dict):
np.testing.assert_array_almost_equal(state_grad.assign_parameters(value_dict).eval(),
correct_values[i],
decimal=1)
# Tr(|psi><psi|Z) = - 0.5 sin(a)cos(c)
# Tr(|psi><psi|X) = cos^2(a/2) cos(b+c) - sin^2(a/2) cos(b-c)
# dTr(|psi><psi|H)/da = 0.5(cos(2a)) + 0.5()
q = QuantumRegister(1)
qc = QuantumCircuit(q)
qc.h(q)
qc.u(a, a, a, q[0])
op = ~StateFn(ham) @ CircuitStateFn(primitive=qc, coeff=1.)
params = [a]
state_grad = Gradient(grad_method=method).convert(operator=op, params=params)
values_dict = [{a: np.pi / 4}, {a: np.pi / 2}]
correct_values = [[-1.03033], [-1]]
for i, value_dict in enumerate(values_dict):
np.testing.assert_array_almost_equal(state_grad.assign_parameters(value_dict).eval(),
correct_values[i],
decimal=1)
@data('lin_comb', 'param_shift', 'fin_diff')
def test_gradient_rxx(self, method):
"""Test the state gradient for XX rotation
"""
ham = Z ^ X
a = Parameter('a')
q = QuantumRegister(2)
qc = QuantumCircuit(q)
qc.h(q[0])
qc.rxx(a, q[0], q[1])
op = ~StateFn(ham) @ CircuitStateFn(primitive=qc, coeff=1.)
params = [a]
state_grad = Gradient(grad_method=method).convert(operator=op, params=params)
values_dict = [{a: np.pi / 4}, {a: np.pi / 2}]
correct_values = [[-0.707], [-1.]]
for i, value_dict in enumerate(values_dict):
np.testing.assert_array_almost_equal(state_grad.assign_parameters(value_dict).eval(),
correct_values[i], decimal=1)
@data('lin_comb', 'param_shift', 'fin_diff')
def test_gradient_ryy(self, method):
# pylint: disable=wrong-spelling-in-comment
"""Test the state gradient for YY rotation
"""
ham = Y ^ Y
a = Parameter('a')
q = QuantumRegister(2)
qc = QuantumCircuit(q)
qc.ryy(a, q[0], q[1])
op = ~StateFn(ham) @ CircuitStateFn(primitive=qc, coeff=1.)
state_grad = Gradient(grad_method=method).convert(operator=op, params=a)
values_dict = [{a: np.pi / 8}, {a: np.pi}]
correct_values = [[0], [0]]
for i, value_dict in enumerate(values_dict):
np.testing.assert_array_almost_equal(state_grad.assign_parameters(value_dict).eval(),
correct_values[i], decimal=1)
@data('lin_comb', 'param_shift', 'fin_diff')
def test_gradient_rzz(self, method):
# pylint: disable=wrong-spelling-in-comment
"""Test the state gradient for ZZ rotation
"""
ham = Z ^ X
a = Parameter('a')
q = QuantumRegister(2)
qc = QuantumCircuit(q)
qc.h(q[0])
qc.rzz(a, q[0], q[1])
op = ~StateFn(ham) @ CircuitStateFn(primitive=qc, coeff=1.)
params = [a]
state_grad = Gradient(grad_method=method).convert(operator=op, params=params)
values_dict = [{a: np.pi / 4}, {a: np.pi / 2}]
correct_values = [[-0.707], [-1.]]
for i, value_dict in enumerate(values_dict):
np.testing.assert_array_almost_equal(state_grad.assign_parameters(value_dict).eval(),
correct_values[i], decimal=1)
@data('lin_comb', 'param_shift', 'fin_diff')
def test_gradient_rzx(self, method):
"""Test the state gradient for ZX rotation
"""
ham = Z ^ Z
a = Parameter('a')
q = QuantumRegister(2)
qc = QuantumCircuit(q)
qc.h(q)
qc.rzx(a, q[0], q[1])
op = ~StateFn(ham) @ CircuitStateFn(primitive=qc, coeff=1.)
params = [a]
state_grad = Gradient(grad_method=method).convert(operator=op, params=params)
values_dict = [{a: np.pi / 8}, {a: np.pi / 2}]
correct_values = [[0.], [0.]]
for i, value_dict in enumerate(values_dict):
np.testing.assert_array_almost_equal(state_grad.assign_parameters(value_dict).eval(),
correct_values[i], decimal=1)
@data('lin_comb', 'param_shift', 'fin_diff')
def test_state_gradient1(self, method):
"""Test the state gradient
Tr(|psi><psi|Z) = sin(a)sin(b)
Tr(|psi><psi|X) = cos(a)
d<H>/da = - 0.5 sin(a) - 1 cos(a)sin(b)
d<H>/db = - 1 sin(a)cos(b)
"""
ham = 0.5 * X - 1 * Z
a = Parameter('a')
b = Parameter('b')
params = [a, b]
q = QuantumRegister(1)
qc = QuantumCircuit(q)
qc.h(q)
qc.rz(params[0], q[0])
qc.rx(params[1], q[0])
op = ~StateFn(ham) @ CircuitStateFn(primitive=qc, coeff=1.)
state_grad = Gradient(grad_method=method).convert(operator=op, params=params)
values_dict = [{a: np.pi / 4, b: np.pi}, {params[0]: np.pi / 4, params[1]: np.pi / 4},
{params[0]: np.pi / 2, params[1]: np.pi / 4}]
correct_values = [[-0.5 / np.sqrt(2), 1 / np.sqrt(2)], [-0.5 / np.sqrt(2) - 0.5, -1 / 2.],
[-0.5, -1 / np.sqrt(2)]]
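# Worked check for the first point (a = pi/4, b = pi): sin(b) = 0, so
# d<H>/da = -0.5*sin(pi/4) = -0.5/sqrt(2); cos(b) = -1, so
# d<H>/db = -sin(pi/4)*(-1) = 1/sqrt(2), matching correct_values[0].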
for i, value_dict in enumerate(values_dict):
np.testing.assert_array_almost_equal(state_grad.assign_parameters(value_dict).eval(),
correct_values[i], decimal=1)
@data('lin_comb', 'param_shift', 'fin_diff')
def test_state_gradient2(self, method):
"""Test the state gradient 2
Tr(|psi><psi|Z) = sin(a)sin(a)
Tr(|psi><psi|X) = cos(a)
d<H>/da = - 0.5 sin(a) - 2 cos(a)sin(a)
"""
ham = 0.5 * X - 1 * Z
a = Parameter('a')
# b = Parameter('b')
params = [a]
q = QuantumRegister(1)
qc = QuantumCircuit(q)
qc.h(q)
qc.rz(a, q[0])
qc.rx(a, q[0])
op = ~StateFn(ham) @ CircuitStateFn(primitive=qc, coeff=1.)
state_grad = Gradient(grad_method=method).convert(operator=op, params=params)
values_dict = [{a: np.pi / 4}, {a: 0},
{a: np.pi / 2}]
correct_values = [-1.353553, -0, -0.5]
for i, value_dict in enumerate(values_dict):
np.testing.assert_array_almost_equal(state_grad.assign_parameters(value_dict).eval(),
correct_values[i],
decimal=1)
@data('lin_comb', 'param_shift', 'fin_diff')
def test_state_gradient3(self, method):
"""Test the state gradient 3
Tr(|psi><psi|Z) = sin(a)sin(c(a)) = sin(a)sin(cos(a)+1)
Tr(|psi><psi|X) = cos(a)
d<H>/da = - 0.5 sin(a) - 1 cos(a)sin(cos(a)+1) + 1 sin^2(a)cos(cos(a)+1)
"""
ham = 0.5 * X - 1 * Z
a = Parameter('a')
# b = Parameter('b')
params = a
x = Symbol('x')
expr = cos(x) + 1
c = ParameterExpression({a: x}, expr)
q = QuantumRegister(1)
qc = QuantumCircuit(q)
qc.h(q)
qc.rz(a, q[0])
qc.rx(c, q[0])
op = ~StateFn(ham) @ CircuitStateFn(primitive=qc, coeff=1.)
state_grad = Gradient(grad_method=method).convert(operator=op, params=params)
values_dict = [{a: np.pi / 4}, {a: 0}, {a: np.pi / 2}]
correct_values = [-1.1220, -0.9093, 0.0403]
for i, value_dict in enumerate(values_dict):
np.testing.assert_array_almost_equal(state_grad.assign_parameters(value_dict).eval(),
correct_values[i],
decimal=1)
@data('lin_comb', 'param_shift', 'fin_diff')
def test_state_gradient4(self, method):
"""Test the state gradient 4
Tr(|psi><psi|ZX) = -cos(a)
daTr(|psi><psi|ZX) = sin(a)
"""
ham = X ^ Z
a = Parameter('a')
params = a
q = QuantumRegister(2)
qc = QuantumCircuit(q)
qc.x(q[0])
qc.h(q[1])
qc.crz(a, q[0], q[1])
op = ~StateFn(ham) @ CircuitStateFn(primitive=qc, coeff=1.)
state_grad = Gradient(grad_method=method).convert(operator=op, params=params)
values_dict = [{a: np.pi / 4}, {a: 0},
{a: np.pi / 2}]
correct_values = [1 / np.sqrt(2), 0, 1]
for i, value_dict in enumerate(values_dict):
np.testing.assert_array_almost_equal(state_grad.assign_parameters(value_dict).eval(),
correct_values[i],
decimal=1)
@data('lin_comb', 'param_shift', 'fin_diff')
def test_state_gradient5(self, method):
"""Test the state gradient
Tr(|psi><psi|Z) = sin(a0)sin(a1)
Tr(|psi><psi|X) = cos(a0)
d<H>/da0 = - 0.5 sin(a0) - 1 cos(a0)sin(a1)
d<H>/da1 = - 1 sin(a0)cos(a1)
"""
ham = 0.5 * X - 1 * Z
a = ParameterVector('a', 2)
params = a
q = QuantumRegister(1)
qc = QuantumCircuit(q)
qc.h(q)
qc.rz(params[0], q[0])
qc.rx(params[1], q[0])
op = ~StateFn(ham) @ CircuitStateFn(primitive=qc, coeff=1.)
state_grad = Gradient(grad_method=method).convert(operator=op, params=params)
values_dict = [{a: [np.pi / 4, np.pi]}, {a: [np.pi / 4, np.pi / 4]},
{a: [np.pi / 2, np.pi / 4]}]
correct_values = [[-0.5 / np.sqrt(2), 1 / np.sqrt(2)], [-0.5 / np.sqrt(2) - 0.5, -1 / 2.],
[-0.5, -1 / np.sqrt(2)]]
for i, value_dict in enumerate(values_dict):
np.testing.assert_array_almost_equal(state_grad.assign_parameters(value_dict).eval(),
correct_values[i], decimal=1)
@data('lin_comb', 'param_shift', 'fin_diff')
def test_state_hessian(self, method):
"""Test the state Hessian
Tr(|psi><psi|Z) = sin(a)sin(b)
Tr(|psi><psi|X) = cos(a)
d^2<H>/da^2 = - 0.5 cos(a) + 1 sin(a)sin(b)
d^2<H>/dbda = - 1 cos(a)cos(b)
d^2<H>/dbda = - 1 cos(a)cos(b)
d^2<H>/db^2 = + 1 sin(a)sin(b)
"""
ham = 0.5 * X - 1 * Z
params = ParameterVector('a', 2)
q = QuantumRegister(1)
qc = QuantumCircuit(q)
qc.h(q)
qc.rz(params[0], q[0])
qc.rx(params[1], q[0])
op = ~StateFn(ham) @ CircuitStateFn(primitive=qc, coeff=1.)
state_hess = Hessian(hess_method=method).convert(operator=op, params=params)
values_dict = [{params[0]: np.pi / 4, params[1]: np.pi},
{params[0]: np.pi / 4, params[1]: np.pi / 4}]
correct_values = [[[-0.5 / np.sqrt(2), 1 / np.sqrt(2)], [1 / np.sqrt(2), 0]],
[[-0.5 / np.sqrt(2) + 0.5, -1 / 2.], [-1 / 2., 0.5]]]
for i, value_dict in enumerate(values_dict):
np.testing.assert_array_almost_equal(state_hess.assign_parameters(value_dict).eval(),
correct_values[i], decimal=1)
@unittest.skipIf(not _HAS_JAX, 'Skipping test due to missing jax module.')
@data('lin_comb', 'param_shift', 'fin_diff')
def test_state_hessian_custom_combo_fn(self, method):
"""Test the state Hessian with on an operator which includes
a user-defined combo_fn.
Tr(|psi><psi|Z) = sin(a)sin(b)
Tr(|psi><psi|X) = cos(a)
d^2<H>/da^2 = - 0.5 cos(a) + 1 sin(a)sin(b)
d^2<H>/dbda = - 1 cos(a)cos(b)
d^2<H>/dbda = - 1 cos(a)cos(b)
d^2<H>/db^2 = + 1 sin(a)sin(b)
"""
ham = 0.5 * X - 1 * Z
a = Parameter('a')
b = Parameter('b')
params = [(a, a), (a, b), (b, b)]
q = QuantumRegister(1)
qc = QuantumCircuit(q)
qc.h(q)
qc.rz(a, q[0])
qc.rx(b, q[0])
op = ListOp([~StateFn(ham) @ CircuitStateFn(primitive=qc, coeff=1.)],
combo_fn=lambda x: x[0] ** 3 + 4 * x[0])
state_hess = Hessian(hess_method=method).convert(operator=op, params=params)
values_dict = [{a: np.pi / 4, b: np.pi},
{a: np.pi / 4, b: np.pi / 4},
{a: np.pi / 2, b: np.pi / 4}]
correct_values = [[-1.28163104, 2.56326208, 1.06066017],
[-0.04495626, -2.40716991, 1.8125],
[2.82842712, -1.5, 1.76776695]]
for i, value_dict in enumerate(values_dict):
np.testing.assert_array_almost_equal(state_hess.assign_parameters(value_dict).eval(),
correct_values[i], decimal=1)
@data('lin_comb', 'param_shift', 'fin_diff')
def test_prob_grad(self, method):
"""Test the probability gradient
dp0/da = cos(a)sin(b) / 2
dp1/da = - cos(a)sin(b) / 2
dp0/db = sin(a)cos(b) / 2
dp1/db = - sin(a)cos(b) / 2
"""
a = Parameter('a')
b = Parameter('b')
params = [a, b]
q = QuantumRegister(1)
qc = QuantumCircuit(q)
qc.h(q)
qc.rz(params[0], q[0])
qc.rx(params[1], q[0])
op = CircuitStateFn(primitive=qc, coeff=1.)
prob_grad = Gradient(grad_method=method).convert(operator=op, params=params)
values_dict = [{a: np.pi / 4, b: 0}, {params[0]: np.pi / 4, params[1]: np.pi / 4},
{params[0]: np.pi / 2, params[1]: np.pi}]
correct_values = [[[0, 0], [1 / (2 * np.sqrt(2)), - 1 / (2 * np.sqrt(2))]],
[[1 / 4, - 1 / 4], [1 / 4, - 1 / 4]],
[[0, 0], [- 1 / 2, 1 / 2]]]
for i, value_dict in enumerate(values_dict):
for j, prob_grad_result in enumerate(prob_grad.assign_parameters(value_dict).eval()):
np.testing.assert_array_almost_equal(prob_grad_result,
correct_values[i][j], decimal=1)
@data('lin_comb', 'param_shift', 'fin_diff')
def test_prob_hess(self, method):
"""Test the probability Hessian using linear combination of unitaries method
d^2p0/da^2 = - sin(a)sin(b) / 2
d^2p1/da^2 = sin(a)sin(b) / 2
d^2p0/dadb = cos(a)cos(b) / 2
d^2p1/dadb = - cos(a)cos(b) / 2
"""
a = Parameter('a')
b = Parameter('b')
params = [(a, a), (a, b)]
q = QuantumRegister(1)
qc = QuantumCircuit(q)
qc.h(q)
qc.rz(a, q[0])
qc.rx(b, q[0])
op = CircuitStateFn(primitive=qc, coeff=1.)
prob_hess = Hessian(hess_method=method).convert(operator=op, params=params)
values_dict = [{a: np.pi / 4, b: 0}, {a: np.pi / 4, b: np.pi / 4},
{a: np.pi / 2, b: np.pi}]
correct_values = [[[0, 0], [1 / (2 * np.sqrt(2)), - 1 / (2 * np.sqrt(2))]],
[[- 1 / 4, 1 / 4], [1 / 4, - 1 / 4]],
[[0, 0], [0, 0]]]
for i, value_dict in enumerate(values_dict):
for j, prob_hess_result in enumerate(prob_hess.assign_parameters(value_dict).eval()):
np.testing.assert_array_almost_equal(prob_hess_result,
correct_values[i][j], decimal=1)
@idata(product(['lin_comb', 'param_shift', 'fin_diff'],
[None, 'lasso', 'ridge', 'perturb_diag', 'perturb_diag_elements']))
@unpack
def test_natural_gradient(self, method, regularization):
"""Test the natural gradient"""
try:
for params in (ParameterVector('a', 2),
[Parameter('a'), Parameter('b')]):
ham = 0.5 * X - 1 * Z
q = QuantumRegister(1)
qc = QuantumCircuit(q)
qc.h(q)
qc.rz(params[0], q[0])
qc.rx(params[1], q[0])
op = ~StateFn(ham) @ CircuitStateFn(primitive=qc, coeff=1.)
nat_grad = NaturalGradient(grad_method=method, regularization=regularization)\
.convert(operator=op, params=params)
values_dict = [{params[0]: np.pi / 4, params[1]: np.pi / 2}]
correct_values = [[-2.36003979, 2.06503481]] \
if regularization == 'ridge' else [[-4.2, 0]]
for i, value_dict in enumerate(values_dict):
np.testing.assert_array_almost_equal(
nat_grad.assign_parameters(value_dict).eval(),
correct_values[i],
decimal=0)
except MissingOptionalLibraryError as ex:
self.skipTest(str(ex))
def test_natural_gradient2(self):
"""Test the natural gradient 2"""
with self.assertRaises(TypeError):
_ = NaturalGradient().convert(None, None)
@idata(zip(['lin_comb_full', 'overlap_block_diag', 'overlap_diag'],
[LinCombFull, OverlapBlockDiag, OverlapDiag]))
@unpack
def test_natural_gradient3(self, qfi_method, circuit_qfi):
"""Test the natural gradient 3"""
nat_grad = NaturalGradient(qfi_method=qfi_method)
self.assertIsInstance(nat_grad.qfi_method, circuit_qfi)
@idata(product(['lin_comb', 'param_shift', 'fin_diff'],
['lin_comb_full', 'overlap_block_diag', 'overlap_diag'],
[None, 'ridge', 'perturb_diag', 'perturb_diag_elements']))
@unpack
def test_natural_gradient4(self, grad_method, qfi_method, regularization):
"""Test the natural gradient 4"""
# Avoid regularization = lasso intentionally because it does not converge
try:
ham = 0.5 * X - 1 * Z
a = Parameter('a')
params = a
q = QuantumRegister(1)
qc = QuantumCircuit(q)
qc.h(q)
qc.rz(a, q[0])
op = ~StateFn(ham) @ CircuitStateFn(primitive=qc, coeff=1.)
nat_grad = NaturalGradient(grad_method=grad_method,
qfi_method=qfi_method,
regularization=regularization).convert(operator=op,
params=params)
values_dict = [{a: np.pi / 4}]
correct_values = [[0.]] if regularization == 'ridge' else [[-1.41421342]]
for i, value_dict in enumerate(values_dict):
np.testing.assert_array_almost_equal(nat_grad.assign_parameters(value_dict).eval(),
correct_values[i],
decimal=0)
except MissingOptionalLibraryError as ex:
self.skipTest(str(ex))
@unittest.skipIf(not _HAS_JAX, 'Skipping test due to missing jax module.')
@idata(product(['lin_comb', 'param_shift', 'fin_diff'], [True, False]))
@unpack
def test_jax_chain_rule(self, method: str, autograd: bool):
"""Test the chain rule functionality using Jax
d<H>/d<X> = 2<X>
d<H>/d<Z> = - sin(<Z>)
<Z> = Tr(|psi><psi|Z) = sin(a)sin(b)
<X> = Tr(|psi><psi|X) = cos(a)
d<H>/da = d<H>/d<X> d<X>/da + d<H>/d<Z> d<Z>/da = - 2 cos(a)sin(a)
- sin(sin(a)sin(b)) * cos(a)sin(b)
d<H>/db = d<H>/d<X> d<X>/db + d<H>/d<Z> d<Z>/db = - sin(sin(a)sin(b)) * sin(a)cos(b)
"""
a = Parameter('a')
b = Parameter('b')
params = [a, b]
q = QuantumRegister(1)
qc = QuantumCircuit(q)
qc.h(q)
qc.rz(params[0], q[0])
qc.rx(params[1], q[0])
def combo_fn(x):
return jnp.power(x[0], 2) + jnp.cos(x[1])
def grad_combo_fn(x):
return np.array([2 * x[0], -np.sin(x[1])])
op = ListOp([~StateFn(X) @ CircuitStateFn(primitive=qc, coeff=1.),
~StateFn(Z) @ CircuitStateFn(primitive=qc, coeff=1.)], combo_fn=combo_fn,
grad_combo_fn=None if autograd else grad_combo_fn)
state_grad = Gradient(grad_method=method).convert(operator=op, params=params)
values_dict = [{a: np.pi / 4, b: np.pi}, {params[0]: np.pi / 4, params[1]: np.pi / 4},
{params[0]: np.pi / 2, params[1]: np.pi / 4}]
correct_values = [[-1., 0.], [-1.2397, -0.2397], [0, -0.45936]]
for i, value_dict in enumerate(values_dict):
np.testing.assert_array_almost_equal(state_grad.assign_parameters(value_dict).eval(),
correct_values[i],
decimal=1)
@data('lin_comb', 'param_shift', 'fin_diff')
def test_grad_combo_fn_chain_rule(self, method):
"""Test the chain rule for a custom gradient combo function."""
np.random.seed(2)
def combo_fn(x):
amplitudes = x[0].primitive.data
pdf = np.multiply(amplitudes, np.conj(amplitudes))
return np.sum(np.log(pdf)) / (-len(amplitudes))
def grad_combo_fn(x):
amplitudes = x[0].primitive.data
pdf = np.multiply(amplitudes, np.conj(amplitudes))
# api: numpy.conj
# -*- coding: utf-8 -*-
import pymc3 as pm
import theano
import theano.tensor as tt
import numpy as np
import pandas as pd
import isoweek
import pickle as pkl
import datetime
import time
from collections import OrderedDict
from matplotlib import pyplot as pp
from geo_utils import jacobian_sq
# BUG: may throw an error for flat RVs
theano.config.compute_test_value = 'off'
def uniform_times_by_week(weeks, n=500):
""" Samples n random timepoints within a week, per week. converts times to datetime obj."""
res = OrderedDict()
for week in weeks:
time_min = datetime.datetime.combine(
isoweek.Week(*week).monday(), datetime.time.min)
time_max = datetime.datetime.combine(
isoweek.Week(*week).sunday(), datetime.time.max)
res[week] = np.random.rand(n) * (time_max - time_min) + time_min
return res
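# e.g. (a sketch) uniform_times_by_week([(2017, 1), (2017, 2)], n=3) returns an
# OrderedDict mapping each (year, ISO week) pair to 3 random datetimes inside
# that week.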
def uniform_times_by_day(days, n=500):
""" Samples n random timepoints within a day, per day. converts pd.Timestamps to datetime obj."""
res = OrderedDict()
for day in days:
time_min = datetime.datetime.combine(day, datetime.time.min)
time_max = datetime.datetime.combine(day, datetime.time.max)
res[day] = np.random.rand(n) * (time_max - time_min) + time_min
return res
def uniform_locations_by_county(counties, n=500):
res = OrderedDict()
for (county_id, county) in counties.items():
tp = county["testpoints"]
if n == len(tp):
res[county_id] = tp
else:
idx = np.random.choice(tp.shape[0], n, replace=n > len(tp))
res[county_id] = tp[idx]
return res
def sample_time_and_space(data, times_by_day, locations_by_county):
n_total = data.sum().sum()
t_all = np.empty((n_total,), dtype=object)
x_all = np.empty((n_total, 2))
i = 0
for (county_id, series) in data.iteritems():
for (day, n) in series.iteritems():
# draw n random times
times = times_by_day[day]
idx = np.random.choice(len(times), n)
t_all[i:i + n] = times[idx]
# draw n random locations
locs = locations_by_county[county_id]
idx = np.random.choice(locs.shape[0], n)
x_all[i:i + n, :] = locs[idx, :]
i += n
return t_all, x_all
def gaussian_bf(dx, σ):
""" spatial basis function """
σ = np.float32(σ)
res = tt.zeros_like(dx)
idx = (abs(dx) < np.float32(5) * σ) # .nonzero()
return tt.set_subtensor(res[idx], tt.exp(
np.float32(-0.5 / (σ**2)) * (dx[idx])**2) / np.float32(np.sqrt(2 * np.pi * σ**2)))
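# Note: gaussian_bf is a Gaussian kernel evaluated only within 5 sigma of zero
# (entries further out stay 0), scaled by 1 / sqrt(2 * pi * sigma**2).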
def gaussian_gram(σ):
return np.array([[np.power(2 * np.pi * (a**2 + b**2), -0.5)
# api: numpy.power
import numpy as np
import collections
import WardClustering
import StatsUtil
import InitializeAndRunddCRP as initdd
from multiprocessing import Pool
# Format of generated synthetic datasets
SynthData = collections.namedtuple('SynthData',['D','adj_list','z','coords'])
# Main function: Computes a parcellation of synthetic data at different noise
# levels, using Ward Clustering and our method based on the ddCRP. Each
# parcellation is evaluated based on its Normalized Mutual Information
# with the ground truth. The input "type"={'square','stripes','face'}
# determines the underlying ground truth parcellation.
def LearnSynth(type):
np.random.seed(1) # For repeatability
max_noise = 10  # Number of noise levels to try
repeats = 5  # Number of times to repeat experiments
WC = np.zeros((max_noise,repeats))
DC = np.zeros((max_noise,repeats))
DC_K = np.zeros((max_noise,repeats))
for rep in range(repeats):
print('Repeat #' + str(rep))
all_synth = [GenerateSynthData(type, noise_sig)
for noise_sig in range(max_noise)]
# Run all noise levels in parallel
p = Pool(processes=max_noise)
all_res = p.map(LearnSynthForDataset, all_synth)
p.close()
p.join()
WC[:,rep] = [res[0] for res in all_res]
DC[:,rep] = [res[1] for res in all_res]
DC_K[:,rep] = [res[2] for res in all_res]
return (WC, DC, DC_K)
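# Example driver (a sketch, not part of the original script): run the experiment
# on the 'square' ground truth and report the mean NMI per noise level for Ward
# clustering (WC) and the ddCRP-based method (DC). Left commented out so that
# importing this module stays side-effect free.
# WC, DC, DC_K = LearnSynth('square')
# print(WC.mean(axis=1), DC.mean(axis=1))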
# Compute Ward clustering and our parcellation for a specific synthetic
# (previously generated) dataset
def LearnSynthForDataset(synth):
# Hyperparameters
alpha = 10
kappa = 0.0001
nu = 1
sigsq = 0.01
pass_limit = 30
D = NormalizeConn(synth.D) # Normalize connectivity to zero mean, unit var
# Compute our ddCRP-based parcellation
Z = WardClustering.ClusterTree(D, synth.adj_list)
_,dd_stats = initdd.InitializeAndRun(Z, D, synth.adj_list, range(1,21),
alpha, kappa, nu, sigsq, pass_limit, synth.z, 0)
DC = dd_stats['NMI'][-1]
DC_K = dd_stats['K'][-1]
# Ward Clustering, using number of clusters discovered from our method
WC = StatsUtil.NMI(synth.z, WardClustering.Cluster(Z, DC_K))
return (WC,DC,DC_K)
# Generate synthetic dataset of "type"={'square','stripes','face'} at a given
# noise level "sig". Returns a SynthData object containing a connectivity
# matrix D, and adjacency list adj_list, ground truth parcellation z, and
# element coordinates coords
def GenerateSynthData(type, sig):
sqrtN = 18
coords = np.zeros((sqrtN**2,2))
adj_list = np.empty(sqrtN**2, dtype=object)
for r in range(0, sqrtN):
for c in range(0, sqrtN):
currVox = c + r*sqrtN
coords[currVox,:] = [r, c]
curr_adj = []
if r > 0:
curr_adj.append(c + (r-1)*sqrtN)
if r < (sqrtN-1):
curr_adj.append(c + (r+1)*sqrtN)
if c > 0:
curr_adj.append((c-1) + r*sqrtN)
if c < (sqrtN-1):
curr_adj.append((c+1) + r*sqrtN)
adj_list[currVox] = np.array(curr_adj)
if type == 'square':
z = np.array([
0,0,0,0,0,0,3,3,3,3,3,3,6,6,6,6,6,6,
0,0,0,0,0,0,3,3,3,3,3,3,6,6,6,6,6,6,
0,0,0,0,0,0,3,3,3,3,3,3,6,6,6,6,6,6,
0,0,0,0,0,0,3,3,3,3,3,3,6,6,6,6,6,6,
0,0,0,0,0,0,3,3,3,3,3,3,6,6,6,6,6,6,
0,0,0,0,0,0,3,3,3,3,3,3,6,6,6,6,6,6,
1,1,1,1,1,1,4,4,4,4,4,4,7,7,7,7,7,7,
1,1,1,1,1,1,4,4,4,4,4,4,7,7,7,7,7,7,
1,1,1,1,1,1,4,4,4,4,4,4,7,7,7,7,7,7,
1,1,1,1,1,1,4,4,4,4,4,4,7,7,7,7,7,7,
1,1,1,1,1,1,4,4,4,4,4,4,7,7,7,7,7,7,
1,1,1,1,1,1,4,4,4,4,4,4,7,7,7,7,7,7,
2,2,2,2,2,2,5,5,5,5,5,5,8,8,8,8,8,8,
2,2,2,2,2,2,5,5,5,5,5,5,8,8,8,8,8,8,
2,2,2,2,2,2,5,5,5,5,5,5,8,8,8,8,8,8,
2,2,2,2,2,2,5,5,5,5,5,5,8,8,8,8,8,8,
2,2,2,2,2,2,5,5,5,5,5,5,8,8,8,8,8,8,
2,2,2,2,2,2,5,5,5,5,5,5,8,8,8,8,8,8])
elif type == 'stripes':
z = np.array([
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
0,0,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
0,0,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
0,0,0,1,1,1,2,2,2,2,2,2,2,2,2,2,2,2,
0,0,0,1,1,1,2,2,2,2,2,2,2,2,2,2,2,2,
0,0,0,1,1,1,2,2,2,2,2,2,2,2,2,2,2,2,
0,0,0,1,1,1,2,2,2,3,3,3,3,3,3,3,3,3,
0,0,0,1,1,1,2,2,2,3,3,3,3,3,3,3,3,3,
0,0,0,1,1,1,2,2,2,3,3,3,3,3,3,3,3,3,
0,0,0,1,1,1,2,2,2,3,3,3,4,4,4,4,4,4,
0,0,0,1,1,1,2,2,2,3,3,3,4,4,4,4,4,4,
0,0,0,1,1,1,2,2,2,3,3,3,4,4,4,4,4,4,
0,0,0,1,1,1,2,2,2,3,3,3,4,4,4,5,5,5,
0,0,0,1,1,1,2,2,2,3,3,3,4,4,4,5,5,5,
0,0,0,1,1,1,2,2,2,3,3,3,4,4,4,5,5,5])
elif type == 'face':
z = np.array([
0,0,0,0,0,0,3,3,3,3,3,3,6,6,6,6,6,6,
0,0,0,0,0,0,3,3,3,3,3,3,6,6,6,6,6,6,
0,0,0,0,0,0,3,3,3,3,3,3,6,6,6,6,6,6,
0,0,0,0,0,0,3,3,3,3,3,3,6,6,6,6,6,6,
0,0,0,0,0,0,3,3,3,3,3,3,6,6,6,6,6,6,
0,0,0,0,0,0,3,3,3,3,3,3,6,6,6,6,6,6,
1,1,1,1,1,1,4,4,4,4,4,4,7,7,7,7,7,7,
1,1,1,1,1,1,4,4,4,4,4,4,7,7,7,7,7,7,
1,1,1,1,1,1,4,4,4,4,4,4,7,7,7,7,7,7,
1,1,1,1,1,1,4,4,4,4,4,4,7,7,7,7,7,7,
1,1,1,1,1,1,4,4,4,4,4,4,7,7,7,7,7,7,
1,1,1,1,1,1,4,4,4,4,4,4,7,7,7,7,7,7,
2,2,2,2,2,2,5,5,5,5,5,5,8,8,8,8,8,8,
2,2,2,2,2,2,5,5,5,5,5,5,8,8,8,8,8,8,
2,2,2,2,2,2,5,5,5,5,5,5,8,8,8,8,8,8,
2,2,2,2,2,2,5,5,5,5,5,5,8,8,8,8,8,8,
2,2,2,2,2,2,5,5,5,5,5,5,8,8,8,8,8,8,
2,2,2,2,2,2,5,5,5,5,5,5,8,8,8,8,8,8])
N = len(z)
K = len(np.unique(z))
A = np.random.normal(size=(K,K))
# api: numpy.random.normal
# Copyright 2019 by <NAME>. All rights reserved.
# This file is part of the Biopython distribution and governed by your
# choice of the "Biopython License Agreement" or the "BSD 3-Clause License".
# Please see the LICENSE file that should have been included as part of this
# package.
"""Classes to support internal coordinates for protein structures.
Internal coordinates comprise Psi, Phi and Omega dihedral angles along the
protein backbone, Chi angles along the sidechains, and all 3-atom angles and
bond lengths comprising a protein chain. These routines can compute internal
coordinates from atom XYZ coordinates, and compute atom XYZ coordinates from
internal coordinates.
Internal coordinates are defined on sequences of atoms which span
residues or follow accepted nomenclature along sidechains. To manage these
sequences and support Biopython's disorder mechanisms, AtomKey specifiers are
implemented to capture residue, atom and variant identification in a single
object. A Hedron object is specified as three sequential AtomKeys, comprising
two bond lengths and the bond angle between them. A Dihedron consists of four
sequential AtomKeys, linking two Hedra with a dihedral angle between them.
A Protein Internal Coordinate (.pic) file format is defined to capture
sufficient detail to reproduce a PDB file from chain starting coordinates
(first residue N, Ca, C XYZ coordinates) and remaining internal coordinates.
These files are used internally to verify that a given structure can be
regenerated from its internal coordinates.
Internal coordinates may also be exported as OpenSCAD data arrays for
generating 3D printed protein models. OpenSCAD software is provided as
proof-of-concept for generating such models.
The following classes comprise the core functionality for processing internal
coordinates and are sufficiently related and coupled to place them together in
this module:
IC_Chain: Extends Biopython Chain on .internal_coord attribute.
Manages connected sequence of residues and chain breaks; methods generally
apply IC_Residue methods along chain.
IC_Residue: Extends Biopython Residue on .internal_coord attribute.
Most control and methods of interest are in this class, see API.
Dihedron: four joined atoms forming a dihedral angle.
Dihedral angle, homogeneous atom coordinates in local coordinate space,
references to relevant Hedra and IC_Residue. Methods to compute
residue dihedral angles, bond angles and bond lengths.
Hedron: three joined atoms forming a plane.
Contains homogeneous atom coordinates in local coordinate space as well as
bond lengths and angle between them.
Edron: base class for Hedron and Dihedron classes.
Tuple of AtomKeys comprising child, string ID, mainchain membership boolean
and other routines common for both Hedra and Dihedra. Implements rich
comparison.
AtomKey: keys (dictionary and string) for referencing atom sequences.
Capture residue and disorder/occupancy information, provides a
no-whitespace key for .pic files, and implements rich comparison.
Custom exception classes: HedronMatchError and MissingAtomError
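A minimal usage sketch (an illustration only; it relies on the IC_Chain entry
points listed below, and the file name and call sites are hypothetical):

    from Bio.PDB.PDBParser import PDBParser
    chain = PDBParser(QUIET=True).get_structure("1abc", "1abc.pdb")[0]["A"]
    chain.internal_coord = IC_Chain(chain)               # attach internal coords
    chain.internal_coord.atom_to_internal_coordinates()  # XYZ -> di/hedra
    chain.internal_coord.internal_to_atom_coordinates()  # di/hedra -> XYZ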
"""
import re
from collections import deque, namedtuple
try:
import numpy # type: ignore
except ImportError:
from Bio import MissingPythonDependencyError
raise MissingPythonDependencyError(
"Install NumPy to build proteins from internal coordinates."
)
from Bio.PDB.Atom import Atom, DisorderedAtom
from Bio.PDB.Polypeptide import three_to_one
from Bio.PDB.vectors import coord_space, multi_rot_Z, multi_rot_Y
# , calc_dihedral, Vector
from Bio.PDB.ic_data import ic_data_backbone, ic_data_sidechains
from Bio.PDB.ic_data import ic_data_sidechain_extras, residue_atom_bond_state
# for type checking only
from typing import (
List,
Dict,
Set,
TextIO,
Union,
Tuple,
cast,
TYPE_CHECKING,
Optional,
)
if TYPE_CHECKING:
from Bio.PDB.Residue import Residue
from Bio.PDB.Chain import Chain
HKT = Tuple["AtomKey", "AtomKey", "AtomKey"] # Hedron key tuple
DKT = Tuple["AtomKey", "AtomKey", "AtomKey", "AtomKey"] # Dihedron Key Tuple
EKT = Union[HKT, DKT] # Edron Key Tuple
BKT = Tuple["AtomKey", "AtomKey"] # Bond Key Tuple
# HACS = Tuple[numpy.array, numpy.array, numpy.array] # Hedron Atom Coord Set
HACS = numpy.array # Hedron Atom Coord Set
DACS = Tuple[
numpy.array, numpy.array, numpy.array, numpy.array
] # Dihedron Atom Coord Set
class IC_Chain:
"""Class to extend Biopython Chain with internal coordinate data.
Attributes
----------
chain: biopython Chain object reference
The Chain object this extends
initNCaC: AtomKey indexed dictionary of N, Ca, C atom coordinates.
NCaCKeys start chain segments (first residue or after chain break).
These 3 atoms define the coordinate space for a contiguous chain segment,
as initially specified by PDB or mmCIF file.
MaxPeptideBond: **Class** attribute to detect chain breaks.
Override for fully contiguous chains with some very long bonds - e.g.
for 3D printing (OpenSCAD output) a structure with fully disordered
(missing) residues.
ordered_aa_ic_list: list of IC_Residue objects
IC_Residue objects ic algorithms can process (e.g. no waters)
hedra: dict indexed by 3-tuples of AtomKeys
Hedra forming residues in this chain
hedraLen: int length of hedra dict
hedraNdx: dict mapping hedra AtomKeys to numpy array data
dihedra: dict indexed by 4-tuples of AtomKeys
Dihedra forming (overlapping) this residue
dihedraLen: int length of dihedra dict
dihedraNdx: dict mapping dihedra AtomKeys to numpy array data
atomArray: numpy array of homogeneous atom coords for chain
atomArrayIndex: dict mapping AtomKeys to atomArray indexes
numpy arrays for vector processing of chain di/hedra:
hedraIC: length-angle-length entries for each hedron
hAtoms: homogeneous atom coordinates (3x4) of hedra, central atom at origin
hAtomsR: hAtoms in reverse order
hAtoms_needs_update: booleans indicating whether hAtoms represent hedraIC
dihedraIC: dihedral angles for each dihedron
dAtoms: homogeneous atom coordinates (4x4) of dihedra, second atom at origin
dAtoms_needs_update: booleans indicating whether dAtoms represent dihedraIC
Methods
-------
internal_to_atom_coordinates(verbose, start, fin)
Process ic data to Residue/Atom coordinates; calls assemble_residues()
followed by coords_to_structure()
assemble_residues(verbose, start, fin)
Generate IC_Residue atom coords from internal coordinates
coords_to_structure()
update Biopython Residue.Atom coords from IC_Residue coords for all
Residues with IC_Residue attributes
atom_to_internal_coordinates(verbose)
Calculate dihedrals, angles, bond lengths (internal coordinates) for
Atom data
link_residues()
Call link_dihedra() on each IC_Residue (needs rprev, rnext set)
set_residues()
Add .internal_coord attribute for all Residues in parent Chain, populate
ordered_aa_ic_list, set IC_Residue rprev, rnext or initNCaC coordinates
write_SCAD()
Write OpenSCAD matrices for internal coordinate data comprising chain
"""
MaxPeptideBond = 1.4 # larger C-N distance than this is chain break
def __init__(self, parent: "Chain", verbose: bool = False) -> None:
"""Initialize IC_Chain object, with or without residue/Atom data.
:param parent: Biopython Chain object
Chain object this extends
"""
# type hinting parent as Chain leads to import cycle
self.chain = parent
self.ordered_aa_ic_list: List[IC_Residue] = []
self.initNCaC: Dict[Tuple[str], Dict["AtomKey", numpy.array]] = {}
self.sqMaxPeptideBond = IC_Chain.MaxPeptideBond * IC_Chain.MaxPeptideBond
# need init here for _gen_edra():
self.hedra = {}
# self.hedraNdx = {}
self.dihedra = {}
# self.dihedraNdx = {}
self.set_residues(verbose) # no effect if no residues loaded
# return True if a0, a1 within supplied cutoff
def _atm_dist_chk(self, a0: Atom, a1: Atom, cutoff: float, sqCutoff: float) -> bool:
diff = a0.coord - a1.coord
sum = 0
for axis in diff:
if axis > cutoff:
# print("axis: ", axis)
return False
sum += axis * axis
if sum > sqCutoff:
# print("sq axis: ", sqrt(sum)) # need import math.sqrt
return False
return True
# return a string describing issue, or None if OK
def _peptide_check(self, prev: "Residue", curr: "Residue") -> Optional[str]:
if 0 == len(curr.child_dict):
# curr residue with no atoms => reading pic file, no break
return None
if (0 != len(curr.child_dict)) and (0 == len(prev.child_dict)):
# prev residue with no atoms, curr has atoms => reading pic file,
# have break
return "PIC data missing atoms"
# handle non-standard AA not marked as HETATM (1KQF, 1NTH)
if not prev.internal_coord.is20AA:
return "previous residue not standard amino acid"
# both biopython Residues have Atoms, so check distance
Natom = curr.child_dict.get("N", None)
pCatom = prev.child_dict.get("C", None)
if Natom is None or pCatom is None:
return f"missing {'previous C' if pCatom is None else 'N'} atom"
# confirm previous residue has all backbone atoms
pCAatom = prev.child_dict.get("CA", None)
pNatom = prev.child_dict.get("N", None)
if pNatom is None or pCAatom is None:
return "previous residue missing N or Ca"
tooFar = f"MaxPeptideBond ({IC_Chain.MaxPeptideBond} angstroms) exceeded"
if not Natom.is_disordered() and not pCatom.is_disordered():
dc = self._atm_dist_chk(
Natom, pCatom, IC_Chain.MaxPeptideBond, self.sqMaxPeptideBond
)
if dc:
return None
else:
return tooFar
Nlist: List[Atom] = []
pClist: List[Atom] = []
if Natom.is_disordered():
Nlist.extend(Natom.child_dict.values())
else:
Nlist = [Natom]
if pCatom.is_disordered():
pClist.extend(pCatom.child_dict.values())
else:
pClist = [pCatom]
for n in Nlist:
for c in pClist:
if self._atm_dist_chk(
                    n, c, IC_Chain.MaxPeptideBond, self.sqMaxPeptideBond
):
return None
return tooFar
def clear_ic(self):
"""Clear residue internal_coord settings for this chain."""
for res in self.chain.get_residues():
res.internal_coord = None
def _add_residue(
self, res: "Residue", last_res: List, last_ord_res: List, verbose: bool = False
) -> bool:
"""Set rprev, rnext, determine chain break."""
if not res.internal_coord:
res.internal_coord = IC_Residue(res)
res.internal_coord.cic = self
if (
0 < len(last_res)
and last_ord_res == last_res
and self._peptide_check(last_ord_res[0].residue, res) is None
):
# no chain break
for prev in last_ord_res:
prev.rnext.append(res.internal_coord)
res.internal_coord.rprev.append(prev)
return True
elif all(atm in res.child_dict for atm in ("N", "CA", "C")):
# chain break, save coords for restart
if verbose and len(last_res) != 0: # not first residue
if last_ord_res != last_res:
reason = "disordered residues after {last_ord_res.pretty_str()}"
else:
reason = cast(
str, self._peptide_check(last_ord_res[0].residue, res)
)
print(
f"chain break at {res.internal_coord.pretty_str()} due to {reason}"
)
initNCaC: Dict["AtomKey", numpy.array] = {}
ric = res.internal_coord
for atm in ("N", "CA", "C"):
bpAtm = res.child_dict[atm]
if bpAtm.is_disordered():
for altAtom in bpAtm.child_dict.values():
ak = AtomKey(ric, altAtom)
initNCaC[ak] = IC_Residue.atm241(altAtom.coord)
else:
ak = AtomKey(ric, bpAtm)
initNCaC[ak] = IC_Residue.atm241(bpAtm.coord)
self.initNCaC[ric.rbase] = initNCaC
return True
elif (
0 == len(res.child_list)
and self.chain.child_list[0].id == res.id
and res.internal_coord.is20AA
):
# this is first residue, no atoms at all, is std amino acid
# conclude reading pic file with no N-Ca-C coords
return True
# chain break but do not have N, Ca, C coords to restart from
return False
def set_residues(self, verbose: bool = False) -> None:
"""Initialize internal_coord data for loaded Residues.
Add IC_Residue as .internal_coord attribute for each Residue in parent
Chain; populate ordered_aa_ic_list with IC_Residue references for residues
which can be built (amino acids and some hetatms); set rprev and rnext
on each sequential IC_Residue, populate initNCaC at start and after
chain breaks.
"""
# ndx = 0
last_res: List["IC_Residue"] = []
last_ord_res: List["IC_Residue"] = []
for res in self.chain.get_residues():
# select only not hetero or accepted hetero
if res.id[0] == " " or res.id[0] in IC_Residue.accept_resnames:
this_res: List["IC_Residue"] = []
if 2 == res.is_disordered():
# print('disordered res:', res.is_disordered(), res)
for r in res.child_dict.values():
if self._add_residue(r, last_res, last_ord_res, verbose):
this_res.append(r.internal_coord)
else:
if self._add_residue(res, last_res, last_ord_res, verbose):
this_res.append(res.internal_coord)
if 0 < len(this_res):
self.ordered_aa_ic_list.extend(this_res)
last_ord_res = this_res
last_res = this_res
def link_residues(self) -> None:
"""link_dihedra() for each IC_Residue; needs rprev, rnext set.
Called by PICIO:read_PIC() after finished reading chain
"""
for ric in self.ordered_aa_ic_list:
ric.cic = self
ric.link_dihedra()
def assemble_residues(
self,
verbose: bool = False,
start: Optional[int] = None,
fin: Optional[int] = None,
) -> None:
"""Generate IC_Residue atom coords from internal coordinates.
Filter positions between start and fin if set, find appropriate start
coordinates for each residue and pass to IC_Residue.assemble()
:param verbose bool: default False
describe runtime problems
:param: start, fin lists
sequence position, insert code for begin, end of subregion to
process
"""
for ric in self.ordered_aa_ic_list:
ric.clear_transforms()
for ric in self.ordered_aa_ic_list:
if not hasattr(ric, "NCaCKey"):
if verbose:
print(
f"no assembly for {str(ric)} due to missing N, Ca and/or C atoms"
)
continue
respos = ric.residue.id[1]
if start and start > respos:
continue
if fin and fin < respos:
continue
ric.atom_coords = cast(
Dict[AtomKey, numpy.array], ric.assemble(verbose=verbose)
)
if ric.atom_coords:
ric.ak_set = set(ric.atom_coords.keys())
def coords_to_structure(self) -> None:
"""Promote all ic atom_coords to Biopython Residue/Atom coords.
IC atom_coords are homogeneous [4], Biopython atom coords are XYZ [3].
"""
self.ndx = 0
for res in self.chain.get_residues():
if 2 == res.is_disordered():
for r in res.child_dict.values():
if r.internal_coord:
if r.internal_coord.atom_coords:
r.internal_coord.coords_to_residue()
elif (
r.internal_coord.rprev
and r.internal_coord.rprev[0].atom_coords
):
r.internal_coord.rprev[0].coords_to_residue(rnext=True)
elif res.internal_coord:
if res.internal_coord.atom_coords:
res.internal_coord.coords_to_residue()
elif (
res.internal_coord.rprev and res.internal_coord.rprev[0].atom_coords
):
res.internal_coord.rprev[0].coords_to_residue(rnext=True)
def init_edra(self) -> None:
"""Create chain level di/hedra arrays.
If called by read_PIC, self.di/hedra = {} and object tree has IC data.
-> build chain arrays from IC data
If called at start of atom_to_internal_coords, self.di/hedra fully
populated. -> create empty chain numpy arrays
In both cases, fix di/hedra object attributes to be views on
chain-level array data
"""
# hedra:
if self.hedra == {}:
# loaded objects from PIC file, so no chain-level hedra
hLAL = {}
for ric in self.ordered_aa_ic_list:
for k, h in ric.hedra.items():
self.hedra[k] = h
hLAL[k] = h.lal
self.hedraLen = len(self.hedra)
self.hedraIC = numpy.array(tuple(hLAL.values()))
else:
# atom_to_internal_coords() populates self.hedra via _gen_edra()
# a_to_ic will set ic so create empty
self.hedraLen = len(self.hedra)
self.hedraIC = numpy.empty((self.hedraLen, 3), dtype=numpy.float64)
self.hedraNdx = dict(zip(self.hedra.keys(), range(len(self.hedra))))
self.hAtoms: numpy.ndarray = numpy.zeros(
(self.hedraLen, 3, 4), dtype=numpy.float64
)
self.hAtoms[:, :, 3] = 1.0 # homogeneous
self.hAtomsR: numpy.ndarray = numpy.copy(self.hAtoms)
self.hAtoms_needs_update = numpy.full(self.hedraLen, True)
for ric in self.ordered_aa_ic_list:
for k, h in ric.hedra.items():
# all h.lal become views on hedraIC
h.lal = self.hedraIC[self.hedraNdx[k]]
# dihedra:
if self.dihedra == {}:
# loaded objects from PIC file, so no chain-level dihedra
dic = {}
for ric in self.ordered_aa_ic_list:
for k, d in ric.dihedra.items():
self.dihedra[k] = d
dic[k] = d.angle
self.dihedraIC = numpy.array(tuple(dic.values()))
self.dihedraICr = numpy.deg2rad(self.dihedraIC)
self.dihedraLen = len(self.dihedra)
else:
# atom_to_internal_coords() populates self.dihedra via _gen_edra()
# a_to_ic will set ic so create empty
self.dihedraLen = len(self.dihedra)
self.dihedraIC = numpy.empty(self.dihedraLen)
self.dihedraICr = numpy.empty(self.dihedraLen)
self.dihedraNdx = dict(zip(self.dihedra.keys(), range(len(self.dihedra))))
self.dAtoms: numpy.ndarray = numpy.empty(
(self.dihedraLen, 4, 4), dtype=numpy.float64
)
self.dAtoms[:, :, 3] = 1.0 # homogeneous
self.a4_pre_rotation = numpy.empty((self.dihedraLen, 4))
for k, d in self.dihedra.items():
d.initial_coords = self.dAtoms[self.dihedraNdx[k]]
d.a4_pre_rotation = self.a4_pre_rotation[self.dihedraNdx[k]]
self.dAtoms_needs_update = numpy.full(self.dihedraLen, True)
self.dRev = numpy.array(tuple(d.reverse for d in self.dihedra.values()))
self.dFwd = self.dRev != True # noqa: E712
self.dH1ndx = numpy.array(
tuple(self.hedraNdx[d.h1key] for d in self.dihedra.values())
)
self.dH2ndx = numpy.array(
tuple(self.hedraNdx[d.h2key] for d in self.dihedra.values())
)
# @profile
def init_atom_coords(self) -> None:
"""Set chain level di/hedra initial coord arrays from IC_Residue data."""
if not numpy.all(self.dAtoms_needs_update):
self.dAtoms_needs_update |= (self.hAtoms_needs_update[self.dH1ndx]) | (
self.hAtoms_needs_update[self.dH2ndx]
)
if numpy.any(self.hAtoms_needs_update):
# hedra initial coords
# supplementary angle radian: angles which add to 180 are supplementary
sar = numpy.deg2rad(
180.0 - self.hedraIC[:, 1][self.hAtoms_needs_update]
) # angle
sinSar = numpy.sin(sar)
cosSarN = numpy.cos(sar)
# api: numpy.cos
import base64
import unittest
import numpy as np
import scipy.ndimage as scind
import scipy.misc
import scipy.io.matlab
import centrosome.cpmorphology as morph
from centrosome.cpmorphology import fixup_scipy_ndimage_result as fix
from centrosome.filter import permutations
class TestFillLabeledHoles(unittest.TestCase):
def test_01_00_zeros(self):
"""A label matrix of all zeros has no hole"""
image = np.zeros((10,10),dtype=int)
output = morph.fill_labeled_holes(image)
self.assertTrue(np.all(output==0))
def test_01_01_ones(self):
"""Regression test - an image of all ones"""
image = np.ones((10,10),dtype=int)
output = morph.fill_labeled_holes(image)
self.assertTrue(np.all(output==1))
def test_02_object_without_holes(self):
"""The label matrix of a single object without holes has no hole"""
image = np.zeros((10,10),dtype=int)
image[3:6,3:6] = 1
output = morph.fill_labeled_holes(image)
self.assertTrue(np.all(output==image))
def test_03_object_with_hole(self):
image = np.zeros((20,20),dtype=int)
image[5:15,5:15] = 1
image[8:12,8:12] = 0
output = morph.fill_labeled_holes(image)
self.assertTrue(np.all(output[8:12,8:12] == 1))
output[8:12,8:12] = 0 # unfill the hole again
self.assertTrue(np.all(output==image))
def test_04_holes_on_edges_are_not_holes(self):
image = np.zeros((40,40),dtype=int)
objects = (((15,25),(0,10),(18,22),(0,3)),
((0,10),(15,25),(0,3),(18,22)),
((15,25),(30,39),(18,22),(36,39)),
((30,39),(15,25),(36,39),(18,22)))
for idx,x in zip(range(1,len(objects)+1),objects):
image[x[0][0]:x[0][1],x[1][0]:x[1][1]] = idx
image[x[2][0]:x[2][1],x[3][0]:x[3][1]] = 0
output = morph.fill_labeled_holes(image)
for x in objects:
self.assertTrue(np.all(output[x[2][0]:x[2][1],x[3][0]:x[3][1]]==0))
output[x[2][0]:x[2][1],x[3][0]:x[3][1]] = 1
self.assertTrue(np.all(output[x[0][0]:x[0][1],x[1][0]:x[1][1]]!=0))
def test_05_lots_of_objects_with_holes(self):
image = np.ones((1020,1020),bool)
for i in range(0,51):
image[i*20:i*20+10,:] = ~image[i*20:i*20+10,:]
image[:,i*20:i*20+10] = ~ image[:,i*20:i*20+10]
image = scind.binary_erosion(image, iterations = 2)
erosion = scind.binary_erosion(image, iterations = 2)
image = image & ~ erosion
labeled_image,nobjects = scind.label(image)
output = morph.fill_labeled_holes(labeled_image)
self.assertTrue(np.all(output[erosion] > 0))
def test_06_regression_diamond(self):
"""Check filling the center of a diamond"""
image = np.zeros((5,5),int)
image[1,2]=1
image[2,1]=1
image[2,3]=1
image[3,2]=1
output = morph.fill_labeled_holes(image)
where = np.argwhere(image != output)
self.assertEqual(len(where),1)
self.assertEqual(where[0][0],2)
self.assertEqual(where[0][1],2)
def test_07_regression_nearby_holes(self):
"""Check filling an object with three holes"""
image = np.array([[0,0,0,0,0,0,0,0,0,0,0,0],
[0,1,1,1,1,1,1,1,1,1,1,0],
[0,1,1,1,0,0,0,0,0,0,1,0],
[0,1,0,1,0,0,0,0,0,0,1,0],
[0,1,1,1,0,0,0,0,0,0,1,0],
[0,1,0,0,0,0,0,0,0,0,1,0],
[0,1,1,1,0,0,0,0,0,0,1,0],
[0,1,0,1,0,0,0,0,0,0,1,0],
[0,1,1,1,0,0,0,0,0,0,1,0],
[0,1,1,1,1,1,1,1,1,1,1,0],
[0,0,0,0,0,0,0,0,0,0,0,0]])
expec = np.array([[0,0,0,0,0,0,0,0,0,0,0,0],
[0,1,1,1,1,1,1,1,1,1,1,0],
[0,1,1,1,1,1,1,1,1,1,1,0],
[0,1,1,1,1,1,1,1,1,1,1,0],
[0,1,1,1,1,1,1,1,1,1,1,0],
[0,1,1,1,1,1,1,1,1,1,1,0],
[0,1,1,1,1,1,1,1,1,1,1,0],
[0,1,1,1,1,1,1,1,1,1,1,0],
[0,1,1,1,1,1,1,1,1,1,1,0],
[0,1,1,1,1,1,1,1,1,1,1,0],
[0,0,0,0,0,0,0,0,0,0,0,0]])
output = morph.fill_labeled_holes(image)
self.assertTrue(np.all(output==expec))
def test_08_fill_small_holes(self):
"""Check filling only the small holes"""
image = np.zeros((10,20), int)
image[1:-1,1:-1] = 1
image[3:8,4:7] = 0 # A hole with area of 5*3 = 15 and not filled
expected = image.copy()
image[3:5, 11:18] = 0 # A hole with area 2*7 = 14 is filled
def small_hole_fn(area, is_foreground):
return area <= 14
output = morph.fill_labeled_holes(image, size_fn = small_hole_fn)
self.assertTrue(np.all(output == expected))
def test_09_fill_binary_image(self):
"""Make sure that we can fill a binary image too"""
image = np.zeros((10,20), bool)
image[1:-1, 1:-1] = True
image[3:8, 4:7] = False # A hole with area of 5*3 = 15 and not filled
expected = image.copy()
image[3:5, 11:18] = False # A hole with area 2*7 = 14 is filled
def small_hole_fn(area, is_foreground):
return area <= 14
output = morph.fill_labeled_holes(image, size_fn = small_hole_fn)
self.assertEqual(image.dtype.kind, output.dtype.kind)
self.assertTrue(np.all(output == expected))
def test_10_fill_bullseye(self):
i,j = np.mgrid[-50:50, -50:50]
bullseye = i * i + j * j < 2000
bullseye[i * i + j * j < 1000 ] = False
bullseye[i * i + j * j < 500 ] = True
bullseye[i * i + j * j < 250 ] = False
bullseye[i * i + j * j < 100 ] = True
labels, count = scind.label(bullseye)
result = morph.fill_labeled_holes(labels)
self.assertTrue(np.all(result[result != 0] == bullseye[6, 43]))
def test_11_dont_fill_if_touches_2(self):
labels = np.array([
[ 0, 0, 0, 0, 0, 0, 0, 0 ],
[ 0, 1, 1, 1, 2, 2, 2, 0 ],
[ 0, 1, 1, 0, 0, 2, 2, 0 ],
[ 0, 1, 1, 1, 2, 2, 2, 0 ],
[ 0, 0, 0, 0, 0, 0, 0, 0 ]])
result = morph.fill_labeled_holes(labels)
self.assertTrue(np.all(labels == result))
def test_12_too_many_objects(self):
# Regression test of issue # 352 - code failed if
# more than 64K objects in background 4-labeling
#
# Create a checkerboard image. The 4-labeling will have > 64K
# labels.
#
i, j = np.mgrid[0:513, 0:513]
labels = (i % 2) != (j % 2)
# Program would segfault within this call
result = morph.fill_labeled_holes(labels)
def test_13_issue_1116(self):
# Regression test of issue # 1116. Object 727 is next to 762, but 762
# is not next to 727 causing 727 to be filled-in
labels = np.array(
[[694, 694, 694, 705, 705, 705, 705, 705, 705, 705],
[ 0, 0, 705, 705, 705, 705, 705, 705, 727, 762],
[ 0, 0, 705, 705, 705, 705, 705, 727, 727, 762],
[ 0, 0, 705, 705, 705, 705, 705, 727, 762, 762],
[761, 761, 761, 761, 705, 705, 762, 762, 762, 762],
[761, 761, 761, 761, 0, 762, 762, 762, 762, 762],
[761, 761, 761, 761, 762, 762, 762, 762, 762, 762]])
result = morph.fill_labeled_holes(labels)
self.assertTrue(np.sum(result == 727) == np.sum(labels==727))
class TestAdjacent(unittest.TestCase):
def test_00_00_zeros(self):
result = morph.adjacent(np.zeros((10,10), int))
self.assertTrue(np.all(result==False))
def test_01_01_one(self):
image = np.zeros((10,10), int)
image[2:5,3:8] = 1
result = morph.adjacent(image)
self.assertTrue(np.all(result==False))
def test_01_02_not_adjacent(self):
image = np.zeros((10,10), int)
image[2:5,3:8] = 1
image[6:8,3:8] = 2
result = morph.adjacent(image)
self.assertTrue(np.all(result==False))
def test_01_03_adjacent(self):
image = np.zeros((10,10), int)
image[2:8,3:5] = 1
image[2:8,5:8] = 2
expected = np.zeros((10,10), bool)
expected[2:8,4:6] = True
result = morph.adjacent(image)
self.assertTrue(np.all(result==expected))
def test_02_01_127_objects(self):
'''Test that adjacency works for int8 and 127 labels
Regression test of img-1099. Adjacent sets the background to the
maximum value of the labels matrix + 1. For 127 and int8, it wraps
around and uses -127.
'''
# Create 127 labels
labels = np.zeros((32,16), np.int8)
i,j = np.mgrid[0:32, 0:16]
mask = (i % 2 > 0) & (j % 2 > 0)
labels[mask] = np.arange(np.sum(mask))
result = morph.adjacent(labels)
self.assertTrue(np.all(result == False))
class TestStrelDisk(unittest.TestCase):
"""Test cellprofiler.cpmath.cpmorphology.strel_disk"""
def test_01_radius2(self):
"""Test strel_disk with a radius of 2"""
x = morph.strel_disk(2)
self.assertTrue(x.shape[0], 5)
self.assertTrue(x.shape[1], 5)
y = [0,0,1,0,0,
0,1,1,1,0,
1,1,1,1,1,
0,1,1,1,0,
0,0,1,0,0]
ya = np.array(y,dtype=float).reshape((5,5))
self.assertTrue(np.all(x==ya))
def test_02_radius2_point_5(self):
"""Test strel_disk with a radius of 2.5"""
x = morph.strel_disk(2.5)
self.assertTrue(x.shape[0], 5)
self.assertTrue(x.shape[1], 5)
y = [0,1,1,1,0,
1,1,1,1,1,
1,1,1,1,1,
1,1,1,1,1,
0,1,1,1,0]
ya = np.array(y,dtype=float).reshape((5,5))
self.assertTrue(np.all(x==ya))
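# Illustrative reference (an assumption drawn from the two tests above, not the
# library's actual implementation): strel_disk(r) appears to mark every pixel
# whose centre lies within Euclidean distance r of the origin.
def _reference_strel_disk(radius):
    iradius = int(radius)
    i, j = np.mgrid[-iradius:iradius + 1, -iradius:iradius + 1]
    return (i * i + j * j <= radius * radius).astype(float)
# _reference_strel_disk(2) and _reference_strel_disk(2.5) reproduce the 5x5
# patterns asserted in test_01_radius2 and test_02_radius2_point_5.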
class TestStrelDiamond(unittest.TestCase):
def test_01(self):
expected = [[ 0, 0, 1, 0, 0],
[ 0, 1, 1, 1, 0],
[ 1, 1, 1, 1, 1],
[ 0, 1, 1, 1, 0],
[ 0, 0, 1, 0, 0]]
np.testing.assert_array_equal(morph.strel_diamond(2), expected)
class TestStrelLine(unittest.TestCase):
def test_01(self):
test_cases = (
dict(angle=0, length=5, expected = [[1, 1, 1, 1, 1]]),
dict(angle=30, length=8, expected = [
[0, 0, 0, 0, 0, 0, 1],
[0, 0, 0, 0, 1, 1, 0],
[0, 0, 0, 1, 0, 0, 0],
[0, 1, 1, 0, 0, 0, 0],
[1, 0, 0, 0, 0, 0, 0]]),
dict(angle=60, length=8, expected = [
[0, 0, 0, 0, 1],
[0, 0, 0, 1, 0],
[0, 0, 0, 1, 0],
[0, 0, 1, 0, 0],
[0, 1, 0, 0, 0],
[0, 1, 0, 0, 0],
[1, 0, 0, 0, 0]
]))
for test_case in test_cases:
angle = test_case['angle']
length = test_case['length']
expected = test_case['expected']
result = morph.strel_line(length, angle)
np.testing.assert_array_equal(result, expected)
class TestStrelOctagon(unittest.TestCase):
def test_01(self):
expected = [
[ 0, 0, 1, 1, 1, 0, 0 ],
[ 0, 1, 1, 1, 1, 1, 0 ],
[ 1, 1, 1, 1, 1, 1, 1 ],
[ 1, 1, 1, 1, 1, 1, 1 ],
[ 1, 1, 1, 1, 1, 1, 1 ],
[ 0, 1, 1, 1, 1, 1, 0 ],
[ 0, 0, 1, 1, 1, 0, 0 ]]
result = morph.strel_octagon(3)
np.testing.assert_array_equal(expected, result)
class TestStrelPair(unittest.TestCase):
def test_01(self):
r = np.random.RandomState()
r.seed(1210)
for _ in range(20):
i, j = r.randint(-8, 9, 2)
strel = morph.strel_pair(j, i)
data = np.zeros((21, 21), bool)
data[10, 10] = True
result = scind.binary_dilation(data, strel)
expected = data.copy()
expected[i+10, j+10] = True
np.testing.assert_array_equal(result, expected)
class TestStrelPeriodicline(unittest.TestCase):
def test_01(self):
r = np.random.RandomState()
r.seed(1776)
for _ in range(20):
i, j = r.randint(-3, 4, 2)
n = r.randint(1, 3)
strel = morph.strel_periodicline(j, i, n)
data = np.zeros((41, 41), bool)
data[20, 20] = True
result = scind.binary_dilation(data, strel)
expected = np.zeros((41, 41), bool)
for k in range(-n, n+1):
expected[i*k+20, j*k+20] = True
np.testing.assert_array_equal(result, expected)
class TestStrelRectangle(unittest.TestCase):
def test_01(self):
for ih, iw, oh, ow in ((3, 3, 3, 3),
(3, 5, 3, 5),
(5, 3, 5, 3),
(7.5, 6, 7, 5)):
strel = morph.strel_rectangle(iw, ih)
self.assertTrue(np.all(strel))
self.assertEqual(strel.shape[0], oh)
self.assertEqual(strel.shape[1], ow)
class TestStrelSquare(unittest.TestCase):
def test_01(self):
strel = morph.strel_square(5)
self.assertEqual(strel.shape[0], 5)
self.assertEqual(strel.shape[1], 5)
def test_02(self):
strel = morph.strel_square(8.5)
self.assertEqual(strel.shape[0], 7)
self.assertEqual(strel.shape[1], 7)
class TestBinaryShrink(unittest.TestCase):
def test_01_zeros(self):
"""Shrink an empty array to itself"""
input = np.zeros((10,10),dtype=bool)
result = morph.binary_shrink(input,1)
self.assertTrue(np.all(input==result))
def test_02_cross(self):
"""Shrink a cross to a single point"""
input = np.zeros((9,9),dtype=bool)
input[4,:]=True
input[:,4]=True
result = morph.binary_shrink(input)
where = np.argwhere(result)
self.assertTrue(len(where)==1)
self.assertTrue(input[where[0][0],where[0][1]])
def test_03_x(self):
input = np.zeros((9,9),dtype=bool)
x,y = np.mgrid[-4:5,-4:5]
input[x==y]=True
input[x==-y]=True
result = morph.binary_shrink(input)
where = np.argwhere(result)
self.assertTrue(len(where)==1)
self.assertTrue(input[where[0][0],where[0][1]])
def test_04_block(self):
"""A block should shrink to a point"""
input = np.zeros((9,9), dtype=bool)
input[3:6,3:6]=True
result = morph.binary_shrink(input)
where = np.argwhere(result)
self.assertTrue(len(where)==1)
self.assertTrue(input[where[0][0],where[0][1]])
def test_05_hole(self):
"""A hole in a block should shrink to a ring"""
input = np.zeros((19,19), dtype=bool)
input[5:15,5:15]=True
input[9,9]=False
result = morph.binary_shrink(input)
where = np.argwhere(result)
self.assertTrue(len(where) > 1)
        self.assertFalse(result[9, 9])
def test_06_random_filled(self):
"""Shrink random blobs
If you label a random binary image, then fill the holes,
then shrink the result, each blob should shrink to a point
"""
np.random.seed(0)
input = np.random.uniform(size=(300,300)) > .8
labels,nlabels = scind.label(input,np.ones((3,3),bool))
filled_labels = morph.fill_labeled_holes(labels)
input = filled_labels > 0
result = morph.binary_shrink(input)
my_sum = scind.sum(result.astype(int),filled_labels,np.array(range(nlabels+1),dtype=np.int32))
my_sum = np.array(my_sum)
self.assertTrue(np.all(my_sum[1:] == 1))
def test_07_all_patterns_of_3x3(self):
'''Run all patterns of 3x3 with a 1 in the middle
All of these patterns should shrink to a single pixel since
all are 8-connected and there are no holes
'''
for i in range(512):
a = morph.pattern_of(i)
if a[1,1]:
result = morph.binary_shrink(a)
self.assertEqual(np.sum(result),1)
def test_08_labels(self):
'''Run a labels matrix through shrink with two touching objects'''
labels = np.zeros((10,10),int)
labels[2:8,2:5] = 1
labels[2:8,5:8] = 2
result = morph.binary_shrink(labels)
self.assertFalse(np.any(result[labels==0] > 0))
my_sum = fix(scind.sum(result>0, labels, np.arange(1,3,dtype=np.int32)))
self.assertTrue(np.all(my_sum == 1))
class TestCpmaximum(unittest.TestCase):
def test_01_zeros(self):
input = np.zeros((10,10))
output = morph.cpmaximum(input)
self.assertTrue(np.all(output==input))
def test_01_ones(self):
input = np.ones((10,10))
output = morph.cpmaximum(input)
self.assertTrue(np.all(np.abs(output-input)<=np.finfo(float).eps))
def test_02_center_point(self):
input = np.zeros((9,9))
input[4,4] = 1
expected = np.zeros((9,9))
expected[3:6,3:6] = 1
structure = np.ones((3,3),dtype=bool)
output = morph.cpmaximum(input,structure,(1,1))
self.assertTrue(np.all(output==expected))
def test_03_corner_point(self):
input = np.zeros((9,9))
input[0,0]=1
expected = np.zeros((9,9))
expected[:2,:2]=1
structure = np.ones((3,3),dtype=bool)
output = morph.cpmaximum(input,structure,(1,1))
self.assertTrue(np.all(output==expected))
def test_04_structure(self):
input = np.zeros((9,9))
input[0,0]=1
input[4,4]=1
structure = np.zeros((3,3),dtype=bool)
structure[0,0]=1
expected = np.zeros((9,9))
expected[1,1]=1
expected[5,5]=1
output = morph.cpmaximum(input,structure,(1,1))
self.assertTrue(np.all(output[1:,1:]==expected[1:,1:]))
def test_05_big_structure(self):
big_disk = morph.strel_disk(10).astype(bool)
input = np.zeros((1001,1001))
input[500,500] = 1
expected = np.zeros((1001,1001))
expected[490:511,490:511][big_disk]=1
output = morph.cpmaximum(input,big_disk)
self.assertTrue(np.all(output == expected))
class TestRelabel(unittest.TestCase):
def test_00_relabel_zeros(self):
input = np.zeros((10,10),int)
output,count = morph.relabel(input)
self.assertTrue(np.all(input==output))
self.assertEqual(count, 0)
def test_01_relabel_one(self):
input = np.zeros((10,10),int)
input[3:6,3:6]=1
output,count = morph.relabel(input)
self.assertTrue(np.all(input==output))
self.assertEqual(count,1)
def test_02_relabel_two_to_one(self):
input = np.zeros((10,10),int)
input[3:6,3:6]=2
output,count = morph.relabel(input)
self.assertTrue(np.all((output==1)[input==2]))
self.assertTrue(np.all((input==output)[input!=2]))
self.assertEqual(count,1)
def test_03_relabel_gap(self):
input = np.zeros((20,20),int)
input[3:6,3:6]=1
input[3:6,12:15]=3
output,count = morph.relabel(input)
self.assertTrue(np.all((output==2)[input==3]))
self.assertTrue(np.all((input==output)[input!=3]))
self.assertEqual(count,2)
class TestConvexHull(unittest.TestCase):
def test_00_00_zeros(self):
"""Make sure convex_hull can handle an empty array"""
result,counts = morph.convex_hull(np.zeros((10,10),int), [])
self.assertEqual(np.product(result.shape),0)
self.assertEqual(np.product(counts.shape),0)
def test_01_01_zeros(self):
"""Make sure convex_hull can work if a label has no points"""
result,counts = morph.convex_hull(np.zeros((10,10),int), [1])
self.assertEqual(np.product(result.shape),0)
self.assertEqual(np.product(counts.shape),1)
self.assertEqual(counts[0],0)
def test_01_02_point(self):
"""Make sure convex_hull can handle the degenerate case of one point"""
labels = np.zeros((10,10),int)
labels[4,5] = 1
result,counts = morph.convex_hull(labels,[1])
self.assertEqual(result.shape,(1,3))
self.assertEqual(result[0,0],1)
self.assertEqual(result[0,1],4)
self.assertEqual(result[0,2],5)
self.assertEqual(counts[0],1)
def test_01_030_line(self):
"""Make sure convex_hull can handle the degenerate case of a line"""
labels = np.zeros((10,10),int)
labels[2:8,5] = 1
result,counts = morph.convex_hull(labels,[1])
self.assertEqual(counts[0],2)
self.assertEqual(result.shape,(2,3))
self.assertTrue(np.all(result[:,0]==1))
self.assertTrue(result[0,1] in (2,7))
self.assertTrue(result[1,1] in (2,7))
self.assertTrue(np.all(result[:,2]==5))
def test_01_031_odd_line(self):
"""Make sure convex_hull can handle the degenerate case of a line with odd length
        This is a regression test: a line of odd length has a point at its
        center, and the sign of the difference at that point is zero, which
        caused the point to be (incorrectly) included in the hull.
"""
labels = np.zeros((10,10),int)
labels[2:7,5] = 1
result,counts = morph.convex_hull(labels,[1])
self.assertEqual(counts[0],2)
self.assertEqual(result.shape,(2,3))
self.assertTrue(np.all(result[:,0]==1))
self.assertTrue(result[0,1] in (2,6))
self.assertTrue(result[1,1] in (2,6))
self.assertTrue(np.all(result[:,2]==5))
def test_01_032_diagonal_line(self):
#
# Regression test for issue 1412 - diagonal line
#
labels = np.zeros((20, 20), int)
test_case = np.array([
[ 8, 11, 1],
[ 8, 12, 1],
[ 9, 13, 1],
[ 16, 9, 2],
[ 17, 7, 2],
[ 17, 8, 2],
[ 17, 9, 2],
[ 17, 10, 2],
[ 18, 7, 2],
[ 18, 8, 2],
[ 18, 9, 2]])
labels[test_case[:, 0], test_case[:, 1]] = test_case[:, 2]
result, counts = morph.convex_hull(labels, [1, 2])
self.assertEqual(len(counts), 2)
self.assertEqual(counts[0], 3)
self.assertEqual(counts[1], 5)
def test_01_04_square(self):
"""Make sure convex_hull can handle a square which is not degenerate"""
labels = np.zeros((10,10),int)
labels[2:7,3:8] = 1
result,counts = morph.convex_hull(labels,[1])
self.assertEqual(counts[0],4)
order = np.lexsort((result[:,2], result[:,1]))
result = result[order,:]
expected = np.array([[1,2,3],
[1,2,7],
[1,6,3],
[1,6,7]])
self.assertTrue(np.all(result==expected))
def test_02_01_out_of_order(self):
"""Make sure convex_hull can handle out of order indices"""
labels = np.zeros((10,10),int)
labels[2,3] = 1
labels[5,6] = 2
result,counts = morph.convex_hull(labels,[2,1])
self.assertEqual(counts.shape[0],2)
self.assertTrue(np.all(counts==1))
expected = np.array([[2,5,6],[1,2,3]])
self.assertTrue(np.all(result == expected))
def test_02_02_out_of_order(self):
"""Make sure convex_hull can handle out of order indices
that require different #s of loop iterations"""
labels = np.zeros((10,10),int)
labels[2,3] = 1
labels[1:7,4:8] = 2
result,counts = morph.convex_hull(labels, [2,1])
self.assertEqual(counts.shape[0],2)
self.assertTrue(np.all(counts==(4,1)))
self.assertEqual(result.shape,(5,3))
order = np.lexsort((result[:,2],result[:,1],
np.array([0,2,1])[result[:,0]]))
result = result[order,:]
expected = np.array([[2,1,4],
[2,1,7],
[2,6,4],
[2,6,7],
[1,2,3]])
self.assertTrue(np.all(result==expected))
def test_02_03_two_squares(self):
"""Make sure convex_hull can handle two complex shapes"""
labels = np.zeros((10,10),int)
labels[1:5,3:7] = 1
labels[6:10,1:7] = 2
result,counts = morph.convex_hull(labels, [1,2])
self.assertEqual(counts.shape[0],2)
self.assertTrue(np.all(counts==(4,4)))
order = np.lexsort((result[:,2],result[:,1],result[:,0]))
result = result[order,:]
expected = np.array([[1,1,3],[1,1,6],[1,4,3],[1,4,6],
[2,6,1],[2,6,6],[2,9,1],[2,9,6]])
self.assertTrue(np.all(result==expected))
def test_03_01_concave(self):
"""Make sure convex_hull handles a square with a concavity"""
labels = np.zeros((10,10),int)
labels[2:8,3:9] = 1
labels[3:7,3] = 0
labels[2:6,4] = 0
labels[4:5,5] = 0
result,counts = morph.convex_hull(labels,[1])
self.assertEqual(counts[0],4)
order = np.lexsort((result[:,2],result[:,1],result[:,0]))
result = result[order,:]
expected = np.array([[1,2,3],
[1,2,8],
[1,7,3],
[1,7,8]])
self.assertTrue(np.all(result==expected))
def test_04_01_regression(self):
"""The set of points given in this case yielded one in the interior"""
np.random.seed(0)
s = 10 # divide each image into this many mini-squares with a shape in each
side = 250
        mini_side = side // s  # integer division: used as a range() step below
ct = 20
labels = np.zeros((side,side),int)
pts = np.zeros((s*s*ct,2),int)
index = np.array(range(pts.shape[0])).astype(float)/float(ct)
index = index.astype(int)
idx = 0
for i in range(0,side,mini_side):
for j in range(0,side,mini_side):
idx = idx+1
# get ct+1 unique points
p = np.random.uniform(low=0,high=mini_side,
size=(ct+1,2)).astype(int)
while True:
pu = np.unique(p[:,0]+p[:,1]*mini_side)
if pu.shape[0] == ct+1:
break
p[:pu.shape[0],0] = np.mod(pu,mini_side).astype(int)
p[:pu.shape[0],1] = (pu / mini_side).astype(int)
p_size = (ct+1-pu.shape[0],2)
                    p[pu.shape[0]:,:] = np.random.uniform(low=0,
high=mini_side,
size=p_size)
# Use the last point as the "center" and order
# all of the other points according to their angles
# to this "center"
center = p[ct,:]
v = p[:ct,:]-center
angle = np.arctan2(v[:,0],v[:,1])
order = np.lexsort((angle,))
p = p[:ct][order]
p[:,0] = p[:,0]+i
p[:,1] = p[:,1]+j
pts[(idx-1)*ct:idx*ct,:]=p
#
# draw lines on the labels
#
for k in range(ct):
morph.draw_line(labels, p[k,:], p[(k+1)%ct,:], idx)
self.assertTrue(labels[5,106]==5)
result,counts = morph.convex_hull(labels,np.array(range(100))+1)
self.assertFalse(np.any(np.logical_and(result[:,1]==5,
result[:,2]==106)))
def test_05_01_missing_labels(self):
'''Ensure that there's an entry if a label has no corresponding points'''
labels = np.zeros((10,10),int)
labels[3:6,2:8] = 2
result, counts = morph.convex_hull(labels, np.arange(2)+1)
self.assertEqual(counts.shape[0], 2)
self.assertEqual(counts[0], 0)
self.assertEqual(counts[1], 4)
def test_06_01_regression_373(self):
'''Regression test of IMG-374'''
labels = np.array([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]])
result, counts = morph.convex_hull(labels, np.array([1]))
self.assertEqual(counts[0], 2)
def test_06_02_same_point_twice(self):
'''Regression test of convex_hull_ijv - same point twice in list'''
ii = [79, 11, 65, 73, 42, 26, 46, 48, 14, 53, 73, 42, 59, 12, 59, 65, 7, 66, 84, 70]
jj = [47, 97, 98, 0, 91, 49, 42, 85, 63, 19, 0, 9, 71, 15, 50, 98, 14, 46, 89, 47]
h, c = morph.convex_hull_ijv(
np.column_stack((ii, jj, np.ones(len(ii)))), [1])
self.assertTrue(np.any((h[:,1] == 73) & (h[:,2] == 0)))
def test_07_01_new_vs_old(self):
'''Test Cython version vs original Python version'''
labels = np.random.randint(0, 1000, (1000, 1000))
indexes = np.arange(1, 1005) # mix in some empty indices
np.random.shuffle(indexes)
new_hulls, new_counts = morph.convex_hull(labels, indexes, fast=True)
old_hulls, old_counts = morph.convex_hull(labels, indexes, fast=False)
old_hull_ends = np.cumsum(old_counts)
old_hull_starts = old_hull_ends - old_counts
new_hull_ends = np.cumsum(new_counts)
new_hull_starts = new_hull_ends - new_counts
for i in range(len(indexes)):
if old_counts[i] == 0:
self.assertEqual(new_counts[i], 0)
                continue  # nothing to compare for an empty index; keep checking the rest
old_hull_i = old_hulls[old_hull_starts[i]:old_hull_ends[i], :]
new_hull_i = new_hulls[new_hull_starts[i]:new_hull_ends[i], :]
# The python code can sometimes leave sets of co-linear points,
# which we need to prune to compare to the new output, which never
# should do so. To test that, we prune colinear points from the
# python output, and compare to the unpruned Cython version.
def colinear(idx, hull):
prev_idx = idx - 1
next_idx = idx + 1
if next_idx >= hull.shape[0]:
next_idx -= hull.shape[0]
# discard label information and compute prev and current step directions
dprev = hull[idx, 1:] - hull[prev_idx, 1:]
dcur = hull[next_idx, 1:] - hull[idx, 1:]
# Colinear = zero dot product and same step direction
return (dprev[0] * dcur[1] - dprev[1] * dcur[0] == 0 and
np.sign(dprev[0]) == np.sign(dcur[0]) and
np.sign(dprev[1]) == np.sign(dcur[1]))
mask = np.array([not colinear(idx, old_hull_i) for idx in range(old_hull_i.shape[0])])
old_hull_i = old_hull_i[mask, :]
old_hull_i = old_hull_i[np.lexsort(old_hull_i.T), :]
new_hull_i = new_hull_i[np.lexsort(new_hull_i.T), :]
np.testing.assert_array_equal(old_hull_i, new_hull_i)
def test_08_01_segfault(self):
#
# Regression test: this caused a segfault
#
m, v = morph.convex_hull_ijv(chtest_08_01_data, np.arange(1, 16))
self.assertEqual(np.sum(m[:,0]==15), 14)
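# Note on the convex_hull return convention exercised above (as the assertions
# imply, not an authoritative API statement): `result` is an N x 3 array whose
# rows are [label, i, j] hull vertices, and `counts[k]` is the number of rows
# belonging to indexes[k]; a requested label with no pixels still gets a counts
# entry (0) but contributes no rows.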
class TestConvexHullImage(unittest.TestCase):
def test_00_00_zeros(self):
image = np.zeros((10,13), bool)
output = morph.convex_hull_image(image)
self.assertTrue(np.all(output == False))
def test_01_01_square(self):
image = np.zeros((10,13), bool)
image[2:5,3:8] = True
output = morph.convex_hull_image(image)
self.assertTrue(np.all(output == image))
def test_01_02_concave(self):
image = np.zeros((10,13), bool)
image[2:5,3:8] = True
image2 = image.copy()
image2[4,4:7] = False
output = morph.convex_hull_image(image2)
self.assertTrue(np.all(output == image))
class TestFillConvexHulls(unittest.TestCase):
def test_01_00_nothing(self):
ijv = morph.fill_convex_hulls(np.zeros((0, 3), int), np.zeros(1, int))
self.assertEqual(len(ijv), 0)
def test_01_01_one_circle(self):
i, j = np.mgrid[-10:11, -10:11]
labels = (i*i + j*j <= 64).astype(int)
convex_hull, _, _ = morph.get_outline_pts(labels, [1])
pts = np.column_stack((np.ones(convex_hull.shape[0], int), convex_hull))
ijv = morph.fill_convex_hulls(pts, np.array([pts.shape[0]]))
self.assertTrue(np.sum(labels) == ijv.shape[0])
self.assertTrue(np.all(labels[ijv[:,0], ijv[:, 1]] == 1))
labels[ijv[:, 0], ijv[:, 1]] = 0
self.assertTrue(np.all(labels == 0))
def test_01_02_one_point(self):
pts = np.array([[1, 5, 6]])
ijv = morph.fill_convex_hulls(pts, np.array([1]))
self.assertEqual(len(ijv), 1)
self.assertEqual(tuple(ijv[0].tolist()), (5, 6, 1))
def test_01_03_two_circles(self):
i, j = np.mgrid[-10:16, -10:16]
labels1 = np.zeros(i.shape, int)
d = np.sqrt((i*i + j*j).astype(float))
labels1[(d <= 8)] = 1
pts1, counts1 = morph.convex_hull(labels1)
labels2 = np.zeros(i.shape, int)
i -= 5
j -= 6
d = np.sqrt((i*i + j*j).astype(float))
labels2[(d <= 7)] = 2
pts2, counts2 = morph.convex_hull(labels2)
ijv = morph.fill_convex_hulls(
np.vstack((pts1, pts2)),
np.vstack((counts1, counts2)))
for l, labels in ((1, labels1), (2, labels2)):
mask = np.zeros(labels.shape, bool)
mask[ijv[ijv[:, 2] == l, 0], ijv[ijv[:, 2] == l, 1]] = True
self.assertTrue(np.all(mask == (labels == l)))
def test_01_04_two_squares(self):
ijv = morph.fill_convex_hulls(
np.array([[1, 10, 10],
[1, 10, 20],
[1, 20, 20],
[1, 20, 10],
[2, 15, 16],
[2, 15, 22],
[2, 21, 22],
[2, 21, 16]]), np.array([4, 4]))
for l, i, j, d in ((1, 10, 10, 11), (2, 15, 16, 7)):
expected = np.zeros((30, 30), bool)
expected[i:(i+d), j:(j+d)] = True
actual = np.zeros((30, 30), bool)
actual[ijv[ijv[:, 2] == l, 0], ijv[ijv[:, 2] == l, 1]] = True
self.assertTrue(np.all(expected == actual))
class TestMinimumEnclosingCircle(unittest.TestCase):
def test_00_00_zeros(self):
"""Make sure minimum_enclosing_circle can handle an empty array"""
center,radius = morph.minimum_enclosing_circle(np.zeros((10,10),int), [])
self.assertEqual(np.product(center.shape),0)
self.assertEqual(np.product(radius.shape),0)
def test_01_01_01_zeros(self):
"""Make sure minimum_enclosing_circle can work if a label has no points"""
center,radius = morph.minimum_enclosing_circle(np.zeros((10,10),int), [1])
self.assertEqual(center.shape,(1,2))
self.assertEqual(np.product(radius.shape),1)
self.assertEqual(radius[0],0)
def test_01_01_02_zeros(self):
"""Make sure minimum_enclosing_circle can work if one of two labels has no points
This is a regression test of a bug
"""
labels = np.zeros((10,10), int)
labels[2,2:5] = 3
labels[2,6:9] = 4
hull_and_point_count = morph.convex_hull(labels)
center,radius = morph.minimum_enclosing_circle(
labels,
hull_and_point_count=hull_and_point_count)
self.assertEqual(center.shape,(2,2))
self.assertEqual(np.product(radius.shape),2)
def test_01_02_point(self):
"""Make sure minimum_enclosing_circle can handle the degenerate case of one point"""
labels = np.zeros((10,10),int)
labels[4,5] = 1
center,radius = morph.minimum_enclosing_circle(labels,[1])
self.assertEqual(center.shape,(1,2))
self.assertEqual(radius.shape,(1,))
self.assertTrue(np.all(center==np.array([(4,5)])))
self.assertEqual(radius[0],0)
def test_01_03_line(self):
"""Make sure minimum_enclosing_circle can handle the degenerate case of a line"""
labels = np.zeros((10,10),int)
labels[2:7,5] = 1
center,radius = morph.minimum_enclosing_circle(labels,[1])
self.assertTrue(np.all(center==np.array([(4,5)])))
self.assertEqual(radius[0],2)
def test_01_04_square(self):
"""Make sure minimum_enclosing_circle can handle a square which is not degenerate"""
labels = np.zeros((10,10),int)
labels[2:7,3:8] = 1
center,radius = morph.minimum_enclosing_circle(labels,[1])
self.assertTrue(np.all(center==np.array([(4,5)])))
self.assertAlmostEqual(radius[0],np.sqrt(8))
def test_02_01_out_of_order(self):
"""Make sure minimum_enclosing_circle can handle out of order indices"""
labels = np.zeros((10,10),int)
labels[2,3] = 1
labels[5,6] = 2
center,radius = morph.minimum_enclosing_circle(labels,[2,1])
self.assertEqual(center.shape,(2,2))
expected_center = np.array(((5,6),(2,3)))
self.assertTrue(np.all(center == expected_center))
def test_02_02_out_of_order(self):
"""Make sure minimum_enclosing_circle can handle out of order indices
that require different #s of loop iterations"""
labels = np.zeros((10,10),int)
labels[2,3] = 1
labels[1:6,4:9] = 2
center,result = morph.minimum_enclosing_circle(labels, [2,1])
expected_center = np.array(((3,6),(2,3)))
self.assertTrue(np.all(center == expected_center))
def test_03_01_random_polygons(self):
"""Test minimum_enclosing_circle on 250 random dodecagons"""
np.random.seed(0)
s = 10 # divide each image into this many mini-squares with a shape in each
side = 250
        mini_side = side // s  # integer division: used as a range() step below
ct = 20
#
# We keep going until we get at least 10 multi-edge cases -
# polygons where the minimum enclosing circle intersects 3+ vertices
#
n_multi_edge = 0
while n_multi_edge < 10:
labels = np.zeros((side,side),int)
pts = np.zeros((s*s*ct,2),int)
index = np.array(range(pts.shape[0])).astype(float)/float(ct)
index = index.astype(int)
idx = 0
for i in range(0,side,mini_side):
for j in range(0,side,mini_side):
idx = idx+1
# get ct+1 unique points
p = np.random.uniform(low=0,high=mini_side,
size=(ct+1,2)).astype(int)
while True:
pu = np.unique(p[:,0]+p[:,1]*mini_side)
if pu.shape[0] == ct+1:
break
p[:pu.shape[0],0] = np.mod(pu,mini_side).astype(int)
p[:pu.shape[0],1] = (pu / mini_side).astype(int)
p_size = (ct+1-pu.shape[0],2)
                        p[pu.shape[0]:,:] = np.random.uniform(low=0,
high=mini_side,
size=p_size)
# Use the last point as the "center" and order
# all of the other points according to their angles
# to this "center"
center = p[ct,:]
v = p[:ct,:]-center
angle = np.arctan2(v[:,0],v[:,1])
order = np.lexsort((angle,))
p = p[:ct][order]
p[:,0] = p[:,0]+i
p[:,1] = p[:,1]+j
pts[(idx-1)*ct:idx*ct,:]=p
#
# draw lines on the labels
#
for k in range(ct):
morph.draw_line(labels, p[k,:], p[(k+1)%ct,:], idx)
center,radius = morph.minimum_enclosing_circle(labels,
np.array(range(s**2))+1)
epsilon = .000001
center_per_pt = center[index]
radius_per_pt = radius[index]
distance_from_center = np.sqrt(np.sum((pts.astype(float)-
center_per_pt)**2,1))
#
# All points must be within the enclosing circle
#
self.assertTrue(np.all(distance_from_center - epsilon < radius_per_pt))
pt_on_edge = np.abs(distance_from_center - radius_per_pt)<epsilon
count_pt_on_edge = scind.sum(pt_on_edge,
index,
np.array(range(s**2),dtype=np.int32))
count_pt_on_edge = np.array(count_pt_on_edge)
#
# Every dodecagon must have at least 2 points on the edge.
#
self.assertTrue(np.all(count_pt_on_edge>=2))
#
# Count the multi_edge cases
#
n_multi_edge += np.sum(count_pt_on_edge>=3)
class TestEllipseFromSecondMoments(unittest.TestCase):
def assertWithinFraction(self, actual, expected,
fraction=.001, message=None):
"""Assert that a "correlation" of the actual value to the expected is within the fraction
actual - the value as calculated
expected - the expected value of the variable
fraction - the fractional difference of the two
message - message to print on failure
        We divide the absolute difference by twice the sum of the variables
        to get our measurement.
        """
        measurement = abs(actual-expected)/(2*(actual+expected))
        self.assertTrue(measurement < fraction,
                        "%(actual)f != %(expected)f by the measure, abs(%(actual)f-%(expected)f) / (2*(%(actual)f + %(expected)f))"%(locals()))
def test_00_00_zeros(self):
centers,eccentricity,major_axis_length,minor_axis_length,theta =\
morph.ellipse_from_second_moments(np.zeros((10,10)),
np.zeros((10,10),int),
[])
self.assertEqual(centers.shape,(0,2))
self.assertEqual(eccentricity.shape[0],0)
self.assertEqual(major_axis_length.shape[0],0)
self.assertEqual(minor_axis_length.shape[0],0)
def test_00_01_zeros(self):
centers,eccentricity,major_axis_length,minor_axis_length,theta =\
morph.ellipse_from_second_moments(np.zeros((10,10)),
np.zeros((10,10),int),
[1])
self.assertEqual(centers.shape,(1,2))
self.assertEqual(eccentricity.shape[0],1)
self.assertEqual(major_axis_length.shape[0],1)
self.assertEqual(minor_axis_length.shape[0],1)
def test_01_01_rectangle(self):
centers,eccentricity,major_axis_length,minor_axis_length,theta =\
morph.ellipse_from_second_moments(np.ones((10,20)),
np.ones((10,20),int),
[1])
self.assertEqual(centers.shape,(1,2))
self.assertEqual(eccentricity.shape[0],1)
self.assertEqual(major_axis_length.shape[0],1)
self.assertEqual(minor_axis_length.shape[0],1)
self.assertAlmostEqual(eccentricity[0],.866,2)
self.assertAlmostEqual(centers[0,0],4.5)
self.assertAlmostEqual(centers[0,1],9.5)
self.assertWithinFraction(major_axis_length[0],23.0940,.001)
self.assertWithinFraction(minor_axis_length[0],11.5470,.001)
self.assertAlmostEqual(theta[0],0)
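        # Where the constants above come from (a back-of-envelope check,
        # assuming the regionprops-style convention axis_length = 4*sqrt(var)
        # with each pixel treated as a unit square, i.e. var = n**2/12 for a
        # side of length n): 4*np.sqrt(20**2/12.) ~= 23.094 and
        # 4*np.sqrt(10**2/12.) ~= 11.547, matching the expected values.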
def test_01_02_circle(self):
img = np.zeros((101,101),int)
y,x = np.mgrid[-50:51,-50:51]
img[x*x+y*y<=2500] = 1
centers,eccentricity,major_axis_length, minor_axis_length,theta =\
morph.ellipse_from_second_moments(np.ones((101,101)),img,[1])
self.assertAlmostEqual(eccentricity[0],0)
self.assertWithinFraction(major_axis_length[0],100,.001)
self.assertWithinFraction(minor_axis_length[0],100,.001)
def test_01_03_blob(self):
'''Regression test a blob against Matlab measurements'''
blob = np.array(
[[0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,1,1,1,1,1,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,0],
[0,0,0,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0],
[0,0,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0],
[0,0,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0],
[0,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0],
[0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,0,0,0,0,0],
[1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,0,0,0,0,0],
[1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,0,0,0,0,0],
[1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0],
[0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0],
[0,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0],
[0,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0],
[0,0,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0],
[0,0,0,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,0,0,0],
[0,0,0,0,0,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,0,0],
[0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,0],
[0,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0],
[0,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1],
[0,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1],
[0,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1],
[0,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1],
[0,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1],
[0,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1],
[0,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1],
[0,0,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1],
[0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1],
[0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1],
[0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1],
[0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,1,1,1,1,0]])
centers,eccentricity,major_axis_length, minor_axis_length,theta =\
morph.ellipse_from_second_moments(np.ones(blob.shape),blob,[1])
self.assertAlmostEqual(major_axis_length[0],37.55,1)
self.assertAlmostEqual(minor_axis_length[0],18.99,1)
self.assertAlmostEqual(eccentricity[0],0.8627,2)
self.assertAlmostEqual(centers[0,1],14.1689,2)
self.assertAlmostEqual(centers[0,0],14.8691,2)
def test_02_01_compactness_square(self):
image = np.zeros((9,9), int)
image[1:8,1:8] = 1
compactness = morph.ellipse_from_second_moments(
np.ones(image.shape), image, [1], True)[-1]
i,j = np.mgrid[0:9, 0:9]
v_i = np.var(i[image > 0])
v_j = np.var(j[image > 0])
v = v_i + v_j
area = np.sum(image > 0)
expected = 2 * np.pi * v / area
self.assertAlmostEqual(compactness, expected)
class TestCalculateExtents(unittest.TestCase):
def test_00_00_zeros(self):
"""Make sure calculate_extents doesn't throw an exception if no image"""
extents = morph.calculate_extents(np.zeros((10,10),int), [1])
def test_01_01_square(self):
"""A square should have an extent of 1"""
labels = np.zeros((10,10),int)
labels[1:8,2:9]=1
extents = morph.calculate_extents(labels,[1])
self.assertAlmostEqual(extents,1)
def test_01_02_circle(self):
"""A circle should have an extent of pi/4"""
labels = np.zeros((1001,1001),int)
y,x = np.mgrid[-500:501,-500:501]
labels[x*x+y*y<=250000] = 1
extents = morph.calculate_extents(labels,[1])
self.assertAlmostEqual(extents,np.pi/4,2)
def test_01_03_two_objects(self):
'''Make sure that calculate_extents works with more than one object
Regression test of a bug: was computing area like this:
scind.sum(labels, labels, indexes)
which works for the object that's labeled "1", but is 2x for 2, 3x
for 3, etc... oops.
'''
labels = np.zeros((10,20), int)
labels[3:7, 2:5] = 1
labels[3:5, 5:8] = 1
labels[2:8, 13:17] = 2
extents = morph.calculate_extents(labels, [1,2])
self.assertEqual(len(extents), 2)
self.assertAlmostEqual(extents[0], .75)
self.assertAlmostEqual(extents[1], 1)
class TestMedianOfLabels(unittest.TestCase):
def test_00_00_zeros(self):
result = morph.median_of_labels(np.zeros((10,10)),
np.zeros((10,10), int),
np.zeros(0, int))
self.assertEqual(len(result), 0)
def test_00_01_empty(self):
result = morph.median_of_labels(np.zeros((10,10)),
np.zeros((10,10), int),
[1])
self.assertEqual(len(result), 1)
self.assertTrue(np.isnan(result[0]))
def test_01_01_one_odd(self):
r = np.random.RandomState()
r.seed(11)
fill = r.uniform(size=25)
img = np.zeros((10,10))
labels = np.zeros((10,10), int)
labels[3:8,3:8] = 1
img[labels > 0] = fill
result = morph.median_of_labels(img, labels, [ 1 ])
self.assertEqual(len(result), 1)
self.assertAlmostEqual(result[0], np.median(fill))
def test_01_02_one_even(self):
r = np.random.RandomState()
r.seed(12)
fill = r.uniform(size=20)
img = np.zeros((10,10))
labels = np.zeros((10,10), int)
labels[3:8,3:7] = 1
img[labels > 0] = fill
result = morph.median_of_labels(img, labels, [ 1 ])
self.assertEqual(len(result), 1)
self.assertAlmostEqual(result[0], np.median(fill))
def test_01_03_two(self):
r = np.random.RandomState()
r.seed(12)
img = np.zeros((10,20))
labels = np.zeros((10,20), int)
labels[3:8,3:7] = 1
labels[3:8,13:18] = 2
for i, fill in enumerate([r.uniform(size=20), r.uniform(size=25)]):
img[labels == i+1] = fill
result = morph.median_of_labels(img, labels, [ 1,2 ])
self.assertEqual(len(result), 2)
self.assertAlmostEqual(result[0], np.median(img[labels==1]))
self.assertAlmostEqual(result[1], np.median(img[labels==2]))
class TestCalculatePerimeters(unittest.TestCase):
def test_00_00_zeros(self):
"""The perimeters of a zeros matrix should be all zero"""
perimeters = morph.calculate_perimeters(np.zeros((10,10),int),[1])
self.assertEqual(perimeters,0)
def test_01_01_square(self):
"""The perimeter of a square should be the sum of the sides"""
labels = np.zeros((10,10),int)
labels[1:9,1:9] = 1
perimeter = morph.calculate_perimeters(labels, [1])
self.assertEqual(perimeter, 4*8)
def test_01_02_circle(self):
"""The perimeter of a circle should be pi * diameter"""
labels = np.zeros((101,101),int)
y,x = np.mgrid[-50:51,-50:51]
labels[x*x+y*y<=2500] = 1
perimeter = morph.calculate_perimeters(labels, [1])
epsilon = 20
self.assertTrue(perimeter-np.pi*101<epsilon)
def test_01_03_on_edge(self):
"""Check the perimeter of objects touching edges of matrix"""
labels = np.zeros((10,20), int)
labels[:4,:4] = 1 # 4x4 square = 16 pixel perimeter
labels[-4:,-2:] = 2 # 4x2 square = 2+2+4+4 = 12
expected = [ 16, 12]
perimeter = morph.calculate_perimeters(labels, [1,2])
self.assertEqual(len(perimeter), 2)
self.assertEqual(perimeter[0], expected[0])
self.assertEqual(perimeter[1], expected[1])
class TestCalculateConvexArea(unittest.TestCase):
def test_00_00_degenerate_zero(self):
"""The convex area of an empty labels matrix should be zero"""
labels = np.zeros((10,10),int)
result = morph.calculate_convex_hull_areas(labels, [1])
self.assertEqual(result.shape[0],1)
self.assertEqual(result[0],0)
def test_00_01_degenerate_point(self):
"""The convex area of a point should be 1"""
labels = np.zeros((10,10),int)
labels[4,4] = 1
result = morph.calculate_convex_hull_areas(labels, [1])
self.assertEqual(result.shape[0],1)
self.assertEqual(result[0],1)
def test_00_02_degenerate_line(self):
"""The convex area of a line should be its length"""
labels = np.zeros((10,10),int)
labels[1:9,4] = 1
result = morph.calculate_convex_hull_areas(labels, [1])
self.assertEqual(result.shape[0],1)
self.assertEqual(result[0],8)
def test_01_01_square(self):
"""The convex area of a square should be its area"""
labels = np.zeros((10,10),int)
labels[1:9,1:9] = 1
result = morph.calculate_convex_hull_areas(labels, [1])
self.assertEqual(result.shape[0],1)
self.assertAlmostEqual(result[0],64)
def test_01_02_cross(self):
"""The convex area of a cross should be the area of the enclosing diamond
The area of a diamond is 1/2 of the area of the enclosing bounding box
"""
labels = np.zeros((10,10),int)
labels[1:9,4] = 1
labels[4,1:9] = 1
result = morph.calculate_convex_hull_areas(labels, [1])
self.assertEqual(result.shape[0],1)
self.assertAlmostEqual(result[0],32)
def test_02_01_degenerate_point_and_line(self):
"""Test a degenerate point and line in the same image, out of order"""
labels = np.zeros((10,10),int)
labels[1,1] = 1
labels[1:9,4] = 2
result = morph.calculate_convex_hull_areas(labels, [2,1])
self.assertEqual(result.shape[0],2)
self.assertEqual(result[0],8)
self.assertEqual(result[1],1)
def test_02_02_degenerate_point_and_square(self):
"""Test a degenerate point and a square in the same image"""
labels = np.zeros((10,10),int)
labels[1,1] = 1
labels[3:8,4:9] = 2
result = morph.calculate_convex_hull_areas(labels, [2,1])
self.assertEqual(result.shape[0],2)
self.assertEqual(result[1],1)
self.assertAlmostEqual(result[0],25)
def test_02_03_square_and_cross(self):
"""Test two non-degenerate figures"""
labels = np.zeros((20,10),int)
labels[1:9,1:9] = 1
labels[11:19,4] = 2
labels[14,1:9] = 2
result = morph.calculate_convex_hull_areas(labels, [2,1])
self.assertEqual(result.shape[0],2)
self.assertAlmostEqual(result[0],32)
self.assertAlmostEqual(result[1],64)
class TestEulerNumber(unittest.TestCase):
def test_00_00_even_zeros(self):
labels = np.zeros((10,12),int)
result = morph.euler_number(labels, [1])
self.assertEqual(len(result),1)
self.assertEqual(result[0],0)
def test_00_01_odd_zeros(self):
labels = np.zeros((11,13),int)
result = morph.euler_number(labels, [1])
self.assertEqual(len(result),1)
self.assertEqual(result[0],0)
def test_01_00_square(self):
labels = np.zeros((10,12),int)
labels[1:9,1:9] = 1
result = morph.euler_number(labels, [1])
self.assertEqual(len(result),1)
self.assertEqual(result[0],1)
def test_01_01_square_with_hole(self):
labels = np.zeros((10,12),int)
labels[1:9,1:9] = 1
labels[3:6,3:6] = 0
result = morph.euler_number(labels, [1])
self.assertEqual(len(result),1)
self.assertEqual(result[0],0)
def test_01_02_square_with_two_holes(self):
labels = np.zeros((10,12),int)
labels[1:9,1:9] = 1
labels[2:4,2:8] = 0
labels[6:8,2:8] = 0
result = morph.euler_number(labels, [1])
self.assertEqual(len(result),1)
self.assertEqual(result[0],-1)
def test_02_01_square_touches_border(self):
labels = np.ones((10,10),int)
result = morph.euler_number(labels, [1])
self.assertEqual(len(result),1)
self.assertEqual(result[0],1)
def test_03_01_two_objects(self):
labels =
|
np.zeros((10,10), int)
|
numpy.zeros
|
# -*- coding: utf-8 -*-
"""
Created on Tue Apr 3 10:13:17 2018
@author: damodara
"""
import numpy as np
from numpy.testing import assert_array_almost_equal
def build_uniform_P(size, noise):
""" The noise matrix flips any class to any other with probability
noise / (#class - 1).
"""
    assert 0. <= noise <= 1.
P = noise / (size - 1) * np.ones((size, size))
np.fill_diagonal(P, (1 - noise) * np.ones(size))
assert_array_almost_equal(P.sum(axis=1), 1, 1)
return P
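# Example (illustrative): with 3 classes and noise=0.3 the off-diagonal mass is
# split evenly, 0.3/2 = 0.15 per wrong class:
# >>> build_uniform_P(3, 0.3)
# array([[0.7 , 0.15, 0.15],
#        [0.15, 0.7 , 0.15],
#        [0.15, 0.15, 0.7 ]])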
def build_for_cifar100(size, noise):
""" The noise matrix flips to the "next" class with probability 'noise'.
"""
    assert 0. <= noise <= 1.
P = (1. - noise) * np.eye(size)
for i in np.arange(size - 1):
P[i, i+1] = noise
# adjust last row
P[size-1, 0] = noise
assert_array_almost_equal(P.sum(axis=1), 1, 1)
return P
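# Example (illustrative): build_for_cifar100(3, 0.2) flips each class to the
# next one cyclically (the last class wraps around to 0) with probability 0.2:
# array([[0.8, 0.2, 0. ],
#        [0. , 0.8, 0.2],
#        [0.2, 0. , 0.8]])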
def row_normalize_P(P, copy=True):
if copy:
P_norm = P.copy()
else:
P_norm = P
D = np.sum(P, axis=1)
for i in np.arange(P_norm.shape[0]):
P_norm[i, :] /= D[i]
return P_norm
def multiclass_noisify(y, P, random_state=0):
""" Flip classes according to transition probability matrix T.
It expects a number between 0 and the number of classes - 1.
"""
assert P.shape[0] == P.shape[1]
assert np.max(y) < P.shape[0]
# row stochastic matrix
assert_array_almost_equal(P.sum(axis=1), np.ones(P.shape[1]))
assert (P >= 0.0).all()
m = y.shape[0]
new_y = y.copy()
flipper = np.random.RandomState(random_state)
for idx in np.arange(m):
i = y[idx]
# draw a vector with only an 1
flipped = flipper.multinomial(1, P[i, :], 1)[0]
new_y[idx] = np.where(flipped == 1)[0]
return new_y
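# Usage sketch (illustrative, using the helpers defined in this module):
# y = np.random.randint(0, 3, size=1000)
# P = build_uniform_P(3, 0.3)
# y_noisy = multiclass_noisify(y, P, random_state=0)
# (y_noisy != y).mean()   # roughly 0.3 by construction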
def noisify_with_P(label, noise, P=None, random_state=None):
nb_classes = len(np.unique(label))
if noise > 0.0:
if P is None:
P = build_uniform_P(nb_classes, noise)
# seed the random numbers with #run
label_noisy = multiclass_noisify(label, P=P,
random_state=random_state)
actual_noise = (label_noisy != label).mean()
assert actual_noise > 0.0
print('Actual noise %.2f' % actual_noise)
label = label_noisy
# else:
# P = np.eye(nb_classes)
return label, P
def mnist_simulate_noisylabel(label, noise, random_state=None):
'''
    simulates noisy labels for MNIST data
    mistakes (x -> y means class x is flipped to y with probability `noise`):
1 <- 7
2 -> 7
3 -> 8
5 <-> 6
'''
label = label.ravel()
n_class = len(np.unique(label))
P= np.eye(n_class)
n = noise
if n>0.0:
# 1<-7
P[7,7], P[7,1] = 1. - n, n
# 2 ->7
P[2, 2], P[2, 7] = 1. -n, n
# 5 <-> 6
P[5,5], P[5,6] = 1. -n, n
P[6,6], P[6,5] = 1. -n, n
# 3 ->8
P[3,3], P[3,8] = 1. -n, n
label_noisy = multiclass_noisify(label, P=P,
random_state=random_state)
actual_noise = (label_noisy != label).mean()
assert actual_noise > 0.0
print('Actual noise %.2f' % actual_noise)
label = label_noisy
return label, P
def noisify_fashionmnist_asymmetric(y_train, noise, random_state=None):
"""mistakes:
9 --> 7
4 <--> 6
6 -->2
3 -->0
5 -->7
"""
nb_classes = 10
P =
|
np.eye(nb_classes)
|
numpy.eye
|
# -*- coding: utf-8 -*-
"""
This module contains the class to compute lensing properties of any
elliptical profile using Shajib (2019)'s Gauss decomposition.
"""
__author__ = 'ajshajib'
import numpy as np
import abc
from scipy.special import comb
from future.utils import with_metaclass
from lenstronomy.LensModel.Profiles.gaussian_ellipse_kappa import GaussianEllipseKappa
from lenstronomy.LensModel.Profiles.sersic_utils import SersicUtil
from lenstronomy.LensModel.Profiles.base_profile import LensProfileBase
_SQRT_2PI = np.sqrt(2*np.pi)
class GaussianEllipseKappaSet(LensProfileBase):
"""
This class computes the lensing properties of a set of concentric
elliptical Gaussian convergences.
"""
param_names = ['amp', 'sigma', 'e1', 'e2', 'center_x', 'center_y']
lower_limit_default = {'amp': 0, 'sigma': 0, 'e1': -0.5, 'e2': -0.5,
'center_x': -100, 'center_y': -100}
upper_limit_default = {'amp': 100, 'sigma': 100, 'e1': 0.5, 'e2': 0.5,
'center_x': 100, 'center_y': 100}
def __init__(self, use_scipy_wofz=True, min_ellipticity=1e-5):
"""
:param use_scipy_wofz: To initiate ``class GaussianEllipseKappa``. If ``True``, Gaussian lensing will use ``scipy.special.wofz`` function. Set ``False`` for lower precision, but faster speed.
:type use_scipy_wofz: ``bool``
:param min_ellipticity: To be passed to ``class GaussianEllipseKappa``. Minimum ellipticity for Gaussian elliptical lensing calculation. For lower ellipticity than min_ellipticity the equations for the spherical case will be used.
:type min_ellipticity: ``float``
"""
self.gaussian_ellipse_kappa = GaussianEllipseKappa(
use_scipy_wofz=use_scipy_wofz,
min_ellipticity=min_ellipticity)
super(GaussianEllipseKappaSet, self).__init__()
def function(self, x, y, amp, sigma, e1, e2, center_x=0, center_y=0):
"""
Compute the potential function for a set of concentric elliptical
Gaussian convergence profiles.
:param x: x coordinate
:type x: ``float`` or ``numpy.array``
:param y: y coordinate
:type y: ``float`` or ``numpy.array``
:param amp: Amplitude of Gaussian, convention: :math:`A/(2 \pi\sigma^2) \exp(-(x^2+y^2/q^2)/2\sigma^2)`
:type amp: ``numpy.array`` with ``dtype=float``
:param sigma: Standard deviation of Gaussian
:type sigma: ``numpy.array`` with ``dtype=float``
:param e1: Ellipticity parameter 1
:type e1: ``float``
:param e2: Ellipticity parameter 2
:type e2: ``float``
:param center_x: x coordinate of centroid
:type center_x: ``float``
        :param center_y: y coordinate of centroid
:type center_y: ``float``
:return: Potential for elliptical Gaussian convergence
:rtype: ``float``, or ``numpy.array`` with ``shape = x.shape``
"""
function = np.zeros_like(x, dtype=float)
for i in range(len(amp)):
function += self.gaussian_ellipse_kappa.function(x, y,
amp[i],
sigma[i], e1,
e2,
center_x,
center_y)
return function
def derivatives(self, x, y, amp, sigma, e1, e2, center_x=0, center_y=0):
"""
Compute the derivatives of function angles :math:`\partial
f/\partial x`, :math:`\partial f/\partial y` at :math:`x,\ y` for a
set of concentric elliptic Gaussian convergence profiles.
:param x: x coordinate
:type x: ``float`` or ``numpy.array``
:param y: y coordinate
:type y: ``float`` or ``numpy.array``
:param amp: Amplitude of Gaussian, convention: :math:`A/(2 \pi\sigma^2) \exp(-(x^2+y^2/q^2)/2\sigma^2)`
:type amp: ``numpy.array`` with ``dtype=float``
:param sigma: Standard deviation of Gaussian
:type sigma: ``numpy.array`` with ``dtype=float``
:param e1: Ellipticity parameter 1
:type e1: ``float``
:param e2: Ellipticity parameter 2
:type e2: ``float``
:param center_x: x coordinate of centroid
:type center_x: ``float``
        :param center_y: y coordinate of centroid
:type center_y: ``float``
:return: Deflection angle :math:`\partial f/\partial x`, :math:`\partial f/\partial y` for elliptical Gaussian convergence
:rtype: tuple ``(float, float)`` or ``(numpy.array, numpy.array)`` with each ``numpy`` array's shape equal to ``x.shape``
"""
f_x = np.zeros_like(x, dtype=float)
f_y = np.zeros_like(x, dtype=float)
for i in range(len(amp)):
f_x_i, f_y_i = self.gaussian_ellipse_kappa.derivatives(x, y,
amp=amp[i],
sigma=sigma[i], e1=e1,
e2=e2, center_x=center_x,
center_y=center_y)
f_x += f_x_i
f_y += f_y_i
return f_x, f_y
def hessian(self, x, y, amp, sigma, e1, e2, center_x=0, center_y=0):
"""
Compute Hessian matrix of function :math:`\partial^2f/\partial x^2`,
:math:`\partial^2 f/\partial y^2`, :math:`\partial^2 f/\partial
x\partial y` for a set of concentric elliptic Gaussian convergence
profiles.
:param x: x coordinate
:type x: ``float`` or ``numpy.array``
:param y: y coordinate
:type y: ``float`` or ``numpy.array``
:param amp: Amplitude of Gaussian, convention: :math:`A/(2 \pi\sigma^2) \exp(-(x^2+y^2/q^2)/2\sigma^2)`
:type amp: ``numpy.array`` with ``dtype=float``
:param sigma: Standard deviation of Gaussian
:type sigma: ``numpy.array`` with ``dtype=float``
:param e1: Ellipticity parameter 1
:type e1: ``float``
:param e2: Ellipticity parameter 2
:type e2: ``float``
:param center_x: x coordinate of centroid
:type center_x: ``float``
        :param center_y: y coordinate of centroid
:type center_y: ``float``
:return: Hessian :math:`\partial^2f/\partial x^2`, :math:`\partial^2 f/\partial y^2`, :math:`\partial^2/\partial x\partial y` for elliptical Gaussian convergence.
:rtype: tuple ``(float, float, float)`` , or ``(numpy.array, numpy.array, numpy.array)`` with each ``numpy`` array's shape equal to ``x.shape``
"""
f_xx = np.zeros_like(x, dtype=float)
f_yy = np.zeros_like(x, dtype=float)
f_xy = np.zeros_like(x, dtype=float)
for i in range(len(amp)):
f_xx_i, f_yy_i, f_xy_i = self.gaussian_ellipse_kappa.hessian(
x, y,
amp=amp[i],
sigma=sigma[i],
e1=e1,
e2=e2,
center_x=center_x,
center_y=center_y)
f_xx += f_xx_i
f_yy += f_yy_i
f_xy += f_xy_i
return f_xx, f_yy, f_xy
def density_2d(self, x, y, amp, sigma, e1, e2, center_x=0, center_y=0):
"""
Compute the density of a set of concentric elliptical Gaussian
convergence profiles :math:`\sum A/(2\pi \sigma^2) \exp(-(
x^2+y^2/q^2)/2\sigma^2)`.
:param x: x coordinate
:type x: ``float`` or ``numpy.array``
:param y: y coordinate
:type y: ``float`` or ``numpy.array``
:param amp: Amplitude of Gaussian, convention: :math:`A/(2 \pi\sigma^2) \exp(-(x^2+y^2/q^2)/2\sigma^2)`
:type amp: ``numpy.array`` with ``dtype=float``
:param sigma: Standard deviation of Gaussian
:type sigma: ``numpy.array`` with ``dtype=float``
:param e1: Ellipticity parameter 1
:type e1: ``float``
:param e2: Ellipticity parameter 2
:type e2: ``float``
:param center_x: x coordinate of centroid
:type center_x: ``float``
        :param center_y: y coordinate of centroid
:type center_y: ``float``
:return: Density :math:`\kappa` for elliptical Gaussian convergence
:rtype: ``float``, or ``numpy.array`` with shape equal to ``x.shape``
"""
density_2d = np.zeros_like(x, dtype=float)
for i in range(len(amp)):
density_2d += self.gaussian_ellipse_kappa.density_2d(x, y,
amp=amp[i],
sigma=sigma[i],
e1=e1, e2=e2,
center_x=center_x,
center_y=center_y)
return density_2d
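# Usage sketch (illustrative; the amplitudes and sigmas below are made-up
# numbers): two concentric elliptical Gaussian components evaluated along x.
# gset = GaussianEllipseKappaSet()
# amp = np.array([1.0, 0.5])
# sigma = np.array([0.5, 1.5])
# x = np.linspace(-2., 2., 5)
# f_x, f_y = gset.derivatives(x, np.zeros_like(x), amp, sigma, e1=0.1, e2=0.)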
class GaussDecompositionAbstract(with_metaclass(abc.ABCMeta)):
"""
This abstract class sets up a template for computing lensing properties of
an elliptical convergence through Shajib (2019)'s Gauss decomposition.
"""
def __init__(self, n_sigma=15, sigma_start_mult=0.02, sigma_end_mult=15.,
precision=10, use_scipy_wofz=True, min_ellipticity=1e-5):
"""
Set up settings for the Gaussian decomposition. For more details about
the decomposition parameters, see Shajib (2019).
:param n_sigma: Number of Gaussian components
:type n_sigma: ``int``
:param sigma_start_mult: Lower range of logarithmically spaced sigmas
:type sigma_start_mult: ``float``
:param sigma_end_mult: Upper range of logarithmically spaced sigmas
:type sigma_end_mult: ``float``
:param precision: Numerical precision of Gaussian decomposition
:type precision: ``int``
:param use_scipy_wofz: To be passed to ``class GaussianEllipseKappa``. If ``True``, Gaussian lensing will use ``scipy.special.wofz`` function. Set ``False`` for lower precision, but faster speed.
:type use_scipy_wofz: ``bool``
:param min_ellipticity: To be passed to ``class GaussianEllipseKappa``. Minimum ellipticity for Gaussian elliptical lensing calculation. For lower ellipticity than min_ellipticity the equations for the spherical case will be used.
:type min_ellipticity: ``float``
"""
self.gaussian_set = GaussianEllipseKappaSet(
use_scipy_wofz=use_scipy_wofz,
min_ellipticity=min_ellipticity)
self.n_sigma = n_sigma
self.sigma_start_mult = sigma_start_mult
self.sigma_end_mult = sigma_end_mult
self.precision = precision
p = self.precision
# nodes and weights based on Fourier-Euler method
        # for details see <NAME> (2006)
kes = np.arange(2 * p + 1)
self.betas = np.sqrt(2 * p * np.log(10) / 3. + 2. * 1j * np.pi * kes)
epsilons = np.zeros(2 * p + 1)
epsilons[0] = 0.5
epsilons[1:p + 1] = 1.
epsilons[-1] = 1 / 2. ** p
for k in range(1, p):
epsilons[2 * p - k] = epsilons[2 * p - k + 1] + 1 / 2. ** p * comb(
p, k)
self.etas = (-1.) ** kes * epsilons * 10. ** (p / 3.) * 2. * \
_SQRT_2PI
def gauss_decompose(self, **kwargs):
r"""
Compute the amplitudes and sigmas of Gaussian components using the
integral transform with Gaussian kernel from Shajib (2019). The
returned values are in the convention of eq. (2.13).
:param func: The function to decompose
:type func: ``function``
:param \**kwargs: Keyword arguments to send to ``func``
:return: Amplitudes and standard deviations of the Gaussian components
:rtype: tuple ``(numpy.array, numpy.array)``
"""
sigma_start = self.sigma_start_mult*self.get_scale(**kwargs)
sigma_end = self.sigma_end_mult*self.get_scale(**kwargs)
sigmas = np.logspace(np.log10(sigma_start), np.log10(sigma_end),
self.n_sigma)
f_sigmas = np.sum(self.etas * self.get_kappa_1d(
sigmas[:,np.newaxis]*self.betas[np.newaxis, :],
**kwargs).real,
axis=1
)
del_log_sigma = np.abs(np.diff(np.log(sigmas)).mean())
amps = f_sigmas * del_log_sigma / _SQRT_2PI
# weighting for trapezoid method integral
amps[0] *= 0.5
amps[-1] *= 0.5
return amps, sigmas
@abc.abstractmethod
def get_scale(self, **kwargs):
"""
Abstract method to identify the keyword argument for the scale size
among the profile parameters of the child class' convergence profile.
:param \**kwargs: Keyword arguments
:return: Scale size
:rtype: ``float``
"""
@abc.abstractmethod
def get_kappa_1d(self, y, **kwargs):
r"""
Abstract method to compute the spherical Sersic profile at y.
        The concrete method has to be defined by the child class.
:param y: y coordinate
:type y: ``float`` or ``numpy.array``
:param \**kwargs: Keyword arguments that are defined by the child class that are particular for the convergence profile
"""
def function(self, x, y, e1=0., e2=0., center_x=0.,
center_y=0., **kwargs):
r"""
Compute the deflection potential of a Gauss-decomposed
elliptic convergence.
:param x: x coordinate
:type x: ``float``
:param y: y coordinate
:type y: ``float``
:param e1: Ellipticity parameter 1
:type e1: ``float``
:param e2: Ellipticity parameter 2
:type e2: ``float``
:param center_x: x coordinate of centroid
:type center_x: ``float``
:param center_y: y coordinate of centroid
:type center_y: ``float``
:param \**kwargs: Keyword arguments that are defined by the child class that are particular for the convergence profile
:return: Deflection potential
:rtype: ``float``
"""
amps, sigmas = self.gauss_decompose(**kwargs)
# converting the amplitude convention A -> A/(2*pi*sigma^2)
amps *= 2.*np.pi * sigmas * sigmas
return self.gaussian_set.function(x, y, amps, sigmas, e1, e2,
center_x, center_y)
def derivatives(self, x, y, e1=0., e2=0., center_x=0.,
center_y=0., **kwargs):
r"""
Compute the derivatives of the deflection potential :math:`\partial
f/\partial x`, :math:`\partial f/\partial y` for a Gauss-decomposed
elliptic convergence.
:param x: x coordinate
:type x: ``float`` or ``numpy.array``
:param y: y coordinate
:type y: ``float`` or ``numpy.array``
:param e1: Ellipticity parameter 1
:type e1: ``float``
:param e2: Ellipticity parameter 2
:type e2: ``float``
:param center_x: x coordinate of centroid
:type center_x: ``float``
:param center_y: y coordinate of centroid
:type center_y: ``float``
:param \**kwargs: Keyword arguments that are defined by the child class that are particular for the convergence profile
:return: Derivatives of deflection potential
:rtype: tuple ``(type(x), type(x))``
"""
amps, sigmas = self.gauss_decompose(**kwargs)
# converting the amplitude convention A -> A/(2*pi*sigma^2)
amps *= 2. * np.pi * sigmas * sigmas
return self.gaussian_set.derivatives(x, y, amps, sigmas, e1, e2,
center_x, center_y)
def hessian(self, x, y, e1=0., e2=0., center_x=0.,
center_y=0., **kwargs):
r"""
Compute the Hessian of the deflection potential
:math:`\partial^2f/\partial x^2`, :math:`\partial^2 f/ \partial
y^2`, :math:`\partial^2 f/\partial x\partial y` of a Gauss-decomposed
elliptic Sersic convergence.
:param x: x coordinate
:type x: ``float`` or ``numpy.array``
:param y: y coordinate
:type y: ``float`` or ``numpy.array``
:param e1: Ellipticity parameter 1
:type e1: ``float``
:param e2: Ellipticity parameter 2
:type e2: ``float``
:param center_x: x coordinate of centroid
:type center_x: ``float``
:param center_y: y coordinate of centroid
:type center_y: ``float``
:param \**kwargs: Keyword arguments that are defined by the child class that are particular for the convergence profile
:return: Hessian of deflection potential
:rtype: tuple ``(type(x), type(x), type(x))``
"""
amps, sigmas = self.gauss_decompose(**kwargs)
# converting the amplitude convention A -> A/(2*pi*sigma^2)
amps *= 2. * np.pi * sigmas * sigmas
return self.gaussian_set.hessian(x, y, amps, sigmas, e1, e2,
center_x, center_y)
def density_2d(self, x, y, e1=0., e2=0., center_x=0.,
center_y=0., **kwargs):
r"""
        Compute the convergence profile of a Gauss-decomposed elliptical profile.
:param x: x coordinate
:type x: ``float`` or ``numpy.array``
:param y: y coordinate
:type y: ``float`` or ``numpy.array``
:param e1: Ellipticity parameter 1
:type e1: ``float``
:param e2: Ellipticity parameter 2
:type e2: ``float``
:param center_x: x coordinate of centroid
:type center_x: ``float``
:param center_y: y coordinate of centroid
:type center_y: ``float``
:param \**kwargs: Keyword arguments that are defined by the child class that are particular for the convergence profile in the child class.
:return: Convergence profile
:rtype: ``type(x)``
"""
amps, sigmas = self.gauss_decompose(**kwargs)
# converting the amplitude convention A -> A/(2*pi*sigma^2)
amps *= 2. * np.pi * sigmas * sigmas
return self.gaussian_set.density_2d(x, y, amps, sigmas, e1, e2,
center_x, center_y)
class SersicEllipseGaussDec(GaussDecompositionAbstract):
"""
This class computes the lensing properties of an elliptical Sersic
profile using the Shajib (2019)'s Gauss decomposition method.
"""
param_names = ['k_eff', 'R_sersic', 'n_sersic', 'e1', 'e2', 'center_x',
'center_y']
lower_limit_default = {'k_eff': 0., 'R_sersic': 0., 'n_sersic': 0.5,
'e1': -0.5, 'e2': -0.5, 'center_x': -100.,
'center_y': -100.}
upper_limit_default = {'k_eff': 100., 'R_sersic': 100., 'n_sersic': 8.,
'e1': 0.5, 'e2': 0.5, 'center_x': 100.,
'center_y': 100.}
def get_kappa_1d(self, y, **kwargs):
r"""
Compute the spherical Sersic profile at y.
:param y: y coordinate
:type y: ``float``
:param \**kwargs: Keyword arguments
:Keyword Arguments:
* **n_sersic** (``float``) --
Sersic index
* **R_sersic** (``float``) --
Sersic scale radius
* **k_eff** (``float``) --
Sersic convergence at R_sersic
:return: Sersic function at y
:rtype: ``type(y)``
"""
n_sersic = kwargs['n_sersic']
R_sersic = kwargs['R_sersic']
k_eff = kwargs['k_eff']
bn = SersicUtil.b_n(n_sersic)
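        # The line below is the spherical Sersic convergence,
        #     kappa(y) = k_eff * exp(-bn * ((y / R_sersic)**(1. / n_sersic) - 1.)),
        # which by construction equals k_eff at y = R_sersic.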
return k_eff *
|
np.exp(-bn * (y / R_sersic) ** (1. / n_sersic) + bn)
|
numpy.exp
|
# -*- coding: utf-8 -*-
import os.path as op
import numpy as np
import nibabel as nb
from nipype.external.version import LooseVersion
from ... import logging
from ..base import TraitedSpec, BaseInterfaceInputSpec, File, isdefined, traits
from .base import (
DipyBaseInterface,
HAVE_DIPY,
dipy_version,
dipy_to_nipype_interface,
get_dipy_workflows,
)
IFLOGGER = logging.getLogger("nipype.interface")
if HAVE_DIPY and (
LooseVersion("0.15") >= LooseVersion(dipy_version()) >= LooseVersion("0.16")
):
try:
from dipy.workflows.tracking import LocalFiberTrackingPAMFlow as DetTrackFlow
except ImportError: # different name in 0.15
from dipy.workflows.tracking import DetTrackPAMFlow as DetTrackFlow
DeterministicTracking = dipy_to_nipype_interface(
"DeterministicTracking", DetTrackFlow
)
if HAVE_DIPY and LooseVersion(dipy_version()) >= LooseVersion("0.15"):
from dipy.workflows import segment, tracking
l_wkflw = get_dipy_workflows(segment) + get_dipy_workflows(tracking)
for name, obj in l_wkflw:
new_name = name.replace("Flow", "")
globals()[new_name] = dipy_to_nipype_interface(new_name, obj)
del l_wkflw
else:
IFLOGGER.info(
"We advise you to upgrade DIPY version. This upgrade will"
" open access to more function"
)
class TrackDensityMapInputSpec(BaseInterfaceInputSpec):
in_file = File(exists=True, mandatory=True, desc="The input TrackVis track file")
reference = File(
exists=True, desc="A reference file to define RAS coordinates space"
)
points_space = traits.Enum(
"rasmm", "voxel", None, usedefault=True, desc="coordinates of trk file"
)
voxel_dims = traits.List(
traits.Float, minlen=3, maxlen=3, desc="The size of each voxel in mm."
)
data_dims = traits.List(
traits.Int, minlen=3, maxlen=3, desc="The size of the image in voxels."
)
out_filename = File(
"tdi.nii",
usedefault=True,
desc="The output filename for the tracks in TrackVis " "(.trk) format",
)
class TrackDensityMapOutputSpec(TraitedSpec):
out_file = File(exists=True)
class TrackDensityMap(DipyBaseInterface):
"""
Creates a tract density image from a TrackVis track file using functions
from dipy
Example
-------
>>> import nipype.interfaces.dipy as dipy
>>> trk2tdi = dipy.TrackDensityMap()
>>> trk2tdi.inputs.in_file = 'converted.trk'
>>> trk2tdi.run() # doctest: +SKIP
"""
input_spec = TrackDensityMapInputSpec
output_spec = TrackDensityMapOutputSpec
def _run_interface(self, runtime):
from numpy import min_scalar_type
from dipy.tracking.utils import density_map
import nibabel.trackvis as nbt
tracks, header = nbt.read(self.inputs.in_file)
streams = ((ii[0]) for ii in tracks)
if isdefined(self.inputs.reference):
refnii = nb.load(self.inputs.reference)
affine = refnii.affine
data_dims = refnii.shape[:3]
kwargs = dict(affine=affine)
else:
IFLOGGER.warning(
"voxel_dims and data_dims are deprecated as of dipy "
"0.7.1. Please use reference input instead"
)
if not isdefined(self.inputs.data_dims):
data_dims = header["dim"]
else:
data_dims = self.inputs.data_dims
if not isdefined(self.inputs.voxel_dims):
voxel_size = header["voxel_size"]
else:
voxel_size = self.inputs.voxel_dims
affine = header["vox_to_ras"]
kwargs = dict(voxel_size=voxel_size)
data = density_map(streams, data_dims, **kwargs)
data = data.astype(min_scalar_type(data.max()))
img = nb.Nifti1Image(data, affine)
out_file = op.abspath(self.inputs.out_filename)
nb.save(img, out_file)
IFLOGGER.info(
"Track density map saved as %s, size=%s, dimensions=%s",
out_file,
img.shape,
img.header.get_zooms(),
)
return runtime
def _list_outputs(self):
outputs = self._outputs().get()
outputs["out_file"] = op.abspath(self.inputs.out_filename)
return outputs
class StreamlineTractographyInputSpec(BaseInterfaceInputSpec):
in_file = File(exists=True, mandatory=True, desc=("input diffusion data"))
in_model = File(exists=True, desc=("input f/d-ODF model extracted from."))
tracking_mask = File(exists=True, desc=("input mask within which perform tracking"))
seed_mask = File(exists=True, desc=("input mask within which perform seeding"))
in_peaks = File(exists=True, desc=("peaks computed from the odf"))
seed_coord = File(
exists=True,
desc=("file containing the list of seed voxel " "coordinates (N,3)"),
)
gfa_thresh = traits.Float(
0.2,
mandatory=True,
usedefault=True,
desc=("GFA threshold to compute tracking mask"),
)
peak_threshold = traits.Float(
0.5,
mandatory=True,
usedefault=True,
desc=("threshold to consider peaks from model"),
)
min_angle = traits.Float(
25.0, mandatory=True, usedefault=True, desc=("minimum separation angle")
)
multiprocess = traits.Bool(
True, mandatory=True, usedefault=True, desc=("use multiprocessing")
)
save_seeds = traits.Bool(
False, mandatory=True, usedefault=True, desc=("save seeding voxels coordinates")
)
num_seeds = traits.Int(
10000,
mandatory=True,
usedefault=True,
desc=("desired number of tracks in tractography"),
)
out_prefix = traits.Str(desc=("output prefix for file names"))
class StreamlineTractographyOutputSpec(TraitedSpec):
tracks = File(desc="TrackVis file containing extracted streamlines")
gfa = File(
desc=(
"The resulting GFA (generalized FA) computed using the " "peaks of the ODF"
)
)
odf_peaks = File(desc=("peaks computed from the odf"))
out_seeds = File(
desc=("file containing the (N,3) *voxel* coordinates used" " in seeding.")
)
class StreamlineTractography(DipyBaseInterface):
"""
Streamline tractography using EuDX [Garyfallidis12]_.
.. [Garyfallidis12] <NAME>., “Towards an accurate brain
tractography”, PhD thesis, University of Cambridge, 2012
Example
-------
>>> from nipype.interfaces import dipy as ndp
>>> track = ndp.StreamlineTractography()
>>> track.inputs.in_file = '4d_dwi.nii'
>>> track.inputs.in_model = 'model.pklz'
>>> track.inputs.tracking_mask = 'dilated_wm_mask.nii'
>>> res = track.run() # doctest: +SKIP
"""
input_spec = StreamlineTractographyInputSpec
output_spec = StreamlineTractographyOutputSpec
def _run_interface(self, runtime):
from dipy.reconst.peaks import peaks_from_model
from dipy.tracking.eudx import EuDX
from dipy.data import get_sphere
# import marshal as pickle
        import pickle
import gzip
if not (isdefined(self.inputs.in_model) or isdefined(self.inputs.in_peaks)):
raise RuntimeError(
("At least one of in_model or in_peaks should " "be supplied")
)
img = nb.load(self.inputs.in_file)
imref = nb.four_to_three(img)[0]
affine = img.affine
data = img.get_fdata(dtype=np.float32)
hdr = imref.header.copy()
hdr.set_data_dtype(np.float32)
hdr["data_type"] = 16
sphere = get_sphere("symmetric724")
self._save_peaks = False
if isdefined(self.inputs.in_peaks):
IFLOGGER.info("Peaks file found, skipping ODF peaks search...")
f = gzip.open(self.inputs.in_peaks, "rb")
peaks = pickle.load(f)
f.close()
else:
self._save_peaks = True
IFLOGGER.info("Loading model and computing ODF peaks")
f = gzip.open(self.inputs.in_model, "rb")
odf_model = pickle.load(f)
f.close()
peaks = peaks_from_model(
model=odf_model,
data=data,
sphere=sphere,
relative_peak_threshold=self.inputs.peak_threshold,
min_separation_angle=self.inputs.min_angle,
parallel=self.inputs.multiprocess,
)
f = gzip.open(self._gen_filename("peaks", ext=".pklz"), "wb")
pickle.dump(peaks, f, -1)
f.close()
hdr.set_data_shape(peaks.gfa.shape)
nb.Nifti1Image(peaks.gfa.astype(np.float32), affine, hdr).to_filename(
self._gen_filename("gfa")
)
IFLOGGER.info("Performing tractography")
if isdefined(self.inputs.tracking_mask):
msk = np.asanyarray(nb.load(self.inputs.tracking_mask).dataobj)
msk[msk > 0] = 1
msk[msk < 0] = 0
else:
msk = np.ones(imref.shape)
gfa = peaks.gfa * msk
seeds = self.inputs.num_seeds
if isdefined(self.inputs.seed_coord):
seeds = np.loadtxt(self.inputs.seed_coord)
elif isdefined(self.inputs.seed_mask):
seedmsk = np.asanyarray(nb.load(self.inputs.seed_mask).dataobj)
assert seedmsk.shape == data.shape[:3]
seedmsk[seedmsk > 0] = 1
seedmsk[seedmsk < 1] = 0
seedps = np.array(np.where(seedmsk == 1), dtype=np.float32).T
vseeds = seedps.shape[0]
nsperv = (seeds // vseeds) + 1
IFLOGGER.info(
"Seed mask is provided (%d voxels inside "
"mask), computing seeds (%d seeds/voxel).",
vseeds,
nsperv,
)
if nsperv > 1:
IFLOGGER.info(
"Needed %d seeds per selected voxel (total %d).", nsperv, vseeds
)
seedps = np.vstack(
|
np.array([seedps] * nsperv)
|
numpy.array
|
"""Incremental Feature Dependency Discovery"""
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import absolute_import
# iFDD implementation based on ICML 2011 paper
from builtins import super
from future import standard_library
standard_library.install_aliases()
from builtins import str
from builtins import range
from past.utils import old_div
from builtins import object
from copy import deepcopy
import numpy as np
from rlpy.Tools import printClass, PriorityQueueWithNovelty
from rlpy.Tools import powerset, combinations, addNewElementForAllActions
from rlpy.Tools import plt
from .Representation import Representation
import warnings
from collections import defaultdict
__copyright__ = "Copyright 2013, RLPy http://acl.mit.edu/RLPy"
__credits__ = ["<NAME>", "<NAME>", "<NAME>",
"<NAME>", "<NAME>"]
__license__ = "BSD 3-Clause"
__author__ = "<NAME>"
class iFDD_feature(object):
''' This object represents a feature used for linear function approximation.
The feature can be an initial feature or made using the conjunction of existing features.
index: is the location of the feature in the feature vector
    f_set: set of features whose conjunction creates this feature.
p1, p2: parents of the current feature. Notice that multiple combinations of parents may
lead to the same feature. For example given 3 basic features of X, Y, and Z
    P1=XY and P2=XZ, or P1=X and P2=YZ, both lead to the new feature XYZ.
Both these parameters are used for visualizations only.
'''
# Unique index Corresponding to its location in a vector (0 based!)
index = None
f_set = None # A set of basic features that this feature corresponds to
# Parent 1 feature index *Basic features have -1 for both nodes
p1 = None
# Parent 2 feature index *Basic features have -1 for both nodes
p2 = None
def __init__(self, potential):
if isinstance(potential, iFDD_potential):
self.index = potential.index
self.f_set = deepcopy(potential.f_set)
self.p1 = potential.p1
self.p2 = potential.p2
else:
# This is the case where a single integer is passed to generate an
# initial feature
index = potential
self.index = index
self.f_set = frozenset([index])
self.p1 = -1
self.p2 = -1
def __deepcopy__(self, memo):
new_f = iFDD_feature(self.index)
new_f.p1 = self.p1
new_f.p2 = self.p2
new_f.f_set = deepcopy(self.f_set)
return new_f
def show(self):
printClass(self)
class iFDD_potential(object):
''' This object represents a potential feature that can be promoted to a permanent feature.
The structure of this object is very similar to iFDD feature object, except it has a relevance parameter
that measures the importance of creating a new feature out of this potential feature.
'''
relevance = None # Sum of errors corresponding to this feature
f_set = None # Set of features it corresponds to [Immutable]
    # If this feature has been discovered set this to its index, else -1
index = None
p1 = None # Parent 1 feature index
p2 = None # Parent 2 feature index
count = None # Number of times this feature was visited
def __init__(self, f_set, parent1, parent2):
self.f_set = deepcopy(f_set)
self.index = -1 # -1 means it has not been discovered yet
self.p1 = parent1
self.p2 = parent2
self.cumtderr = 0
self.cumabstderr = 0
self.count = 1
def __deepcopy__(self, memo):
new_p = iFDD_potential(self.f_set, self.p1, self.p2)
new_p.cumtderr = self.cumtderr
new_p.cumabstderr = self.cumabstderr
return new_p
def show(self):
printClass(self)
class iFDD(Representation):
''' The incremental Feature Dependency Discovery Representation based on
[Geramifard et al. 2011 ICML paper]. This representation starts with a set of given
binary features and adds new features as the conjunction of existing features. Given n features
iFDD can expand the set of features up to 2^n-1 features (i.e. conjunction of each subset of n
    features can be considered as a new feature).
'''
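    # Small worked example: with n = 3 initial binary features {0, 1, 2}, iFDD can
    # in principle discover the conjunctions {0,1}, {0,2}, {1,2} and {0,1,2},
    # giving at most 2**3 - 1 = 7 features in total (3 initial + 4 discovered).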
# It is a good starting point to see how relevances grow if threshold is
# set to infinity.
PRINT_MAX_RELEVANCE = False
discovery_threshold = None # psi in the paper
# boolean specifying the use of the trick mentioned in the paper so that
# features are getting sparser with more feature discoveries (i.e. use
# greedy algorithm for feature activation)
sparsify = None
# dictionary mapping initial feature sets to iFDD_feature
iFDD_features = None
# dictionary mapping initial feature sets to iFDD_potential
iFDD_potentials = None
# dictionary mapping each feature index (ID) to its feature object
featureIndex2feature = None
debug = 0 # Print more stuff
# dictionary mapping initial active feature set phi_0(s) to its
# corresponding active features at phi(s). Based on Tuna's Trick to speed
# up iFDD
cache = None
# this should only increase speed. If results are different something is
# wrong
useCache = 0
# Number of features to be expanded in the batch setting
maxBatchDiscovery = 0
# Minimum value of feature relevance for the batch setting
batchThreshold = 0
# ICML 11 iFDD would add sum of abs(TD-errors) while the iFDD plus uses
# the abs(sum(TD-Error))/sqrt(potential feature presence count)
iFDDPlus = 0
# This is a priority queue based on the size of the features (Largest ->
# Smallest). For same size features, it is also sorted based on the newest
# -> oldest. Each element is the pointer to feature object.
sortediFDDFeatures = None
# A Representation that provides the initial set of features for iFDD
initial_representation = None
# Helper parameter to get a sense of appropriate threshold on the
# relevance for discovery
maxRelevance = -np.inf
# As Christoph mentioned adding new features may affect the phi for all
# states. This idea was to make sure both conditions for generating active
# features generate the same result.
use_chirstoph_ordered_features = True
def __init__(
self, domain, discovery_threshold, initial_representation,
sparsify=True, discretization=20, debug=0, useCache=0,
maxBatchDiscovery=1, batchThreshold=0, iFDDPlus=1, seed=1):
self.iFDD_features = {}
self.iFDD_potentials = {}
self.featureIndex2feature = {}
self.cache = {}
self.discovery_threshold = discovery_threshold
self.sparsify = sparsify
self.setBinsPerDimension(domain, discretization)
self.features_num = initial_representation.features_num
self.debug = debug
self.useCache = useCache
self.maxBatchDiscovery = maxBatchDiscovery
self.batchThreshold = batchThreshold
self.sortediFDDFeatures = PriorityQueueWithNovelty()
self.initial_representation = initial_representation
self.iFDDPlus = iFDDPlus
self.isDynamic = True
self.addInitialFeatures()
super(iFDD, self).__init__(domain, discretization, seed)
def phi_nonTerminal(self, s):
""" Based on Tuna's Master Thesis 2012 """
F_s = np.zeros(self.features_num, 'bool')
F_s_0 = self.initial_representation.phi_nonTerminal(
s)
activeIndices = np.where(F_s_0 != 0)[0]
if self.useCache:
finalActiveIndices = self.cache.get(frozenset(activeIndices))
if finalActiveIndices is None:
# run regular and update the cache
finalActiveIndices = self.findFinalActiveFeatures(
activeIndices)
else:
finalActiveIndices = self.findFinalActiveFeatures(
activeIndices)
F_s[finalActiveIndices] = 1
return F_s
def findFinalActiveFeatures(self, intialActiveFeatures):
"""
Given the active indices of phi_0(s) find the final active indices of phi(s) based on discovered features
"""
finalActiveFeatures = []
k = len(intialActiveFeatures)
initialSet = set(intialActiveFeatures)
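        # Illustrative example of the sparsified mapping: if phi_0 activates
        # {0, 1, 20} and the conjunction {0, 20} has already been discovered,
        # then with sparsify=True the returned indices are those of {0, 20} and
        # {1}; without sparsification the base features 0 and 20 stay active too.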
if 2 ** k <= self.features_num:
# k can be big which can cause this part to be very slow
# if k is large then find active features by enumerating on the
# discovered features.
if self.use_chirstoph_ordered_features:
for i in range(k, 0, -1):
if len(initialSet) == 0:
break
# generate list of all combinations with i elements
cand_i = [(c, self.iFDD_features[frozenset(c)].index)
for c in combinations(initialSet, i)
if frozenset(c) in self.iFDD_features]
# sort (recent features (big ids) first)
cand_i.sort(key=lambda x: x[1], reverse=True)
# idx = -1
for candidate, ind in cand_i:
# the next block is for testing only
# cur_idx = self.iFDD_features[frozenset(candidate)].index
# if idx > 0:
# assert(idx > cur_idx)
# idx = cur_idx
if len(initialSet) == 0:
# No more initial features to be mapped to extended
# ones
break
# This was missing from ICML 2011 paper algorithm.
# Example: [0,1,20], [0,20] is discovered, but if [0]
# is checked before [1] it will be added even though it
# is already covered by [0,20]
if initialSet.issuperset(set(candidate)):
feature = self.iFDD_features.get(
frozenset(candidate))
if feature is not None:
finalActiveFeatures.append(feature.index)
if self.sparsify:
# print "Sets:", initialSet, feature.f_set
initialSet = initialSet - feature.f_set
# print "Remaining Set:", initialSet
else:
for candidate in powerset(initialSet, ascending=0):
if len(initialSet) == 0:
# No more initial features to be mapped to extended
# ones
break
# This was missing from ICML 2011 paper algorithm. Example:
# [0,1,20], [0,20] is discovered, but if [0] is checked
# before [1] it will be added even though it is already
# covered by [0,20]
if initialSet.issuperset(set(candidate)):
feature = self.iFDD_features.get(frozenset(candidate))
if feature is not None:
finalActiveFeatures.append(feature.index)
if self.sparsify:
# print "Sets:", initialSet, feature.f_set
initialSet = initialSet - feature.f_set
# print "Remaining Set:", initialSet
else:
# print "********** Using Alternative: %d > %d" % (2**k, self.features_num)
# Loop on all features sorted on their size and then novelty and
# activate features
for feature in self.sortediFDDFeatures.toList():
if len(initialSet) == 0:
# No more initial features to be mapped to extended ones
break
if initialSet.issuperset(set(feature.f_set)):
finalActiveFeatures.append(feature.index)
if self.sparsify:
# print "Sets:", initialSet, feature.f_set
initialSet = initialSet - feature.f_set
# print "Remaining Set:", initialSet
if self.useCache:
self.cache[frozenset(intialActiveFeatures)] = finalActiveFeatures
return finalActiveFeatures
def post_discover(self, s, terminal, a, td_error, phi_s):
"""
returns the number of added features
"""
# Indices of non-zero elements of vector phi_s
activeFeatures = phi_s.nonzero()[0]
discovered = 0
for g_index, h_index in combinations(activeFeatures, 2):
discovered += self.inspectPair(g_index, h_index, td_error)
return discovered
def inspectPair(self, g_index, h_index, td_error):
# Inspect feature f = g union h where g_index and h_index are the indices of features g and h
# If the relevance is > Threshold add it to the list of features
# Returns True if a new feature is added
g = self.featureIndex2feature[g_index].f_set
h = self.featureIndex2feature[h_index].f_set
f = g.union(h)
feature = self.iFDD_features.get(f)
if not self.iFDDPlus:
td_error = abs(td_error)
if feature is not None:
# Already exists
return False
# Look it up in potentials
potential = self.iFDD_potentials.get(f)
if potential is None:
# Generate a new potential and put it in the dictionary
potential = iFDD_potential(f, g_index, h_index)
self.iFDD_potentials[f] = potential
potential.cumtderr += td_error
potential.cumabstderr += abs(td_error)
potential.count += 1
# Check for discovery
if self.random_state.rand() < self.iFDDPlus:
relevance = old_div(abs(potential.cumtderr), np.sqrt(potential.count))
else:
relevance = potential.cumabstderr
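        # In other words: with probability iFDDPlus the "plus" relevance
        # |cumulative TD error| / sqrt(visit count) is used, otherwise the classic
        # ICML-2011 relevance (cumulative |TD error|); the pair is promoted to a
        # full feature below once this value reaches discovery_threshold.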
if relevance >= self.discovery_threshold:
self.maxRelevance = -np.inf
self.addFeature(potential)
return True
else:
self.updateMaxRelevance(relevance)
return False
def show(self):
self.showFeatures()
self.showPotentials()
self.showCache()
def updateWeight(self, p1_index, p2_index):
# Add a new weight corresponding to the new added feature for all actions.
# The new weight is set to zero if sparsify = False, and equal to the
# sum of weights corresponding to the parents if sparsify = True
a = self.domain.actions_num
        # Number of features before adding the new one
f = self.features_num - 1
if self.sparsify:
newElem = (self.weight_vec[p1_index::f] +
self.weight_vec[p2_index::f]).reshape((-1, 1))
else:
newElem = None
self.weight_vec = addNewElementForAllActions(self.weight_vec, a, newElem)
        # We don't want to reuse the hashed phi because the phi function has changed!
self.hashed_s = None
def addInitialFeatures(self):
for i in range(self.initial_representation.features_num):
feature = iFDD_feature(i)
# shout(self,self.iFDD_features[frozenset([i])].index)
self.iFDD_features[frozenset([i])] = feature
self.featureIndex2feature[feature.index] = feature
# priority is 1/number of initial features corresponding to the
# feature
priority = 1
self.sortediFDDFeatures.push(priority, feature)
def addFeature(self, potential):
# Add it to the list of features
# Features_num is always one more than the max index (0-based)
potential.index = self.features_num
self.features_num += 1
feature = iFDD_feature(potential)
self.iFDD_features[potential.f_set] = feature
# Expand the size of the weight_vec
self.updateWeight(feature.p1, feature.p2)
# Update the index to feature dictionary
self.featureIndex2feature[feature.index] = feature
# print "IN IFDD, New Feature = %d => Total Features = %d" % (feature.index, self.features_num)
# Update the sorted list of features
# priority is 1/number of initial features corresponding to the feature
priority = old_div(1, (len(potential.f_set) * 1.))
self.sortediFDDFeatures.push(priority, feature)
# If you use cache, you should invalidate entries that their initial
# set contains the set corresponding to the new feature
if self.useCache:
for initialActiveFeatures in list(self.cache.keys()):
if initialActiveFeatures.issuperset(feature.f_set):
if self.sparsify:
self.cache.pop(initialActiveFeatures)
else:
# If sparsification is not used, simply add the new
# feature id to all cached values that have feature set
# which is a super set of the features corresponding to
# the new discovered feature
self.cache[initialActiveFeatures].append(feature.index)
if self.debug:
self.show()
def batchDiscover(self, td_errors, phi, states):
# Discovers features using iFDD in batch setting.
# TD_Error: p-by-1 (How much error observed for each sample)
# phi: n-by-p features corresponding to all samples (each column corresponds to one sample)
# self.batchThreshold is the minimum relevance value for the feature to
# be expanded
SHOW_PLOT = 0 # Shows the histogram of relevances
maxDiscovery = self.maxBatchDiscovery
n = self.features_num # number of features
p = len(td_errors) # Number of samples
counts = np.zeros((n, n))
relevances = np.zeros((n, n))
for i in range(p):
phiphiT = np.outer(phi[i, :], phi[i, :])
if self.iFDDPlus:
relevances += phiphiT * td_errors[i]
else:
relevances += phiphiT * abs(td_errors[i])
counts += phiphiT
# Remove Diagonal and upper part of the relevances as they are useless
relevances = np.triu(relevances, 1)
non_zero_index = np.nonzero(relevances)
if self.iFDDPlus:
# Calculate relevances based on theoretical results of ICML 2013
# potential submission
relevances[non_zero_index] = np.divide(
np.abs(relevances[non_zero_index]),
np.sqrt(counts[non_zero_index]))
else:
# Based on Geramifard11_ICML Paper
relevances[non_zero_index] = relevances[non_zero_index]
# Find indexes to non-zero excited pairs
# F1 and F2 are the parents of the potentials
(F1, F2) = relevances.nonzero()
relevances = relevances[F1, F2]
if len(relevances) == 0:
# No feature to add
self.logger.debug("iFDD Batch: Max Relevance = 0")
return False
if SHOW_PLOT:
e_vec = relevances.flatten()
e_vec = e_vec[e_vec != 0]
e_vec = np.sort(e_vec)
plt.ioff()
plt.plot(e_vec, linewidth=3)
plt.show()
# Sort based on relevances
# We want high to low hence the reverse: [::-1]
sortedIndices = np.argsort(relevances)[::-1]
max_relevance = relevances[sortedIndices[0]]
# Add top <maxDiscovery> features
self.logger.debug(
"iFDD Batch: Max Relevance = {0:g}".format(max_relevance))
added_feature = False
new_features = 0
for j in range(len(relevances)):
if new_features >= maxDiscovery:
break
max_index = sortedIndices[j]
f1 = F1[max_index]
f2 = F2[max_index]
relevance = relevances[max_index]
if relevance > self.batchThreshold:
# print "Inspecting",
# f1,f2,'=>',self.getStrFeatureSet(f1),self.getStrFeatureSet(f2)
if self.inspectPair(f1, f2, np.inf):
self.logger.debug(
'New Feature %d: %s, Relevance = %0.3f' %
(self.features_num - 1, self.getStrFeatureSet(self.features_num - 1), relevances[max_index]))
new_features += 1
added_feature = True
else:
# Because the list is sorted, there is no use to look at the
# others
break
return (
# A signal to see if the representation has been expanded or not
added_feature
)
def showFeatures(self):
print("Features:")
print("-" * 30)
print(" index\t| f_set\t| p1\t| p2\t | Weights (per action)")
print("-" * 30)
for feature in reversed(self.sortediFDDFeatures.toList()):
# for feature in self.iFDD_features.itervalues():
# print " %d\t| %s\t| %s\t| %s\t| %s" %
# (feature.index,str(list(feature.f_set)),feature.p1,feature.p2,str(self.weight_vec[feature.index::self.features_num]))
print(" %d\t| %s\t| %s\t| %s\t| Omitted" % (feature.index, self.getStrFeatureSet(feature.index), feature.p1, feature.p2))
def showPotentials(self):
print("Potentials:")
print("-" * 30)
print(" index\t| f_set\t| relevance\t| count\t| p1\t| p2")
print("-" * 30)
for _, potential in self.iFDD_potentials.items():
print(" %d\t| %s\t| %0.2f\t| %d\t| %s\t| %s" % (potential.index, str(np.sort(list(potential.f_set))), potential.relevance, potential.count, potential.p1, potential.p2))
def showCache(self):
if self.useCache:
print("Cache:")
if len(self.cache) == 0:
print('EMPTY!')
return
print("-" * 30)
print(" initial\t| Final")
print("-" * 30)
for initial, active in self.cache.items():
print(" %s\t| %s" % (str(list(initial)), active))
def updateMaxRelevance(self, newRelevance):
# Update a global max relevance and outputs it if it is updated
if self.maxRelevance < newRelevance:
self.maxRelevance = newRelevance
if self.PRINT_MAX_RELEVANCE:
self.logger.debug(
"iFDD Batch: Max Relevance = {0:g}".format(newRelevance))
def getFeature(self, f_id):
# returns a feature given a feature id
if f_id in list(self.featureIndex2feature.keys()):
return self.featureIndex2feature[f_id]
else:
print("F_id %d is not valid" % f_id)
return None
def getStrFeatureSet(self, f_id):
# returns a string that corresponds to the set of features specified by
# the given feature_id
if f_id in list(self.featureIndex2feature.keys()):
return str(sorted(list(self.featureIndex2feature[f_id].f_set)))
else:
print("F_id %d is not valid" % f_id)
return None
def featureType(self):
return bool
def __deepcopy__(self, memo):
ifdd = iFDD(
self.domain,
self.discovery_threshold,
self.initial_representation,
self.sparsify,
self.discretization,
self.debug,
self.useCache,
self.maxBatchDiscovery,
self.batchThreshold,
self.iFDDPlus)
for s, f in list(self.iFDD_features.items()):
new_f = deepcopy(f)
new_s = deepcopy(s)
ifdd.iFDD_features[new_s] = new_f
ifdd.featureIndex2feature[new_f.index] = new_f
for s, p in list(self.iFDD_potentials.items()):
new_s = deepcopy(s)
new_p = deepcopy(p)
ifdd.iFDD_potentials[new_s] = deepcopy(new_p)
ifdd.cache = deepcopy(self.cache)
ifdd.sortediFDDFeatures = deepcopy(self.sortediFDDFeatures)
ifdd.features_num = self.features_num
ifdd.weight_vec = deepcopy(self.weight_vec)
return ifdd
class iFDDK_potential(iFDD_potential):
f_set = None # Set of features it corresponds to [Immutable]
    index = None  # If this feature has been discovered set this to its index, else -1
p1 = None # Parent 1 feature index
p2 = None # Parent 2 feature index
a = None # tE[phi |\delta|] estimate
b = None # tE[phi \delta] estimate
c = 0. # || phi ||^2_d estimate
n_crho = 0 # rho episode index of last update
e = None # eligibility trace
nu = 0. # w value of last statistics update
x_a = 0. # y_a value of last statistics update
    x_b = 0. # y_b value of last statistics update
l = 0 # t value of last statistics update
def __init__(self, f_set, parent1, parent2):
self.f_set = deepcopy(f_set)
self.index = -1 # -1 means it has not been discovered yet
self.p1 = parent1
self.p2 = parent2
try:
self.hp_dtype = np.dtype('float128')
except TypeError:
self.hp_dtype = np.dtype('float64')
self.a = np.array(0., dtype=self.hp_dtype)
self.b = np.array(0., dtype=self.hp_dtype)
self.e = np.array(0., dtype=self.hp_dtype)
def relevance(self, kappa=None, plus=None):
if plus is None:
assert(kappa is not None)
plus = self.random_state.rand() >= kappa
if plus:
return old_div(
|
np.abs(self.b)
|
numpy.abs
|
# Released under CC0.
# Summary: https://creativecommons.org/publicdomain/zero/1.0/
# Legal Code: https://creativecommons.org/publicdomain/zero/1.0/legalcode.txt
import numpy as np
from pyscripts.zfc import (cartesian, interleave, is_float, is_int, n_dec,
np_info, np_ncols, np_nrows, pairtable, seq,
triangle_n, triangle_n_inv)
def test_cartesian():
assert np.array_equal(
cartesian(np.array([1, 2, 3]),
np.array([4, 5, 6])),
np.array([
[1, 4],
[1, 5],
[1, 6],
[2, 4],
[2, 5],
[2, 6],
[3, 4],
[3, 5],
[3, 6]
])
)
def test_interleave():
assert interleave(list('abc'), list('def')) == \
['a', 'd', 'b', 'e', 'c', 'f']
assert interleave(list('abc'), list('defz')) == \
['a', 'd', 'b', 'e', 'c', 'f', 'z']
assert interleave(list('abcz'), list('def')) == \
['a', 'd', 'b', 'e', 'c', 'f', 'z']
def test_is_float():
assert is_float(0.)
assert is_float(10.)
assert is_float(0.123)
assert is_float(10.123)
assert is_float(np.float32(0))
assert is_float(np.float64(0))
assert not is_float(0)
assert not is_float(123)
assert not is_float(np.int32(0))
assert not is_float(np.int64(123))
def test_is_int():
assert is_int(0)
assert is_int(123)
assert is_int(np.int32(0))
assert is_int(np.int64(123))
assert not is_int(0.)
assert not is_int(10.)
assert not is_int(0.123)
assert not is_int(10.123)
assert not is_int(np.float32(0))
assert not is_int(np.float64(0))
def test_np_info():
d = np_info(seq(1, 100).reshape(10, 10))
assert d['type'] == np.ndarray
assert d['dtype'] == np.int32
assert d['size'] == 100
assert d['shape'] == (10, 10)
assert d['nbytes'] == 400
assert len(d.keys()) == 5
def test_np_ncols():
assert np_ncols(np.array(
5
)) is None
assert np_ncols(np.array(
[1, 2, 3, 4, 5]
)) == 5
assert np_ncols(np.array(
[[1, 2, 3, 4, 5],
[2, 3, 4, 5, 6]]
)) == 5
assert np_ncols(np.array(
[[1],
[2],
[3],
[4],
[5]]
)) == 1
def test_np_nrows():
assert np_nrows(np.array(
5
)) is None
assert np_nrows(np.array(
[1, 2, 3, 4, 5]
)) == 1
assert np_nrows(np.array(
[[1, 2, 3, 4, 5],
[2, 3, 4, 5, 6]]
)) == 2
assert np_nrows(np.array(
[[1],
[2],
[3],
[4],
[5]]
)) == 5
def test_pairtable():
assert np.array_equal(
pairtable(np.array([1, 2, 3, 4, 5, 6])),
np.array([
[1, 2, 3],
[None, 4, 5],
[None, None, 6]
])
)
def test_triangle_n():
assert triangle_n(0) == 0
assert triangle_n(1) == 1
assert triangle_n(2) == 3
assert triangle_n(3) == 6
assert triangle_n(4) == 10
assert triangle_n(5) == 15
assert triangle_n(100) == 5050
def test_triangle_n_inv():
assert triangle_n_inv(0) == 0
assert triangle_n_inv(1) == 1
assert triangle_n_inv(2) is None
assert triangle_n_inv(3) == 2
assert triangle_n_inv(4) is None
assert triangle_n_inv(5) is None
assert triangle_n_inv(6) == 3
assert triangle_n_inv(7) is None
assert triangle_n_inv(8) is None
assert triangle_n_inv(9) is None
assert triangle_n_inv(10) == 4
assert triangle_n_inv(15) == 5
assert triangle_n_inv(5050) == 100
def test_n_dec():
assert n_dec(100) == 0
assert n_dec(10) == 0
assert n_dec(0) == 0
assert n_dec(0.) == 1
assert n_dec(0.1) == 1
assert n_dec(0.12) == 2
assert n_dec(0.123) == 3
assert n_dec(0.1234) == 4
assert n_dec(0.12345) == 5
def test_seq():
# float
assert np.array_equal(
seq(0, 1, 0.1),
np.array([0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.])
)
assert np.array_equal(
seq(1, 0, -0.1),
np.array([1., 0.9, 0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1, 0.])
)
assert np.array_equal(
seq(1.01, 3.01, 1),
np.array([1.01, 2.01, 3.01])
)
assert np.array_equal(
seq(1.01, 3.02, 1),
np.array([1.01, 2.01, 3.01])
)
assert np.array_equal(
seq(1.01, 3.009, 1),
np.array([1.01, 2.01])
)
assert np.array_equal(
seq(0.1, 0.4, 0.1),
np.array([0.1, 0.2, 0.3, 0.4])
)
assert np.array_equal(seq(1, 10, 2.5), np.array([1., 3.5, 6., 8.5]))
assert np.array_equal(seq(10, 1, -2.5), np.array([10, 7.5, 5., 2.5]))
# int
assert np.array_equal(seq(1, 5), np.array([1, 2, 3, 4, 5]))
assert np.array_equal(seq(5, 1), np.array([5, 4, 3, 2, 1]))
assert np.array_equal(seq(-1, -5), np.array([-1, -2, -3, -4, -5]))
assert np.array_equal(seq(-5, -1),
|
np.array([-5, -4, -3, -2, -1])
|
numpy.array
|
import os, sys, time, shutil, tempfile, datetime, pathlib, subprocess
import numpy as np
from tqdm import trange, tqdm
from urllib.parse import urlparse
from scipy.ndimage import median_filter
import cv2
from mxnet import gluon, nd
import mxnet as mx
from . import transforms, dynamics, utils, resnet_style, plot, metrics
import __main__
def use_gpu(gpu_number=0):
""" check if mxnet gpu works """
try:
_ = mx.ndarray.array([1, 2, 3], ctx=mx.gpu(gpu_number))
print('** CUDA version installed and working. **')
return True
except mx.MXNetError:
print('CUDA version not installed/working, will use CPU version.')
return False
def check_mkl():
print('Running test snippet to check if MKL running (https://mxnet.apache.org/versions/1.6/api/python/docs/tutorials/performance/backend/mkldnn/mkldnn_readme.html#4)')
process = subprocess.Popen(['python', 'test_mkl.py'],
stdout=subprocess.PIPE, stderr=subprocess.PIPE,
cwd=os.path.dirname(os.path.abspath(__file__)))
stdout, stderr = process.communicate()
if len(stdout)>0:
print('** MKL version working - CPU version is fast. **')
mkl_enabled = True
else:
print('WARNING: MKL version not working/installed - CPU version will be SLOW!')
mkl_enabled = False
return mkl_enabled
def dx_to_circ(dP):
""" dP is 2 x Y x X => 'optic' flow representation """
sc = max(np.percentile(dP[0], 99), np.percentile(dP[0], 1))
Y = np.clip(dP[0] / sc, -1, 1)
sc = max(np.percentile(dP[1], 99), np.percentile(dP[1], 1))
X = np.clip(dP[1] / sc, -1, 1)
H = (np.arctan2(Y, X) + np.pi) / (2*np.pi)
S = utils.normalize99(dP[0]**2 + dP[1]**2)
V = np.ones_like(S)
    HSV = np.concatenate((H[:,:,np.newaxis], S[:,:,np.newaxis], V[:,:,np.newaxis]), axis=-1)
HSV = np.clip(HSV, 0.0, 1.0)
flow = (utils.hsv_to_rgb(HSV)*255).astype(np.uint8)
return flow
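# Rough usage note for dx_to_circ (illustrative): for a flow field dP of shape
# (2, Y, X),
#   rgb = dx_to_circ(dP)   # rgb is a uint8 image of shape (Y, X, 3)
# where hue encodes the local flow direction and saturation the normalized
# flow magnitude.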
class Cellpose():
""" main model which combines SizeModel and CellposeModel
Parameters
----------
gpu: bool (optional, default False)
whether or not to save model to GPU, will check if GPU available
model_type: str (optional, default 'cyto')
'cyto'=cytoplasm model; 'nuclei'=nucleus model
net_avg: bool (optional, default True)
loads the 4 built-in networks and averages them if True, loads one network if False
device: mxnet device (optional, default None)
where model is saved (mx.gpu() or mx.cpu()), overrides gpu input,
recommended if you want to use a specific GPU (e.g. mx.gpu(4))
"""
def __init__(self, gpu=False, model_type='cyto', net_avg=True, device=None):
super(Cellpose, self).__init__()
# assign device (GPU or CPU)
if device is not None:
self.device = device
elif gpu and use_gpu():
self.device = mx.gpu()
print('>>>> using GPU')
else:
self.device = mx.cpu()
print('>>>> using CPU')
model_dir = pathlib.Path.home().joinpath('.cellpose', 'models')
if model_type is None:
model_type = 'cyto'
self.pretrained_model = [os.fspath(model_dir.joinpath('%s_%d'%(model_type,j))) for j in range(4)]
self.pretrained_size = os.fspath(model_dir.joinpath('size_%s_0.npy'%(model_type)))
if model_type=='cyto':
self.diam_mean = 30.
else:
self.diam_mean = 17.
if not os.path.isfile(self.pretrained_model[0]):
download_model_weights()
if not net_avg:
self.pretrained_model = self.pretrained_model[0]
self.cp = CellposeModel(device=self.device,
pretrained_model=self.pretrained_model,
diam_mean=self.diam_mean)
self.cp.model_type = model_type
self.sz = SizeModel(device=self.device, pretrained_size=self.pretrained_size,
cp_model=self.cp)
self.sz.model_type = model_type
def eval(self, x, batch_size=8, channels=None, invert=False, normalize=True, diameter=30., do_3D=False, anisotropy=None,
net_avg=True, augment=False, tile=True, resample=False, flow_threshold=0.4, cellprob_threshold=0.0,
min_size=15, stitch_threshold=0.0, rescale=None, progress=None):
""" run cellpose and get masks
Parameters
----------
x: list or array of images
can be list of 2D/3D images, or array of 2D/3D images, or 4D image array
batch_size: int (optional, default 8)
number of 224x224 patches to run simultaneously on the GPU
(can make smaller or bigger depending on GPU memory usage)
channels: list (optional, default None)
list of channels, either of length 2 or of length number of images by 2.
First element of list is the channel to segment (0=grayscale, 1=red, 2=blue, 3=green).
Second element of list is the optional nuclear channel (0=none, 1=red, 2=blue, 3=green).
For instance, to segment grayscale images, input [0,0]. To segment images with cells
in green and nuclei in blue, input [2,3]. To segment one grayscale image and one
image with cells in green and nuclei in blue, input [[0,0], [2,3]].
invert: bool (optional, default False)
invert image pixel intensity before running network (if True, image is also normalized)
normalize: bool (optional, default True)
normalize data so 0.0=1st percentile and 1.0=99th percentile of image intensities in each channel
diameter: float (optional, default 30.)
if set to None, then diameter is automatically estimated if size model is loaded
do_3D: bool (optional, default False)
set to True to run 3D segmentation on 4D image input
anisotropy: float (optional, default None)
for 3D segmentation, optional rescaling factor (e.g. set to 2.0 if Z is sampled half as dense as X or Y)
net_avg: bool (optional, default True)
runs the 4 built-in networks and averages them if True, runs one network if False
augment: bool (optional, default False)
tiles image with overlapping tiles and flips overlapped regions to augment
tile: bool (optional, default True)
tiles image to ensure GPU/CPU memory usage limited (recommended)
flow_threshold: float (optional, default 0.4)
flow error threshold (all cells with errors below threshold are kept) (not used for 3D)
cellprob_threshold: float (optional, default 0.0)
cell probability threshold (all pixels with prob above threshold kept for masks)
min_size: int (optional, default 15)
minimum number of pixels per mask, can turn off with -1
stitch_threshold: float (optional, default 0.0)
if stitch_threshold>0.0 and not do_3D and equal image sizes, masks are stitched in 3D to return volume segmentation
rescale: float (optional, default None)
if diameter is set to None, and rescale is not None, then rescale is used instead of diameter for resizing image
progress: pyqt progress bar (optional, default None)
to return progress bar status to GUI
Returns
-------
masks: list of 2D arrays, or single 3D array (if do_3D=True)
labelled image, where 0=no masks; 1,2,...=mask labels
flows: list of lists 2D arrays, or list of 3D arrays (if do_3D=True)
flows[k][0] = XY flow in HSV 0-255
flows[k][1] = flows at each pixel
flows[k][2] = the cell probability centered at 0.0
styles: list of 1D arrays of length 64, or single 1D array (if do_3D=True)
style vector summarizing each image, also used to estimate size of objects in image
diams: list of diameters, or float (if do_3D=True)
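        Example (illustrative sketch; the variable names and values here are assumptions)
        -------
        >>> model = Cellpose(gpu=False, model_type='cyto')
        >>> masks, flows, styles, diams = model.eval(imgs, diameter=None, channels=[2, 3]) # doctest: +SKIP
        channels=[2, 3] selects the channel to segment and the optional nuclear
        channel following the convention above; diameter=None lets the size model
        estimate the cell diameters.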
"""
if not isinstance(x,list):
nolist = True
if x.ndim < 2 or x.ndim > 5:
raise ValueError('%dD images not supported'%x.ndim)
if x.ndim==4:
if do_3D:
x = [x]
else:
x = list(x)
nolist = False
elif x.ndim==5:
if do_3D:
x = list(x)
nolist = False
else:
raise ValueError('4D images must be processed using 3D')
else:
x = [x]
else:
nolist = False
for xi in x:
if xi.ndim < 2 or xi.ndim > 5:
raise ValueError('%dD images not supported'%xi.ndim)
tic0 = time.time()
nimg = len(x)
print('processing %d image(s)'%nimg)
# make rescale into length of x
if diameter is not None and diameter!=0:
if not isinstance(diameter, list) or len(diameter)==1 or len(diameter)<nimg:
diams = diameter * np.ones(nimg, np.float32)
else:
diams = diameter
rescale = self.diam_mean / diams
else:
if rescale is not None and (not isinstance(rescale, list) or len(rescale)==1):
rescale = rescale * np.ones(nimg, np.float32)
if self.pretrained_size is not None and rescale is None and not do_3D:
tic = time.time()
diams, _ = self.sz.eval(x, channels=channels, invert=invert, batch_size=batch_size, augment=augment, tile=tile)
rescale = self.diam_mean / diams
print('estimated cell diameters for %d image(s) in %0.2f sec'%(nimg, time.time()-tic))
else:
if rescale is None:
if do_3D:
rescale = np.ones(1)
else:
rescale = np.ones(nimg, np.float32)
diams = self.diam_mean / rescale
tic = time.time()
masks, flows, styles = self.cp.eval(x, batch_size=batch_size, invert=invert, rescale=rescale, anisotropy=anisotropy,
channels=channels, augment=augment, tile=tile, do_3D=do_3D,
net_avg=net_avg, progress=progress,
resample=resample,
flow_threshold=flow_threshold,
cellprob_threshold=cellprob_threshold,
min_size=min_size, stitch_threshold=stitch_threshold)
print('estimated masks for %d image(s) in %0.2f sec'%(nimg, time.time()-tic))
print('>>>> TOTAL TIME %0.2f sec'%(time.time()-tic0))
if nolist:
masks, flows, styles, diams = masks[0], flows[0], styles[0], diams[0]
return masks, flows, styles, diams
def parse_model_string(pretrained_model):
if isinstance(pretrained_model, list):
model_str = os.path.split(pretrained_model[0])[-1]
else:
model_str = os.path.split(pretrained_model)[-1]
if len(model_str)>3 and model_str[:4]=='unet':
print('parsing model string to get unet options')
nclasses = max(2, int(model_str[4]))
elif len(model_str)>7 and model_str[:8]=='cellpose':
print('parsing model string to get cellpose options')
nclasses = 3
else:
return None
ostrs = model_str.split('_')[2::2]
residual_on = ostrs[0]=='on'
style_on = ostrs[1]=='on'
concatenation = ostrs[2]=='on'
return nclasses, residual_on, style_on, concatenation
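# For example, given the parsing above, an illustrative pretrained file name such as
#   'cellpose_residual_on_style_on_concatenation_off_...'
# yields nclasses=3, residual_on=True, style_on=True, concatenation=False, while a
# name starting with neither 'unet' nor 'cellpose' makes parse_model_string return None.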
class UnetModel():
def __init__(self, gpu=False, pretrained_model=False,
diam_mean=30., net_avg=True, device=None,
residual_on=False, style_on=False, concatenation=True,
nclasses = 3):
self.unet = True
if device is not None:
self.device = device
elif gpu and use_gpu():
self.device = mx.gpu()
print('>>>> using GPU')
else:
self.device = mx.cpu()
print('>>>> using CPU')
self.pretrained_model = pretrained_model
self.diam_mean = diam_mean
if pretrained_model:
params = parse_model_string(pretrained_model)
if params is not None:
nclasses, residual_on, style_on, concatenation = params
ostr = ['off', 'on']
self.net_type = 'unet{}_residual_{}_style_{}_concatenation_{}'.format(nclasses,
ostr[residual_on],
ostr[style_on],
ostr[concatenation])
if pretrained_model:
print(self.net_type)
# create network
self.nclasses = nclasses
nbase = [32,64,128,256]
self.net = resnet_style.CPnet(nbase, nout=self.nclasses,
residual_on=residual_on,
style_on=style_on,
concatenation=concatenation)
self.net.hybridize(static_alloc=True, static_shape=True)
self.net.initialize(ctx = self.device)
if pretrained_model is not None and isinstance(pretrained_model, str):
self.net.load_parameters(pretrained_model)
def eval(self, x, batch_size=8, channels=None, invert=False, normalize=True,
rescale=None, do_3D=False, anisotropy=None, net_avg=True, augment=False,
tile=True, cell_threshold=None, boundary_threshold=None, min_size=15):
""" segment list of images x
Parameters
----------
x: list or array of images
can be list of 2D/3D images, or array of 2D/3D images, or 4D image array
batch_size: int (optional, default 8)
number of 224x224 patches to run simultaneously on the GPU
(can make smaller or bigger depending on GPU memory usage)
channels: list (optional, default None)
list of channels, either of length 2 or of length number of images by 2.
First element of list is the channel to segment (0=grayscale, 1=red, 2=blue, 3=green).
Second element of list is the optional nuclear channel (0=none, 1=red, 2=blue, 3=green).
For instance, to segment grayscale images, input [0,0]. To segment images with cells
in green and nuclei in blue, input [2,3]. To segment one grayscale image and one
image with cells in green and nuclei in blue, input [[0,0], [2,3]].
invert: bool (optional, default False)
invert image pixel intensity before running network
normalize: bool (optional, default True)
normalize data so 0.0=1st percentile and 1.0=99th percentile of image intensities in each channel
rescale: float (optional, default None)
resize factor for each image, if None, set to 1.0
do_3D: bool (optional, default False)
set to True to run 3D segmentation on 4D image input
anisotropy: float (optional, default None)
for 3D segmentation, optional rescaling factor (e.g. set to 2.0 if Z is sampled half as dense as X or Y)
net_avg: bool (optional, default True)
runs the 4 built-in networks and averages them if True, runs one network if False
augment: bool (optional, default False)
tiles image with overlapping tiles and flips overlapped regions to augment
tile: bool (optional, default True)
tiles image to ensure GPU/CPU memory usage limited (recommended)
        cell_threshold: float (optional, default None)
            cell probability threshold (all pixels with prob above threshold kept for masks);
            if None, a value saved with the model is used, falling back to 2.0
        boundary_threshold: float (optional, default None)
            cell boundary probability threshold, used together with cell_threshold to compute masks;
            if None, a value saved with the model is used, falling back to 0.5
min_size: int (optional, default 15)
minimum number of pixels per mask, can turn off with -1
Returns
-------
masks: list of 2D arrays, or single 3D array (if do_3D=True)
labelled image, where 0=no masks; 1,2,...=mask labels
flows: list of lists 2D arrays, or list of 3D arrays (if do_3D=True)
flows[k][0] = XY flow in HSV 0-255
flows[k][1] = flows at each pixel
flows[k][2] = the cell probability centered at 0.0
styles: list of 1D arrays of length 64, or single 1D array (if do_3D=True)
style vector summarizing each image, also used to estimate size of objects in image
"""
x, nolist = convert_images(x, channels, do_3D, normalize, invert)
nimg = len(x)
self.batch_size = batch_size
styles = []
flows = []
masks = []
if rescale is None:
rescale = np.ones(nimg)
elif isinstance(rescale, float):
rescale = rescale * np.ones(nimg)
if nimg > 1:
iterator = trange(nimg)
else:
iterator = range(nimg)
if isinstance(self.pretrained_model, list):
model_path = self.pretrained_model[0]
if not net_avg:
self.net.load_parameters(self.pretrained_model[0])
self.net.collect_params().grad_req = 'null'
else:
model_path = self.pretrained_model
if cell_threshold is None or boundary_threshold is None:
try:
thresholds = np.load(model_path+'_cell_boundary_threshold.npy')
cell_threshold, boundary_threshold = thresholds
print('>>>> found saved thresholds from validation set')
except:
print('WARNING: no thresholds found, using default / user input')
cell_threshold = 2.0 if cell_threshold is None else cell_threshold
boundary_threshold = 0.5 if boundary_threshold is None else boundary_threshold
if not do_3D:
for i in iterator:
img = x[i].copy()
shape = img.shape
# rescale image for flow computation
                img = transforms.resize_image(img, rsz=rescale[i])
y, style = self._run_nets(img, net_avg=net_avg, augment=augment, tile=tile)
maski = utils.get_masks_unet(y, cell_threshold, boundary_threshold)
maski = utils.fill_holes_and_remove_small_masks(maski, min_size=min_size)
maski = transforms.resize_image(maski, shape[-3], shape[-2],
interpolation=cv2.INTER_NEAREST)
masks.append(maski)
styles.append(style)
else:
for i in iterator:
tic=time.time()
yf, style = self._run_3D(x[i], rsz=rescale[i], anisotropy=anisotropy,
net_avg=net_avg, augment=augment, tile=tile)
yf = yf.mean(axis=0)
print('probabilities computed %2.2fs'%(time.time()-tic))
maski = utils.get_masks_unet(yf.transpose((1,2,3,0)), cell_threshold, boundary_threshold)
maski = utils.fill_holes_and_remove_small_masks(maski, min_size=min_size)
masks.append(maski)
styles.append(style)
print('masks computed %2.2fs'%(time.time()-tic))
flows.append(yf)
if nolist:
masks, flows, styles = masks[0], flows[0], styles[0]
return masks, flows, styles
def _run_nets(self, img, net_avg=True, augment=False, tile=True, bsize=224, progress=None):
""" run network (if more than one, loop over networks and average results
Parameters
--------------
img: float, [Ly x Lx x nchan] or [Lz x Ly x Lx x nchan]
net_avg: bool (optional, default True)
runs the 4 built-in networks and averages them if True, runs one network if False
augment: bool (optional, default False)
tiles image with overlapping tiles and flips overlapped regions to augment
tile: bool (optional, default True)
tiles image to ensure GPU memory usage limited (recommended)
progress: pyqt progress bar (optional, default None)
to return progress bar status to GUI
Returns
------------------
y: array [3 x Ly x Lx] or [3 x Lz x Ly x Lx]
y is output (averaged over networks);
y[0] is Y flow; y[1] is X flow; y[2] is cell probability
style: array [64]
1D array summarizing the style of the image,
if tiled it is averaged over tiles,
but not averaged over networks.
"""
if isinstance(self.pretrained_model, str) or not net_avg:
y, style = self._run_net(img, augment=augment, tile=tile, bsize=bsize)
else:
for j in range(len(self.pretrained_model)):
self.net.load_parameters(self.pretrained_model[j])
self.net.collect_params().grad_req = 'null'
y0, style = self._run_net(img, augment=augment, tile=tile, bsize=bsize)
if j==0:
y = y0
else:
y += y0
if progress is not None:
progress.setValue(10 + 10*j)
y = y / len(self.pretrained_model)
return y, style
def _run_net(self, imgs, augment=False, tile=True, bsize=224):
""" run network on image or stack of images
(faster if augment is False)
Parameters
--------------
imgs: array [Ly x Lx x nchan] or [Lz x Ly x Lx x nchan]
rsz: float (optional, default 1.0)
resize coefficient(s) for image
augment: bool (optional, default False)
tiles image with overlapping tiles and flips overlapped regions to augment
tile: bool (optional, default True)
tiles image to ensure GPU/CPU memory usage limited (recommended);
cannot be turned off for 3D segmentation
bsize: int (optional, default 224)
size of tiles to use in pixels [bsize x bsize]
Returns
------------------
y: array [Ly x Lx x 3] or [Lz x Ly x Lx x 3]
y[...,0] is Y flow; y[...,1] is X flow; y[...,2] is cell probability
style: array [64]
1D array summarizing the style of the image,
if tiled it is averaged over tiles
"""
if imgs.ndim==4:
# make image Lz x nchan x Ly x Lx for net
imgs = np.transpose(imgs, (0,3,1,2))
detranspose = (0,2,3,1)
else:
# make image nchan x Ly x Lx for net
imgs = np.transpose(imgs, (2,0,1))
detranspose = (1,2,0)
# pad image for net so Ly and Lx are divisible by 4
imgs, ysub, xsub = transforms.pad_image_ND(imgs)
# slices from padding
slc = [slice(0, imgs.shape[n]+1) for n in range(imgs.ndim)]
slc[-2] = slice(ysub[0], ysub[-1]+1)
slc[-1] = slice(xsub[0], xsub[-1]+1)
slc = tuple(slc)
# run network
if tile or augment or imgs.ndim==4:
y,style = self._run_tiled(imgs, augment=augment, bsize=bsize)
else:
imgs = nd.array(np.expand_dims(imgs, axis=0), ctx=self.device)
y,style = self.net(imgs)
y = y[0].asnumpy()
imgs = imgs.asnumpy()
style = style.asnumpy()[0]
style /= (style**2).sum()**0.5
# slice out padding
y = y[slc]
# transpose so channels axis is last again
y = np.transpose(y, detranspose)
return y, style
def _run_tiled(self, imgi, augment=False, bsize=224):
""" run network in tiles of size [bsize x bsize]
First image is split into overlapping tiles of size [bsize x bsize].
If augment, tiles have 50% overlap and are flipped at overlaps.
The average of the network output over tiles is returned.
Parameters
--------------
imgi: array [nchan x Ly x Lx] or [Lz x nchan x Ly x Lx]
augment: bool (optional, default False)
tiles image with overlapping tiles and flips overlapped regions to augment
bsize: int (optional, default 224)
size of tiles to use in pixels [bsize x bsize]
Returns
------------------
yf: array [3 x Ly x Lx] or [Lz x 3 x Ly x Lx]
yf is averaged over tiles
yf[0] is Y flow; yf[1] is X flow; yf[2] is cell probability
styles: array [64]
1D array summarizing the style of the image, averaged over tiles
"""
if imgi.ndim==4:
Lz, nchan = imgi.shape[:2]
IMG, ysub, xsub, Ly, Lx = transforms.make_tiles(imgi[0], bsize=bsize, augment=augment)
ny, nx, nchan = IMG.shape[:3]
yf = np.zeros((Lz, self.nclasses, imgi.shape[2], imgi.shape[3]), np.float32)
styles = []
if ny*nx > self.batch_size:
ziterator = trange(Lz)
for i in ziterator:
yfi, stylei = self._run_tiled(imgi[i], augment=augment, bsize=bsize)
yf[i] = yfi
styles.append(stylei)
else:
# run multiple slices at the same time
ntiles = ny*nx
nimgs = max(2, int(np.round(self.batch_size / ntiles)))
niter = int(np.ceil(Lz/nimgs))
ziterator = trange(niter)
for k in ziterator:
IMGa = np.zeros((ntiles*nimgs, nchan, bsize, bsize), np.float32)
for i in range(min(Lz-k*nimgs, nimgs)):
IMG, ysub, xsub, Ly, Lx = transforms.make_tiles(imgi[k*nimgs+i], bsize=bsize, augment=augment)
IMGa[i*ntiles:(i+1)*ntiles] = np.reshape(IMG, (ny*nx, nchan, bsize, bsize))
y0, style = self.net(nd.array(IMGa, ctx=self.device))
ya = y0.asnumpy()
stylea = style.asnumpy()
for i in range(min(Lz-k*nimgs, nimgs)):
y = ya[i*ntiles:(i+1)*ntiles]
if augment:
y = np.reshape(y, (ny, nx, 3, bsize, bsize))
y = transforms.unaugment_tiles(y, self.unet)
y = np.reshape(y, (-1, 3, bsize, bsize))
yfi = transforms.average_tiles(y, ysub, xsub, Ly, Lx)
yfi = yfi[:,:imgi.shape[2],:imgi.shape[3]]
yf[k*nimgs+i] = yfi
stylei = stylea[i*ntiles:(i+1)*ntiles].sum(axis=0)
stylei /= (stylei**2).sum()**0.5
styles.append(stylei)
return yf, np.array(styles)
else:
IMG, ysub, xsub, Ly, Lx = transforms.make_tiles(imgi, bsize=bsize, augment=augment)
ny, nx, nchan = IMG.shape[:3]
IMG = np.reshape(IMG, (ny*nx, nchan, bsize, bsize))
nbatch = self.batch_size
niter = int(np.ceil(IMG.shape[0]/nbatch))
y = np.zeros((IMG.shape[0], self.nclasses, bsize, bsize))
for k in range(niter):
irange = np.arange(nbatch*k, min(IMG.shape[0], nbatch*k+nbatch))
y0, style = self.net(nd.array(IMG[irange], ctx=self.device))
y0 = y0.asnumpy()
y[irange] = y0
if k==0:
styles = style.asnumpy()[0]
styles += style.asnumpy().sum(axis=0)
styles /= IMG.shape[0]
if augment:
y = np.reshape(y, (ny, nx, self.nclasses, bsize, bsize))
y = transforms.unaugment_tiles(y, self.unet)
y = np.reshape(y, (-1, self.nclasses, bsize, bsize))
yf = transforms.average_tiles(y, ysub, xsub, Ly, Lx)
yf = yf[:,:imgi.shape[1],:imgi.shape[2]]
styles /= (styles**2).sum()**0.5
return yf, styles
def _run_3D(self, imgs, rsz=1.0, anisotropy=None, net_avg=True,
augment=False, tile=True, bsize=224, progress=None):
""" run network on stack of images
(faster if augment is False)
Parameters
--------------
imgs: array [Lz x Ly x Lx x nchan]
rsz: float (optional, default 1.0)
resize coefficient(s) for image
anisotropy: float (optional, default None)
for 3D segmentation, optional rescaling factor (e.g. set to 2.0 if Z is sampled half as dense as X or Y)
net_avg: bool (optional, default True)
runs the 4 built-in networks and averages them if True, runs one network if False
augment: bool (optional, default False)
tiles image with overlapping tiles and flips overlapped regions to augment
tile: bool (optional, default True)
tiles image to ensure GPU/CPU memory usage limited (recommended);
cannot be turned off for 3D segmentation
bsize: int (optional, default 224)
size of tiles to use in pixels [bsize x bsize]
progress: pyqt progress bar (optional, default None)
to return progress bar status to GUI
Returns
------------------
yf: array [Lz x Ly x Lx x 3]
y[...,0] is Y flow; y[...,1] is X flow; y[...,2] is cell probability
style: array [64]
1D array summarizing the style of the image,
if tiled it is averaged over tiles
"""
sstr = ['YX', 'ZY', 'ZX']
if anisotropy is not None:
rescaling = [[rsz, rsz],
[rsz*anisotropy, rsz],
[rsz*anisotropy, rsz]]
else:
rescaling = [rsz] * 3
pm = [(0,1,2,3), (1,0,2,3), (2,0,1,3)]
ipm = [(3,0,1,2), (3,1,0,2), (3,1,2,0)]
yf = np.zeros((3, self.nclasses, imgs.shape[0], imgs.shape[1], imgs.shape[2]), np.float32)
for p in range(3 - 2*self.unet):
xsl = imgs.copy().transpose(pm[p])
# rescale image for flow computation
shape = xsl.shape
xsl = transforms.resize_image(xsl, rsz=rescaling[p])
# per image
print('\n running %s: %d planes of size (%d, %d) \n\n'%(sstr[p], shape[0], shape[1], shape[2]))
y, style = self._run_nets(xsl, net_avg=net_avg, augment=augment, tile=tile, bsize=bsize)
y = transforms.resize_image(y, shape[1], shape[2])
yf[p] = y.transpose(ipm[p])
if progress is not None:
progress.setValue(25+15*p)
return yf, style
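# Added illustrative note: with rsz=1.0 and anisotropy=2.0 the rescaling list
# built above becomes [[1.0, 1.0], [2.0, 1.0], [2.0, 1.0]] -- the YX planes are
# left unchanged while the planes that include the Z axis are stretched along Z,
# so an under-sampled Z axis is upsampled to match the XY resolution before the
# 2D network is run on each plane.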
def loss_fn(self, lbl, y):
""" loss function between true labels lbl and prediction y """
criterion = gluon.loss.SoftmaxCrossEntropyLoss(axis=1)
# if available set boundary pixels to 2
if lbl.shape[1]>1 and self.nclasses>2:
boundary = lbl[:,1]<=4
lbl = lbl[:,0]
lbl[boundary] *= 2
else:
lbl = lbl[:,0]
lbl = nd.array(lbl.astype(np.uint8), ctx=self.device)
loss = 8 * 1./self.nclasses * criterion(y, lbl)
return loss
def train(self, train_data, train_labels, train_files=None,
test_data=None, test_labels=None, test_files=None,
channels=None, normalize=True, pretrained_model=None, save_path=None, save_every=100,
learning_rate=0.2, n_epochs=500, weight_decay=0.00001, batch_size=8, rescale=False):
""" train function uses 0-1 mask label and boundary pixels for training """
nimg = len(train_data)
train_data, train_labels, test_data, test_labels, run_test = transforms.reshape_train_test(train_data, train_labels,
test_data, test_labels,
channels, normalize)
# add dist_to_bound to labels
if self.nclasses==3:
print('computing boundary pixels')
train_classes = [np.stack((label, label>0, utils.distance_to_boundary(label)), axis=0).astype(np.float32)
for label in tqdm(train_labels)]
else:
train_classes = [np.stack((label, label>0), axis=0).astype(np.float32)
for label in tqdm(train_labels)]
if run_test:
if self.nclasses==3:
test_classes = [np.stack((label, label>0, utils.distance_to_boundary(label)), axis=0).astype(np.float32)
for label in tqdm(test_labels)]
else:
test_classes = [np.stack((label, label>0), axis=0).astype(np.float32)
for label in tqdm(test_labels)]
# split train data into train and val
val_data = train_data[::8]
val_classes = train_classes[::8]
val_labels = train_labels[::8]
del train_data[::8], train_classes[::8], train_labels[::8]
model_path = self._train_net(train_data, train_classes,
test_data, test_classes,
pretrained_model, save_path, save_every,
learning_rate, n_epochs, weight_decay,
batch_size, rescale)
# find threshold using validation set
print('>>>> finding best thresholds using validation set')
cell_threshold, boundary_threshold = self.threshold_validation(val_data, val_labels)
np.save(model_path+'_cell_boundary_threshold.npy', np.array([cell_threshold, boundary_threshold]))
def threshold_validation(self, val_data, val_labels):
cell_thresholds = np.arange(-4.0, 4.25, 0.5)
if self.nclasses==3:
boundary_thresholds = np.arange(-2, 2.25, 1.0)
else:
boundary_thresholds = np.zeros(1)
aps = np.zeros((cell_thresholds.size, boundary_thresholds.size, 3))
for j,cell_threshold in enumerate(cell_thresholds):
for k,boundary_threshold in enumerate(boundary_thresholds):
masks = []
for i in range(len(val_data)):
output,style = self._run_net(val_data[i].transpose(1,2,0), augment=False)
masks.append(utils.get_masks_unet(output, cell_threshold, boundary_threshold))
ap = metrics.average_precision(val_labels, masks)[0]
ap0 = ap.mean(axis=0)
aps[j,k] = ap0
if self.nclasses==3:
kbest = aps[j].mean(axis=-1).argmax()
else:
kbest = 0
if j%4==0:
print('best threshold at cell_threshold = {} => boundary_threshold = {}, ap @ 0.5 = {}'.format(cell_threshold, boundary_thresholds[kbest],
aps[j,kbest,0]))
if self.nclasses==3:
jbest, kbest = np.unravel_index(aps.mean(axis=-1).argmax(), aps.shape[:2])
else:
jbest = aps.squeeze().mean(axis=-1).argmax()
kbest = 0
cell_threshold, boundary_threshold = cell_thresholds[jbest], boundary_thresholds[kbest]
print('>>>> best overall thresholds: (cell_threshold = {}, boundary_threshold = {}); ap @ 0.5 = {}'.format(cell_threshold, boundary_threshold,
aps[jbest,kbest,0]))
return cell_threshold, boundary_threshold
def _train_net(self, train_data, train_labels,
test_data=None, test_labels=None,
pretrained_model=None, save_path=None, save_every=100,
learning_rate=0.2, n_epochs=500, weight_decay=0.00001,
batch_size=8, rescale=True, netstr='cellpose'):
""" train function uses loss function self.loss_fn """
d = datetime.datetime.now()
self.learning_rate = learning_rate
self.n_epochs = n_epochs
self.batch_size = batch_size
self.learning_rate = learning_rate
self.weight_decay = weight_decay
self.momentum = 0.9
nimg = len(train_data)
# compute average cell diameter
if rescale:
diam_train = np.array([utils.diameters(train_labels[k][0])[0] for k in range(len(train_labels))])
diam_train[diam_train<5] = 5.
if test_data is not None:
diam_test = np.array([utils.diameters(test_labels[k][0])[0] for k in range(len(test_labels))])
diam_test[diam_test<5] = 5.
scale_range = 0.5
else:
scale_range = 1.0
nchan = train_data[0].shape[0]
print('>>>> training network with %d channel input <<<<'%nchan)
print('>>>> saving every %d epochs'%save_every)
print('>>>> median diameter = %d'%self.diam_mean)
print('>>>> LR: %0.5f, batch_size: %d, weight_decay: %0.5f'%(self.learning_rate, self.batch_size, self.weight_decay))
print('>>>> ntrain = %d'%nimg)
if test_data is not None:
print('>>>> ntest = %d'%len(test_data))
print(train_data[0].shape)
trainer = gluon.Trainer(self.net.collect_params(), 'sgd',{'learning_rate': self.learning_rate,
'momentum': self.momentum, 'wd': self.weight_decay})
eta = np.linspace(0, self.learning_rate, 10)
tic = time.time()
lavg, nsum = 0, 0
if save_path is not None:
_, file_label = os.path.split(save_path)
file_path = os.path.join(save_path, 'models/')
if not os.path.exists(file_path):
os.makedirs(file_path)
else:
print('WARNING: no save_path given, model not saving')
ksave = 0
rsc = 1.0
for iepoch in range(self.n_epochs):
np.random.seed(iepoch)
rperm = np.random.permutation(nimg)
if iepoch<len(eta):
LR = eta[iepoch]
trainer.set_learning_rate(LR)
for ibatch in range(0,nimg,batch_size):
if rescale:
diam_batch = diam_train[rperm[ibatch:ibatch+batch_size]]
rsc = diam_batch / self.diam_mean
else:
rsc = np.ones(len(rperm[ibatch:ibatch+batch_size]), np.float32)
imgi, lbl, scale = transforms.random_rotate_and_resize(
[train_data[i] for i in rperm[ibatch:ibatch+batch_size]],
Y=[train_labels[i][1:] for i in rperm[ibatch:ibatch+batch_size]],
rescale=rsc, scale_range=scale_range, unet=self.unet)
if self.unet and lbl.shape[1]>1 and rescale:
#lbl[:,1] *= scale[0]**2
lbl[:,1] /= diam_batch[:,np.newaxis,np.newaxis]**2
X = nd.array(imgi, ctx=self.device)
with mx.autograd.record():
y, style = self.net(X)
loss = self.loss_fn(lbl, y)
loss.backward()
train_loss = nd.sum(loss).asscalar()
lavg += train_loss
nsum+=len(loss)
if iepoch>0:
trainer.step(batch_size)
if iepoch>self.n_epochs-100 and iepoch%10==1:
LR = LR/2
trainer.set_learning_rate(LR)
if iepoch%10==0 or iepoch<10:
lavg = lavg / nsum
if test_data is not None:
lavgt = 0
nsum = 0
np.random.seed(42)
rperm = np.arange(0, len(test_data), 1, int)
for ibatch in range(0,len(test_data),batch_size):
if rescale:
rsc = diam_test[rperm[ibatch:ibatch+batch_size]] / self.diam_mean
else:
rsc = np.ones(len(rperm[ibatch:ibatch+batch_size]), np.float32)
imgi, lbl, scale = transforms.random_rotate_and_resize(
[test_data[i] for i in rperm[ibatch:ibatch+batch_size]],
Y=[test_labels[i][1:] for i in rperm[ibatch:ibatch+batch_size]],
scale_range=0., rescale=rsc, unet=self.unet)
if self.unet and lbl.shape[1]>1:
lbl[:,1] *= scale[0]**2
X = nd.array(imgi, ctx=self.device)
y, style = self.net(X)
loss = self.loss_fn(lbl, y)
lavgt += nd.sum(loss).asscalar()
nsum+=len(loss)
print('Epoch %d, Time %4.1fs, Loss %2.4f, Loss Test %2.4f, LR %2.4f'%
(iepoch, time.time()-tic, lavg, lavgt/nsum, LR))
else:
print('Epoch %d, Time %4.1fs, Loss %2.4f, LR %2.4f'%
(iepoch, time.time()-tic, lavg, LR))
lavg, nsum = 0, 0
if save_path is not None:
if iepoch==self.n_epochs-1 or iepoch%save_every==1:
# save model at the end
file = '{}_{}_{}'.format(self.net_type, file_label, d.strftime("%Y_%m_%d_%H_%M_%S.%f"))
ksave += 1
print('saving network parameters')
self.net.save_parameters(os.path.join(file_path, file))
return os.path.join(file_path, file)
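# Added illustrative sketch: the learning-rate schedule used in _train_net,
# reproduced standalone. The values are assumptions matching the defaults above
# (learning_rate=0.2, n_epochs=500).
#
#   learning_rate, n_epochs = 0.2, 500
#   eta = np.linspace(0, learning_rate, 10)           # linear warm-up over the first 10 epochs
#   LR = learning_rate
#   for iepoch in range(n_epochs):
#       if iepoch < len(eta):
#           LR = eta[iepoch]                          # warm-up phase
#       if iepoch > n_epochs - 100 and iepoch % 10 == 1:
#           LR = LR / 2                               # halved every 10 epochs over the last ~100 epochs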
class CellposeModel(UnetModel):
"""
Parameters
-------------------
gpu: bool (optional, default False)
whether or not to save model to GPU, will check if GPU available
pretrained_model: str or list of strings (optional, default False)
path to pretrained cellpose model(s), if False, no model loaded;
if None, built-in 'cyto' model loaded
net_avg: bool (optional, default True)
loads the 4 built-in networks and averages them if True, loads one network if False
diam_mean: float (optional, default 30.)
mean 'diameter', 30. is the built-in value for the 'cyto' model
device: mxnet device (optional, default None)
where model is saved (mx.gpu() or mx.cpu()), overrides gpu input,
recommended if you want to use a specific GPU (e.g. mx.gpu(4))
"""
def __init__(self, gpu=False, pretrained_model=False,
diam_mean=30., net_avg=True, device=None,
residual_on=True, style_on=True, concatenation=False):
if isinstance(pretrained_model, np.ndarray):
pretrained_model = list(pretrained_model)
nclasses = 3 # 3 prediction maps (dY, dX and cellprob)
self.nclasses = nclasses
model_dir = pathlib.Path.home().joinpath('.cellpose', 'models')
if pretrained_model:
params = parse_model_string(pretrained_model)
if params is not None:
nclasses, residual_on, style_on, concatenation = params
# load default cyto model if pretrained_model is None
elif pretrained_model is None:
if net_avg:
pretrained_model = [os.fspath(model_dir.joinpath('cyto_%d'%j)) for j in range(4)]
if not os.path.isfile(pretrained_model[0]):
download_model_weights()
else:
pretrained_model = os.fspath(model_dir.joinpath('cyto_0'))
if not os.path.isfile(pretrained_model):
download_model_weights()
self.diam_mean = 30.
residual_on = True
style_on = True
concatenation = False
# initialize network
super().__init__(gpu=gpu, pretrained_model=False,
diam_mean=diam_mean, net_avg=net_avg, device=device,
residual_on=residual_on, style_on=style_on, concatenation=concatenation,
nclasses=nclasses)
self.unet = False
self.pretrained_model = pretrained_model
if self.pretrained_model is not None and isinstance(self.pretrained_model, str):
self.net.load_parameters(self.pretrained_model)
ostr = ['off', 'on']
self.net_type = 'cellpose_residual_{}_style_{}_concatenation_{}'.format(ostr[residual_on],
ostr[style_on],
ostr[concatenation])
if pretrained_model:
print(self.net_type)
def eval(self, imgs, batch_size=8, channels=None, normalize=True, invert=False, rescale=None,
do_3D=False, anisotropy=None, net_avg=True, augment=False, tile=True,
resample=False, flow_threshold=0.4, cellprob_threshold=0.0, compute_masks=True,
min_size=15, stitch_threshold=0.0, progress=None):
"""
segment list of images imgs, or 4D array - Z x nchan x Y x X
Parameters
----------
imgs: list or array of images
can be list of 2D/3D/4D images, or array of 2D/3D images
batch_size: int (optional, default 8)
number of 224x224 patches to run simultaneously on the GPU
(can make smaller or bigger depending on GPU memory usage)
channels: list (optional, default None)
list of channels, either of length 2 or of length number of images by 2.
First element of list is the channel to segment (0=grayscale, 1=red, 2=blue, 3=green).
Second element of list is the optional nuclear channel (0=none, 1=red, 2=blue, 3=green).
For instance, to segment grayscale images, input [0,0]. To segment images with cells
in green and nuclei in blue, input [2,3]. To segment one grayscale image and one
image with cells in green and nuclei in blue, input [[0,0], [2,3]].
normalize: bool (default, True)
normalize data so 0.0=1st percentile and 1.0=99th percentile of image intensities in each channel
invert: bool (optional, default False)
invert image pixel intensity before running network
rescale: float (optional, default None)
resize factor for each image, if None, set to 1.0
do_3D: bool (optional, default False)
set to True to run 3D segmentation on 4D image input
anisotropy: float (optional, default None)
for 3D segmentation, optional rescaling factor (e.g. set to 2.0 if Z is sampled half as dense as X or Y)
net_avg: bool (optional, default True)
runs the 4 built-in networks and averages them if True, runs one network if False
augment: bool (optional, default False)
tiles image with overlapping tiles and flips overlapped regions to augment
tile: bool (optional, default True)
tiles image to ensure GPU/CPU memory usage limited (recommended)
flow_threshold: float (optional, default 0.4)
flow error threshold (all cells with errors below threshold are kept) (not used for 3D)
cellprob_threshold: float (optional, default 0.0)
cell probability threshold (all pixels with prob above threshold kept for masks)
compute_masks: bool (optional, default True)
Whether or not to compute dynamics and return masks.
This is set to False when retrieving the styles for the size model.
min_size: int (optional, default 15)
minimum number of pixels per mask, can turn off with -1
stitch_threshold: float (optional, default 0.0)
if stitch_threshold>0.0 and not do_3D, masks are stitched in 3D to return volume segmentation
progress: pyqt progress bar (optional, default None)
to return progress bar status to GUI
Returns
-------
masks: list of 2D arrays, or single 3D array (if do_3D=True)
labelled image, where 0=no masks; 1,2,...=mask labels
flows: list of lists 2D arrays, or list of 3D arrays (if do_3D=True)
flows[k][0] = XY flow in HSV 0-255
flows[k][1] = flows at each pixel
flows[k][2] = the cell probability centered at 0.0
styles: list of 1D arrays of length 64, or single 1D array (if do_3D=True)
style vector summarizing each image, also used to estimate size of objects in image
"""
x, nolist = convert_images(imgs.copy(), channels, do_3D, normalize, invert)
nimg = len(x)
self.batch_size = batch_size
styles = []
flows = []
masks = []
if rescale is None:
rescale = np.ones(nimg)
elif isinstance(rescale, float):
rescale = rescale * np.ones(nimg)
if nimg > 1:
iterator = trange(nimg)
else:
iterator = range(nimg)
if isinstance(self.pretrained_model, list) and not net_avg:
self.net.load_parameters(self.pretrained_model[0])
self.net.collect_params().grad_req = 'null'
if not do_3D:
flow_time = 0
net_time = 0
for i in iterator:
img = x[i].copy()
Ly,Lx = img.shape[:2]
tic = time.time()
shape = img.shape
# rescale image for flow computation
img = transforms.resize_image(img, rsz=rescale[i])
y, style = self._run_nets(img, net_avg=net_avg,
augment=augment, tile=tile)
net_time += time.time() - tic
if progress is not None:
progress.setValue(55)
styles.append(style)
if compute_masks:
tic=time.time()
if resample:
y = transforms.resize_image(y, shape[-3], shape[-2])
cellprob = y[:,:,-1]
dP = y[:,:,:2].transpose((2,0,1))
niter = 1 / rescale[i] * 200
p = dynamics.follow_flows(-1 * dP * (cellprob > cellprob_threshold) / 5.,
niter=niter)
if progress is not None:
progress.setValue(65)
maski = dynamics.get_masks(p, iscell=(cellprob>cellprob_threshold),
flows=dP, threshold=flow_threshold)
maski = utils.fill_holes_and_remove_small_masks(maski)
maski = transforms.resize_image(maski, shape[-3], shape[-2],
interpolation=cv2.INTER_NEAREST)
if progress is not None:
progress.setValue(75)
#dP = np.concatenate((dP, np.zeros((1,dP.shape[1],dP.shape[2]), np.uint8)), axis=0)
flows.append([dx_to_circ(dP), dP, cellprob, p])
masks.append(maski)
flow_time += time.time() - tic
else:
flows.append([None]*3)
masks.append([])
print('time spent: running network %0.2fs; flow+mask computation %0.2f'%(net_time, flow_time))
if stitch_threshold > 0.0 and nimg > 1 and all([m.shape==masks[0].shape for m in masks]):
print('stitching %d masks using stitch_threshold=%0.3f to make 3D masks'%(nimg, stitch_threshold))
masks = utils.stitch3D(np.array(masks), stitch_threshold=stitch_threshold)
else:
for i in iterator:
tic=time.time()
shape = x[i].shape
yf, style = self._run_3D(x[i], rsz=rescale[i], anisotropy=anisotropy,
net_avg=net_avg, augment=augment, tile=tile, progress=progress)
cellprob = yf[0][-1] + yf[1][-1] + yf[2][-1]
dP = np.stack((yf[1][0] + yf[2][0], yf[0][0] + yf[2][1], yf[0][1] + yf[1][1]),
axis=0) # (dZ, dY, dX)
print('flows computed %2.2fs'%(time.time()-tic))
# ** mask out values using cellprob to increase speed and reduce memory requirements **
yout = dynamics.follow_flows(-1 * dP * (cellprob > cellprob_threshold) / 5.)
print('dynamics computed %2.2fs'%(time.time()-tic))
maski = dynamics.get_masks(yout, iscell=(cellprob>cellprob_threshold))
maski = utils.fill_holes_and_remove_small_masks(maski, min_size=min_size)
print('masks computed %2.2fs'%(time.time()-tic))
flow = np.array([dx_to_circ(dP[1:,i]) for i in range(dP.shape[1])])
flows.append([flow, dP, cellprob, yout])
masks.append(maski)
styles.append(style)
if nolist:
masks, flows, styles = masks[0], flows[0], styles[0]
return masks, flows, styles
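# Added illustrative usage sketch; the image array and channel choice below are
# assumptions, following the channel convention documented in the docstring above.
#
#   model = CellposeModel(gpu=False, pretrained_model=None)    # loads built-in 'cyto' weights
#   img = np.random.rand(256, 256)                             # hypothetical grayscale image
#   masks, flows, styles = model.eval([img], channels=[0, 0], rescale=1.0)
#   # masks[0]: labelled 2D mask; flows[0][0]: XY flow rendered as HSV;
#   # flows[0][2]: cell probability; styles[0]: style vector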
def loss_fn(self, lbl, y):
""" loss function between true labels lbl and prediction y """
criterion = gluon.loss.L2Loss()
criterion2 = gluon.loss.SigmoidBinaryCrossEntropyLoss()
veci = 5. * nd.array(lbl[:,1:], ctx=self.device)
lbl = nd.array(lbl[:,0]>.5, ctx=self.device)
loss = criterion(y[:,:-1] , veci) + criterion2(y[:,-1] , lbl)
return loss
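# Added illustrative note: the loss combines gluon's L2Loss on the two flow
# channels (ground-truth flows scaled by 5) with a sigmoid cross-entropy on the
# final cell-probability channel. A rough scalar sketch for a single pixel,
# with prediction (dy, dx, logit) and label (mask, dY, dX):
#
#   flow_term = 0.5 * ((dy - 5*dY)**2 + (dx - 5*dX)**2)        # gluon L2Loss uses a 1/2 factor
#   prob_term = np.log(1 + np.exp(-logit)) + (1 - float(mask > 0.5)) * logit
#   pixel_loss = flow_term + prob_term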
def train(self, train_data, train_labels, train_files=None,
test_data=None, test_labels=None, test_files=None,
channels=None, normalize=True, pretrained_model=None,
save_path=None, save_every=100,
learning_rate=0.2, n_epochs=500, weight_decay=0.00001, batch_size=8, rescale=True):
""" train network with images train_data
Parameters
------------------
train_data: list of arrays (2D or 3D)
images for training
train_labels: list of arrays (2D or 3D)
labels for train_data, where 0=no masks; 1,2,...=mask labels
can include flows as additional images
train_files: list of strings
file names for images in train_data (to save flows for future runs)
test_data: list of arrays (2D or 3D)
images for testing
test_labels: list of arrays (2D or 3D)
labels for test_data, where 0=no masks; 1,2,...=mask labels;
can include flows as additional images
test_files: list of strings
file names for images in test_data (to save flows for future runs)
channels: list of ints (default, None)
channels to use for training
normalize: bool (default, True)
normalize data so 0.0=1st percentile and 1.0=99th percentile of image intensities in each channel
pretrained_model: string (default, None)
path to pretrained_model to start from, if None it is trained from scratch
save_path: string (default, None)
where to save trained model, if None it is not saved
save_every: int (default, 100)
save network every [save_every] epochs
learning_rate: float (default, 0.2)
learning rate for training
n_epochs: int (default, 500)
how many times to go through whole training set during training
weight_decay: float (default, 0.00001)
batch_size: int (optional, default 8)
number of 224x224 patches to run simultaneously on the GPU
(can make smaller or bigger depending on GPU memory usage)
rescale: bool (default, True)
whether or not to rescale images to diam_mean during training,
if True it assumes you will fit a size model after training or resize your images accordingly,
if False it will try to train the model to be scale-invariant (works worse)
"""
nimg = len(train_data)
train_data, train_labels, test_data, test_labels, run_test = transforms.reshape_train_test(train_data, train_labels,
test_data, test_labels,
channels, normalize)
# check if train_labels have flows
train_flows = dynamics.labels_to_flows(train_labels, files=train_files)
if run_test:
test_flows = dynamics.labels_to_flows(test_labels, files=test_files)
else:
test_flows = None
model_path = self._train_net(train_data, train_flows,
test_data, test_flows,
pretrained_model, save_path, save_every,
learning_rate, n_epochs, weight_decay, batch_size, rescale)
return model_path
class SizeModel():
""" linear regression model for determining the size of objects in image
used to rescale before input to cp_model
uses styles from cp_model
Parameters
-------------------
cp_model: UnetModel or CellposeModel
model from which to get styles
device: mxnet device (optional, default mx.cpu())
where cellpose model is saved (mx.gpu() or mx.cpu())
pretrained_size: str
path to pretrained size model
"""
def __init__(self, cp_model, device=mx.cpu(), pretrained_size=None, **kwargs):
super(SizeModel, self).__init__(**kwargs)
self.device = device
self.pretrained_size = pretrained_size
self.cp = cp_model
self.diam_mean = self.cp.diam_mean
if pretrained_size is not None:
self.params = np.load(self.pretrained_size, allow_pickle=True).item()
self.diam_mean = self.params['diam_mean']
if not hasattr(self.cp, 'pretrained_model'):
raise ValueError('provided model does not have a pretrained_model')
def eval(self, imgs=None, styles=None, channels=None, normalize=True, invert=False, augment=False, tile=True,
batch_size=8, progress=None):
""" use images imgs to produce style or use style input to predict size of objects in image
Object size estimation is done in two steps:
1. use a linear regression model to predict size from style in image
2. resize image to predicted size and run CellposeModel to get output masks.
Take the median object size of the predicted masks as the final predicted size.
Parameters
-------------------
imgs: list or array of images (optional, default None)
can be list of 2D/3D images, or array of 2D/3D images
styles: list or array of styles (optional, default None)
styles for images x - if x is None then styles must not be None
channels: list (optional, default None)
list of channels, either of length 2 or of length number of images by 2.
First element of list is the channel to segment (0=grayscale, 1=red, 2=blue, 3=green).
Second element of list is the optional nuclear channel (0=none, 1=red, 2=blue, 3=green).
For instance, to segment grayscale images, input [0,0]. To segment images with cells
in green and nuclei in blue, input [2,3]. To segment one grayscale image and one
image with cells in green and nuclei in blue, input [[0,0], [2,3]].
normalize: bool (default, True)
normalize data so 0.0=1st percentile and 1.0=99th percentile of image intensities in each channel
invert: bool (optional, default False)
invert image pixel intensity before running network
augment: bool (optional, default False)
tiles image with overlapping tiles and flips overlapped regions to augment
tile: bool (optional, default True)
tiles image to ensure GPU/CPU memory usage limited (recommended)
progress: pyqt progress bar (optional, default None)
to return progress bar status to GUI
Returns
-------
diam: array, float
final estimated diameters from images x or styles style after running both steps
diam_style: array, float
estimated diameters from style alone
"""
if styles is None and imgs is None:
raise ValueError('no image or features given')
if progress is not None:
progress.setValue(10)
if imgs is not None:
x, nolist = convert_images(imgs.copy(), channels, False, normalize, invert)
nimg = len(x)
if styles is None:
styles = self.cp.eval(x, net_avg=False, augment=augment, tile=tile, compute_masks=False)[-1]
if progress is not None:
progress.setValue(30)
diam_style = self._size_estimation(np.array(styles))
if progress is not None:
progress.setValue(50)
else:
if isinstance(styles, list):
styles = np.array(styles)
diam_style = self._size_estimation(styles)
diam_style[
|
np.isnan(diam_style)
|
numpy.isnan
|
#!/usr/bin/env python3
import argparse
import sys
import pandas as p
import numpy as np
from numpy.random import RandomState
from scipy.optimize import minimize_scalar
from scipy.stats import chi2
from collections import defaultdict
from scipy.special import gammaln
#class to perform simple variant filtering assuming fixed genome error rate
def div(x, y):
if x.dtype == np.int64 and y.dtype == np.int64:
return x // y
return x / y
def log_factorial(x):
"""Returns the logarithm of x!
Also accepts lists and NumPy arrays in place of x."""
return gammaln(x + 1)
def mixNLL(p, eta, n, m, f):
mix = p*eta[n,:] + (1-p)*eta[m,:]
return np.dot(f,-np.log(mix))
def log_multinomial_pdf2(f,mix):
return np.dot(f,-np.log(mix))
def log_multinomial_pdf(xs, ps):
"""Returns logarithm of multinomial pdf"""
n = sum(xs)
result = log_factorial(n) - sum(log_factorial(xs)) + sum(xs * np.log(ps))
return result
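# Quick sanity check (added for illustration): for counts xs = [2, 1] and
# probabilities ps = [0.5, 0.5] the multinomial pmf is 3 * 0.5**3 = 0.375,
# so log_multinomial_pdf should return log(0.375) ~= -0.9808:
#
#   np.exp(log_multinomial_pdf(np.array([2, 1]), np.array([0.5, 0.5])))   # ~0.375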
def expand_sample_names(sample_names):
expanded = []
for name in sample_names:
expanded.append(name + "-A")
expanded.append(name + "-C")
expanded.append(name + "-G")
expanded.append(name + "-T")
return expanded
class Variant_Val():
"""Filters variant position based on simple binomial
or log ratio of binomial to mixtures of binomials"""
def __init__(self,variants, eta, G, tau_star_array, gamma_array, randomState, optimise = False, max_iter = 100, min_p = 0.01,sampleFilter=0.5,min_freq=10):
self.optimise = optimise
self.max_iter = max_iter
self.min_p = min_p
self.upperP = 1.0 - min_p
self.min_freq = min_freq
self.G = G
self.tau = tau_star_array
self.gamma = gamma_array
row_sums = self.gamma.sum(axis=1)
self.gamma = self.gamma / row_sums[:, np.newaxis]
#first get array dimensions
variants_matrix = variants.values
self.genes = list(variants.index)
self.position = variants_matrix[:,0]
variants_matrix = np.delete(variants_matrix, 0, 1)
self.snps = np.reshape(variants_matrix, (variants_matrix.shape[0],int(variants_matrix.shape[1] / 4),4))
self.sumVS = np.sum(self.snps,axis=2)
#self.snps = self.snps[:,np.sum(self.sumVS > 10,axis=0) > self.V*0.9]
self.sumVS = np.sum(self.snps,axis=2)
self.sampleFilter = sampleFilter
self.S = self.snps.shape[1] #number of sites following filtering
self.selectV = np.sum(self.sumVS > self.min_freq,axis=1) >= self.S*self.sampleFilter
self.tau = self.tau[self.selectV,:,:]
self.snps = self.snps[self.selectV,:]
self.V = self.snps.shape[0] #number of variants
self.sumVS = np.sum(self.snps,axis=2)
self.position = self.position[self.selectV]
self.genes = [x for x, y in zip(self.genes, self.selectV.tolist()) if y is True]
self.snps1 = self.snps + 1
self.fsnps = self.snps.astype(np.float64)  # np.float alias removed in newer NumPy
vs_sum = self.snps.sum(axis=(2))
vs_mean = np.mean(vs_sum,axis=0)
#set random state
self.randomState = randomState
self.eta = eta
def calcMixtureLog(self):
MLL = np.zeros(self.V)
#get most abundant base
self.maxA = np.argmax(self.snps,axis=2)
self.maxAF = np.argmax(np.sum(self.snps,axis=1),axis=1)
#get second most abundant base
ftemp = np.copy(self.snps)
for v in range(self.V):
for s in range(self.S):
ftemp[v,s,self.maxA[v,s]] = -1
self.maxB = np.argmax(ftemp,axis=2)
self.N = (self.snps.sum(axis=2)).astype(np.float64)
self.n = (self.snps.max(axis=2)).astype(np.float64) #value of most abundant base
self.m = (ftemp.max(axis=2)).astype(np.float64) #value of second most abundant
self.e = self.N -self.n
p = self.n/self.N
p[p > self.upperP] = self.upperP
q = self.m/self.N
self.sumVS1 = np.sum(self.snps1,axis=2)
self.nsnps = np.zeros((self.V,self.S,4))
self.div = np.zeros(self.V)
self.ndiv =
|
np.zeros(self.V)
|
numpy.zeros
|
# we need numpy, tt
from __future__ import print_function
try:
xrange
except NameError:
xrange = range
import numpy as np
import tt, tt.cross
from tt.cross.rectcross import cross
# facilitating reshape (column-major, Fortran order)
def reshape(x, shape):
return np.reshape(x, shape, order = 'F')
# our TT-als module
from als import ttSparseALS
# import visualization tools
import matplotlib.pyplot as plt
from matplotlib import gridspec
def demo_completion():
d = 3
n=20
crossR=18
shape = np.array([n]*d)
def func(X):
return 1./(1+(X - n/2)**2).sum(axis = 1)**0.5
# tt-approximation built via cross approximation
x0 = tt.rand(np.array([n]*d), r = crossR)
tta = cross(func, x0)
print("TT-cross ranks: ", tta.r)
R = 10
gamma = 0.25
P = int(np.floor(gamma*d*n*(R**2)))
Pb = 100
# random choice
indices = np.random.choice(n, [P, d])
indicesB = np.random.choice(n, [Pb, d])
# make a list of tuples from the index rows
indices = [tuple(indices[k, :]) for k in xrange(indices.shape[0])]
indicesB = [tuple(indicesB[k, :]) for k in xrange(indicesB.shape[0])]
# set naturally filters input to be unique
indices = set(indices)
indicesB = set(indicesB)
# convert it into list
indices = list(indices)
indicesB = list(indicesB)
# convert back to numpy arrays
indices = np.array(indices)
indicesB = np.array(indicesB)
print("Unique sample points: %d/%d (%d)" % (indices.shape[0], P, n**d))
vals = func(indices)
cooP = {'values': vals, 'indices': indices}
cooPb = {'values': func(indicesB), 'indices': indicesB}
maxR = 5
x0 = tt.rand(shape, r=1)
x0 = x0 * (1./ x0.norm())
x0 = x0.round(0.)
# verbose
vb = True
X1, f = ttSparseALS(
cooP,
shape,
x0=None,
ttRank=maxR,
maxnsweeps=50,
verbose=vb,
tol=1e-8,
alpha = 1e-3
)
# Restore original, initial and approximation into full-format (do not try it in higher dimensions!)
xf1 = X1.full() # approximation ALS
a = tta.full() # original
b =
|
np.zeros([n]*d)
|
numpy.zeros
|
import sys
sys.path.insert(1, '/home/ximo/Documents/GitHub/skforecast')
import pytest
from pytest import approx
import numpy as np
import pandas as pd
from skforecast import __version__
from skforecast.ForecasterAutoregCustom import ForecasterAutoregCustom
from sklearn.linear_model import LinearRegression
from sklearn.linear_model import Ridge
from sklearn.linear_model import Lasso
from sklearn.ensemble import RandomForestRegressor
from sklearn.ensemble import GradientBoostingRegressor
def create_predictors(y):
'''
Create first 5 lags of a time series.
'''
X_train = pd.DataFrame({'y':y.copy()})
for i in range(0, 5):
X_train[f'lag_{i+1}'] = X_train['y'].shift(i)
X_train = X_train.drop(columns='y').tail(1).to_numpy()
return X_train
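# Added illustrative example: what create_predictors returns for a short series.
# With y = pd.Series(np.arange(10)) the last row of the lag columns is taken, so
# the result is a (1, 5) array holding the 5 most recent values in reverse order:
#
#   create_predictors(pd.Series(np.arange(10)))   # -> array([[9., 8., 7., 6., 5.]])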
# Test initializations
#-------------------------------------------------------------------------------
def test_init_exception_when_window_size_argument_is_string():
with pytest.raises(Exception):
forecaster = ForecasterAutoregCustom(
regressor = LinearRegression(),
fun_predictors = create_predictors,
window_size = '5'
)
def test_init_exception_when_fun_predictors_argument_is_string():
with pytest.raises(Exception):
forecaster = ForecasterAutoregCustom(
regressor = LinearRegression(),
fun_predictors = 'create_predictors',
window_size = 5
)
# Test method create_train_X_y()
#-------------------------------------------------------------------------------
def test_create_train_X_y_output_when_y_is_range_10_and_exog_is_None():
forecaster = ForecasterAutoregCustom(
regressor = LinearRegression(),
fun_predictors = create_predictors,
window_size = 5
)
results = forecaster.create_train_X_y(y=np.arange(10))
expected = (np.array([[4., 3., 2., 1., 0.],
[5., 4., 3., 2., 1.],
[6., 5., 4., 3., 2.],
[7., 6., 5., 4., 3.],
[8., 7., 6., 5., 4.]]),
np.array([5, 6, 7, 8, 9]))
assert (results[0] == expected[0]).all()
assert (results[1] == expected[1]).all()
def test_create_train_X_y_output_when_y_is_range_10_and_exog_is_1d_array():
forecaster = ForecasterAutoregCustom(
regressor = LinearRegression(),
fun_predictors = create_predictors,
window_size = 5
)
results = forecaster.create_train_X_y(y=np.arange(10), exog=np.arange(100, 110))
expected = (np.array([[4., 3., 2., 1., 0., 105.],
[5., 4., 3., 2., 1., 106.],
[6., 5., 4., 3., 2., 107.],
[7., 6., 5., 4., 3., 108.],
[8., 7., 6., 5., 4., 109.]]),
np.array([5, 6, 7, 8, 9]))
assert (results[0] == expected[0]).all()
assert (results[1] == expected[1]).all()
def test_create_train_X_y_output_when_y_is_range_10_and_exog_is_2d_array():
forecaster = ForecasterAutoregCustom(
regressor = LinearRegression(),
fun_predictors = create_predictors,
window_size = 5
)
results = forecaster.create_train_X_y(
y=np.arange(10),
exog=np.column_stack([np.arange(100, 110), np.arange(1000, 1010)])
)
expected = (np.array([[4, 3, 2, 1, 0, 105, 1005],
[5, 4, 3, 2, 1, 106, 1006],
[6, 5, 4, 3, 2, 107, 1007],
[7, 6, 5, 4, 3, 108, 1008],
[8, 7, 6, 5, 4, 109, 1009]]),
np.array([5, 6, 7, 8, 9]))
assert (results[0] == expected[0]).all()
assert (results[1] == expected[1]).all()
# Test method fit()
#-------------------------------------------------------------------------------
def test_fit_exception_when_y_and_exog_have_different_lenght():
forecaster = ForecasterAutoregCustom(
regressor = LinearRegression(),
fun_predictors = create_predictors,
window_size = 5
)
with pytest.raises(Exception):
forecaster.fit(y=np.arange(50), exog=np.arange(10))
def test_fit_exception_when_y_lenght_is_less_than_window_size_needed():
forecaster = ForecasterAutoregCustom(
regressor = LinearRegression(),
fun_predictors = create_predictors,
window_size = 5
)
with pytest.raises(Exception):
forecaster.fit(y=np.arange(5))
def test_last_window_stored_when_fit_forecaster():
forecaster = ForecasterAutoregCustom(
regressor = LinearRegression(),
fun_predictors = create_predictors,
window_size = 5
)
forecaster.fit(y=np.arange(50))
assert (forecaster.last_window == np.array([45, 46, 47, 48, 49])).all()
def test_in_sample_residuals_stored_when_fit_forecaster():
forecaster = ForecasterAutoregCustom(
regressor = LinearRegression(),
fun_predictors = create_predictors,
window_size = 5
)
forecaster.fit(y=np.arange(7))
assert (forecaster.in_sample_residuals == np.array([0., 0.])).all()
# Test method predict()
#-------------------------------------------------------------------------------
def test_predict_exception_when_steps_lower_than_1():
forecaster = ForecasterAutoregCustom(
regressor = LinearRegression(),
fun_predictors = create_predictors,
window_size = 5
)
forecaster.fit(y=np.arange(50))
with pytest.raises(Exception):
forecaster.predict(steps=0)
def test_predict_exception_when_forecaster_fited_without_exog_and_exog_passed_when_predict():
forecaster = ForecasterAutoregCustom(
regressor = LinearRegression(),
fun_predictors = create_predictors,
window_size = 5
)
forecaster.fit(y=np.arange(50))
with pytest.raises(Exception):
forecaster.predict(steps=10, exog=np.arange(10))
def test_predict_exception_when_forecaster_fited_with_exog_but_not_exog_passed_when_predict():
forecaster = ForecasterAutoregCustom(
regressor = LinearRegression(),
fun_predictors = create_predictors,
window_size = 5
)
forecaster.fit(y=np.arange(50), exog=np.arange(50))
with pytest.raises(Exception):
forecaster.predict(steps=10)
def test_predict_exception_when_exog_lenght_is_less_than_steps():
forecaster = ForecasterAutoregCustom(
regressor = LinearRegression(),
fun_predictors = create_predictors,
window_size = 5
)
forecaster.fit(y=np.arange(50), exog=np.arange(50))
with pytest.raises(Exception):
forecaster.predict(steps=10, exog=np.arange(5))
def test_predict_exception_when_exog_passed_in_predict_has_different_columns_than_exog_used_to_fit_nparray():
forecaster = ForecasterAutoregCustom(
regressor = LinearRegression(),
fun_predictors = create_predictors,
window_size = 5
)
forecaster.fit(y=np.arange(10), exog=np.arange(30).reshape(-1, 3))
with pytest.raises(Exception):
forecaster.predict(steps=10, exog=np.arange(30).reshape(-1, 2))
def test_predict_exception_when_exog_passed_in_predict_has_different_columns_than_exog_used_to_fit_pdDataDrame():
forecaster = ForecasterAutoregCustom(
regressor = LinearRegression(),
fun_predictors = create_predictors,
window_size = 5
)
forecaster.fit(y=np.arange(10), exog=pd.DataFrame(np.arange(30).reshape(-1, 3)))
with pytest.raises(Exception):
forecaster.predict(steps=10, exog=pd.DataFrame(np.arange(30).reshape(-1, 2)))
def test_predict_exception_when_last_window_argument_is_not_numpy_array_or_pandas_series():
forecaster = ForecasterAutoregCustom(
regressor = LinearRegression(),
fun_predictors = create_predictors,
window_size = 5
)
forecaster.fit(y=np.arange(50))
with pytest.raises(Exception):
forecaster.predict(steps=10, last_window=[1,2,3])
def test_predict_exception_when_last_window_lenght_is_less_than_maximum_lag():
forecaster = ForecasterAutoregCustom(
regressor = LinearRegression(),
fun_predictors = create_predictors,
window_size = 5
)
forecaster.fit(y=np.arange(50))
with pytest.raises(Exception):
forecaster.predict(steps=10, last_window=pd.Series([1, 2]))
def test_predict_output_when_regresor_is_LinearRegression_lags_is_3_ytrain_is_numpy_arange_50_and_steps_is_5():
forecaster = ForecasterAutoregCustom(
regressor = LinearRegression(),
fun_predictors = create_predictors,
window_size = 5
)
forecaster.fit(y=np.arange(50))
predictions = forecaster.predict(steps=5)
expected = np.array([50., 51., 52., 53., 54.])
assert predictions == approx(expected)
# Test method _check_y()
#-------------------------------------------------------------------------------
def test_check_y_exception_when_y_is_int():
forecaster = ForecasterAutoregCustom(
regressor = LinearRegression(),
fun_predictors = create_predictors,
window_size = 5
)
with pytest.raises(Exception):
forecaster._check_y(y=10)
def test_check_y_exception_when_y_is_list():
forecaster = ForecasterAutoregCustom(
regressor = LinearRegression(),
fun_predictors = create_predictors,
window_size = 5
)
with pytest.raises(Exception):
forecaster._check_y(y=[1, 2, 3])
def test_check_y_exception_when_y_is_numpy_array_with_more_than_one_dimension():
forecaster = ForecasterAutoregCustom(
regressor = LinearRegression(),
fun_predictors = create_predictors,
window_size = 5
)
with pytest.raises(Exception):
forecaster._check_y(y=np.arange(10).reshape(-1, 1))
# Test method _check_last_window()
#-------------------------------------------------------------------------------
def test_check_last_window_exception_when_last_window_is_int():
forecaster = ForecasterAutoregCustom(
regressor = LinearRegression(),
fun_predictors = create_predictors,
window_size = 5
)
with pytest.raises(Exception):
forecaster._check_y(y=10)
def test_check_last_window_exception_when_last_window_is_list():
forecaster = ForecasterAutoregCustom(
regressor = LinearRegression(),
fun_predictors = create_predictors,
window_size = 5
)
with pytest.raises(Exception):
forecaster._check_y(y=[1, 2, 3])
def test_check_last_window_exception_when_last_window_is_numpy_array_with_more_than_one_dimension():
forecaster = ForecasterAutoregCustom(
regressor = LinearRegression(),
fun_predictors = create_predictors,
window_size = 5
)
with pytest.raises(Exception):
forecaster._check_y(y=np.arange(10).reshape(-1, 1))
# Test method _check_exog()
#-------------------------------------------------------------------------------
def test_check_exog_exception_when_exog_is_int():
forecaster = ForecasterAutoregCustom(
regressor = LinearRegression(),
fun_predictors = create_predictors,
window_size = 5
)
with pytest.raises(Exception):
forecaster._check_exog(exog=10)
def test_check_exog_exception_when_exog_is_list():
forecaster = ForecasterAutoregCustom(
regressor = LinearRegression(),
fun_predictors = create_predictors,
window_size = 5
)
with pytest.raises(Exception):
forecaster._check_exog(exog=[1, 2, 3])
def test_check_exog_exception_when_exog_is_numpy_array_with_more_than_2_dimensions():
forecaster = ForecasterAutoregCustom(
regressor = LinearRegression(),
fun_predictors = create_predictors,
window_size = 5
)
with pytest.raises(Exception):
forecaster._check_exog(exog=np.arange(30).reshape(-1, 10, 3))
def test_check_exog_exception_when_ref_type_is_pandas_series_and_exog_is_numpy_array_with_more_than_1_column():
forecaster = ForecasterAutoregCustom(
regressor = LinearRegression(),
fun_predictors = create_predictors,
window_size = 5
)
with pytest.raises(Exception):
forecaster._check_exog(
exog = np.arange(30).reshape(-1, 2),
ref_type = pd.Series
)
def test_check_exog_exception_when_ref_type_is_pandas_series_and_exog_is_numpy_array_with_more_than_2_dimensions():
forecaster = ForecasterAutoregCustom(
regressor = LinearRegression(),
fun_predictors = create_predictors,
window_size = 5
)
with pytest.raises(Exception):
forecaster._check_exog(exog=np.arange(30).reshape(-1, 10, 3))
def test_check_exog_exception_when_exog_has_diferent_number_of_columns_than_ref_shape():
forecaster = ForecasterAutoregCustom(
regressor = LinearRegression(),
fun_predictors = create_predictors,
window_size = 5
)
with pytest.raises(Exception):
forecaster._check_exog(
exog = np.arange(30).reshape(-1, 3),
ref_type = np.ndarray,
ref_shape = (1, 2)
)
def test_check_exog_exception_when_exog_is_1d_numpy_array_and_ref_shape_has_more_than_1_column():
forecaster = ForecasterAutoregCustom(
regressor = LinearRegression(),
fun_predictors = create_predictors,
window_size = 5
)
with pytest.raises(Exception):
forecaster._check_exog(
exog =
|
np.arange(30)
|
numpy.arange
|
import re
import dlib
import cv2
import numpy as np
import glob
# read the example images
filename_src = "I2G/man.jpeg"
filename_dst = "I2G/man2.jpeg"
img_src = cv2.imread(filename_src)
img_dst = cv2.imread(filename_dst)
img_src = cv2.resize(img_src,(256,256),interpolation= cv2.INTER_AREA)
img_dst= cv2.resize(img_dst,(256,256),interpolation= cv2.INTER_AREA)
img_dst_warped = np.copy(img_dst) # backup copy of the target face
idx_fig = 1
points_src = np.load('I2G/man.npy')
points_src = points_src.astype('int')
points_src = points_src.tolist() # ndarray does not support insertion
points_dst = np.load('I2G/man2.npy')
points_dst = points_dst.astype('int')
points_dst = points_dst.tolist()
#print(points_dst)
# point sets contained in the convex hull
hull_pt_src = []
hull_pt_dst = []
hull_pt_indices = cv2.convexHull(np.array(points_dst),
returnPoints = False)
hull_pt_indices = hull_pt_indices.flatten() # indices of the convex hull points
# print(hull_pt_indices)
for idx_pt in hull_pt_indices:
hull_pt_src.append(points_src[idx_pt])
hull_pt_dst.append(points_dst[idx_pt])
def draw_delaunay(img,shape):
rect = (0, 0, 256,256)
subdiv = cv2.Subdiv2D(rect)
for r in shape:
subdiv.insert(r)
white = (255,255,255)
trangleList = subdiv.getTriangleList()
for t in trangleList:
pt1 = (int(t[0]),int(t[1]))
pt2 = (int(t[2]),int(t[3]))
pt3 = (int(t[4]),int(t[5]))
cv2.line(img,pt1,pt2,(255,255,255),1)
cv2.line(img,pt2,pt3,(255,255,255),1)
cv2.line(img,pt3,pt1,(255,255,255),1)
cv2.imshow('I',img)
cv2.waitKey(100)
cv2.waitKey(0)
#draw_delaunay(img_src,points_src)
#draw_delaunay(img_dst,points_dst)
def rect_contains(rect, point):
if point[0] < rect[0]:
return False
elif point[1] < rect[1]:
return False
elif point[0] > rect[2]:
return False
elif point[1] > rect[3]:
return False
return True
def cal_delaunay_tri(rect, points):
"""计算狄洛尼三角剖分"""
subdiv = cv2.Subdiv2D(rect)
# insert the points one by one
for pt in points:
# subdiv.insert expects a tuple as input
subdiv.insert(tuple(pt))
lst_tri = subdiv.getTriangleList()
# vertex indices of the Delaunay triangles
lst_delaunay_tri_pt_indices = []
for tri in lst_tri:
lst_tri_pts = [(tri[0], tri[1])]
lst_tri_pts.append((tri[2], tri[3]))
lst_tri_pts.append((tri[4], tri[5]))
# look up the vertex indices of this triangle
lst_pt_indices = []
for tri_pt in lst_tri_pts:
for idx_pt in range(len(points)):
if (abs(tri_pt[0] - points[idx_pt][0]) < 1) and \
(abs(tri_pt[1] - points[idx_pt][1]) < 1):
lst_pt_indices.append(idx_pt)
lst_delaunay_tri_pt_indices.append(lst_pt_indices)
return lst_delaunay_tri_pt_indices
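# Added illustrative note: cv2.Subdiv2D.getTriangleList returns raw vertex
# coordinates, so the loop above maps each vertex back to its index in `points`
# by a nearest-match within 1 pixel. A minimal call, with made-up points:
#
#   tri_indices = cal_delaunay_tri((0, 0, 256, 256),
#                                  [(10, 10), (200, 30), (120, 220), (30, 180)])
#   # each entry lists the indices of one triangle's vertices in the input point list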
rect = (0,0,256,256)
lst_delaunay_tri_pt_indices = cal_delaunay_tri(rect, hull_pt_dst) # vertex indices of the convex-hull triangulation
# print(lst_delaunay_tri_pt_indices)
#----------------------------------------------------------------------
def warp_affine(img_src, tri_src, tri_dst, size):
"""仿射"""
# 仿射矩阵
mat_warp = cv2.getAffineTransform(np.float32(tri_src), np.float32(tri_dst))
# 仿射变换
img_dst = cv2.warpAffine(img_src, mat_warp, (size[0], size[1]), None,
flags=cv2.INTER_LINEAR,
borderMode=cv2.BORDER_REFLECT_101)
return img_dst
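# Added illustrative example: warping a single triangle with warp_affine.
# The coordinates below are made up for demonstration only.
#
#   tri_src = [(0, 0), (50, 0), (0, 50)]
#   tri_dst = [(10, 10), (60, 20), (20, 60)]
#   patch = np.zeros((100, 100, 3), np.uint8)
#   warped = warp_affine(patch, tri_src, tri_dst, (100, 100))   # 100x100 output patch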
#----------------------------------------------------------------------
def warp_tri(img_src, img_dst, tri_src, tri_dst, alpha=1) :
"""仿射三角剖分,源图像到目标图像"""
# 三角区域框
rect_src = cv2.boundingRect(np.array(tri_src))
rect_dst = cv2.boundingRect(np.array(tri_dst))
# offsets of the triangle vertices relative to their bounding rectangles
tri_src_to_rect = [(item[0] - rect_src[0], item[1] - rect_src[1])
for item in tri_src]
tri_dst_to_rect = [(item[0] - rect_dst[0], item[1] - rect_dst[1])
for item in tri_dst]
# mask
mask = np.zeros((rect_dst[3], rect_dst[2], 3), dtype = np.float32)
cv2.fillConvexPoly(mask, np.array(tri_dst_to_rect), (1, 1, 1), 16, 0)
# crop the source image to the triangle's bounding rectangle
img_src_rect = img_src[rect_src[1] : rect_src[1] + rect_src[3],
rect_src[0] : rect_src[0] + rect_src[2]]
size = (rect_dst[2], rect_dst[3])
# affine-warp the bounding-rectangle patch
img_src_rect_warpped = warp_affine(img_src_rect, tri_src_to_rect, tri_dst_to_rect, size)
# mask * opacity
mask *= alpha
# target = target * (1 - mask) + source * mask
img_dst[rect_dst[1] : rect_dst[1] + rect_dst[3],
rect_dst[0] : rect_dst[0] + rect_dst[2]] = \
img_dst[rect_dst[1] : rect_dst[1] + rect_dst[3],
rect_dst[0] : rect_dst[0] + rect_dst[2]] * (1 - mask) + \
img_src_rect_warpped * mask
# affine-warp each Delaunay triangle
for tri_pt_indices in lst_delaunay_tri_pt_indices:
# triangle vertex coordinates in the source and target images
tri_src = [hull_pt_src[tri_pt_indices[idx]] for idx in range(3)]
tri_dst = [hull_pt_dst[tri_pt_indices[idx]] for idx in range(3)]
warp_tri(img_src, img_dst_warped, tri_src, tri_dst, 1)
# harmonization (seamless blending)
mask = np.zeros(img_dst.shape, dtype=img_dst.dtype)
cv2.fillConvexPoly(mask, np.array(hull_pt_dst), (255, 255, 255)) # mask over the target convex hull
rect = cv2.boundingRect(
|
np.float32([hull_pt_dst])
|
numpy.float32
|
import random
import unittest
import numpy as np
from skratch.datasets import load_iris
from skratch.datasets import load_wine
from skratch.datasets import load_breast_cancer
from skratch.garden import RandomGardenClassifier
class ParametersTest(unittest.TestCase):
def test_max_depth(self):
"""
Test that the model fits and predicts with different parameter values
for max_depth=1,2,3,5,10,20.
"""
for i in [1,2,3,5,10,20]:
X = load_iris().data
y = load_iris().target
RandomGardenClassifier(max_depth=i).fit(X, y)
def test_min_samples(self):
"""
Test that the model fits and predicts with different parameter values
for min_samples_leaf=1,2,3,5,10.
"""
for i in [1,2,3,5,10]:
X = load_iris().data
y = load_iris().target
RandomGardenClassifier(min_samples_leaf=i).fit(X, y)
class TransformationTest(unittest.TestCase):
def test_random(self):
"""
Test that the model does not learn when the targets are randomized.
"""
random.seed(28)
np.random.seed(28)
X = np.random.rand(500,5)
y = np.random.randint(2, size=(500))
X_train, X_test = X[0:400,:], X[400:,:]
y_train, y_test = y[0:400], y[400:]
clf = RandomGardenClassifier(n_estimators=3).fit(X_train, y_train)
y_pred = clf.predict(X_test)
acc = np.sum(y_pred==y_test)/len(y_test)
assert acc > 0.4 and acc <0.6
class ScoreTest(unittest.TestCase):
def test_iris_fitting(self):
"""
Test that when the model is evaluated on its own training data, the fit is
nearly perfect.
"""
X = load_iris().data
y = load_iris().target
clf = RandomGardenClassifier(n_estimators=3).fit(X, y)
y_pred = clf.predict(X)
assert np.sum(y_pred==y)/len(y) > 0.9
def test_wine_fitting(self):
"""
Test that when the model is evaluated on its own training data, the fit is
nearly perfect.
"""
X = load_wine().data
y = load_wine().target
clf = RandomGardenClassifier(n_estimators=3).fit(X, y)
y_pred = clf.predict(X)
assert
|
np.sum(y_pred==y)
|
numpy.sum
|
import os
import json
import h5py
from nltk.tokenize import word_tokenize
import numpy as np
from tqdm import tqdm
from updown.data.config_attrib_selection import attrib_selection
import pickle
def load_obj(path):
with open(path, 'rb') as f:
return pickle.load(f)
def save_obj(obj, path):
with open(path, 'wb') as f:
pickle.dump(obj, f, pickle.HIGHEST_PROTOCOL)
class ImageFeaturesReader(object):
def __init__(self, split, features_h5path, in_memory = False, use_obj_att_preds=False):
self.features_h5path = features_h5path
self._in_memory = in_memory
self.obj_id2name = list(load_obj("/path/to/obj_id2class_name.pkl"))
self.obj_id2name[0] = "bg"
self.use_obj_att_preds = use_obj_att_preds
self._map = {}
self._num_boxes = {}
self.split = split
self._cache = "/path/to/cache"
if self._in_memory:
print(f"Loading image features from {self.features_h5path}...")
features_h5 = h5py.File(self.features_h5path, "r")
for index in tqdm(range(features_h5["image_id"].shape[0])):
self._map[features_h5["image_id"][index]] = features_h5["features"][index]
self._num_boxes[features_h5["image_id"][index]] = features_h5["num_boxes"][index]
features_h5.close()
else:
self.features_h5 = h5py.File(self.features_h5path, "r")
image_id_np = np.array(self.features_h5["image_id"])
self._map = {image_id_np[index]: index for index in range(image_id_np.shape[0])}
self._num_boxes = {
image_id_np[index]: self.features_h5["num_boxes"][index]
for index in range(image_id_np.shape[0])
}
self.has_obj_atts = False
if(not self.use_obj_att_preds and "obj_atts" in self.features_h5.keys()):
self.obj_atts = {}
self.has_obj_atts = True
cache_path = os.path.join(self._cache, split + "_cache_obj_atts.pkl")
if(os.path.isfile(cache_path)):
print("Load cached obj_atts...")
self.obj_atts = load_obj(cache_path)
else:
print("Extract obj_atts...")
for index in range(image_id_np.shape[0]):
obj_atts = self.features_h5["obj_atts"][index]
result = []
k = 0
while k < len(obj_atts):
if obj_atts[k] < 100:
result.append([obj_atts[k], []])
else:
result[-1][1].append([obj_atts[k]-100, 1])
k += 1
self.obj_atts[image_id_np[index]] = result
save_obj(self.obj_atts, cache_path)
print("done.")
if(self.use_obj_att_preds and "obj_atts_det" in self.features_h5.keys()):
self.obj_atts_det = {}
self.has_obj_atts = True
cache_path = os.path.join(self._cache, split + "_cache_obj_atts_det.pkl")
if(os.path.isfile(cache_path)):
print("Load cached obj_atts_det...")
self.obj_atts_det = load_obj(cache_path)
else:
print("Extract obj_atts_det...")
for index in range(image_id_np.shape[0]):
obj_atts = self.features_h5["obj_atts_det"][index]
result = []
k = 0
while k < len(obj_atts):
if obj_atts[k] < 100:
result.append([int(obj_atts[k]), []])
k += 1
else:
result[-1][1].append([int(obj_atts[k]-100), obj_atts[k+1]])
k += 2
self.obj_atts_det[image_id_np[index]] = result
save_obj(self.obj_atts_det, cache_path)
print("done.")
def __len__(self):
return len(self._map)
def __getitem__(self, image_id):
if self._in_memory:
image_id_features = self._map[image_id]
else:
index = self._map[image_id]
image_id_features = self.features_h5["features"][index]
num_boxes = self._num_boxes[image_id]
obj_atts = None
try:
if(self.use_obj_att_preds):
obj_atts = self.obj_atts_det[image_id]
else:
obj_atts = self.obj_atts[image_id]
if len(obj_atts) != num_boxes:
obj_atts = [[0, []]] * num_boxes
except:
pass
return image_id_features.reshape((num_boxes, -1)), obj_atts
class CocoCaptionsReader(object):
def __init__(self, captions_jsonpath):
self._captions_jsonpath = captions_jsonpath
with open(self._captions_jsonpath) as cap:
captions_json = json.load(cap)
PUNCTUATIONS = [
"''", "'", "``", "`", "(", ")", "{", "}",
".", "?", "!", ",", ":", "-", "--", "...", ";"
]
self._captions = []
print(f"Tokenizing captions from {captions_jsonpath}...")
for caption_item in tqdm(captions_json["annotations"]):
caption = caption_item["caption"].lower().strip()
caption_tokens = word_tokenize(caption)
caption_tokens = [ct for ct in caption_tokens if ct not in PUNCTUATIONS]
self._captions.append((caption_item["image_id"], caption_tokens))
def __len__(self):
return len(self._captions)
def __getitem__(self, index):
return self._captions[index]
class SenticapReader(object):
def __init__(self, senticap_jsonpath: str, train_split=False, val_split=False, test_split=False, sentiment=None):
self.senticap_jsonpath = senticap_jsonpath
with open(senticap_jsonpath) as senti:
senticap_json = json.load(senti)["images"]
PUNCTUATIONS = [
"''", "'", "``", "`", "(", ")", "{", "}",
".", "?", "!", ",", ":", "-", "--", "...", ";"
]
self._captions = []
self._image_ids = set()
self.sentiment = sentiment
print(f"Tokenizing captions from {senticap_jsonpath}...")
for item in tqdm(senticap_json):
senti_coco_id = int(item["filename"].split(".")[0].split("_")[2])
split = item["split"]
if(train_split and (split == "train") or
(val_split and (split == "val")) or
(test_split and (split == "test"))):
cap_added = False
for c in item["sentences"]:
if(not sentiment or sentiment == "pos" and c["sentiment"] == 1 or sentiment == "neg" and c["sentiment"] == 0):
cap_added = True
if(c["sentiment"] == 0):
c["sentiment"] = -1
caption= c["raw"].lower().strip()
caption_tokens = word_tokenize(caption)
caption_tokens = [ct for ct in caption_tokens if ct not in PUNCTUATIONS]
self._captions.append((senti_coco_id, caption_tokens, c["sentiment"]))
if(cap_added):
self._image_ids.add(senti_coco_id)
def __len__(self):
return len(self._captions)
def __getitem__(self, index):
return self._captions[index]
class ExpertReader(object):
def __init__(self, expert_jsonpath: str, train_split=True):
self.expert_jsonpath = expert_jsonpath
with open(expert_jsonpath) as senti:
expert_json = json.load(senti)
PUNCTUATIONS = [
"''", "'", "``", "`", "(", ")", "{", "}",
".", "?", "!", ",", ":", "-", "--", "...", ";"
]
self._captions = []
self._image_ids = set()
print(f"Tokenizing captions from {expert_jsonpath}...")
for item in tqdm(expert_json):
expert_coco_id = int(item["image_id"])
try:
expert_sentiment = item["sentiment"]
except:
expert_sentiment = 0
self._image_ids.add(expert_coco_id)
caption: str = item["caption"].lower().strip()
caption_tokens = word_tokenize(caption)
caption_tokens = [ct for ct in caption_tokens if ct not in PUNCTUATIONS]
self._captions.append((expert_coco_id, caption_tokens, expert_sentiment))
def __len__(self):
return len(self._captions)
def __getitem__(self, index):
return self._captions[index]
class ConstraintBoxesReader(object):
def __init__(self, boxes_jsonpath: str):
_boxes = json.load(open(boxes_jsonpath))
self._image_id_to_boxes = {}
for ann in _boxes["annotations"]:
if ann["image_id"] not in self._image_id_to_boxes:
self._image_id_to_boxes[ann["image_id"]] = []
self._image_id_to_boxes[ann["image_id"]].append(ann)
self._class_names = {}
for c in _boxes["categories"]:
self._class_names[c["id"]] = c["name"]
def __len__(self):
return len(self._image_id_to_boxes)
def __getitem__(self, image_id: int):
bbox_anns = self._image_id_to_boxes.get(int(image_id), [])
boxes = np.array([ann["bbox"] for ann in bbox_anns])
scores = np.array([ann.get("score", 1) for ann in bbox_anns])
class_names = [self._class_names[ann["category_id"]] for ann in bbox_anns]
return {"boxes": boxes, "class_names": class_names, "scores": scores}
class CocoAttributesReader(object):
def __init__(self, attribs_dir_path: str):
self.attrib_weight_threshold = 0.3
self.attrib_min_appearance = 20
self.attribs_n_max_per_image = 99
self._cache = "/path/to/cache"
cache_path = os.path.join(self._cache, "cache_coco_attributes.pkl")
if(os.path.isfile(cache_path)):
print("Load cached CocoAttributes...")
result_read_attributes = load_obj(cache_path)
else:
print("Extract CocoAttributes...")
result_read_attributes = self.read_attributes(attribs_dir_path)
save_obj(result_read_attributes, cache_path)
self.image_ids = result_read_attributes[0]
self.image2obj_insts = result_read_attributes[1]
self.obj_inst2attrib_inst = result_read_attributes[2]
self.attrib_inst2attrib_vector = result_read_attributes[3]
self.ignore_attrib_indices = result_read_attributes[4]
self.attrib_names = result_read_attributes[5]
self.attrib_image_count = result_read_attributes[6]
self.attrib2attrib_inst_count = result_read_attributes[7]
self.n_attribs = len(self.attrib_names)
self.att_counts = np.zeros(self.n_attribs)
name2count = {}
for k,v in self.attrib2attrib_inst_count.items():
self.att_counts[k] = v
name2count[self.attrib_names[k]] = v
self.obj_inst2obj_id = load_obj(os.path.join(attribs_dir_path, "obj_inst2obj_id.pkl"))
self.obj_id2obj_name = load_obj(os.path.join(attribs_dir_path, "obj_id2obj_name.pkl"))
self.avail_obj_ids = set()
self.avail_obj_names = set()
for obj_inst, attrib_inst in self.obj_inst2attrib_inst.items():
try:
if(self.attrib_inst2attrib_vector[attrib_inst].sum() > 0):
self.avail_obj_ids.add(self.obj_inst2obj_id[obj_inst])
self.avail_obj_names.add(self.obj_id2obj_name[self.obj_inst2obj_id[obj_inst]])
except:
pass
def __len__(self):
return len(self.image_ids)
def __getitem__(self, image_id: int):
obj_insts = self.image2obj_insts[image_id]
result = []
for obj_inst in obj_insts:
if(obj_inst in self.obj_inst2attrib_inst):
attrib_inst = self.obj_inst2attrib_inst[obj_inst]
try:
attrib_vec = self.attrib_inst2attrib_vector[attrib_inst]
#result.append([obj_inst, attrib_vec]) # attribs as sparse arrays
#result.append([obj_inst, np.nonzero(attrib_vec)[0]]) # attribs as indizes
if(attrib_vec.sum() > 0):
result.append([self.obj_id2obj_name[self.obj_inst2obj_id[obj_inst]], [[self.attrib_names[x], attrib_vec[x]] for x in np.nonzero(attrib_vec)[0]]])
except:
pass
return self.filter_duplicates(result)
def filter_duplicates(self, result):
result_filtered = {}
for obj in result:
if(obj[0] not in result_filtered):
result_filtered[obj[0]] = obj[1]
else:
result_filtered_atts = [a[0] for a in result_filtered[obj[0]]]
for attrib in obj[1]:
try:
idx = result_filtered_atts.index(attrib[0])
result_filtered[obj[0]][idx][1] = max(result_filtered[obj[0]][idx][1], attrib[1])
except ValueError:
result_filtered[obj[0]].append(attrib)
return [[key, value] for key, value in result_filtered.items()]
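# Worked example for filter_duplicates: given
#   [["dog", [["furry", 0.8]]], ["dog", [["furry", 0.5], ["brown", 0.6]]]]
# the per-name attribute lists are merged, duplicate attributes keep the maximum
# weight, and the result is [["dog", [["furry", 0.8], ["brown", 0.6]]]].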
def read_attributes(self, attribs_dir_path, ignore_attrib_indices=None):
attrib_inst2attrib_vector = load_obj(os.path.join(attribs_dir_path, "attrib_inst2attrib_vector.pkl"))
attrib_inst2obj_inst = load_obj(os.path.join(attribs_dir_path, "attrib_inst2obj_inst.pkl"))
obj_inst2attrib_inst = load_obj(os.path.join(attribs_dir_path, "obj_inst2attrib_inst.pkl"))
obj_inst2image = load_obj(os.path.join(attribs_dir_path, "obj_inst2image.pkl"))
image2obj_insts = load_obj(os.path.join(attribs_dir_path, "image2obj_insts.pkl"))
attrib2string = load_obj(os.path.join(attribs_dir_path, "attrib2string.pkl"))
attrib_names = []
for key in sorted(attrib2string.keys()):
attrib_names.append(attrib2string[key])
# remove ignored attributes from attribute name list
attrib_selection_list = np.array(list(attrib_selection.values()), dtype=int)
attrib_ignore_selection_idxs = np.argwhere(attrib_selection_list == 0)
attrib_names = np.delete(attrib_names, attrib_ignore_selection_idxs).tolist()
attrib2attrib_inst_count = {}
attrib_image_count = {}
attrib2images = {}
for att_id, atts in list(attrib_inst2attrib_vector.items()):
instance_id = attrib_inst2obj_inst[att_id]
try:
coco_id = obj_inst2image[instance_id]
except:
del attrib_inst2attrib_vector[att_id]
continue
# remove ignored attributes from attribute arrays
atts = np.delete(atts, attrib_ignore_selection_idxs)
#atts = (atts * attrib_selection_list)
idxs_larger = np.argwhere(atts >= self.attrib_weight_threshold)
idxs_larger = [idx[0] for idx in idxs_larger]
idxs_too_small = atts < self.attrib_weight_threshold
# set attribute values in attribute array to zero if smaller than threshold
atts[idxs_too_small] = 0.0
attrib_inst2attrib_vector[att_id] = atts
# add larger attributes to count dict and attrib2images dict
for idx in idxs_larger:
if(idx not in attrib2attrib_inst_count):
attrib2attrib_inst_count[idx] = 1
else:
attrib2attrib_inst_count[idx] += 1
if(idx not in attrib2images):
attrib2images[idx] = {coco_id}
else:
attrib2images[idx].add(coco_id)
# generate image count dict for attribute appearance
for att_id, image_ids in attrib2images.items():
attrib_image_count[att_id] = len(image_ids)
# detect attributes with count lower than threshold
if(ignore_attrib_indices is None):
ignore_attrib_indices = []
for att_id, count in attrib_image_count.items():
if(count < self.attrib_min_appearance):
ignore_attrib_indices.append([att_id])
elif(not ignore_attrib_indices):
raise ValueError("no ignore_attrib_indices is given.")
attrib_names =
|
np.delete(attrib_names, ignore_attrib_indices)
|
numpy.delete
|
#!/usr/bin/env python
'''
Diffraction functions that use pyFAI
mkak 2017.03.14
'''
##########################################################################
# IMPORT PYTHON PACKAGES
import os
import numpy as np
HAS_pyFAI = False
try:
import pyFAI
import pyFAI.units
HAS_pyFAI = True
except ImportError:
pass
from larch.io import tifffile
##########################################################################
# FUNCTIONS
def return_ai(calfile):
if calfile is not None and os.path.exists(calfile):
return pyFAI.load(calfile)
def q_from_xy(x, y, ai=None, calfile=None):
if ai is None: ai = pyFAI.load(calfile)
try:
return ai.qFunction(np.array([y,]),np.array([x,]))[0]
except:
return 0
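# For reference (not used by the code): with lambda the wavelength and 2*theta the
# scattering angle computed below, q = 4 * pi * sin(theta) / lambda. pyFAI's
# qFunction already returns q for the requested pixel, so no conversion is needed here.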
def twth_from_xy(x, y, ai=None, calfile=None, ang_units='degrees'):
if ai is None: ai = pyFAI.load(calfile)
try:
twth = ai.tth(np.array([y,]),np.array([x,]))
except:
return 0
if ang_units.startswith('rad'):
return twth[0]
else:
return
|
np.degrees(twth[0])
|
numpy.degrees
|
from scipy import misc
import tqdm
import pickle
import os
import sys
import numpy as np
def eprint(*args):
_str = " ".join([str(arg) for arg in args])
sys.stderr.write("%s\n" % _str)
def load_english_hnd():
# image names are of the form: data/English/Hnd/Img/Sample001/img001-001.png
fldr = "data/English/Hnd/Img"
NUM_CLASSES = 59
NUM_USERS = 55
IMAGE_SIZE = 32
images, labels, uids = [], [], []
width, height = IMAGE_SIZE, IMAGE_SIZE
MAX_NUM_DOMAINS = NUM_USERS
uid = 0
cache_fname = 'data/english_hnd.pkl'
if os.path.exists(cache_fname):
images, labels, uids = pickle.load(open(cache_fname, "rb"))
else:
for label in tqdm.tqdm(range(NUM_CLASSES)):
label_fldr = "%s/Sample%03d" % (fldr, label+1)
if not os.path.exists(label_fldr):
continue
for fname in os.listdir(label_fldr):
uid = int(fname.split('-')[1][:-4]) - 1
img = misc.imread(label_fldr + "/" + fname, flatten=True)
img = misc.imresize(img, (height, width))
img = img.astype(np.float32)
img = misc.bytescale(img)
img = img.astype(np.uint8)
assert np.max(img) <= 255 and np.min(img) >= 0, "Max and min of image: %f %f" % (np.max(img), np.min(img))
img = (img-128.)/128.
assert np.max(img) != np.min(img)
images.append(img)
labels.append(label)
uids.append(uid)
pickle.dump((images, labels, uids), open(cache_fname, "wb"))
print ("Labels: %s uids: %s" % (labels[:10], uids[:10]))
print ("Labels: %s uids: %s" % (labels[-10:], uids[-10:]))
print ("Test images: ", np.max(images[0]), np.min(images[0]))
print ("Read %d examples" % len(images))
images, labels, uids = np.array(images), np.array(labels), np.array(uids)
test_idxs = np.where(uids >= NUM_USERS - 15)
train_idxs = np.where(uids <= NUM_USERS - 25)
dev_idxs = np.intersect1d(np.where(uids > NUM_USERS - 25), np.where(uids < NUM_USERS - 15))
train = (images[train_idxs], labels[train_idxs], uids[train_idxs])
dev = (images[dev_idxs], labels[dev_idxs], uids[dev_idxs])
test = (images[test_idxs], labels[test_idxs], uids[test_idxs])
return (train, dev, dev, test)
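# The split above is per writer (uid), not per image: with NUM_USERS = 55, writers
# with uid >= 40 form the test set, writers with uid <= 30 form the train set, and
# writers with 30 < uid < 40 form the dev set, so no writer crosses splits.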
def load_english_fnt():
# image names are of the form: data/English/Fnt/Img/Sample001/img001-00078.png
fldr = "data/English/Fnt"
NUM_CLASSES = 62
NUM_USERS = 1016
IMAGE_SIZE = 32
images, labels, uids = [], [], []
width, height = IMAGE_SIZE, IMAGE_SIZE
MAX_NUM_DOMAINS = NUM_USERS
uid = 0
cache_fname = 'data/english_fnt.pkl'
if os.path.exists(cache_fname):
images, labels, uids = pickle.load(open(cache_fname, "rb"))
else:
for label in tqdm.tqdm(range(NUM_CLASSES)):
label_fldr = "%s/Sample%03d" % (fldr, label + 1)
if not os.path.exists(label_fldr):
continue
for fname in os.listdir(label_fldr):
uid = int(fname.split('-')[1][:-4]) - 1
img = misc.imread(label_fldr + "/" + fname, flatten=True)
img = misc.imresize(img, (height, width))
img = img.astype(np.float32)
img = misc.bytescale(img)
img = img.astype(np.uint8)
assert np.max(img) <= 255 and np.min(img) >= 0, "Max and min of image: %f %f" % (np.max(img), np.min(img))
img = (img-128.)/128.
assert np.max(img) != np.min(img)
images.append(img)
labels.append(label)
uids.append(uid)
pickle.dump((images, labels, uids), open(cache_fname, "wb"))
print ("Labels: %s uids: %s" % (labels[:10], uids[:10]))
print ("Labels: %s uids: %s" % (labels[-10:], uids[-10:]))
print ("Test images: ", np.max(images[0]), np.min(images[0]))
print ("Read %d examples" % len(images))
images, labels, uids = np.array(images), np.array(labels), np.array(uids)
test_idxs = np.where(uids >= NUM_USERS - 100)
train_idxs = np.where(uids <= NUM_USERS - 500)
dev_idxs = np.intersect1d(np.where(uids > NUM_USERS - 200), np.where(uids < NUM_USERS - 100))
train = (images[train_idxs], labels[train_idxs], uids[train_idxs])
dev = (images[dev_idxs], labels[dev_idxs], uids[dev_idxs])
test = (images[test_idxs], labels[test_idxs], uids[test_idxs])
print ("# train, dev, test: %s %s %s" % (np.shape(train[0]), np.shape(dev[0]), np.shape(test[0])))
return (train, dev, dev, test)
def load_font_images():
# image names are of the form: 32x32/<class name>/<Font_name>.png
fldr = "../not_notMNIST/32x32/"
files = os.listdir(fldr)
NUM_CLASSES = 62
NUM_USERS = 1016
IMAGE_SIZE = 32
images, labels, uids = [], [], []
width, height = IMAGE_SIZE, IMAGE_SIZE
MAX_NUM_DOMAINS = NUM_USERS
uid = 0
cache_fname = 'data/english_fnt.pkl'
if os.path.exists(cache_fname):
images, labels, uids = pickle.load(open(cache_fname, "rb"))
else:
for label in tqdm.tqdm(range(NUM_CLASSES)):
label_fldr = "%s/Sample%03d" % (fldr, label + 1)
if not os.path.exists(label_fldr):
continue
for fname in os.listdir(label_fldr):
uid = int(fname.split('-')[1][:-4]) - 1
img = misc.imread(label_fldr + "/" + fname, flatten=True)
img = misc.imresize(img, (height, width))
img = img.astype(np.float32)
img = misc.bytescale(img)
img = img.astype(np.uint8)
assert np.max(img) <= 255 and np.min(img) >= 0, "Max and min of image: %f %f" % (
|
np.max(img)
|
numpy.max
|
import numpy as np
import math as m
import scipy
from scipy.optimize import minimize
def BB2Viewpoint(alpha):
'''Convert the viewpoint angle alpha (in radians) to a discrete viewpoint bin (0-7)
'''
alpha = alpha*180.0/m.pi
if alpha > 360:
alpha = alpha-360
elif alpha < -360:
alpha = alpha+360
viewpoint = -1
threshold = 4.0
if alpha >= -90.0 - threshold and alpha <= -90.0 + threshold :
viewpoint = 0
elif alpha >= -180.0 + threshold and alpha <= -90.0 - threshold :
viewpoint = 1
elif alpha >= 180.0 - threshold or alpha <= -180.0 + threshold :
viewpoint = 2
elif alpha >= 90.0 + threshold and alpha <= 180.0 - threshold :
viewpoint = 3
elif alpha >= 90.0 - threshold and alpha <= 90.0 + threshold :
viewpoint = 4
elif alpha >= 0.0 + threshold and alpha <= 90.0 - threshold :
viewpoint = 5
elif alpha >= 0.0 - threshold and alpha <= 0.0 + threshold :
viewpoint = 6
elif alpha >= -90.0 + threshold and alpha <= 0.0 - threshold :
viewpoint = 7
return viewpoint
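# Example bin assignments (alpha passed in radians, threshold = 4 degrees):
#   alpha = -pi/2 (-90 deg) -> 0, alpha = 0 (0 deg) -> 6,
#   alpha =  pi/2 ( 90 deg) -> 4, alpha = pi (180 deg) -> 2,
# and angles between these axes fall into the in-between bins 1, 3, 5 and 7.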
def viewpoint2vertex(view_point, w, l):
'''Obtain the 3D vertices that correspond to the left, right, and bottom sides of the 2D box
vertex define
6 ____ 7
2 /|___/| / z(l)
/ / / / 3 /
5 /_/_4/ / /----->x(w)
|/___|/ |
1 0 | y(h)
viewpoint define
3 __4__ 5
/|___/|
2 / / / /
/_/_ / / 6
|/___|/
1 0 7
orientation define
180
____ head
/|___/|
-90 / / / /
/_/_ / / 90
|/___|/
0 back
kpt define
1 ____ 2 head
/|___/|
/ / / /
/_/_ / /
|/___|/
0 3 back
viewpoint angle (alpha) define
90
____ head
/|___/|
+-180 / / / /
/_/_ / / 0
|/___|/
-90 back
'''
if view_point == 0:
left_vertex = np.array([-w, 0, -l])/2
right_vertex = np.array([w, 0, -l])/2
bottom_vertex = np.array([w, 0, -l])/2
elif view_point == 1:
left_vertex = np.array([-w, 0, l])/2
right_vertex = np.array([w, 0, -l])/2
bottom_vertex = np.array([-w, 0, -l])/2
elif view_point == 2:
left_vertex = np.array([-w, 0, l])/2
right_vertex = np.array([-w, 0, -l])/2
bottom_vertex = np.array([-w, 0, -l])/2
elif view_point == 3:
left_vertex = np.array([w, 0, l])/2
right_vertex = np.array([-w, 0, -l])/2
bottom_vertex = np.array([-w, 0, l])/2
elif view_point == 4:
left_vertex = np.array([w, 0, l])/2
right_vertex = np.array([-w, 0, l])/2
bottom_vertex = np.array([-w, 0, l])/2
elif view_point == 5:
left_vertex = np.array([w, 0, -l])/2
right_vertex = np.array([-w, 0, l])/2
bottom_vertex = np.array([w, 0, l])/2
elif view_point == 6:
left_vertex = np.array([w, 0, -l])/2
right_vertex = np.array([w, 0, l])/2
bottom_vertex = np.array([w, 0, l])/2
else:
left_vertex = np.array([-w, 0, -l])/2
right_vertex = np.array([w, 0, l])/2
bottom_vertex = np.array([w, 0, -l])/2
return left_vertex, right_vertex, bottom_vertex
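# For instance, viewpoint 0 (alpha near -90 deg) yields left = [-w/2, 0, -l/2] and
# right = bottom = [w/2, 0, -l/2] in the object frame: the z = -l/2 face bounds the
# 2D box horizontally for that viewpoint.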
def kpt2vertex(kpt_type, w, l):
''' Obtain the 3D vertex that corresponds to the keypoint
kpt define
1 ____ 2 head
/|___/|
/ / / /
/_/_ / /
|/___|/
0 3 back
'''
if kpt_type == 0:
kpt_vertex = np.array([-w, 0, -l])/2
elif kpt_type == 1:
kpt_vertex = np.array([-w, 0, l])/2
elif kpt_type == 2:
kpt_vertex = np.array([w, 0, l])/2
elif kpt_type == 3:
kpt_vertex = np.array([w, 0, -l])/2
return kpt_vertex
def kpt2alpha(kpt_pos, kpt_type, box):
'''Convert a keypoint to an approximate viewpoint angle.
It is only used to get the discrete viewpoint, so we don't
need the exact viewpoint angle.
'''
def clamp(n, minn, maxn):
return max(min(maxn, n), minn)
box_width = box[2]-box[0]
if kpt_type == 0:
alpha = -m.pi/2 - m.asin(clamp((kpt_pos-box[0])/box_width,-1,1)) # 0 -> -90, 1 -> -180
elif kpt_type == 1:
alpha = m.pi - m.asin(clamp((kpt_pos-box[0])/box_width,-1,1)) # 0 -> 180, 1 -> 90
elif kpt_type == 2:
alpha = m.pi/2 - m.asin(clamp((kpt_pos-box[0])/box_width,-1,1)) # 0 -> 90, 1 -> 0
elif kpt_type == 3:
alpha = - m.asin(clamp((kpt_pos-box[0])/box_width,-1,1)) # 0 -> 0, 1 -> -90
return alpha
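# Quick check against the conventions above: for a type-3 keypoint, a keypoint on
# the left edge of the 2D box gives alpha = -asin(0) = 0, and one on the right edge
# gives alpha = -asin(1) = -pi/2, matching the "0 -> 0, 1 -> -90" comment.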
def solve_x_y_z_theta_from_kpt(im_shape, calib, alpha, dim, box_left, box_right, depth, kpts):
''' Solve the initial 3D bounding box using the 2D bounding boxes and the keypoints/alpha angle
Inputs:
alpha: viewpoint angle
dim: regressed object dimension in w,h,l
box_left:
box_right: left and right 2D box
kpts: contains the u coordinate of the left borderline,
the u coordinate of the right borderline,
the u coordinate of the perspective keypoint,
and the type of the perspective keypoint (0,1,2,3)
Return:
status: 0: failed, 1: normal
x: solved object pose (x, y, z, theta)
'''
if not isinstance(im_shape, np.ndarray) and not isinstance(im_shape, list):
im_shape = np.array([im_shape, im_shape], dtype=np.float32)
# if kpts[1] - kpts[0] < 3 or box_left[2]-box_left[0]< 10 or box_left[3]-box_left[1]< 10:
# return 0, 0
kpt_pos = kpts[2]
kpt_type = int(kpts[3])
w_max, h_max = im_shape[0], im_shape[1]
truncate_border = 10
w, h, l = dim[0], dim[1], dim[2]
ul, ur, vt, vb = box_left[0], box_left[2], box_left[1], box_left[3]
ul_r, ur_r = box_right[0], box_right[2]
f = calib.p2[0,0]
cx, cy = calib.p2[0,2], calib.p2[1,2]
bl = (calib.p2[0,3] - calib.p3[0,3])/f
# normalize image plane
left_u = (ul - cx)/f
right_u = (ur - cx)/f
top_v = (vt - cy)/f
bottom_v = (vb - cy)/f
kpt_u = (kpt_pos - cx)/f
left_u_right = (ul_r - cx)/f
right_u_right = (ur_r - cx)/f
if ul < 2.0*truncate_border or ur > w_max - 2.0*truncate_border:
truncation = True
else:
truncation = False
if not truncation: # in truncation case, we use alpha instead of keypoints
alpha = kpt2alpha(kpt_pos, kpt_type, box_left)
# get 3d vertex
view_point = BB2Viewpoint(alpha)
left_vertex_o, right_vertex_o, bottom_vertex_o = viewpoint2vertex(view_point, w, l)
kpt_vertex_o = kpt2vertex(kpt_type, w, l)
left_w = left_vertex_o[0]
left_l = left_vertex_o[2]
right_w = right_vertex_o[0]
right_l = right_vertex_o[2]
bottom_w = bottom_vertex_o[0]
bottom_l = bottom_vertex_o[2]
kpt_w = kpt_vertex_o[0]
kpt_l = kpt_vertex_o[2]
def f_kpt(states): # x, y, theta
x = states[0]
y = states[1]
z = states[2]
theta = states[3]
res_ul = (x + np.cos(theta)*left_w + np.sin(theta)*left_l)/(z-np.sin(theta)*left_w+np.cos(theta)*left_l) - left_u
res_ur = (x + np.cos(theta)*right_w + np.sin(theta)*right_l)/(z-
|
np.sin(theta)
|
numpy.sin
|
from __future__ import division, absolute_import, print_function
try:
# Accessing collections abstract classes from collections
# has been deprecated since Python 3.3
import collections.abc as collections_abc
except ImportError:
import collections as collections_abc
import tempfile
import sys
import shutil
import warnings
import operator
import io
import itertools
import functools
import ctypes
import os
import gc
import weakref
import pytest
from contextlib import contextmanager
from numpy.core.numeric import pickle
if sys.version_info[0] >= 3:
import builtins
else:
import __builtin__ as builtins
from decimal import Decimal
import numpy as np
from numpy.compat import strchar, unicode
import numpy.core._multiarray_tests as _multiarray_tests
from numpy.testing import (
assert_, assert_raises, assert_warns, assert_equal, assert_almost_equal,
assert_array_equal, assert_raises_regex, assert_array_almost_equal,
assert_allclose, IS_PYPY, HAS_REFCOUNT, assert_array_less, runstring,
temppath, suppress_warnings
)
from numpy.core.tests._locales import CommaDecimalPointLocale
# Need to test an object that does not fully implement math interface
from datetime import timedelta, datetime
if sys.version_info[:2] > (3, 2):
# In Python 3.3 the representation of empty shape, strides and sub-offsets
# is an empty tuple instead of None.
# https://docs.python.org/dev/whatsnew/3.3.html#api-changes
EMPTY = ()
else:
EMPTY = None
def _aligned_zeros(shape, dtype=float, order="C", align=None):
"""
Allocate a new ndarray with aligned memory.
The ndarray is guaranteed *not* aligned to twice the requested alignment.
Eg, if align=4, guarantees it is not aligned to 8. If align=None uses
dtype.alignment."""
dtype = np.dtype(dtype)
if dtype == np.dtype(object):
# Can't do this, fall back to standard allocation (which
# should always be sufficiently aligned)
if align is not None:
raise ValueError("object array alignment not supported")
return np.zeros(shape, dtype=dtype, order=order)
if align is None:
align = dtype.alignment
if not hasattr(shape, '__len__'):
shape = (shape,)
size = functools.reduce(operator.mul, shape) * dtype.itemsize
buf = np.empty(size + 2*align + 1, np.uint8)
ptr = buf.__array_interface__['data'][0]
offset = ptr % align
if offset != 0:
offset = align - offset
if (ptr % (2*align)) == 0:
offset += align
# Note: slices producing 0-size arrays do not necessarily change
# data pointer --- so we use and allocate size+1
buf = buf[offset:offset+size+1][:-1]
data = np.ndarray(shape, dtype, buf, order=order)
data.fill(0)
return data
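# _aligned_zeros is used by tests that need to control buffer alignment exactly,
# e.g. _aligned_zeros((10,), dtype=np.float64, align=4) returns a zeroed array whose
# data pointer is 4-byte aligned but deliberately not 8-byte aligned.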
class TestFlags(object):
def setup(self):
self.a = np.arange(10)
def test_writeable(self):
mydict = locals()
self.a.flags.writeable = False
assert_raises(ValueError, runstring, 'self.a[0] = 3', mydict)
assert_raises(ValueError, runstring, 'self.a[0:1].itemset(3)', mydict)
self.a.flags.writeable = True
self.a[0] = 5
self.a[0] = 0
def test_writeable_from_readonly(self):
# gh-9440 - make sure fromstring, from buffer on readonly buffers
# set writeable False
data = b'\x00' * 100
vals = np.frombuffer(data, 'B')
assert_raises(ValueError, vals.setflags, write=True)
types = np.dtype( [('vals', 'u1'), ('res3', 'S4')] )
values = np.core.records.fromstring(data, types)
vals = values['vals']
assert_raises(ValueError, vals.setflags, write=True)
def test_writeable_from_buffer(self):
data = bytearray(b'\x00' * 100)
vals = np.frombuffer(data, 'B')
assert_(vals.flags.writeable)
vals.setflags(write=False)
assert_(vals.flags.writeable is False)
vals.setflags(write=True)
assert_(vals.flags.writeable)
types = np.dtype( [('vals', 'u1'), ('res3', 'S4')] )
values = np.core.records.fromstring(data, types)
vals = values['vals']
assert_(vals.flags.writeable)
vals.setflags(write=False)
assert_(vals.flags.writeable is False)
vals.setflags(write=True)
assert_(vals.flags.writeable)
@pytest.mark.skipif(sys.version_info[0] < 3, reason="Python 2 always copies")
def test_writeable_pickle(self):
import pickle
# Small arrays will be copied without setting base.
# See condition for using PyArray_SetBaseObject in
# array_setstate.
a = np.arange(1000)
for v in range(pickle.HIGHEST_PROTOCOL):
vals = pickle.loads(pickle.dumps(a, v))
assert_(vals.flags.writeable)
assert_(isinstance(vals.base, bytes))
def test_otherflags(self):
assert_equal(self.a.flags.carray, True)
assert_equal(self.a.flags['C'], True)
assert_equal(self.a.flags.farray, False)
assert_equal(self.a.flags.behaved, True)
assert_equal(self.a.flags.fnc, False)
assert_equal(self.a.flags.forc, True)
assert_equal(self.a.flags.owndata, True)
assert_equal(self.a.flags.writeable, True)
assert_equal(self.a.flags.aligned, True)
with assert_warns(DeprecationWarning):
assert_equal(self.a.flags.updateifcopy, False)
with assert_warns(DeprecationWarning):
assert_equal(self.a.flags['U'], False)
assert_equal(self.a.flags['UPDATEIFCOPY'], False)
assert_equal(self.a.flags.writebackifcopy, False)
assert_equal(self.a.flags['X'], False)
assert_equal(self.a.flags['WRITEBACKIFCOPY'], False)
def test_string_align(self):
a = np.zeros(4, dtype=np.dtype('|S4'))
assert_(a.flags.aligned)
# not power of two are accessed byte-wise and thus considered aligned
a = np.zeros(5, dtype=np.dtype('|S4'))
assert_(a.flags.aligned)
def test_void_align(self):
a = np.zeros(4, dtype=np.dtype([("a", "i4"), ("b", "i4")]))
assert_(a.flags.aligned)
class TestHash(object):
# see #3793
def test_int(self):
for st, ut, s in [(np.int8, np.uint8, 8),
(np.int16, np.uint16, 16),
(np.int32, np.uint32, 32),
(np.int64, np.uint64, 64)]:
for i in range(1, s):
assert_equal(hash(st(-2**i)), hash(-2**i),
err_msg="%r: -2**%d" % (st, i))
assert_equal(hash(st(2**(i - 1))), hash(2**(i - 1)),
err_msg="%r: 2**%d" % (st, i - 1))
assert_equal(hash(st(2**i - 1)), hash(2**i - 1),
err_msg="%r: 2**%d - 1" % (st, i))
i = max(i - 1, 1)
assert_equal(hash(ut(2**(i - 1))), hash(2**(i - 1)),
err_msg="%r: 2**%d" % (ut, i - 1))
assert_equal(hash(ut(2**i - 1)), hash(2**i - 1),
err_msg="%r: 2**%d - 1" % (ut, i))
class TestAttributes(object):
def setup(self):
self.one = np.arange(10)
self.two = np.arange(20).reshape(4, 5)
self.three = np.arange(60, dtype=np.float64).reshape(2, 5, 6)
def test_attributes(self):
assert_equal(self.one.shape, (10,))
assert_equal(self.two.shape, (4, 5))
assert_equal(self.three.shape, (2, 5, 6))
self.three.shape = (10, 3, 2)
assert_equal(self.three.shape, (10, 3, 2))
self.three.shape = (2, 5, 6)
assert_equal(self.one.strides, (self.one.itemsize,))
num = self.two.itemsize
assert_equal(self.two.strides, (5*num, num))
num = self.three.itemsize
assert_equal(self.three.strides, (30*num, 6*num, num))
assert_equal(self.one.ndim, 1)
assert_equal(self.two.ndim, 2)
assert_equal(self.three.ndim, 3)
num = self.two.itemsize
assert_equal(self.two.size, 20)
assert_equal(self.two.nbytes, 20*num)
assert_equal(self.two.itemsize, self.two.dtype.itemsize)
assert_equal(self.two.base, np.arange(20))
def test_dtypeattr(self):
assert_equal(self.one.dtype, np.dtype(np.int_))
assert_equal(self.three.dtype, np.dtype(np.float_))
assert_equal(self.one.dtype.char, 'l')
assert_equal(self.three.dtype.char, 'd')
assert_(self.three.dtype.str[0] in '<>')
assert_equal(self.one.dtype.str[1], 'i')
assert_equal(self.three.dtype.str[1], 'f')
def test_int_subclassing(self):
# Regression test for https://github.com/numpy/numpy/pull/3526
numpy_int = np.int_(0)
if sys.version_info[0] >= 3:
# On Py3k int_ should not inherit from int, because it's not
# fixed-width anymore
assert_equal(isinstance(numpy_int, int), False)
else:
# Otherwise, it should inherit from int...
assert_equal(isinstance(numpy_int, int), True)
# ... and fast-path checks on C-API level should also work
from numpy.core._multiarray_tests import test_int_subclass
assert_equal(test_int_subclass(numpy_int), True)
def test_stridesattr(self):
x = self.one
def make_array(size, offset, strides):
return np.ndarray(size, buffer=x, dtype=int,
offset=offset*x.itemsize,
strides=strides*x.itemsize)
assert_equal(make_array(4, 4, -1), np.array([4, 3, 2, 1]))
assert_raises(ValueError, make_array, 4, 4, -2)
assert_raises(ValueError, make_array, 4, 2, -1)
assert_raises(ValueError, make_array, 8, 3, 1)
assert_equal(make_array(8, 3, 0), np.array([3]*8))
# Check behavior reported in gh-2503:
assert_raises(ValueError, make_array, (2, 3), 5, np.array([-2, -3]))
make_array(0, 0, 10)
def test_set_stridesattr(self):
x = self.one
def make_array(size, offset, strides):
try:
r = np.ndarray([size], dtype=int, buffer=x,
offset=offset*x.itemsize)
except Exception as e:
raise RuntimeError(e)
r.strides = strides = strides*x.itemsize
return r
assert_equal(make_array(4, 4, -1), np.array([4, 3, 2, 1]))
assert_equal(make_array(7, 3, 1), np.array([3, 4, 5, 6, 7, 8, 9]))
assert_raises(ValueError, make_array, 4, 4, -2)
assert_raises(ValueError, make_array, 4, 2, -1)
assert_raises(RuntimeError, make_array, 8, 3, 1)
# Check that the true extent of the array is used.
# Test relies on as_strided base not exposing a buffer.
x = np.lib.stride_tricks.as_strided(np.arange(1), (10, 10), (0, 0))
def set_strides(arr, strides):
arr.strides = strides
assert_raises(ValueError, set_strides, x, (10*x.itemsize, x.itemsize))
# Test for offset calculations:
x = np.lib.stride_tricks.as_strided(np.arange(10, dtype=np.int8)[-1],
shape=(10,), strides=(-1,))
assert_raises(ValueError, set_strides, x[::-1], -1)
a = x[::-1]
a.strides = 1
a[::2].strides = 2
def test_fill(self):
for t in "?bhilqpBHILQPfdgFDGO":
x = np.empty((3, 2, 1), t)
y = np.empty((3, 2, 1), t)
x.fill(1)
y[...] = 1
assert_equal(x, y)
def test_fill_max_uint64(self):
x = np.empty((3, 2, 1), dtype=np.uint64)
y = np.empty((3, 2, 1), dtype=np.uint64)
value = 2**64 - 1
y[...] = value
x.fill(value)
assert_array_equal(x, y)
def test_fill_struct_array(self):
# Filling from a scalar
x = np.array([(0, 0.0), (1, 1.0)], dtype='i4,f8')
x.fill(x[0])
assert_equal(x['f1'][1], x['f1'][0])
# Filling from a tuple that can be converted
# to a scalar
x = np.zeros(2, dtype=[('a', 'f8'), ('b', 'i4')])
x.fill((3.5, -2))
assert_array_equal(x['a'], [3.5, 3.5])
assert_array_equal(x['b'], [-2, -2])
class TestArrayConstruction(object):
def test_array(self):
d = np.ones(6)
r = np.array([d, d])
assert_equal(r, np.ones((2, 6)))
d = np.ones(6)
tgt = np.ones((2, 6))
r = np.array([d, d])
assert_equal(r, tgt)
tgt[1] = 2
r = np.array([d, d + 1])
assert_equal(r, tgt)
d = np.ones(6)
r = np.array([[d, d]])
assert_equal(r, np.ones((1, 2, 6)))
d = np.ones(6)
r = np.array([[d, d], [d, d]])
assert_equal(r, np.ones((2, 2, 6)))
d = np.ones((6, 6))
r = np.array([d, d])
assert_equal(r, np.ones((2, 6, 6)))
d = np.ones((6, ))
r = np.array([[d, d + 1], d + 2])
assert_equal(len(r), 2)
assert_equal(r[0], [d, d + 1])
assert_equal(r[1], d + 2)
tgt = np.ones((2, 3), dtype=bool)
tgt[0, 2] = False
tgt[1, 0:2] = False
r = np.array([[True, True, False], [False, False, True]])
assert_equal(r, tgt)
r = np.array([[True, False], [True, False], [False, True]])
assert_equal(r, tgt.T)
def test_array_empty(self):
assert_raises(TypeError, np.array)
def test_array_copy_false(self):
d = np.array([1, 2, 3])
e = np.array(d, copy=False)
d[1] = 3
assert_array_equal(e, [1, 3, 3])
e = np.array(d, copy=False, order='F')
d[1] = 4
assert_array_equal(e, [1, 4, 3])
e[2] = 7
assert_array_equal(d, [1, 4, 7])
def test_array_copy_true(self):
d = np.array([[1,2,3], [1, 2, 3]])
e = np.array(d, copy=True)
d[0, 1] = 3
e[0, 2] = -7
assert_array_equal(e, [[1, 2, -7], [1, 2, 3]])
assert_array_equal(d, [[1, 3, 3], [1, 2, 3]])
e = np.array(d, copy=True, order='F')
d[0, 1] = 5
e[0, 2] = 7
assert_array_equal(e, [[1, 3, 7], [1, 2, 3]])
assert_array_equal(d, [[1, 5, 3], [1,2,3]])
def test_array_cont(self):
d = np.ones(10)[::2]
assert_(np.ascontiguousarray(d).flags.c_contiguous)
assert_(np.ascontiguousarray(d).flags.f_contiguous)
assert_(np.asfortranarray(d).flags.c_contiguous)
assert_(np.asfortranarray(d).flags.f_contiguous)
d = np.ones((10, 10))[::2,::2]
assert_(np.ascontiguousarray(d).flags.c_contiguous)
assert_(np.asfortranarray(d).flags.f_contiguous)
class TestAssignment(object):
def test_assignment_broadcasting(self):
a = np.arange(6).reshape(2, 3)
# Broadcasting the input to the output
a[...] = np.arange(3)
assert_equal(a, [[0, 1, 2], [0, 1, 2]])
a[...] = np.arange(2).reshape(2, 1)
assert_equal(a, [[0, 0, 0], [1, 1, 1]])
# For compatibility with <= 1.5, a limited version of broadcasting
# the output to the input.
#
# This behavior is inconsistent with NumPy broadcasting
# in general, because it only uses one of the two broadcasting
# rules (adding a new "1" dimension to the left of the shape),
# applied to the output instead of an input. In NumPy 2.0, this kind
# of broadcasting assignment will likely be disallowed.
a[...] = np.arange(6)[::-1].reshape(1, 2, 3)
assert_equal(a, [[5, 4, 3], [2, 1, 0]])
# The other type of broadcasting would require a reduction operation.
def assign(a, b):
a[...] = b
assert_raises(ValueError, assign, a, np.arange(12).reshape(2, 2, 3))
def test_assignment_errors(self):
# Address issue #2276
class C:
pass
a = np.zeros(1)
def assign(v):
a[0] = v
assert_raises((AttributeError, TypeError), assign, C())
assert_raises(ValueError, assign, [1])
def test_unicode_assignment(self):
# gh-5049
from numpy.core.numeric import set_string_function
@contextmanager
def inject_str(s):
""" replace ndarray.__str__ temporarily """
set_string_function(lambda x: s, repr=False)
try:
yield
finally:
set_string_function(None, repr=False)
a1d = np.array([u'test'])
a0d = np.array(u'done')
with inject_str(u'bad'):
a1d[0] = a0d # previously this would invoke __str__
assert_equal(a1d[0], u'done')
# this would crash for the same reason
np.array([np.array(u'\xe5\xe4\xf6')])
def test_stringlike_empty_list(self):
# gh-8902
u = np.array([u'done'])
b = np.array([b'done'])
class bad_sequence(object):
def __getitem__(self): pass
def __len__(self): raise RuntimeError
assert_raises(ValueError, operator.setitem, u, 0, [])
assert_raises(ValueError, operator.setitem, b, 0, [])
assert_raises(ValueError, operator.setitem, u, 0, bad_sequence())
assert_raises(ValueError, operator.setitem, b, 0, bad_sequence())
def test_longdouble_assignment(self):
# only relevant if longdouble is larger than float
# we're looking for loss of precision
for dtype in (np.longdouble, np.longcomplex):
# gh-8902
tinyb = np.nextafter(np.longdouble(0), 1).astype(dtype)
tinya = np.nextafter(np.longdouble(0), -1).astype(dtype)
# construction
tiny1d = np.array([tinya])
assert_equal(tiny1d[0], tinya)
# scalar = scalar
tiny1d[0] = tinyb
assert_equal(tiny1d[0], tinyb)
# 0d = scalar
tiny1d[0, ...] = tinya
assert_equal(tiny1d[0], tinya)
# 0d = 0d
tiny1d[0, ...] = tinyb[...]
assert_equal(tiny1d[0], tinyb)
# scalar = 0d
tiny1d[0] = tinyb[...]
assert_equal(tiny1d[0], tinyb)
arr = np.array([np.array(tinya)])
assert_equal(arr[0], tinya)
def test_cast_to_string(self):
# cast to str should do "str(scalar)", not "str(scalar.item())"
# Example: In python2, str(float) is truncated, so we want to avoid
# str(np.float64(...).item()) as this would incorrectly truncate.
a = np.zeros(1, dtype='S20')
a[:] = np.array(['1.12345678901234567890'], dtype='f8')
assert_equal(a[0], b"1.1234567890123457")
class TestDtypedescr(object):
def test_construction(self):
d1 = np.dtype('i4')
assert_equal(d1, np.dtype(np.int32))
d2 = np.dtype('f8')
assert_equal(d2, np.dtype(np.float64))
def test_byteorders(self):
assert_(np.dtype('<i4') != np.dtype('>i4'))
assert_(np.dtype([('a', '<i4')]) != np.dtype([('a', '>i4')]))
def test_structured_non_void(self):
fields = [('a', '<i2'), ('b', '<i2')]
dt_int = np.dtype(('i4', fields))
assert_equal(str(dt_int), "(numpy.int32, [('a', '<i2'), ('b', '<i2')])")
# gh-9821
arr_int = np.zeros(4, dt_int)
assert_equal(repr(arr_int),
"array([0, 0, 0, 0], dtype=(numpy.int32, [('a', '<i2'), ('b', '<i2')]))")
class TestZeroRank(object):
def setup(self):
self.d = np.array(0), np.array('x', object)
def test_ellipsis_subscript(self):
a, b = self.d
assert_equal(a[...], 0)
assert_equal(b[...], 'x')
assert_(a[...].base is a) # `a[...] is a` in numpy <1.9.
assert_(b[...].base is b) # `b[...] is b` in numpy <1.9.
def test_empty_subscript(self):
a, b = self.d
assert_equal(a[()], 0)
assert_equal(b[()], 'x')
assert_(type(a[()]) is a.dtype.type)
assert_(type(b[()]) is str)
def test_invalid_subscript(self):
a, b = self.d
assert_raises(IndexError, lambda x: x[0], a)
assert_raises(IndexError, lambda x: x[0], b)
assert_raises(IndexError, lambda x: x[np.array([], int)], a)
assert_raises(IndexError, lambda x: x[np.array([], int)], b)
def test_ellipsis_subscript_assignment(self):
a, b = self.d
a[...] = 42
assert_equal(a, 42)
b[...] = ''
assert_equal(b.item(), '')
def test_empty_subscript_assignment(self):
a, b = self.d
a[()] = 42
assert_equal(a, 42)
b[()] = ''
assert_equal(b.item(), '')
def test_invalid_subscript_assignment(self):
a, b = self.d
def assign(x, i, v):
x[i] = v
assert_raises(IndexError, assign, a, 0, 42)
assert_raises(IndexError, assign, b, 0, '')
assert_raises(ValueError, assign, a, (), '')
def test_newaxis(self):
a, b = self.d
assert_equal(a[np.newaxis].shape, (1,))
assert_equal(a[..., np.newaxis].shape, (1,))
assert_equal(a[np.newaxis, ...].shape, (1,))
assert_equal(a[..., np.newaxis].shape, (1,))
assert_equal(a[np.newaxis, ..., np.newaxis].shape, (1, 1))
assert_equal(a[..., np.newaxis, np.newaxis].shape, (1, 1))
assert_equal(a[np.newaxis, np.newaxis, ...].shape, (1, 1))
assert_equal(a[(np.newaxis,)*10].shape, (1,)*10)
def test_invalid_newaxis(self):
a, b = self.d
def subscript(x, i):
x[i]
assert_raises(IndexError, subscript, a, (np.newaxis, 0))
assert_raises(IndexError, subscript, a, (np.newaxis,)*50)
def test_constructor(self):
x = np.ndarray(())
x[()] = 5
assert_equal(x[()], 5)
y = np.ndarray((), buffer=x)
y[()] = 6
assert_equal(x[()], 6)
def test_output(self):
x = np.array(2)
assert_raises(ValueError, np.add, x, [1], x)
def test_real_imag(self):
# contiguity checks are for gh-11245
x = np.array(1j)
xr = x.real
xi = x.imag
assert_equal(xr, np.array(0))
assert_(type(xr) is np.ndarray)
assert_equal(xr.flags.contiguous, True)
assert_equal(xr.flags.f_contiguous, True)
assert_equal(xi, np.array(1))
assert_(type(xi) is np.ndarray)
assert_equal(xi.flags.contiguous, True)
assert_equal(xi.flags.f_contiguous, True)
class TestScalarIndexing(object):
def setup(self):
self.d = np.array([0, 1])[0]
def test_ellipsis_subscript(self):
a = self.d
assert_equal(a[...], 0)
assert_equal(a[...].shape, ())
def test_empty_subscript(self):
a = self.d
assert_equal(a[()], 0)
assert_equal(a[()].shape, ())
def test_invalid_subscript(self):
a = self.d
assert_raises(IndexError, lambda x: x[0], a)
assert_raises(IndexError, lambda x: x[np.array([], int)], a)
def test_invalid_subscript_assignment(self):
a = self.d
def assign(x, i, v):
x[i] = v
assert_raises(TypeError, assign, a, 0, 42)
def test_newaxis(self):
a = self.d
assert_equal(a[np.newaxis].shape, (1,))
assert_equal(a[..., np.newaxis].shape, (1,))
assert_equal(a[np.newaxis, ...].shape, (1,))
assert_equal(a[..., np.newaxis].shape, (1,))
assert_equal(a[np.newaxis, ..., np.newaxis].shape, (1, 1))
assert_equal(a[..., np.newaxis, np.newaxis].shape, (1, 1))
assert_equal(a[np.newaxis, np.newaxis, ...].shape, (1, 1))
assert_equal(a[(np.newaxis,)*10].shape, (1,)*10)
def test_invalid_newaxis(self):
a = self.d
def subscript(x, i):
x[i]
assert_raises(IndexError, subscript, a, (np.newaxis, 0))
assert_raises(IndexError, subscript, a, (np.newaxis,)*50)
def test_overlapping_assignment(self):
# With positive strides
a = np.arange(4)
a[:-1] = a[1:]
assert_equal(a, [1, 2, 3, 3])
a = np.arange(4)
a[1:] = a[:-1]
assert_equal(a, [0, 0, 1, 2])
# With positive and negative strides
a = np.arange(4)
a[:] = a[::-1]
assert_equal(a, [3, 2, 1, 0])
a = np.arange(6).reshape(2, 3)
a[::-1,:] = a[:, ::-1]
assert_equal(a, [[5, 4, 3], [2, 1, 0]])
a = np.arange(6).reshape(2, 3)
a[::-1, ::-1] = a[:, ::-1]
assert_equal(a, [[3, 4, 5], [0, 1, 2]])
# With just one element overlapping
a = np.arange(5)
a[:3] = a[2:]
assert_equal(a, [2, 3, 4, 3, 4])
a = np.arange(5)
a[2:] = a[:3]
assert_equal(a, [0, 1, 0, 1, 2])
a = np.arange(5)
a[2::-1] = a[2:]
assert_equal(a, [4, 3, 2, 3, 4])
a = np.arange(5)
a[2:] = a[2::-1]
assert_equal(a, [0, 1, 2, 1, 0])
a = np.arange(5)
a[2::-1] = a[:1:-1]
assert_equal(a, [2, 3, 4, 3, 4])
a = np.arange(5)
a[:1:-1] = a[2::-1]
assert_equal(a, [0, 1, 0, 1, 2])
class TestCreation(object):
"""
Test the np.array constructor
"""
def test_from_attribute(self):
class x(object):
def __array__(self, dtype=None):
pass
assert_raises(ValueError, np.array, x())
def test_from_string(self):
types = np.typecodes['AllInteger'] + np.typecodes['Float']
nstr = ['123', '123']
result = np.array([123, 123], dtype=int)
for type in types:
msg = 'String conversion for %s' % type
assert_equal(np.array(nstr, dtype=type), result, err_msg=msg)
def test_void(self):
arr = np.array([], dtype='V')
assert_equal(arr.dtype.kind, 'V')
def test_too_big_error(self):
# 45341 is the smallest integer greater than sqrt(2**31 - 1).
# 3037000500 is the smallest integer greater than sqrt(2**63 - 1).
# We want to make sure that the square byte array with those dimensions
# is too big on 32 or 64 bit systems respectively.
if np.iinfo('intp').max == 2**31 - 1:
shape = (46341, 46341)
elif np.iinfo('intp').max == 2**63 - 1:
shape = (3037000500, 3037000500)
else:
return
assert_raises(ValueError, np.empty, shape, dtype=np.int8)
assert_raises(ValueError, np.zeros, shape, dtype=np.int8)
assert_raises(ValueError, np.ones, shape, dtype=np.int8)
def test_zeros(self):
types = np.typecodes['AllInteger'] + np.typecodes['AllFloat']
for dt in types:
d = np.zeros((13,), dtype=dt)
assert_equal(np.count_nonzero(d), 0)
# true for ieee floats
assert_equal(d.sum(), 0)
assert_(not d.any())
d = np.zeros(2, dtype='(2,4)i4')
assert_equal(np.count_nonzero(d), 0)
assert_equal(d.sum(), 0)
assert_(not d.any())
d = np.zeros(2, dtype='4i4')
assert_equal(np.count_nonzero(d), 0)
assert_equal(d.sum(), 0)
assert_(not d.any())
d = np.zeros(2, dtype='(2,4)i4, (2,4)i4')
assert_equal(np.count_nonzero(d), 0)
@pytest.mark.slow
def test_zeros_big(self):
# test big array as they might be allocated different by the system
types = np.typecodes['AllInteger'] + np.typecodes['AllFloat']
for dt in types:
d = np.zeros((30 * 1024**2,), dtype=dt)
assert_(not d.any())
# This test can fail on 32-bit systems due to insufficient
# contiguous memory. Deallocating the previous array increases the
# chance of success.
del(d)
def test_zeros_obj(self):
# test initialization from PyLong(0)
d = np.zeros((13,), dtype=object)
assert_array_equal(d, [0] * 13)
assert_equal(np.count_nonzero(d), 0)
def test_zeros_obj_obj(self):
d = np.zeros(10, dtype=[('k', object, 2)])
assert_array_equal(d['k'], 0)
def test_zeros_like_like_zeros(self):
# test zeros_like returns the same as zeros
for c in np.typecodes['All']:
if c == 'V':
continue
d = np.zeros((3,3), dtype=c)
assert_array_equal(np.zeros_like(d), d)
assert_equal(np.zeros_like(d).dtype, d.dtype)
# explicitly check some special cases
d = np.zeros((3,3), dtype='S5')
assert_array_equal(np.zeros_like(d), d)
assert_equal(np.zeros_like(d).dtype, d.dtype)
d = np.zeros((3,3), dtype='U5')
assert_array_equal(np.zeros_like(d), d)
assert_equal(np.zeros_like(d).dtype, d.dtype)
d = np.zeros((3,3), dtype='<i4')
assert_array_equal(np.zeros_like(d), d)
assert_equal(np.zeros_like(d).dtype, d.dtype)
d = np.zeros((3,3), dtype='>i4')
assert_array_equal(np.zeros_like(d), d)
assert_equal(np.zeros_like(d).dtype, d.dtype)
d = np.zeros((3,3), dtype='<M8[s]')
assert_array_equal(np.zeros_like(d), d)
assert_equal(np.zeros_like(d).dtype, d.dtype)
d = np.zeros((3,3), dtype='>M8[s]')
assert_array_equal(np.zeros_like(d), d)
assert_equal(np.zeros_like(d).dtype, d.dtype)
d = np.zeros((3,3), dtype='f4,f4')
assert_array_equal(np.zeros_like(d), d)
assert_equal(np.zeros_like(d).dtype, d.dtype)
def test_empty_unicode(self):
# don't throw decode errors on garbage memory
for i in range(5, 100, 5):
d = np.empty(i, dtype='U')
str(d)
def test_sequence_non_homogenous(self):
assert_equal(np.array([4, 2**80]).dtype, object)
assert_equal(np.array([4, 2**80, 4]).dtype, object)
assert_equal(np.array([2**80, 4]).dtype, object)
assert_equal(np.array([2**80] * 3).dtype, object)
assert_equal(np.array([[1, 1],[1j, 1j]]).dtype, complex)
assert_equal(np.array([[1j, 1j],[1, 1]]).dtype, complex)
assert_equal(np.array([[1, 1, 1],[1, 1j, 1.], [1, 1, 1]]).dtype, complex)
@pytest.mark.skipif(sys.version_info[0] >= 3, reason="Not Python 2")
def test_sequence_long(self):
assert_equal(np.array([long(4), long(4)]).dtype, np.long)
assert_equal(np.array([long(4), 2**80]).dtype, object)
assert_equal(np.array([long(4), 2**80, long(4)]).dtype, object)
assert_equal(np.array([2**80, long(4)]).dtype, object)
def test_non_sequence_sequence(self):
"""Should not segfault.
Class Fail breaks the sequence protocol for new style classes, i.e.,
those derived from object. Class Map is a mapping type indicated by
raising a ValueError. At some point we may raise a warning instead
of an error in the Fail case.
"""
class Fail(object):
def __len__(self):
return 1
def __getitem__(self, index):
raise ValueError()
class Map(object):
def __len__(self):
return 1
def __getitem__(self, index):
raise KeyError()
a = np.array([Map()])
assert_(a.shape == (1,))
assert_(a.dtype == np.dtype(object))
assert_raises(ValueError, np.array, [Fail()])
def test_no_len_object_type(self):
# gh-5100, want object array from iterable object without len()
class Point2:
def __init__(self):
pass
def __getitem__(self, ind):
if ind in [0, 1]:
return ind
else:
raise IndexError()
d = np.array([Point2(), Point2(), Point2()])
assert_equal(d.dtype, np.dtype(object))
def test_false_len_sequence(self):
# gh-7264, segfault for this example
class C:
def __getitem__(self, i):
raise IndexError
def __len__(self):
return 42
assert_raises(ValueError, np.array, C()) # segfault?
def test_failed_len_sequence(self):
# gh-7393
class A(object):
def __init__(self, data):
self._data = data
def __getitem__(self, item):
return type(self)(self._data[item])
def __len__(self):
return len(self._data)
# len(d) should give 3, but len(d[0]) will fail
d = A([1,2,3])
assert_equal(len(np.array(d)), 3)
def test_array_too_big(self):
# Test that array creation succeeds for arrays addressable by intp
# on the byte level and fails for too large arrays.
buf = np.zeros(100)
max_bytes = np.iinfo(np.intp).max
for dtype in ["intp", "S20", "b"]:
dtype = np.dtype(dtype)
itemsize = dtype.itemsize
np.ndarray(buffer=buf, strides=(0,),
shape=(max_bytes//itemsize,), dtype=dtype)
assert_raises(ValueError, np.ndarray, buffer=buf, strides=(0,),
shape=(max_bytes//itemsize + 1,), dtype=dtype)
def test_jagged_ndim_object(self):
# Lists of mismatching depths are treated as object arrays
a = np.array([[1], 2, 3])
assert_equal(a.shape, (3,))
assert_equal(a.dtype, object)
a = np.array([1, [2], 3])
assert_equal(a.shape, (3,))
assert_equal(a.dtype, object)
a = np.array([1, 2, [3]])
assert_equal(a.shape, (3,))
assert_equal(a.dtype, object)
def test_jagged_shape_object(self):
# The jagged dimension of a list is turned into an object array
a = np.array([[1, 1], [2], [3]])
assert_equal(a.shape, (3,))
assert_equal(a.dtype, object)
a = np.array([[1], [2, 2], [3]])
assert_equal(a.shape, (3,))
assert_equal(a.dtype, object)
a = np.array([[1], [2], [3, 3]])
assert_equal(a.shape, (3,))
assert_equal(a.dtype, object)
class TestStructured(object):
def test_subarray_field_access(self):
a = np.zeros((3, 5), dtype=[('a', ('i4', (2, 2)))])
a['a'] = np.arange(60).reshape(3, 5, 2, 2)
# Since the subarray is always in C-order, a transpose
# does not swap the subarray:
assert_array_equal(a.T['a'], a['a'].transpose(1, 0, 2, 3))
# In Fortran order, the subarray gets appended
# like in all other cases, not prepended as a special case
b = a.copy(order='F')
assert_equal(a['a'].shape, b['a'].shape)
assert_equal(a.T['a'].shape, a.T.copy()['a'].shape)
def test_subarray_comparison(self):
# Check that comparisons between record arrays with
# multi-dimensional field types work properly
a = np.rec.fromrecords(
[([1, 2, 3], 'a', [[1, 2], [3, 4]]), ([3, 3, 3], 'b', [[0, 0], [0, 0]])],
dtype=[('a', ('f4', 3)), ('b', object), ('c', ('i4', (2, 2)))])
b = a.copy()
assert_equal(a == b, [True, True])
assert_equal(a != b, [False, False])
b[1].b = 'c'
assert_equal(a == b, [True, False])
assert_equal(a != b, [False, True])
for i in range(3):
b[0].a = a[0].a
b[0].a[i] = 5
assert_equal(a == b, [False, False])
assert_equal(a != b, [True, True])
for i in range(2):
for j in range(2):
b = a.copy()
b[0].c[i, j] = 10
assert_equal(a == b, [False, True])
assert_equal(a != b, [True, False])
# Check that broadcasting with a subarray works
a = np.array([[(0,)], [(1,)]], dtype=[('a', 'f8')])
b = np.array([(0,), (0,), (1,)], dtype=[('a', 'f8')])
assert_equal(a == b, [[True, True, False], [False, False, True]])
assert_equal(b == a, [[True, True, False], [False, False, True]])
a = np.array([[(0,)], [(1,)]], dtype=[('a', 'f8', (1,))])
b = np.array([(0,), (0,), (1,)], dtype=[('a', 'f8', (1,))])
assert_equal(a == b, [[True, True, False], [False, False, True]])
assert_equal(b == a, [[True, True, False], [False, False, True]])
a = np.array([[([0, 0],)], [([1, 1],)]], dtype=[('a', 'f8', (2,))])
b = np.array([([0, 0],), ([0, 1],), ([1, 1],)], dtype=[('a', 'f8', (2,))])
assert_equal(a == b, [[True, False, False], [False, False, True]])
assert_equal(b == a, [[True, False, False], [False, False, True]])
# Check that broadcasting Fortran-style arrays with a subarray work
a = np.array([[([0, 0],)], [([1, 1],)]], dtype=[('a', 'f8', (2,))], order='F')
b = np.array([([0, 0],), ([0, 1],), ([1, 1],)], dtype=[('a', 'f8', (2,))])
assert_equal(a == b, [[True, False, False], [False, False, True]])
assert_equal(b == a, [[True, False, False], [False, False, True]])
# Check that incompatible sub-array shapes don't result to broadcasting
x = np.zeros((1,), dtype=[('a', ('f4', (1, 2))), ('b', 'i1')])
y = np.zeros((1,), dtype=[('a', ('f4', (2,))), ('b', 'i1')])
# This comparison invokes deprecated behaviour, and will probably
# start raising an error eventually. What we really care about in this
# test is just that it doesn't return True.
with suppress_warnings() as sup:
sup.filter(FutureWarning, "elementwise == comparison failed")
assert_equal(x == y, False)
x = np.zeros((1,), dtype=[('a', ('f4', (2, 1))), ('b', 'i1')])
y = np.zeros((1,), dtype=[('a', ('f4', (2,))), ('b', 'i1')])
# This comparison invokes deprecated behaviour, and will probably
# start raising an error eventually. What we really care about in this
# test is just that it doesn't return True.
with suppress_warnings() as sup:
sup.filter(FutureWarning, "elementwise == comparison failed")
assert_equal(x == y, False)
# Check that structured arrays that are different only in
# byte-order work
a = np.array([(5, 42), (10, 1)], dtype=[('a', '>i8'), ('b', '<f8')])
b = np.array([(5, 43), (10, 1)], dtype=[('a', '<i8'), ('b', '>f8')])
assert_equal(a == b, [False, True])
def test_casting(self):
# Check that casting a structured array to change its byte order
# works
a = np.array([(1,)], dtype=[('a', '<i4')])
assert_(np.can_cast(a.dtype, [('a', '>i4')], casting='unsafe'))
b = a.astype([('a', '>i4')])
assert_equal(b, a.byteswap().newbyteorder())
assert_equal(a['a'][0], b['a'][0])
# Check that equality comparison works on structured arrays if
# they are 'equiv'-castable
a = np.array([(5, 42), (10, 1)], dtype=[('a', '>i4'), ('b', '<f8')])
b = np.array([(5, 42), (10, 1)], dtype=[('a', '<i4'), ('b', '>f8')])
assert_(np.can_cast(a.dtype, b.dtype, casting='equiv'))
assert_equal(a == b, [True, True])
# Check that 'equiv' casting can change byte order
assert_(np.can_cast(a.dtype, b.dtype, casting='equiv'))
c = a.astype(b.dtype, casting='equiv')
assert_equal(a == c, [True, True])
# Check that 'safe' casting can change byte order and up-cast
# fields
t = [('a', '<i8'), ('b', '>f8')]
assert_(np.can_cast(a.dtype, t, casting='safe'))
c = a.astype(t, casting='safe')
assert_equal((c == np.array([(5, 42), (10, 1)], dtype=t)),
[True, True])
# Check that 'same_kind' casting can change byte order and
# change field widths within a "kind"
t = [('a', '<i4'), ('b', '>f4')]
assert_(np.can_cast(a.dtype, t, casting='same_kind'))
c = a.astype(t, casting='same_kind')
assert_equal((c == np.array([(5, 42), (10, 1)], dtype=t)),
[True, True])
# Check that casting fails if the casting rule should fail on
# any of the fields
t = [('a', '>i8'), ('b', '<f4')]
assert_(not np.can_cast(a.dtype, t, casting='safe'))
assert_raises(TypeError, a.astype, t, casting='safe')
t = [('a', '>i2'), ('b', '<f8')]
assert_(not np.can_cast(a.dtype, t, casting='equiv'))
assert_raises(TypeError, a.astype, t, casting='equiv')
t = [('a', '>i8'), ('b', '<i2')]
assert_(not np.can_cast(a.dtype, t, casting='same_kind'))
assert_raises(TypeError, a.astype, t, casting='same_kind')
assert_(not np.can_cast(a.dtype, b.dtype, casting='no'))
assert_raises(TypeError, a.astype, b.dtype, casting='no')
# Check that non-'unsafe' casting can't change the set of field names
for casting in ['no', 'safe', 'equiv', 'same_kind']:
t = [('a', '>i4')]
assert_(not np.can_cast(a.dtype, t, casting=casting))
t = [('a', '>i4'), ('b', '<f8'), ('c', 'i4')]
assert_(not np.can_cast(a.dtype, t, casting=casting))
def test_objview(self):
# https://github.com/numpy/numpy/issues/3286
a = np.array([], dtype=[('a', 'f'), ('b', 'f'), ('c', 'O')])
a[['a', 'b']] # TypeError?
# https://github.com/numpy/numpy/issues/3253
dat2 = np.zeros(3, [('A', 'i'), ('B', '|O')])
dat2[['B', 'A']] # TypeError?
def test_setfield(self):
# https://github.com/numpy/numpy/issues/3126
struct_dt = np.dtype([('elem', 'i4', 5),])
dt = np.dtype([('field', 'i4', 10),('struct', struct_dt)])
x = np.zeros(1, dt)
x[0]['field'] = np.ones(10, dtype='i4')
x[0]['struct'] = np.ones(1, dtype=struct_dt)
assert_equal(x[0]['field'], np.ones(10, dtype='i4'))
def test_setfield_object(self):
# make sure object field assignment with ndarray value
# on void scalar mimics setitem behavior
b = np.zeros(1, dtype=[('x', 'O')])
# next line should work identically to b['x'][0] = np.arange(3)
b[0]['x'] = np.arange(3)
assert_equal(b[0]['x'], np.arange(3))
# check that broadcasting check still works
c = np.zeros(1, dtype=[('x', 'O', 5)])
def testassign():
c[0]['x'] = np.arange(3)
assert_raises(ValueError, testassign)
def test_zero_width_string(self):
# Test for PR #6430 / issues #473, #4955, #2585
dt = np.dtype([('I', int), ('S', 'S0')])
x = np.zeros(4, dtype=dt)
assert_equal(x['S'], [b'', b'', b'', b''])
assert_equal(x['S'].itemsize, 0)
x['S'] = ['a', 'b', 'c', 'd']
assert_equal(x['S'], [b'', b'', b'', b''])
assert_equal(x['I'], [0, 0, 0, 0])
# Variation on test case from #4955
x['S'][x['I'] == 0] = 'hello'
assert_equal(x['S'], [b'', b'', b'', b''])
assert_equal(x['I'], [0, 0, 0, 0])
# Variation on test case from #2585
x['S'] = 'A'
assert_equal(x['S'], [b'', b'', b'', b''])
assert_equal(x['I'], [0, 0, 0, 0])
# Allow zero-width dtypes in ndarray constructor
y = np.ndarray(4, dtype=x['S'].dtype)
assert_equal(y.itemsize, 0)
assert_equal(x['S'], y)
# More tests for indexing an array with zero-width fields
assert_equal(np.zeros(4, dtype=[('a', 'S0,S0'),
('b', 'u1')])['a'].itemsize, 0)
assert_equal(np.empty(3, dtype='S0,S0').itemsize, 0)
assert_equal(np.zeros(4, dtype='S0,u1')['f0'].itemsize, 0)
xx = x['S'].reshape((2, 2))
assert_equal(xx.itemsize, 0)
assert_equal(xx, [[b'', b''], [b'', b'']])
# check for no uninitialized memory due to viewing S0 array
assert_equal(xx[:].dtype, xx.dtype)
assert_array_equal(eval(repr(xx), dict(array=np.array)), xx)
b = io.BytesIO()
np.save(b, xx)
b.seek(0)
yy = np.load(b)
assert_equal(yy.itemsize, 0)
assert_equal(xx, yy)
with temppath(suffix='.npy') as tmp:
np.save(tmp, xx)
yy = np.load(tmp)
assert_equal(yy.itemsize, 0)
assert_equal(xx, yy)
def test_base_attr(self):
a = np.zeros(3, dtype='i4,f4')
b = a[0]
assert_(b.base is a)
def test_assignment(self):
def testassign(arr, v):
c = arr.copy()
c[0] = v # assign using setitem
c[1:] = v # assign using "dtype_transfer" code paths
return c
dt = np.dtype([('foo', 'i8'), ('bar', 'i8')])
arr = np.ones(2, dt)
v1 = np.array([(2,3)], dtype=[('foo', 'i8'), ('bar', 'i8')])
v2 = np.array([(2,3)], dtype=[('bar', 'i8'), ('foo', 'i8')])
v3 = np.array([(2,3)], dtype=[('bar', 'i8'), ('baz', 'i8')])
v4 = np.array([(2,)], dtype=[('bar', 'i8')])
v5 = np.array([(2,3)], dtype=[('foo', 'f8'), ('bar', 'f8')])
w = arr.view({'names': ['bar'], 'formats': ['i8'], 'offsets': [8]})
ans = np.array([(2,3),(2,3)], dtype=dt)
assert_equal(testassign(arr, v1), ans)
assert_equal(testassign(arr, v2), ans)
assert_equal(testassign(arr, v3), ans)
assert_raises(ValueError, lambda: testassign(arr, v4))
assert_equal(testassign(arr, v5), ans)
w[:] = 4
assert_equal(arr, np.array([(1,4),(1,4)], dtype=dt))
# test field-reordering, assignment by position, and self-assignment
a = np.array([(1,2,3)],
dtype=[('foo', 'i8'), ('bar', 'i8'), ('baz', 'f4')])
a[['foo', 'bar']] = a[['bar', 'foo']]
assert_equal(a[0].item(), (2,1,3))
# test that this works even for 'simple_unaligned' structs
# (ie, that PyArray_EquivTypes cares about field order too)
a = np.array([(1,2)], dtype=[('a', 'i4'), ('b', 'i4')])
a[['a', 'b']] = a[['b', 'a']]
assert_equal(a[0].item(), (2,1))
def test_structuredscalar_indexing(self):
# test gh-7262
x = np.empty(shape=1, dtype="(2)3S,(2)3U")
assert_equal(x[["f0","f1"]][0], x[0][["f0","f1"]])
assert_equal(x[0], x[0][()])
def test_multiindex_titles(self):
a = np.zeros(4, dtype=[(('a', 'b'), 'i'), ('c', 'i'), ('d', 'i')])
assert_raises(KeyError, lambda : a[['a','c']])
assert_raises(KeyError, lambda : a[['a','a']])
assert_raises(ValueError, lambda : a[['b','b']]) # field exists, but repeated
a[['b','c']] # no exception
class TestBool(object):
def test_test_interning(self):
a0 = np.bool_(0)
b0 = np.bool_(False)
assert_(a0 is b0)
a1 = np.bool_(1)
b1 = np.bool_(True)
assert_(a1 is b1)
assert_(np.array([True])[0] is a1)
assert_(np.array(True)[()] is a1)
def test_sum(self):
d = np.ones(101, dtype=bool)
assert_equal(d.sum(), d.size)
assert_equal(d[::2].sum(), d[::2].size)
assert_equal(d[::-2].sum(), d[::-2].size)
d = np.frombuffer(b'\xff\xff' * 100, dtype=bool)
assert_equal(d.sum(), d.size)
assert_equal(d[::2].sum(), d[::2].size)
assert_equal(d[::-2].sum(), d[::-2].size)
def check_count_nonzero(self, power, length):
powers = [2 ** i for i in range(length)]
for i in range(2**power):
l = [(i & x) != 0 for x in powers]
a = np.array(l, dtype=bool)
c = builtins.sum(l)
assert_equal(np.count_nonzero(a), c)
av = a.view(np.uint8)
av *= 3
assert_equal(np.count_nonzero(a), c)
av *= 4
assert_equal(np.count_nonzero(a), c)
av[av != 0] = 0xFF
assert_equal(np.count_nonzero(a), c)
def test_count_nonzero(self):
# check all 12 bit combinations in a length 17 array
# covers most cases of the 16 byte unrolled code
self.check_count_nonzero(12, 17)
@pytest.mark.slow
def test_count_nonzero_all(self):
# check all combinations in a length 17 array
# covers all cases of the 16 byte unrolled code
self.check_count_nonzero(17, 17)
def test_count_nonzero_unaligned(self):
# prevent mistakes as e.g. gh-4060
for o in range(7):
a = np.zeros((18,), dtype=bool)[o+1:]
a[:o] = True
assert_equal(np.count_nonzero(a), builtins.sum(a.tolist()))
a = np.ones((18,), dtype=bool)[o+1:]
a[:o] = False
assert_equal(np.count_nonzero(a), builtins.sum(a.tolist()))
def _test_cast_from_flexible(self, dtype):
# empty string -> false
for n in range(3):
v = np.array(b'', (dtype, n))
assert_equal(bool(v), False)
assert_equal(bool(v[()]), False)
assert_equal(v.astype(bool), False)
assert_(isinstance(v.astype(bool), np.ndarray))
assert_(v[()].astype(bool) is np.False_)
# anything else -> true
for n in range(1, 4):
for val in [b'a', b'0', b' ']:
v = np.array(val, (dtype, n))
assert_equal(bool(v), True)
assert_equal(bool(v[()]), True)
assert_equal(v.astype(bool), True)
assert_(isinstance(v.astype(bool), np.ndarray))
assert_(v[()].astype(bool) is np.True_)
def test_cast_from_void(self):
self._test_cast_from_flexible(np.void)
@pytest.mark.xfail(reason="See gh-9847")
def test_cast_from_unicode(self):
self._test_cast_from_flexible(np.unicode_)
@pytest.mark.xfail(reason="See gh-9847")
def test_cast_from_bytes(self):
self._test_cast_from_flexible(np.bytes_)
class TestZeroSizeFlexible(object):
@staticmethod
def _zeros(shape, dtype=str):
dtype = np.dtype(dtype)
if dtype == np.void:
return np.zeros(shape, dtype=(dtype, 0))
# not constructable directly
dtype = np.dtype([('x', dtype, 0)])
return np.zeros(shape, dtype=dtype)['x']
def test_create(self):
zs = self._zeros(10, bytes)
assert_equal(zs.itemsize, 0)
zs = self._zeros(10, np.void)
assert_equal(zs.itemsize, 0)
zs = self._zeros(10, unicode)
assert_equal(zs.itemsize, 0)
def _test_sort_partition(self, name, kinds, **kwargs):
# Previously, these would all hang
for dt in [bytes, np.void, unicode]:
zs = self._zeros(10, dt)
sort_method = getattr(zs, name)
sort_func = getattr(np, name)
for kind in kinds:
sort_method(kind=kind, **kwargs)
sort_func(zs, kind=kind, **kwargs)
def test_sort(self):
self._test_sort_partition('sort', kinds='qhm')
def test_argsort(self):
self._test_sort_partition('argsort', kinds='qhm')
def test_partition(self):
self._test_sort_partition('partition', kinds=['introselect'], kth=2)
def test_argpartition(self):
self._test_sort_partition('argpartition', kinds=['introselect'], kth=2)
def test_resize(self):
# previously an error
for dt in [bytes, np.void, unicode]:
zs = self._zeros(10, dt)
zs.resize(25)
zs.resize((10, 10))
def test_view(self):
for dt in [bytes, np.void, unicode]:
zs = self._zeros(10, dt)
# viewing as itself should be allowed
assert_equal(zs.view(dt).dtype, np.dtype(dt))
# viewing as any non-empty type gives an empty result
assert_equal(zs.view((dt, 1)).shape, (0,))
def test_pickle(self):
for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
for dt in [bytes, np.void, unicode]:
zs = self._zeros(10, dt)
p = pickle.dumps(zs, protocol=proto)
zs2 = pickle.loads(p)
assert_equal(zs.dtype, zs2.dtype)
@pytest.mark.skipif(pickle.HIGHEST_PROTOCOL < 5,
reason="requires pickle protocol 5")
def test_pickle_with_buffercallback(self):
array = np.arange(10)
buffers = []
bytes_string = pickle.dumps(array, buffer_callback=buffers.append,
protocol=5)
array_from_buffer = pickle.loads(bytes_string, buffers=buffers)
# when using pickle protocol 5 with buffer callbacks,
# array_from_buffer is reconstructed from a buffer holding a view
# to the initial array's data, so modifying an element in array
# should modify it in array_from_buffer too.
array[0] = -1
assert array_from_buffer[0] == -1, array_from_buffer[0]
class TestMethods(object):
def test_compress(self):
tgt = [[5, 6, 7, 8, 9]]
arr = np.arange(10).reshape(2, 5)
out = arr.compress([0, 1], axis=0)
assert_equal(out, tgt)
tgt = [[1, 3], [6, 8]]
out = arr.compress([0, 1, 0, 1, 0], axis=1)
assert_equal(out, tgt)
tgt = [[1], [6]]
arr = np.arange(10).reshape(2, 5)
out = arr.compress([0, 1], axis=1)
assert_equal(out, tgt)
arr = np.arange(10).reshape(2, 5)
out = arr.compress([0, 1])
assert_equal(out, 1)
def test_choose(self):
x = 2*np.ones((3,), dtype=int)
y = 3*np.ones((3,), dtype=int)
x2 = 2*np.ones((2, 3), dtype=int)
y2 = 3*np.ones((2, 3), dtype=int)
ind = np.array([0, 0, 1])
A = ind.choose((x, y))
assert_equal(A, [2, 2, 3])
A = ind.choose((x2, y2))
assert_equal(A, [[2, 2, 3], [2, 2, 3]])
A = ind.choose((x, y2))
assert_equal(A, [[2, 2, 3], [2, 2, 3]])
oned = np.ones(1)
# gh-12031, caused SEGFAULT
        assert_raises(TypeError, oned.choose, np.void(0), [oned])
def test_prod(self):
ba = [1, 2, 10, 11, 6, 5, 4]
ba2 = [[1, 2, 3, 4], [5, 6, 7, 9], [10, 3, 4, 5]]
for ctype in [np.int16, np.uint16, np.int32, np.uint32,
np.float32, np.float64, np.complex64, np.complex128]:
a = np.array(ba, ctype)
a2 = np.array(ba2, ctype)
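            # note: ctype is a scalar type object, never the old '1'/'b'
            # typecode strings, so this overflow branch never triggers for
            # the dtypes listed above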
if ctype in ['1', 'b']:
assert_raises(ArithmeticError, a.prod)
assert_raises(ArithmeticError, a2.prod, axis=1)
else:
assert_equal(a.prod(axis=0), 26400)
assert_array_equal(a2.prod(axis=0),
np.array([50, 36, 84, 180], ctype))
assert_array_equal(a2.prod(axis=-1),
np.array([24, 1890, 600], ctype))
def test_repeat(self):
m = np.array([1, 2, 3, 4, 5, 6])
m_rect = m.reshape((2, 3))
A = m.repeat([1, 3, 2, 1, 1, 2])
assert_equal(A, [1, 2, 2, 2, 3,
3, 4, 5, 6, 6])
A = m.repeat(2)
assert_equal(A, [1, 1, 2, 2, 3, 3,
4, 4, 5, 5, 6, 6])
A = m_rect.repeat([2, 1], axis=0)
assert_equal(A, [[1, 2, 3],
[1, 2, 3],
[4, 5, 6]])
A = m_rect.repeat([1, 3, 2], axis=1)
assert_equal(A, [[1, 2, 2, 2, 3, 3],
[4, 5, 5, 5, 6, 6]])
A = m_rect.repeat(2, axis=0)
assert_equal(A, [[1, 2, 3],
[1, 2, 3],
[4, 5, 6],
[4, 5, 6]])
A = m_rect.repeat(2, axis=1)
assert_equal(A, [[1, 1, 2, 2, 3, 3],
[4, 4, 5, 5, 6, 6]])
def test_reshape(self):
arr = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]])
tgt = [[1, 2, 3, 4, 5, 6], [7, 8, 9, 10, 11, 12]]
assert_equal(arr.reshape(2, 6), tgt)
tgt = [[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]]
assert_equal(arr.reshape(3, 4), tgt)
tgt = [[1, 10, 8, 6], [4, 2, 11, 9], [7, 5, 3, 12]]
assert_equal(arr.reshape((3, 4), order='F'), tgt)
tgt = [[1, 4, 7, 10], [2, 5, 8, 11], [3, 6, 9, 12]]
assert_equal(arr.T.reshape((3, 4), order='C'), tgt)
def test_round(self):
def check_round(arr, expected, *round_args):
assert_equal(arr.round(*round_args), expected)
# With output array
out = np.zeros_like(arr)
res = arr.round(*round_args, out=out)
assert_equal(out, expected)
assert_equal(out, res)
check_round(np.array([1.2, 1.5]), [1, 2])
check_round(np.array(1.5), 2)
check_round(np.array([12.2, 15.5]), [10, 20], -1)
check_round(np.array([12.15, 15.51]), [12.2, 15.5], 1)
# Complex rounding
check_round(np.array([4.5 + 1.5j]), [4 + 2j])
check_round(np.array([12.5 + 15.5j]), [10 + 20j], -1)
def test_squeeze(self):
a = np.array([[[1], [2], [3]]])
assert_equal(a.squeeze(), [1, 2, 3])
assert_equal(a.squeeze(axis=(0,)), [[1], [2], [3]])
assert_raises(ValueError, a.squeeze, axis=(1,))
assert_equal(a.squeeze(axis=(2,)), [[1, 2, 3]])
def test_transpose(self):
a = np.array([[1, 2], [3, 4]])
assert_equal(a.transpose(), [[1, 3], [2, 4]])
assert_raises(ValueError, lambda: a.transpose(0))
assert_raises(ValueError, lambda: a.transpose(0, 0))
assert_raises(ValueError, lambda: a.transpose(0, 1, 2))
def test_sort(self):
# test ordering for floats and complex containing nans. It is only
# necessary to check the less-than comparison, so sorts that
# only follow the insertion sort path are sufficient. We only
# test doubles and complex doubles as the logic is the same.
# check doubles
msg = "Test real sort order with nans"
a = np.array([np.nan, 1, 0])
b = np.sort(a)
assert_equal(b, a[::-1], msg)
# check complex
msg = "Test complex sort order with nans"
a = np.zeros(9, dtype=np.complex128)
a.real += [np.nan, np.nan, np.nan, 1, 0, 1, 1, 0, 0]
a.imag += [np.nan, 1, 0, np.nan, np.nan, 1, 0, 1, 0]
b = np.sort(a)
assert_equal(b, a[::-1], msg)
# all c scalar sorts use the same code with different types
# so it suffices to run a quick check with one type. The number
# of sorted items must be greater than ~50 to check the actual
# algorithm because quick and merge sort fall over to insertion
# sort for small arrays.
a = np.arange(101)
b = a[::-1].copy()
for kind in ['q', 'm', 'h']:
msg = "scalar sort, kind=%s" % kind
c = a.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
c = b.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
# test complex sorts. These use the same code as the scalars
# but the compare function differs.
ai = a*1j + 1
bi = b*1j + 1
for kind in ['q', 'm', 'h']:
msg = "complex sort, real part == 1, kind=%s" % kind
c = ai.copy()
c.sort(kind=kind)
assert_equal(c, ai, msg)
c = bi.copy()
c.sort(kind=kind)
assert_equal(c, ai, msg)
ai = a + 1j
bi = b + 1j
for kind in ['q', 'm', 'h']:
msg = "complex sort, imag part == 1, kind=%s" % kind
c = ai.copy()
c.sort(kind=kind)
assert_equal(c, ai, msg)
c = bi.copy()
c.sort(kind=kind)
assert_equal(c, ai, msg)
# test sorting of complex arrays requiring byte-swapping, gh-5441
for endianness in '<>':
for dt in np.typecodes['Complex']:
arr = np.array([1+3.j, 2+2.j, 3+1.j], dtype=endianness + dt)
c = arr.copy()
c.sort()
msg = 'byte-swapped complex sort, dtype={0}'.format(dt)
assert_equal(c, arr, msg)
# test string sorts.
s = 'aaaaaaaa'
a = np.array([s + chr(i) for i in range(101)])
b = a[::-1].copy()
for kind in ['q', 'm', 'h']:
msg = "string sort, kind=%s" % kind
c = a.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
c = b.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
# test unicode sorts.
s = 'aaaaaaaa'
a = np.array([s + chr(i) for i in range(101)], dtype=np.unicode)
b = a[::-1].copy()
for kind in ['q', 'm', 'h']:
msg = "unicode sort, kind=%s" % kind
c = a.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
c = b.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
# test object array sorts.
a = np.empty((101,), dtype=object)
a[:] = list(range(101))
b = a[::-1]
for kind in ['q', 'h', 'm']:
msg = "object sort, kind=%s" % kind
c = a.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
c = b.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
# test record array sorts.
dt = np.dtype([('f', float), ('i', int)])
a = np.array([(i, i) for i in range(101)], dtype=dt)
b = a[::-1]
for kind in ['q', 'h', 'm']:
msg = "object sort, kind=%s" % kind
c = a.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
c = b.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
# test datetime64 sorts.
a = np.arange(0, 101, dtype='datetime64[D]')
b = a[::-1]
for kind in ['q', 'h', 'm']:
msg = "datetime64 sort, kind=%s" % kind
c = a.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
c = b.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
# test timedelta64 sorts.
a = np.arange(0, 101, dtype='timedelta64[D]')
b = a[::-1]
for kind in ['q', 'h', 'm']:
msg = "timedelta64 sort, kind=%s" % kind
c = a.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
c = b.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
# check axis handling. This should be the same for all type
# specific sorts, so we only check it for one type and one kind
a = np.array([[3, 2], [1, 0]])
b = np.array([[1, 0], [3, 2]])
c = np.array([[2, 3], [0, 1]])
d = a.copy()
d.sort(axis=0)
assert_equal(d, b, "test sort with axis=0")
d = a.copy()
d.sort(axis=1)
assert_equal(d, c, "test sort with axis=1")
d = a.copy()
d.sort()
assert_equal(d, c, "test sort with default axis")
# check axis handling for multidimensional empty arrays
a = np.array([])
a.shape = (3, 2, 1, 0)
for axis in range(-a.ndim, a.ndim):
msg = 'test empty array sort with axis={0}'.format(axis)
assert_equal(np.sort(a, axis=axis), a, msg)
msg = 'test empty array sort with axis=None'
assert_equal(np.sort(a, axis=None), a.ravel(), msg)
# test generic class with bogus ordering,
# should not segfault.
class Boom(object):
def __lt__(self, other):
return True
a = np.array([Boom()]*100, dtype=object)
for kind in ['q', 'm', 'h']:
msg = "bogus comparison object sort, kind=%s" % kind
            c = a.copy()
            c.sort(kind=kind)
def test_void_sort(self):
# gh-8210 - previously segfaulted
for i in range(4):
rand = np.random.randint(256, size=4000, dtype=np.uint8)
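            # reinterpret the random bytes as opaque 4-byte void elements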
arr = rand.view('V4')
arr[::-1].sort()
dt = np.dtype([('val', 'i4', (1,))])
for i in range(4):
rand = np.random.randint(256, size=4000, dtype=np.uint8)
arr = rand.view(dt)
arr[::-1].sort()
def test_sort_raises(self):
        # gh-9404
arr = np.array([0, datetime.now(), 1], dtype=object)
for kind in ['q', 'm', 'h']:
assert_raises(TypeError, arr.sort, kind=kind)
        # gh-3879
class Raiser(object):
def raises_anything(*args, **kwargs):
raise TypeError("SOMETHING ERRORED")
__eq__ = __ne__ = __lt__ = __gt__ = __ge__ = __le__ = raises_anything
arr = np.array([[Raiser(), n] for n in range(10)]).reshape(-1)
np.random.shuffle(arr)
for kind in ['q', 'm', 'h']:
assert_raises(TypeError, arr.sort, kind=kind)
def test_sort_degraded(self):
# test degraded dataset would take minutes to run with normal qsort
d = np.arange(1000000)
do = d.copy()
x = d
# create a median of 3 killer where each median is the sorted second
# last element of the quicksort partition
while x.size > 3:
mid = x.size // 2
x[mid], x[-2] = x[-2], x[mid]
x = x[:-2]
assert_equal(np.sort(d), do)
assert_equal(d[np.argsort(d)], do)
def test_copy(self):
def assert_fortran(arr):
assert_(arr.flags.fortran)
assert_(arr.flags.f_contiguous)
assert_(not arr.flags.c_contiguous)
def assert_c(arr):
assert_(not arr.flags.fortran)
assert_(not arr.flags.f_contiguous)
assert_(arr.flags.c_contiguous)
a = np.empty((2, 2), order='F')
# Test copying a Fortran array
assert_c(a.copy())
assert_c(a.copy('C'))
assert_fortran(a.copy('F'))
assert_fortran(a.copy('A'))
# Now test starting with a C array.
a = np.empty((2, 2), order='C')
assert_c(a.copy())
assert_c(a.copy('C'))
assert_fortran(a.copy('F'))
assert_c(a.copy('A'))
def test_sort_order(self):
# Test sorting an array with fields
x1 = np.array([21, 32, 14])
x2 = np.array(['my', 'first', 'name'])
x3 = np.array([3.1, 4.5, 6.2])
r = np.rec.fromarrays([x1, x2, x3], names='id,word,number')
r.sort(order=['id'])
assert_equal(r.id, np.array([14, 21, 32]))
assert_equal(r.word, np.array(['name', 'my', 'first']))
assert_equal(r.number, np.array([6.2, 3.1, 4.5]))
r.sort(order=['word'])
assert_equal(r.id, np.array([32, 21, 14]))
assert_equal(r.word, np.array(['first', 'my', 'name']))
assert_equal(r.number, np.array([4.5, 3.1, 6.2]))
r.sort(order=['number'])
assert_equal(r.id, np.array([21, 32, 14]))
assert_equal(r.word, np.array(['my', 'first', 'name']))
assert_equal(r.number, np.array([3.1, 4.5, 6.2]))
assert_raises_regex(ValueError, 'duplicate',
lambda: r.sort(order=['id', 'id']))
if sys.byteorder == 'little':
strtype = '>i2'
else:
strtype = '<i2'
mydtype = [('name', strchar + '5'), ('col2', strtype)]
r = np.array([('a', 1), ('b', 255), ('c', 3), ('d', 258)],
dtype=mydtype)
r.sort(order='col2')
assert_equal(r['col2'], [1, 3, 255, 258])
assert_equal(r, np.array([('a', 1), ('c', 3), ('b', 255), ('d', 258)],
dtype=mydtype))
def test_argsort(self):
# all c scalar argsorts use the same code with different types
# so it suffices to run a quick check with one type. The number
# of sorted items must be greater than ~50 to check the actual
# algorithm because quick and merge sort fall over to insertion
# sort for small arrays.
a = np.arange(101)
b = a[::-1].copy()
for kind in ['q', 'm', 'h']:
msg = "scalar argsort, kind=%s" % kind
assert_equal(a.copy().argsort(kind=kind), a, msg)
assert_equal(b.copy().argsort(kind=kind), b, msg)
# test complex argsorts. These use the same code as the scalars
# but the compare function differs.
ai = a*1j + 1
bi = b*1j + 1
for kind in ['q', 'm', 'h']:
msg = "complex argsort, kind=%s" % kind
assert_equal(ai.copy().argsort(kind=kind), a, msg)
assert_equal(bi.copy().argsort(kind=kind), b, msg)
ai = a + 1j
bi = b + 1j
for kind in ['q', 'm', 'h']:
msg = "complex argsort, kind=%s" % kind
assert_equal(ai.copy().argsort(kind=kind), a, msg)
assert_equal(bi.copy().argsort(kind=kind), b, msg)
# test argsort of complex arrays requiring byte-swapping, gh-5441
for endianness in '<>':
for dt in np.typecodes['Complex']:
arr = np.array([1+3.j, 2+2.j, 3+1.j], dtype=endianness + dt)
msg = 'byte-swapped complex argsort, dtype={0}'.format(dt)
assert_equal(arr.argsort(),
np.arange(len(arr), dtype=np.intp), msg)
# test string argsorts.
s = 'aaaaaaaa'
a = np.array([s + chr(i) for i in range(101)])
b = a[::-1].copy()
r = np.arange(101)
rr = r[::-1]
for kind in ['q', 'm', 'h']:
msg = "string argsort, kind=%s" % kind
assert_equal(a.copy().argsort(kind=kind), r, msg)
assert_equal(b.copy().argsort(kind=kind), rr, msg)
# test unicode argsorts.
s = 'aaaaaaaa'
a = np.array([s + chr(i) for i in range(101)], dtype=np.unicode)
b = a[::-1]
r = np.arange(101)
rr = r[::-1]
for kind in ['q', 'm', 'h']:
msg = "unicode argsort, kind=%s" % kind
assert_equal(a.copy().argsort(kind=kind), r, msg)
assert_equal(b.copy().argsort(kind=kind), rr, msg)
# test object array argsorts.
a = np.empty((101,), dtype=object)
a[:] = list(range(101))
b = a[::-1]
r = np.arange(101)
rr = r[::-1]
for kind in ['q', 'm', 'h']:
msg = "object argsort, kind=%s" % kind
assert_equal(a.copy().argsort(kind=kind), r, msg)
assert_equal(b.copy().argsort(kind=kind), rr, msg)
# test structured array argsorts.
dt = np.dtype([('f', float), ('i', int)])
a = np.array([(i, i) for i in range(101)], dtype=dt)
b = a[::-1]
r = np.arange(101)
rr = r[::-1]
for kind in ['q', 'm', 'h']:
msg = "structured array argsort, kind=%s" % kind
assert_equal(a.copy().argsort(kind=kind), r, msg)
assert_equal(b.copy().argsort(kind=kind), rr, msg)
# test datetime64 argsorts.
a = np.arange(0, 101, dtype='datetime64[D]')
b = a[::-1]
r = np.arange(101)
rr = r[::-1]
for kind in ['q', 'h', 'm']:
msg = "datetime64 argsort, kind=%s" % kind
assert_equal(a.copy().argsort(kind=kind), r, msg)
assert_equal(b.copy().argsort(kind=kind), rr, msg)
# test timedelta64 argsorts.
a = np.arange(0, 101, dtype='timedelta64[D]')
b = a[::-1]
r = np.arange(101)
rr = r[::-1]
for kind in ['q', 'h', 'm']:
msg = "timedelta64 argsort, kind=%s" % kind
assert_equal(a.copy().argsort(kind=kind), r, msg)
assert_equal(b.copy().argsort(kind=kind), rr, msg)
# check axis handling. This should be the same for all type
# specific argsorts, so we only check it for one type and one kind
a = np.array([[3, 2], [1, 0]])
b = np.array([[1, 1], [0, 0]])
c = np.array([[1, 0], [1, 0]])
assert_equal(a.copy().argsort(axis=0), b)
assert_equal(a.copy().argsort(axis=1), c)
assert_equal(a.copy().argsort(), c)
# check axis handling for multidimensional empty arrays
a = np.array([])
a.shape = (3, 2, 1, 0)
for axis in range(-a.ndim, a.ndim):
msg = 'test empty array argsort with axis={0}'.format(axis)
assert_equal(np.argsort(a, axis=axis),
np.zeros_like(a, dtype=np.intp), msg)
msg = 'test empty array argsort with axis=None'
assert_equal(np.argsort(a, axis=None),
np.zeros_like(a.ravel(), dtype=np.intp), msg)
# check that stable argsorts are stable
r = np.arange(100)
# scalars
a = np.zeros(100)
assert_equal(a.argsort(kind='m'), r)
# complex
a = np.zeros(100, dtype=complex)
assert_equal(a.argsort(kind='m'), r)
# string
a = np.array(['aaaaaaaaa' for i in range(100)])
assert_equal(a.argsort(kind='m'), r)
# unicode
a = np.array(['aaaaaaaaa' for i in range(100)], dtype=np.unicode)
assert_equal(a.argsort(kind='m'), r)
def test_sort_unicode_kind(self):
d = np.arange(10)
k = b'\xc3\xa4'.decode("UTF8")
assert_raises(ValueError, d.sort, kind=k)
assert_raises(ValueError, d.argsort, kind=k)
def test_searchsorted(self):
# test for floats and complex containing nans. The logic is the
# same for all float types so only test double types for now.
# The search sorted routines use the compare functions for the
# array type, so this checks if that is consistent with the sort
# order.
# check double
a = np.array([0, 1, np.nan])
msg = "Test real searchsorted with nans, side='l'"
b = a.searchsorted(a, side='l')
assert_equal(b, np.arange(3), msg)
msg = "Test real searchsorted with nans, side='r'"
b = a.searchsorted(a, side='r')
assert_equal(b, np.arange(1, 4), msg)
# check double complex
a = np.zeros(9, dtype=np.complex128)
a.real += [0, 0, 1, 1, 0, 1, np.nan, np.nan, np.nan]
a.imag += [0, 1, 0, 1, np.nan, np.nan, 0, 1, np.nan]
msg = "Test complex searchsorted with nans, side='l'"
b = a.searchsorted(a, side='l')
assert_equal(b, np.arange(9), msg)
msg = "Test complex searchsorted with nans, side='r'"
b = a.searchsorted(a, side='r')
assert_equal(b, np.arange(1, 10), msg)
msg = "Test searchsorted with little endian, side='l'"
a = np.array([0, 128], dtype='<i4')
b = a.searchsorted(np.array(128, dtype='<i4'))
assert_equal(b, 1, msg)
msg = "Test searchsorted with big endian, side='l'"
a = np.array([0, 128], dtype='>i4')
b = a.searchsorted(np.array(128, dtype='>i4'))
assert_equal(b, 1, msg)
# Check 0 elements
a = np.ones(0)
b = a.searchsorted([0, 1, 2], 'l')
assert_equal(b, [0, 0, 0])
b = a.searchsorted([0, 1, 2], 'r')
assert_equal(b, [0, 0, 0])
a = np.ones(1)
# Check 1 element
b = a.searchsorted([0, 1, 2], 'l')
assert_equal(b, [0, 0, 1])
b = a.searchsorted([0, 1, 2], 'r')
assert_equal(b, [0, 1, 1])
# Check all elements equal
a = np.ones(2)
b = a.searchsorted([0, 1, 2], 'l')
assert_equal(b, [0, 0, 2])
b = a.searchsorted([0, 1, 2], 'r')
assert_equal(b, [0, 2, 2])
# Test searching unaligned array
a = np.arange(10)
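        # allocate one spare byte and view from offset 1 so the data is
        # deliberately misaligned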
aligned = np.empty(a.itemsize * a.size + 1, 'uint8')
unaligned = aligned[1:].view(a.dtype)
unaligned[:] = a
# Test searching unaligned array
b = unaligned.searchsorted(a, 'l')
assert_equal(b, a)
b = unaligned.searchsorted(a, 'r')
assert_equal(b, a + 1)
# Test searching for unaligned keys
b = a.searchsorted(unaligned, 'l')
assert_equal(b, a)
b = a.searchsorted(unaligned, 'r')
assert_equal(b, a + 1)
# Test smart resetting of binsearch indices
a = np.arange(5)
b = a.searchsorted([6, 5, 4], 'l')
assert_equal(b, [5, 5, 4])
b = a.searchsorted([6, 5, 4], 'r')
assert_equal(b, [5, 5, 5])
# Test all type specific binary search functions
types = ''.join((np.typecodes['AllInteger'], np.typecodes['AllFloat'],
np.typecodes['Datetime'], '?O'))
for dt in types:
if dt == 'M':
dt = 'M8[D]'
if dt == '?':
a = np.arange(2, dtype=dt)
out = np.arange(2)
else:
a = np.arange(0, 5, dtype=dt)
out = np.arange(5)
b = a.searchsorted(a, 'l')
assert_equal(b, out)
b = a.searchsorted(a, 'r')
assert_equal(b, out + 1)
# Test empty array, use a fresh array to get warnings in
# valgrind if access happens.
e = np.ndarray(shape=0, buffer=b'', dtype=dt)
b = e.searchsorted(a, 'l')
assert_array_equal(b, np.zeros(len(a), dtype=np.intp))
b = a.searchsorted(e, 'l')
assert_array_equal(b, np.zeros(0, dtype=np.intp))
def test_searchsorted_unicode(self):
# Test searchsorted on unicode strings.
# 1.6.1 contained a string length miscalculation in
# arraytypes.c.src:UNICODE_compare() which manifested as
# incorrect/inconsistent results from searchsorted.
a = np.array(['P:\\20x_dapi_cy3\\20x_dapi_cy3_20100185_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100186_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100187_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100189_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100190_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100191_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100192_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100193_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100194_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100195_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100196_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100197_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100198_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100199_1'],
dtype=np.unicode)
ind = np.arange(len(a))
assert_equal([a.searchsorted(v, 'left') for v in a], ind)
assert_equal([a.searchsorted(v, 'right') for v in a], ind + 1)
assert_equal([a.searchsorted(a[i], 'left') for i in ind], ind)
assert_equal([a.searchsorted(a[i], 'right') for i in ind], ind + 1)
def test_searchsorted_with_sorter(self):
a = np.array([5, 2, 1, 3, 4])
s = np.argsort(a)
assert_raises(TypeError, np.searchsorted, a, 0, sorter=(1, (2, 3)))
assert_raises(TypeError, np.searchsorted, a, 0, sorter=[1.1])
assert_raises(ValueError, np.searchsorted, a, 0, sorter=[1, 2, 3, 4])
assert_raises(ValueError, np.searchsorted, a, 0, sorter=[1, 2, 3, 4, 5, 6])
# bounds check
assert_raises(ValueError, np.searchsorted, a, 4, sorter=[0, 1, 2, 3, 5])
assert_raises(ValueError, np.searchsorted, a, 0, sorter=[-1, 0, 1, 2, 3])
assert_raises(ValueError, np.searchsorted, a, 0, sorter=[4, 0, -1, 2, 3])
a = np.random.rand(300)
s = a.argsort()
b = np.sort(a)
k = np.linspace(0, 1, 20)
assert_equal(b.searchsorted(k), a.searchsorted(k, sorter=s))
a = np.array([0, 1, 2, 3, 5]*20)
s = a.argsort()
k = [0, 1, 2, 3, 5]
expected = [0, 20, 40, 60, 80]
assert_equal(a.searchsorted(k, side='l', sorter=s), expected)
expected = [20, 40, 60, 80, 100]
assert_equal(a.searchsorted(k, side='r', sorter=s), expected)
# Test searching unaligned array
keys = np.arange(10)
a = keys.copy()
        np.random.shuffle(a)
s = a.argsort()
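        # same off-by-one buffer trick as in test_searchsorted: view from
        # offset 1 to get a misaligned array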
aligned = np.empty(a.itemsize * a.size + 1, 'uint8')
unaligned = aligned[1:].view(a.dtype)
# Test searching unaligned array
unaligned[:] = a
b = unaligned.searchsorted(keys, 'l', s)
assert_equal(b, keys)
b = unaligned.searchsorted(keys, 'r', s)
assert_equal(b, keys + 1)
# Test searching for unaligned keys
unaligned[:] = keys
b = a.searchsorted(unaligned, 'l', s)
assert_equal(b, keys)
b = a.searchsorted(unaligned, 'r', s)
assert_equal(b, keys + 1)
# Test all type specific indirect binary search functions
types = ''.join((np.typecodes['AllInteger'], np.typecodes['AllFloat'],
np.typecodes['Datetime'], '?O'))
for dt in types:
if dt == 'M':
dt = 'M8[D]'
if dt == '?':
a = np.array([1, 0], dtype=dt)
# We want the sorter array to be of a type that is different
# from np.intp in all platforms, to check for #4698
s = np.array([1, 0], dtype=np.int16)
out = np.array([1, 0])
else:
a = np.array([3, 4, 1, 2, 0], dtype=dt)
# We want the sorter array to be of a type that is different
# from np.intp in all platforms, to check for #4698
s = np.array([4, 2, 3, 0, 1], dtype=np.int16)
out = np.array([3, 4, 1, 2, 0], dtype=np.intp)
b = a.searchsorted(a, 'l', s)
assert_equal(b, out)
b = a.searchsorted(a, 'r', s)
assert_equal(b, out + 1)
# Test empty array, use a fresh array to get warnings in
# valgrind if access happens.
e = np.ndarray(shape=0, buffer=b'', dtype=dt)
b = e.searchsorted(a, 'l', s[:0])
assert_array_equal(b, np.zeros(len(a), dtype=np.intp))
b = a.searchsorted(e, 'l', s)
assert_array_equal(b, np.zeros(0, dtype=np.intp))
# Test non-contiguous sorter array
a = np.array([3, 4, 1, 2, 0])
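        # build a strided, non-contiguous sorter: the odd slots hold -1
        # poison values that a correct implementation never reads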
srt = np.empty((10,), dtype=np.intp)
srt[1::2] = -1
srt[::2] = [4, 2, 3, 0, 1]
s = srt[::2]
out = np.array([3, 4, 1, 2, 0], dtype=np.intp)
b = a.searchsorted(a, 'l', s)
assert_equal(b, out)
b = a.searchsorted(a, 'r', s)
assert_equal(b, out + 1)
def test_searchsorted_return_type(self):
# Functions returning indices should always return base ndarrays
class A(np.ndarray):
pass
a = np.arange(5).view(A)
b = np.arange(1, 3).view(A)
s = np.arange(5).view(A)
assert_(not isinstance(a.searchsorted(b, 'l'), A))
assert_(not isinstance(a.searchsorted(b, 'r'), A))
assert_(not isinstance(a.searchsorted(b, 'l', s), A))
assert_(not isinstance(a.searchsorted(b, 'r', s), A))
def test_argpartition_out_of_range(self):
# Test out of range values in kth raise an error, gh-5469
d = np.arange(10)
assert_raises(ValueError, d.argpartition, 10)
assert_raises(ValueError, d.argpartition, -11)
# Test also for generic type argpartition, which uses sorting
# and used to not bound check kth
d_obj = np.arange(10, dtype=object)
assert_raises(ValueError, d_obj.argpartition, 10)
assert_raises(ValueError, d_obj.argpartition, -11)
def test_partition_out_of_range(self):
# Test out of range values in kth raise an error, gh-5469
d = np.arange(10)
assert_raises(ValueError, d.partition, 10)
assert_raises(ValueError, d.partition, -11)
# Test also for generic type partition, which uses sorting
# and used to not bound check kth
d_obj = np.arange(10, dtype=object)
assert_raises(ValueError, d_obj.partition, 10)
assert_raises(ValueError, d_obj.partition, -11)
def test_argpartition_integer(self):
        # Test non-integer values in kth raise an error.
d = np.arange(10)
assert_raises(TypeError, d.argpartition, 9.)
# Test also for generic type argpartition, which uses sorting
# and used to not bound check kth
d_obj = np.arange(10, dtype=object)
assert_raises(TypeError, d_obj.argpartition, 9.)
def test_partition_integer(self):
        # Test non-integer values in kth raise an error.
d = np.arange(10)
assert_raises(TypeError, d.partition, 9.)
# Test also for generic type partition, which uses sorting
# and used to not bound check kth
d_obj = np.arange(10, dtype=object)
assert_raises(TypeError, d_obj.partition, 9.)
def test_partition_empty_array(self):
# check axis handling for multidimensional empty arrays
a = np.array([])
a.shape = (3, 2, 1, 0)
for axis in range(-a.ndim, a.ndim):
msg = 'test empty array partition with axis={0}'.format(axis)
assert_equal(np.partition(a, 0, axis=axis), a, msg)
msg = 'test empty array partition with axis=None'
assert_equal(np.partition(a, 0, axis=None), a.ravel(), msg)
def test_argpartition_empty_array(self):
# check axis handling for multidimensional empty arrays
a = np.array([])
a.shape = (3, 2, 1, 0)
for axis in range(-a.ndim, a.ndim):
msg = 'test empty array argpartition with axis={0}'.format(axis)
            assert_equal(np.argpartition(a, 0, axis=axis),
                         np.zeros_like(a, dtype=np.intp), msg)
msg = 'test empty array argpartition with axis=None'
        assert_equal(np.argpartition(a, 0, axis=None),
                     np.zeros_like(a.ravel(), dtype=np.intp), msg)
def test_partition(self):
d = np.arange(10)
assert_raises(TypeError, np.partition, d, 2, kind=1)
assert_raises(ValueError, np.partition, d, 2, kind="nonsense")
assert_raises(ValueError, np.argpartition, d, 2, kind="nonsense")
assert_raises(ValueError, d.partition, 2, axis=0, kind="nonsense")
assert_raises(ValueError, d.argpartition, 2, axis=0, kind="nonsense")
for k in ("introselect",):
d = np.array([])
assert_array_equal(np.partition(d, 0, kind=k), d)
assert_array_equal(np.argpartition(d, 0, kind=k), d)
d = np.ones(1)
assert_array_equal(np.partition(d, 0, kind=k)[0], d)
assert_array_equal(d[np.argpartition(d, 0, kind=k)],
np.partition(d, 0, kind=k))
# kth not modified
kth = np.array([30, 15, 5])
okth = kth.copy()
np.partition(np.arange(40), kth)
assert_array_equal(kth, okth)
for r in ([2, 1], [1, 2], [1, 1]):
d = np.array(r)
tgt = np.sort(d)
assert_array_equal(np.partition(d, 0, kind=k)[0], tgt[0])
assert_array_equal(np.partition(d, 1, kind=k)[1], tgt[1])
assert_array_equal(d[np.argpartition(d, 0, kind=k)],
np.partition(d, 0, kind=k))
assert_array_equal(d[np.argpartition(d, 1, kind=k)],
np.partition(d, 1, kind=k))
for i in range(d.size):
d[i:].partition(0, kind=k)
assert_array_equal(d, tgt)
for r in ([3, 2, 1], [1, 2, 3], [2, 1, 3], [2, 3, 1],
[1, 1, 1], [1, 2, 2], [2, 2, 1], [1, 2, 1]):
d = np.array(r)
tgt = np.sort(d)
assert_array_equal(np.partition(d, 0, kind=k)[0], tgt[0])
assert_array_equal(np.partition(d, 1, kind=k)[1], tgt[1])
assert_array_equal(np.partition(d, 2, kind=k)[2], tgt[2])
assert_array_equal(d[np.argpartition(d, 0, kind=k)],
np.partition(d, 0, kind=k))
assert_array_equal(d[np.argpartition(d, 1, kind=k)],
np.partition(d, 1, kind=k))
assert_array_equal(d[np.argpartition(d, 2, kind=k)],
np.partition(d, 2, kind=k))
for i in range(d.size):
d[i:].partition(0, kind=k)
assert_array_equal(d, tgt)
d = np.ones(50)
assert_array_equal(np.partition(d, 0, kind=k), d)
assert_array_equal(d[np.argpartition(d, 0, kind=k)],
np.partition(d, 0, kind=k))
# sorted
d = np.arange(49)
assert_equal(np.partition(d, 5, kind=k)[5], 5)
assert_equal(np.partition(d, 15, kind=k)[15], 15)
assert_array_equal(d[np.argpartition(d, 5, kind=k)],
np.partition(d, 5, kind=k))
assert_array_equal(d[np.argpartition(d, 15, kind=k)],
np.partition(d, 15, kind=k))
# rsorted
d = np.arange(47)[::-1]
assert_equal(np.partition(d, 6, kind=k)[6], 6)
assert_equal(np.partition(d, 16, kind=k)[16], 16)
assert_array_equal(d[np.argpartition(d, 6, kind=k)],
np.partition(d, 6, kind=k))
assert_array_equal(d[np.argpartition(d, 16, kind=k)],
np.partition(d, 16, kind=k))
assert_array_equal(np.partition(d, -6, kind=k),
np.partition(d, 41, kind=k))
assert_array_equal(np.partition(d, -16, kind=k),
np.partition(d, 31, kind=k))
assert_array_equal(d[np.argpartition(d, -6, kind=k)],
np.partition(d, 41, kind=k))
# median of 3 killer, O(n^2) on pure median 3 pivot quickselect
# exercises the median of median of 5 code used to keep O(n)
d = np.arange(1000000)
x = np.roll(d, d.size // 2)
mid = x.size // 2 + 1
assert_equal(np.partition(x, mid)[mid], mid)
d = np.arange(1000001)
x = np.roll(d, d.size // 2 + 1)
mid = x.size // 2 + 1
assert_equal(np.partition(x, mid)[mid], mid)
# max
d = np.ones(10)
d[1] = 4
assert_equal(np.partition(d, (2, -1))[-1], 4)
assert_equal(np.partition(d, (2, -1))[2], 1)
assert_equal(d[np.argpartition(d, (2, -1))][-1], 4)
assert_equal(d[np.argpartition(d, (2, -1))][2], 1)
d[1] = np.nan
assert_(np.isnan(d[np.argpartition(d, (2, -1))][-1]))
assert_(np.isnan(np.partition(d, (2, -1))[-1]))
# equal elements
d = np.arange(47) % 7
tgt = np.sort(np.arange(47) % 7)
np.random.shuffle(d)
for i in range(d.size):
assert_equal(np.partition(d, i, kind=k)[i], tgt[i])
assert_array_equal(d[np.argpartition(d, 6, kind=k)],
np.partition(d, 6, kind=k))
assert_array_equal(d[np.argpartition(d, 16, kind=k)],
np.partition(d, 16, kind=k))
for i in range(d.size):
d[i:].partition(0, kind=k)
assert_array_equal(d, tgt)
d = np.array([0, 1, 2, 3, 4, 5, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
7, 7, 7, 7, 7, 9])
kth = [0, 3, 19, 20]
assert_equal(np.partition(d, kth, kind=k)[kth], (0, 3, 7, 7))
assert_equal(d[np.argpartition(d, kth, kind=k)][kth], (0, 3, 7, 7))
d = np.array([2, 1])
d.partition(0, kind=k)
assert_raises(ValueError, d.partition, 2)
assert_raises(np.AxisError, d.partition, 3, axis=1)
assert_raises(ValueError, np.partition, d, 2)
assert_raises(np.AxisError, np.partition, d, 2, axis=1)
assert_raises(ValueError, d.argpartition, 2)
assert_raises(np.AxisError, d.argpartition, 3, axis=1)
assert_raises(ValueError, np.argpartition, d, 2)
assert_raises(np.AxisError, np.argpartition, d, 2, axis=1)
d = np.arange(10).reshape((2, 5))
d.partition(1, axis=0, kind=k)
d.partition(4, axis=1, kind=k)
np.partition(d, 1, axis=0, kind=k)
np.partition(d, 4, axis=1, kind=k)
np.partition(d, 1, axis=None, kind=k)
np.partition(d, 9, axis=None, kind=k)
d.argpartition(1, axis=0, kind=k)
d.argpartition(4, axis=1, kind=k)
np.argpartition(d, 1, axis=0, kind=k)
np.argpartition(d, 4, axis=1, kind=k)
np.argpartition(d, 1, axis=None, kind=k)
np.argpartition(d, 9, axis=None, kind=k)
assert_raises(ValueError, d.partition, 2, axis=0)
assert_raises(ValueError, d.partition, 11, axis=1)
assert_raises(TypeError, d.partition, 2, axis=None)
assert_raises(ValueError, np.partition, d, 9, axis=1)
assert_raises(ValueError, np.partition, d, 11, axis=None)
assert_raises(ValueError, d.argpartition, 2, axis=0)
assert_raises(ValueError, d.argpartition, 11, axis=1)
assert_raises(ValueError, np.argpartition, d, 9, axis=1)
assert_raises(ValueError, np.argpartition, d, 11, axis=None)
td = [(dt, s) for dt in [np.int32, np.float32, np.complex64]
for s in (9, 16)]
for dt, s in td:
aae = assert_array_equal
at = assert_
d = np.arange(s, dtype=dt)
np.random.shuffle(d)
d1 = np.tile(np.arange(s, dtype=dt), (4, 1))
            # shuffle each row in place (a bare map() is never consumed on
            # Python 3, so it would silently do nothing)
            for row in d1:
                np.random.shuffle(row)
d0 = np.transpose(d1)
for i in range(d.size):
p = np.partition(d, i, kind=k)
assert_equal(p[i], i)
# all before are smaller
assert_array_less(p[:i], p[i])
# all after are larger
assert_array_less(p[i], p[i + 1:])
aae(p, d[np.argpartition(d, i, kind=k)])
p = np.partition(d1, i, axis=1, kind=k)
aae(p[:, i], np.array([i] * d1.shape[0], dtype=dt))
# array_less does not seem to work right
at((p[:, :i].T <= p[:, i]).all(),
msg="%d: %r <= %r" % (i, p[:, i], p[:, :i].T))
at((p[:, i + 1:].T > p[:, i]).all(),
msg="%d: %r < %r" % (i, p[:, i], p[:, i + 1:].T))
aae(p, d1[np.arange(d1.shape[0])[:, None],
np.argpartition(d1, i, axis=1, kind=k)])
p = np.partition(d0, i, axis=0, kind=k)
aae(p[i, :], np.array([i] * d1.shape[0], dtype=dt))
# array_less does not seem to work right
at((p[:i, :] <= p[i, :]).all(),
msg="%d: %r <= %r" % (i, p[i, :], p[:i, :]))
at((p[i + 1:, :] > p[i, :]).all(),
msg="%d: %r < %r" % (i, p[i, :], p[:, i + 1:]))
aae(p, d0[np.argpartition(d0, i, axis=0, kind=k),
np.arange(d0.shape[1])[None, :]])
# check inplace
dc = d.copy()
dc.partition(i, kind=k)
assert_equal(dc, np.partition(d, i, kind=k))
dc = d0.copy()
dc.partition(i, axis=0, kind=k)
assert_equal(dc, np.partition(d0, i, axis=0, kind=k))
dc = d1.copy()
dc.partition(i, axis=1, kind=k)
assert_equal(dc, np.partition(d1, i, axis=1, kind=k))
def assert_partitioned(self, d, kth):
prev = 0
for k in np.sort(kth):
assert_array_less(d[prev:k], d[k], err_msg='kth %d' % k)
assert_((d[k:] >= d[k]).all(),
msg="kth %d, %r not greater equal %d" % (k, d[k:], d[k]))
prev = k + 1
def test_partition_iterative(self):
d = np.arange(17)
kth = (0, 1, 2, 429, 231)
assert_raises(ValueError, d.partition, kth)
assert_raises(ValueError, d.argpartition, kth)
d = np.arange(10).reshape((2, 5))
assert_raises(ValueError, d.partition, kth, axis=0)
assert_raises(ValueError, d.partition, kth, axis=1)
assert_raises(ValueError, np.partition, d, kth, axis=1)
assert_raises(ValueError, np.partition, d, kth, axis=None)
d = np.array([3, 4, 2, 1])
p = np.partition(d, (0, 3))
self.assert_partitioned(p, (0, 3))
self.assert_partitioned(d[np.argpartition(d, (0, 3))], (0, 3))
assert_array_equal(p, np.partition(d, (-3, -1)))
assert_array_equal(p, d[np.argpartition(d, (-3, -1))])
d = np.arange(17)
np.random.shuffle(d)
d.partition(range(d.size))
assert_array_equal(np.arange(17), d)
np.random.shuffle(d)
assert_array_equal(np.arange(17), d[d.argpartition(range(d.size))])
# test unsorted kth
d = np.arange(17)
np.random.shuffle(d)
keys = np.array([1, 3, 8, -2])
np.random.shuffle(d)
p = np.partition(d, keys)
self.assert_partitioned(p, keys)
p = d[np.argpartition(d, keys)]
self.assert_partitioned(p, keys)
np.random.shuffle(keys)
assert_array_equal(np.partition(d, keys), p)
assert_array_equal(d[np.argpartition(d, keys)], p)
# equal kth
d = np.arange(20)[::-1]
self.assert_partitioned(np.partition(d, [5]*4), [5])
self.assert_partitioned(np.partition(d, [5]*4 + [6, 13]),
[5]*4 + [6, 13])
self.assert_partitioned(d[np.argpartition(d, [5]*4)], [5])
self.assert_partitioned(d[np.argpartition(d, [5]*4 + [6, 13])],
[5]*4 + [6, 13])
d = np.arange(12)
np.random.shuffle(d)
d1 = np.tile(np.arange(12), (4, 1))
        # shuffle each row in place (a bare map() is never consumed on
        # Python 3, so it would silently do nothing)
        for row in d1:
            np.random.shuffle(row)
d0 = np.transpose(d1)
kth = (1, 6, 7, -1)
p = np.partition(d1, kth, axis=1)
pa = d1[np.arange(d1.shape[0])[:, None],
d1.argpartition(kth, axis=1)]
assert_array_equal(p, pa)
for i in range(d1.shape[0]):
self.assert_partitioned(p[i,:], kth)
p = np.partition(d0, kth, axis=0)
pa = d0[np.argpartition(d0, kth, axis=0),
np.arange(d0.shape[1])[None,:]]
assert_array_equal(p, pa)
for i in range(d0.shape[1]):
self.assert_partitioned(p[:, i], kth)
def test_partition_cdtype(self):
d = np.array([('Galahad', 1.7, 38), ('Arthur', 1.8, 41),
('Lancelot', 1.9, 38)],
dtype=[('name', '|S10'), ('height', '<f8'), ('age', '<i4')])
tgt = np.sort(d, order=['age', 'height'])
assert_array_equal(np.partition(d, range(d.size),
order=['age', 'height']),
tgt)
assert_array_equal(d[np.argpartition(d, range(d.size),
order=['age', 'height'])],
tgt)
for k in range(d.size):
assert_equal(np.partition(d, k, order=['age', 'height'])[k],
tgt[k])
assert_equal(d[np.argpartition(d, k, order=['age', 'height'])][k],
tgt[k])
d = np.array(['Galahad', 'Arthur', 'zebra', 'Lancelot'])
tgt = np.sort(d)
assert_array_equal(np.partition(d, range(d.size)), tgt)
for k in range(d.size):
assert_equal(np.partition(d, k)[k], tgt[k])
assert_equal(d[np.argpartition(d, k)][k], tgt[k])
def test_partition_unicode_kind(self):
d = np.arange(10)
k = b'\xc3\xa4'.decode("UTF8")
assert_raises(ValueError, d.partition, 2, kind=k)
assert_raises(ValueError, d.argpartition, 2, kind=k)
def test_partition_fuzz(self):
# a few rounds of random data testing
for j in range(10, 30):
for i in range(1, j - 2):
d = np.arange(j)
np.random.shuffle(d)
d = d % np.random.randint(2, 30)
idx = np.random.randint(d.size)
kth = [0, idx, i, i + 1]
tgt = np.sort(d)[kth]
assert_array_equal(np.partition(d, kth)[kth], tgt,
err_msg="data: %r\n kth: %r" % (d, kth))
def test_argpartition_gh5524(self):
# A test for functionality of argpartition on lists.
        d = [6, 7, 3, 2, 9, 0]
        p = np.argpartition(d, 1)
        self.assert_partitioned(np.array(d)[p], [1])
def test_flatten(self):
x0 = np.array([[1, 2, 3], [4, 5, 6]], np.int32)
x1 = np.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]]], np.int32)
y0 = np.array([1, 2, 3, 4, 5, 6], np.int32)
y0f = np.array([1, 4, 2, 5, 3, 6], np.int32)
y1 = np.array([1, 2, 3, 4, 5, 6, 7, 8], np.int32)
y1f = np.array([1, 5, 3, 7, 2, 6, 4, 8], np.int32)
assert_equal(x0.flatten(), y0)
assert_equal(x0.flatten('F'), y0f)
assert_equal(x0.flatten('F'), x0.T.flatten())
assert_equal(x1.flatten(), y1)
assert_equal(x1.flatten('F'), y1f)
assert_equal(x1.flatten('F'), x1.T.flatten())
@pytest.mark.parametrize('func', (np.dot, np.matmul))
def test_arr_mult(self, func):
a = np.array([[1, 0], [0, 1]])
b = np.array([[0, 1], [1, 0]])
c = np.array([[9, 1], [1, -9]])
d = np.arange(24).reshape(4, 6)
ddt = np.array(
[[ 55, 145, 235, 325],
[ 145, 451, 757, 1063],
[ 235, 757, 1279, 1801],
[ 325, 1063, 1801, 2539]]
)
dtd = np.array(
[[504, 540, 576, 612, 648, 684],
[540, 580, 620, 660, 700, 740],
[576, 620, 664, 708, 752, 796],
[612, 660, 708, 756, 804, 852],
[648, 700, 752, 804, 856, 908],
[684, 740, 796, 852, 908, 964]]
)
# gemm vs syrk optimizations
for et in [np.float32, np.float64, np.complex64, np.complex128]:
eaf = a.astype(et)
assert_equal(func(eaf, eaf), eaf)
assert_equal(func(eaf.T, eaf), eaf)
assert_equal(func(eaf, eaf.T), eaf)
assert_equal(func(eaf.T, eaf.T), eaf)
assert_equal(func(eaf.T.copy(), eaf), eaf)
assert_equal(func(eaf, eaf.T.copy()), eaf)
assert_equal(func(eaf.T.copy(), eaf.T.copy()), eaf)
# syrk validations
for et in [np.float32, np.float64, np.complex64, np.complex128]:
eaf = a.astype(et)
ebf = b.astype(et)
assert_equal(func(ebf, ebf), eaf)
assert_equal(func(ebf.T, ebf), eaf)
assert_equal(func(ebf, ebf.T), eaf)
assert_equal(func(ebf.T, ebf.T), eaf)
# syrk - different shape, stride, and view validations
for et in [np.float32, np.float64, np.complex64, np.complex128]:
edf = d.astype(et)
assert_equal(
func(edf[::-1, :], edf.T),
func(edf[::-1, :].copy(), edf.T.copy())
)
assert_equal(
func(edf[:, ::-1], edf.T),
func(edf[:, ::-1].copy(), edf.T.copy())
)
assert_equal(
func(edf, edf[::-1, :].T),
func(edf, edf[::-1, :].T.copy())
)
assert_equal(
func(edf, edf[:, ::-1].T),
func(edf, edf[:, ::-1].T.copy())
)
assert_equal(
func(edf[:edf.shape[0] // 2, :], edf[::2, :].T),
func(edf[:edf.shape[0] // 2, :].copy(), edf[::2, :].T.copy())
)
assert_equal(
func(edf[::2, :], edf[:edf.shape[0] // 2, :].T),
func(edf[::2, :].copy(), edf[:edf.shape[0] // 2, :].T.copy())
)
# syrk - different shape
for et in [np.float32, np.float64, np.complex64, np.complex128]:
edf = d.astype(et)
eddtf = ddt.astype(et)
edtdf = dtd.astype(et)
assert_equal(func(edf, edf.T), eddtf)
assert_equal(func(edf.T, edf), edtdf)
@pytest.mark.parametrize('func', (np.dot, np.matmul))
@pytest.mark.parametrize('dtype', 'ifdFD')
def test_no_dgemv(self, func, dtype):
# check vector arg for contiguous before gemv
# gh-12156
a = np.arange(8.0, dtype=dtype).reshape(2, 4)
b = np.broadcast_to(1., (4, 1))
ret1 = func(a, b)
ret2 = func(a, b.copy())
assert_equal(ret1, ret2)
ret1 = func(b.T, a.T)
ret2 = func(b.T.copy(), a.T)
assert_equal(ret1, ret2)
# check for unaligned data
dt = np.dtype(dtype)
a = np.zeros(8 * dt.itemsize // 2 + 1, dtype='int16')[1:].view(dtype)
a = a.reshape(2, 4)
b = a[0]
# make sure it is not aligned
assert_(a.__array_interface__['data'][0] % dt.itemsize != 0)
ret1 = func(a, b)
ret2 = func(a.copy(), b.copy())
assert_equal(ret1, ret2)
ret1 = func(b.T, a.T)
ret2 = func(b.T.copy(), a.T.copy())
assert_equal(ret1, ret2)
def test_dot(self):
a = np.array([[1, 0], [0, 1]])
b = np.array([[0, 1], [1, 0]])
c = np.array([[9, 1], [1, -9]])
# function versus methods
assert_equal(np.dot(a, b), a.dot(b))
assert_equal(np.dot(np.dot(a, b), c), a.dot(b).dot(c))
# test passing in an output array
c = np.zeros_like(a)
a.dot(b, c)
assert_equal(c, np.dot(a, b))
# test keyword args
c = np.zeros_like(a)
a.dot(b=b, out=c)
assert_equal(c, np.dot(a, b))
def test_dot_type_mismatch(self):
c = 1.
A = np.array((1,1), dtype='i,i')
assert_raises(TypeError, np.dot, c, A)
assert_raises(TypeError, np.dot, A, c)
def test_dot_out_mem_overlap(self):
np.random.seed(1)
# Test BLAS and non-BLAS code paths, including all dtypes
# that dot() supports
dtypes = [np.dtype(code) for code in np.typecodes['All']
if code not in 'USVM']
for dtype in dtypes:
a = np.random.rand(3, 3).astype(dtype)
# Valid dot() output arrays must be aligned
b = _aligned_zeros((3, 3), dtype=dtype)
b[...] = np.random.rand(3, 3)
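            # compute the reference product before b is overwritten via out=b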
y = np.dot(a, b)
x = np.dot(a, b, out=b)
assert_equal(x, y, err_msg=repr(dtype))
# Check invalid output array
assert_raises(ValueError, np.dot, a, b, out=b[::2])
assert_raises(ValueError, np.dot, a, b, out=b.T)
def test_dot_matmul_out(self):
# gh-9641
class Sub(np.ndarray):
pass
a = np.ones((2, 2)).view(Sub)
b = np.ones((2, 2)).view(Sub)
out = np.ones((2, 2))
# make sure out can be any ndarray (not only subclass of inputs)
np.dot(a, b, out=out)
np.matmul(a, b, out=out)
def test_dot_matmul_inner_array_casting_fails(self):
class A(object):
def __array__(self, *args, **kwargs):
raise NotImplementedError
# Don't override the error from calling __array__()
assert_raises(NotImplementedError, np.dot, A(), A())
assert_raises(NotImplementedError, np.matmul, A(), A())
assert_raises(NotImplementedError, np.inner, A(), A())
def test_matmul_out(self):
# overlapping memory
a = np.arange(18).reshape(2, 3, 3)
b = np.matmul(a, a)
c = np.matmul(a, a, out=a)
assert_(c is a)
assert_equal(c, b)
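        # out may also be a differently-strided view of the same buffer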
a = np.arange(18).reshape(2, 3, 3)
c = np.matmul(a, a, out=a[::-1, ...])
assert_(c.base is a.base)
assert_equal(c, b)
def test_diagonal(self):
a = np.arange(12).reshape((3, 4))
assert_equal(a.diagonal(), [0, 5, 10])
assert_equal(a.diagonal(0), [0, 5, 10])
assert_equal(a.diagonal(1), [1, 6, 11])
assert_equal(a.diagonal(-1), [4, 9])
assert_raises(np.AxisError, a.diagonal, axis1=0, axis2=5)
assert_raises(np.AxisError, a.diagonal, axis1=5, axis2=0)
assert_raises(np.AxisError, a.diagonal, axis1=5, axis2=5)
assert_raises(ValueError, a.diagonal, axis1=1, axis2=1)
b = np.arange(8).reshape((2, 2, 2))
assert_equal(b.diagonal(), [[0, 6], [1, 7]])
assert_equal(b.diagonal(0), [[0, 6], [1, 7]])
assert_equal(b.diagonal(1), [[2], [3]])
assert_equal(b.diagonal(-1), [[4], [5]])
assert_raises(ValueError, b.diagonal, axis1=0, axis2=0)
assert_equal(b.diagonal(0, 1, 2), [[0, 3], [4, 7]])
assert_equal(b.diagonal(0, 0, 1), [[0, 6], [1, 7]])
assert_equal(b.diagonal(offset=1, axis1=0, axis2=2), [[1], [3]])
# Order of axis argument doesn't matter:
assert_equal(b.diagonal(0, 2, 1), [[0, 3], [4, 7]])
def test_diagonal_view_notwriteable(self):
# this test is only for 1.9, the diagonal view will be
# writeable in 1.10.
a = np.eye(3).diagonal()
assert_(not a.flags.writeable)
assert_(not a.flags.owndata)
a = np.diagonal(np.eye(3))
assert_(not a.flags.writeable)
assert_(not a.flags.owndata)
a = np.diag(np.eye(3))
assert_(not a.flags.writeable)
assert_(not a.flags.owndata)
def test_diagonal_memleak(self):
# Regression test for a bug that crept in at one point
a = np.zeros((100, 100))
if HAS_REFCOUNT:
assert_(sys.getrefcount(a) < 50)
for i in range(100):
a.diagonal()
if HAS_REFCOUNT:
assert_(sys.getrefcount(a) < 50)
def test_size_zero_memleak(self):
# Regression test for issue 9615
# Exercises a special-case code path for dot products of length
        # zero in cblasfuncs (making it specific to floating dtypes).
a = np.array([], dtype=np.float64)
x = np.array(2.0)
for _ in range(100):
np.dot(a, a, out=x)
if HAS_REFCOUNT:
assert_(sys.getrefcount(x) < 50)
def test_trace(self):
a = np.arange(12).reshape((3, 4))
assert_equal(a.trace(), 15)
assert_equal(a.trace(0), 15)
assert_equal(a.trace(1), 18)
assert_equal(a.trace(-1), 13)
b = np.arange(8).reshape((2, 2, 2))
assert_equal(b.trace(), [6, 8])
assert_equal(b.trace(0), [6, 8])
assert_equal(b.trace(1), [2, 3])
assert_equal(b.trace(-1), [4, 5])
assert_equal(b.trace(0, 0, 1), [6, 8])
assert_equal(b.trace(0, 0, 2), [5, 9])
assert_equal(b.trace(0, 1, 2), [3, 11])
assert_equal(b.trace(offset=1, axis1=0, axis2=2), [1, 3])
def test_trace_subclass(self):
# The class would need to overwrite trace to ensure single-element
# output also has the right subclass.
class MyArray(np.ndarray):
pass
b = np.arange(8).reshape((2, 2, 2)).view(MyArray)
t = b.trace()
assert_(isinstance(t, MyArray))
def test_put(self):
icodes = np.typecodes['AllInteger']
fcodes = np.typecodes['AllFloat']
for dt in icodes + fcodes + 'O':
tgt = np.array([0, 1, 0, 3, 0, 5], dtype=dt)
# test 1-d
a = np.zeros(6, dtype=dt)
a.put([1, 3, 5], [1, 3, 5])
assert_equal(a, tgt)
# test 2-d
a = np.zeros((2, 3), dtype=dt)
a.put([1, 3, 5], [1, 3, 5])
assert_equal(a, tgt.reshape(2, 3))
for dt in '?':
tgt = np.array([False, True, False, True, False, True], dtype=dt)
# test 1-d
a = np.zeros(6, dtype=dt)
a.put([1, 3, 5], [True]*3)
assert_equal(a, tgt)
# test 2-d
a = np.zeros((2, 3), dtype=dt)
a.put([1, 3, 5], [True]*3)
assert_equal(a, tgt.reshape(2, 3))
# check must be writeable
a = np.zeros(6)
a.flags.writeable = False
assert_raises(ValueError, a.put, [1, 3, 5], [1, 3, 5])
# when calling np.put, make sure a
# TypeError is raised if the object
# isn't an ndarray
bad_array = [1, 2, 3]
assert_raises(TypeError, np.put, bad_array, [0, 2], 5)
def test_ravel(self):
a = np.array([[0, 1], [2, 3]])
assert_equal(a.ravel(), [0, 1, 2, 3])
assert_(not a.ravel().flags.owndata)
assert_equal(a.ravel('F'), [0, 2, 1, 3])
assert_equal(a.ravel(order='C'), [0, 1, 2, 3])
assert_equal(a.ravel(order='F'), [0, 2, 1, 3])
assert_equal(a.ravel(order='A'), [0, 1, 2, 3])
assert_(not a.ravel(order='A').flags.owndata)
assert_equal(a.ravel(order='K'), [0, 1, 2, 3])
assert_(not a.ravel(order='K').flags.owndata)
assert_equal(a.ravel(), a.reshape(-1))
a = np.array([[0, 1], [2, 3]], order='F')
assert_equal(a.ravel(), [0, 1, 2, 3])
assert_equal(a.ravel(order='A'), [0, 2, 1, 3])
assert_equal(a.ravel(order='K'), [0, 2, 1, 3])
assert_(not a.ravel(order='A').flags.owndata)
assert_(not a.ravel(order='K').flags.owndata)
assert_equal(a.ravel(), a.reshape(-1))
assert_equal(a.ravel(order='A'), a.reshape(-1, order='A'))
a = np.array([[0, 1], [2, 3]])[::-1, :]
assert_equal(a.ravel(), [2, 3, 0, 1])
assert_equal(a.ravel(order='C'), [2, 3, 0, 1])
assert_equal(a.ravel(order='F'), [2, 0, 3, 1])
assert_equal(a.ravel(order='A'), [2, 3, 0, 1])
# 'K' doesn't reverse the axes of negative strides
assert_equal(a.ravel(order='K'), [2, 3, 0, 1])
assert_(a.ravel(order='K').flags.owndata)
# Test simple 1-d copy behaviour:
a = np.arange(10)[::2]
assert_(a.ravel('K').flags.owndata)
assert_(a.ravel('C').flags.owndata)
assert_(a.ravel('F').flags.owndata)
# Not contiguous and 1-sized axis with non matching stride
a = np.arange(2**3 * 2)[::2]
a = a.reshape(2, 1, 2, 2).swapaxes(-1, -2)
strides = list(a.strides)
strides[1] = 123
a.strides = strides
assert_(a.ravel(order='K').flags.owndata)
assert_equal(a.ravel('K'), np.arange(0, 15, 2))
# contiguous and 1-sized axis with non matching stride works:
a = np.arange(2**3)
a = a.reshape(2, 1, 2, 2).swapaxes(-1, -2)
strides = list(a.strides)
strides[1] = 123
a.strides = strides
assert_(np.may_share_memory(a.ravel(order='K'), a))
assert_equal(a.ravel(order='K'), np.arange(2**3))
# Test negative strides (not very interesting since non-contiguous):
a = np.arange(4)[::-1].reshape(2, 2)
assert_(a.ravel(order='C').flags.owndata)
assert_(a.ravel(order='K').flags.owndata)
assert_equal(a.ravel('C'), [3, 2, 1, 0])
assert_equal(a.ravel('K'), [3, 2, 1, 0])
# 1-element tidy strides test (NPY_RELAXED_STRIDES_CHECKING):
a = np.array([[1]])
a.strides = (123, 432)
# If the stride is not 8, NPY_RELAXED_STRIDES_CHECKING is messing
# them up on purpose:
if np.ones(1).strides == (8,):
assert_(np.may_share_memory(a.ravel('K'), a))
assert_equal(a.ravel('K').strides, (a.dtype.itemsize,))
for order in ('C', 'F', 'A', 'K'):
# 0-d corner case:
a = np.array(0)
assert_equal(a.ravel(order), [0])
assert_(np.may_share_memory(a.ravel(order), a))
# Test that certain non-inplace ravels work right (mostly) for 'K':
b = np.arange(2**4 * 2)[::2].reshape(2, 2, 2, 2)
a = b[..., ::2]
assert_equal(a.ravel('K'), [0, 4, 8, 12, 16, 20, 24, 28])
assert_equal(a.ravel('C'), [0, 4, 8, 12, 16, 20, 24, 28])
assert_equal(a.ravel('A'), [0, 4, 8, 12, 16, 20, 24, 28])
assert_equal(a.ravel('F'), [0, 16, 8, 24, 4, 20, 12, 28])
a = b[::2, ...]
assert_equal(a.ravel('K'), [0, 2, 4, 6, 8, 10, 12, 14])
assert_equal(a.ravel('C'), [0, 2, 4, 6, 8, 10, 12, 14])
assert_equal(a.ravel('A'), [0, 2, 4, 6, 8, 10, 12, 14])
assert_equal(a.ravel('F'), [0, 8, 4, 12, 2, 10, 6, 14])
def test_ravel_subclass(self):
class ArraySubclass(np.ndarray):
pass
a = np.arange(10).view(ArraySubclass)
assert_(isinstance(a.ravel('C'), ArraySubclass))
assert_(isinstance(a.ravel('F'), ArraySubclass))
assert_(isinstance(a.ravel('A'), ArraySubclass))
assert_(isinstance(a.ravel('K'), ArraySubclass))
a = np.arange(10)[::2].view(ArraySubclass)
assert_(isinstance(a.ravel('C'), ArraySubclass))
assert_(isinstance(a.ravel('F'), ArraySubclass))
assert_(isinstance(a.ravel('A'), ArraySubclass))
assert_(isinstance(a.ravel('K'), ArraySubclass))
def test_swapaxes(self):
a = np.arange(1*2*3*4).reshape(1, 2, 3, 4).copy()
idx = np.indices(a.shape)
assert_(a.flags['OWNDATA'])
b = a.copy()
# check exceptions
assert_raises(np.AxisError, a.swapaxes, -5, 0)
assert_raises(np.AxisError, a.swapaxes, 4, 0)
assert_raises(np.AxisError, a.swapaxes, 0, -5)
assert_raises(np.AxisError, a.swapaxes, 0, 4)
for i in range(-4, 4):
for j in range(-4, 4):
for k, src in enumerate((a, b)):
c = src.swapaxes(i, j)
# check shape
shape = list(src.shape)
shape[i] = src.shape[j]
shape[j] = src.shape[i]
assert_equal(c.shape, shape, str((i, j, k)))
# check array contents
i0, i1, i2, i3 = [dim-1 for dim in c.shape]
j0, j1, j2, j3 = [dim-1 for dim in src.shape]
assert_equal(src[idx[j0], idx[j1], idx[j2], idx[j3]],
c[idx[i0], idx[i1], idx[i2], idx[i3]],
str((i, j, k)))
# check a view is always returned, gh-5260
assert_(not c.flags['OWNDATA'], str((i, j, k)))
# check on non-contiguous input array
if k == 1:
b = c
def test_conjugate(self):
a = np.array([1-1j, 1+1j, 23+23.0j])
ac = a.conj()
assert_equal(a.real, ac.real)
assert_equal(a.imag, -ac.imag)
assert_equal(ac, a.conjugate())
assert_equal(ac, np.conjugate(a))
a = np.array([1-1j, 1+1j, 23+23.0j], 'F')
ac = a.conj()
assert_equal(a.real, ac.real)
assert_equal(a.imag, -ac.imag)
assert_equal(ac, a.conjugate())
assert_equal(ac, np.conjugate(a))
a = np.array([1, 2, 3])
ac = a.conj()
assert_equal(a, ac)
assert_equal(ac, a.conjugate())
assert_equal(ac, np.conjugate(a))
a = np.array([1.0, 2.0, 3.0])
ac = a.conj()
assert_equal(a, ac)
assert_equal(ac, a.conjugate())
assert_equal(ac, np.conjugate(a))
a = np.array([1-1j, 1+1j, 1, 2.0], object)
ac = a.conj()
assert_equal(ac, [k.conjugate() for k in a])
assert_equal(ac, a.conjugate())
assert_equal(ac, np.conjugate(a))
a = np.array([1-1j, 1, 2.0, 'f'], object)
assert_raises(AttributeError, lambda: a.conj())
assert_raises(AttributeError, lambda: a.conjugate())
def test__complex__(self):
dtypes = ['i1', 'i2', 'i4', 'i8',
'u1', 'u2', 'u4', 'u8',
'f', 'd', 'g', 'F', 'D', 'G',
'?', 'O']
for dt in dtypes:
a = np.array(7, dtype=dt)
b = np.array([7], dtype=dt)
c = np.array([[[[[7]]]]], dtype=dt)
msg = 'dtype: {0}'.format(dt)
ap = complex(a)
assert_equal(ap, a, msg)
bp = complex(b)
assert_equal(bp, b, msg)
cp = complex(c)
assert_equal(cp, c, msg)
def test__complex__should_not_work(self):
dtypes = ['i1', 'i2', 'i4', 'i8',
'u1', 'u2', 'u4', 'u8',
'f', 'd', 'g', 'F', 'D', 'G',
'?', 'O']
for dt in dtypes:
a = np.array([1, 2, 3], dtype=dt)
assert_raises(TypeError, complex, a)
dt = np.dtype([('a', 'f8'), ('b', 'i1')])
b = np.array((1.0, 3), dtype=dt)
assert_raises(TypeError, complex, b)
c = np.array([(1.0, 3), (2e-3, 7)], dtype=dt)
assert_raises(TypeError, complex, c)
d = np.array('1+1j')
assert_raises(TypeError, complex, d)
e = np.array(['1+1j'], 'U')
assert_raises(TypeError, complex, e)
class TestSequenceMethods(object):
def test_array_contains(self):
assert_(4.0 in np.arange(16.).reshape(4,4))
assert_(20.0 not in np.arange(16.).reshape(4,4))
class TestBinop(object):
def test_inplace(self):
# test refcount 1 inplace conversion
assert_array_almost_equal(np.array([0.5]) * np.array([1.0, 2.0]),
[0.5, 1.0])
d = np.array([0.5, 0.5])[::2]
assert_array_almost_equal(d * (d * np.array([1.0, 2.0])),
[0.25, 0.5])
a = np.array([0.5])
b = np.array([0.5])
c = a + b
c = a - b
c = a * b
c = a / b
assert_equal(a, b)
assert_almost_equal(c, 1.)
c = a + b * 2. / b * a - a / b
assert_equal(a, b)
assert_equal(c, 0.5)
# true divide
a = np.array([5])
b = np.array([3])
c = (a * a) / b
assert_almost_equal(c, 25 / 3)
assert_equal(a, 5)
assert_equal(b, 3)
# ndarray.__rop__ always calls ufunc
# ndarray.__iop__ always calls ufunc
# ndarray.__op__, __rop__:
# - defer if other has __array_ufunc__ and it is None
# or other is not a subclass and has higher array priority
# - else, call ufunc
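    # For example, if `other` is not an ndarray subclass and sets
    # __array_ufunc__ = None, ndarray.__add__(other) returns NotImplemented and
    # Python falls back to other.__radd__.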
def test_ufunc_binop_interaction(self):
# Python method name (without underscores)
# -> (numpy ufunc, has_in_place_version, preferred_dtype)
ops = {
'add': (np.add, True, float),
'sub': (np.subtract, True, float),
'mul': (np.multiply, True, float),
'truediv': (np.true_divide, True, float),
'floordiv': (np.floor_divide, True, float),
'mod': (np.remainder, True, float),
'divmod': (np.divmod, False, float),
'pow': (np.power, True, int),
'lshift': (np.left_shift, True, int),
'rshift': (np.right_shift, True, int),
'and': (np.bitwise_and, True, int),
'xor': (np.bitwise_xor, True, int),
'or': (np.bitwise_or, True, int),
# 'ge': (np.less_equal, False),
# 'gt': (np.less, False),
# 'le': (np.greater_equal, False),
# 'lt': (np.greater, False),
# 'eq': (np.equal, False),
# 'ne': (np.not_equal, False),
}
if sys.version_info >= (3, 5):
ops['matmul'] = (np.matmul, False, float)
class Coerced(Exception):
pass
def array_impl(self):
raise Coerced
def op_impl(self, other):
return "forward"
def rop_impl(self, other):
return "reverse"
def iop_impl(self, other):
return "in-place"
def array_ufunc_impl(self, ufunc, method, *args, **kwargs):
return ("__array_ufunc__", ufunc, method, args, kwargs)
# Create an object with the given base, in the given module, with a
# bunch of placeholder __op__ methods, and optionally a
# __array_ufunc__ and __array_priority__.
def make_obj(base, array_priority=False, array_ufunc=False,
alleged_module="__main__"):
class_namespace = {"__array__": array_impl}
if array_priority is not False:
class_namespace["__array_priority__"] = array_priority
for op in ops:
class_namespace["__{0}__".format(op)] = op_impl
class_namespace["__r{0}__".format(op)] = rop_impl
class_namespace["__i{0}__".format(op)] = iop_impl
if array_ufunc is not False:
class_namespace["__array_ufunc__"] = array_ufunc
eval_namespace = {"base": base,
"class_namespace": class_namespace,
"__name__": alleged_module,
}
MyType = eval("type('MyType', (base,), class_namespace)",
eval_namespace)
if issubclass(MyType, np.ndarray):
# Use this range to avoid special case weirdnesses around
# divide-by-0, pow(x, 2), overflow due to pow(big, big), etc.
return np.arange(3, 7).reshape(2, 2).view(MyType)
else:
return MyType()
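        # check() exercises arr.__op__/__rop__/__iop__, the plain operator and
        # the ufunc against `obj`, and asserts which dispatch path is taken
        # (binop override, __array_ufunc__ override, or ordinary coercion).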
def check(obj, binop_override_expected, ufunc_override_expected,
inplace_override_expected, check_scalar=True):
for op, (ufunc, has_inplace, dtype) in ops.items():
err_msg = ('op: %s, ufunc: %s, has_inplace: %s, dtype: %s'
% (op, ufunc, has_inplace, dtype))
check_objs = [np.arange(3, 7, dtype=dtype).reshape(2, 2)]
if check_scalar:
check_objs.append(check_objs[0][0])
for arr in check_objs:
arr_method = getattr(arr, "__{0}__".format(op))
def first_out_arg(result):
if op == "divmod":
assert_(isinstance(result, tuple))
return result[0]
else:
return result
# arr __op__ obj
if binop_override_expected:
assert_equal(arr_method(obj), NotImplemented, err_msg)
elif ufunc_override_expected:
assert_equal(arr_method(obj)[0], "__array_ufunc__",
err_msg)
else:
if (isinstance(obj, np.ndarray) and
(type(obj).__array_ufunc__ is
np.ndarray.__array_ufunc__)):
# __array__ gets ignored
res = first_out_arg(arr_method(obj))
assert_(res.__class__ is obj.__class__, err_msg)
else:
assert_raises((TypeError, Coerced),
arr_method, obj, err_msg=err_msg)
# obj __op__ arr
arr_rmethod = getattr(arr, "__r{0}__".format(op))
if ufunc_override_expected:
res = arr_rmethod(obj)
assert_equal(res[0], "__array_ufunc__",
err_msg=err_msg)
assert_equal(res[1], ufunc, err_msg=err_msg)
else:
if (isinstance(obj, np.ndarray) and
(type(obj).__array_ufunc__ is
np.ndarray.__array_ufunc__)):
# __array__ gets ignored
res = first_out_arg(arr_rmethod(obj))
assert_(res.__class__ is obj.__class__, err_msg)
else:
# __array_ufunc__ = "asdf" creates a TypeError
assert_raises((TypeError, Coerced),
arr_rmethod, obj, err_msg=err_msg)
# arr __iop__ obj
# array scalars don't have in-place operators
if has_inplace and isinstance(arr, np.ndarray):
arr_imethod = getattr(arr, "__i{0}__".format(op))
if inplace_override_expected:
assert_equal(arr_method(obj), NotImplemented,
err_msg=err_msg)
elif ufunc_override_expected:
res = arr_imethod(obj)
assert_equal(res[0], "__array_ufunc__", err_msg)
assert_equal(res[1], ufunc, err_msg)
assert_(type(res[-1]["out"]) is tuple, err_msg)
assert_(res[-1]["out"][0] is arr, err_msg)
else:
if (isinstance(obj, np.ndarray) and
(type(obj).__array_ufunc__ is
np.ndarray.__array_ufunc__)):
# __array__ gets ignored
assert_(arr_imethod(obj) is arr, err_msg)
else:
assert_raises((TypeError, Coerced),
arr_imethod, obj,
err_msg=err_msg)
op_fn = getattr(operator, op, None)
if op_fn is None:
op_fn = getattr(operator, op + "_", None)
if op_fn is None:
op_fn = getattr(builtins, op)
assert_equal(op_fn(obj, arr), "forward", err_msg)
if not isinstance(obj, np.ndarray):
if binop_override_expected:
assert_equal(op_fn(arr, obj), "reverse", err_msg)
elif ufunc_override_expected:
assert_equal(op_fn(arr, obj)[0], "__array_ufunc__",
err_msg)
if ufunc_override_expected:
assert_equal(ufunc(obj, arr)[0], "__array_ufunc__",
err_msg)
# No array priority, no array_ufunc -> nothing called
check(make_obj(object), False, False, False)
# Negative array priority, no array_ufunc -> nothing called
# (has to be very negative, because scalar priority is -1000000.0)
check(make_obj(object, array_priority=-2**30), False, False, False)
# Positive array priority, no array_ufunc -> binops and iops only
check(make_obj(object, array_priority=1), True, False, True)
# ndarray ignores array_priority for ndarray subclasses
check(make_obj(np.ndarray, array_priority=1), False, False, False,
check_scalar=False)
# Positive array_priority and array_ufunc -> array_ufunc only
check(make_obj(object, array_priority=1,
array_ufunc=array_ufunc_impl), False, True, False)
check(make_obj(np.ndarray, array_priority=1,
array_ufunc=array_ufunc_impl), False, True, False)
# array_ufunc set to None -> defer binops only
check(make_obj(object, array_ufunc=None), True, False, False)
check(make_obj(np.ndarray, array_ufunc=None), True, False, False,
check_scalar=False)
def test_ufunc_override_normalize_signature(self):
# gh-5674
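        # the 'sig' alias must be normalized to 'signature' before the kwargs
        # reach __array_ufunc__.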
class SomeClass(object):
def __array_ufunc__(self, ufunc, method, *inputs, **kw):
return kw
a = SomeClass()
kw = np.add(a, [1])
assert_('sig' not in kw and 'signature' not in kw)
kw = np.add(a, [1], sig='ii->i')
assert_('sig' not in kw and 'signature' in kw)
assert_equal(kw['signature'], 'ii->i')
kw = np.add(a, [1], signature='ii->i')
assert_('sig' not in kw and 'signature' in kw)
assert_equal(kw['signature'], 'ii->i')
def test_array_ufunc_index(self):
# Check that index is set appropriately, also if only an output
        # is passed on (the latter is another regression test for github bug 4753)
# This also checks implicitly that 'out' is always a tuple.
class CheckIndex(object):
def __array_ufunc__(self, ufunc, method, *inputs, **kw):
for i, a in enumerate(inputs):
if a is self:
return i
# calls below mean we must be in an output.
for j, a in enumerate(kw['out']):
if a is self:
return (j,)
a = CheckIndex()
dummy = np.arange(2.)
# 1 input, 1 output
assert_equal(np.sin(a), 0)
assert_equal(np.sin(dummy, a), (0,))
assert_equal(np.sin(dummy, out=a), (0,))
assert_equal(np.sin(dummy, out=(a,)), (0,))
assert_equal(np.sin(a, a), 0)
assert_equal(np.sin(a, out=a), 0)
assert_equal(np.sin(a, out=(a,)), 0)
# 1 input, 2 outputs
assert_equal(np.modf(dummy, a), (0,))
assert_equal(np.modf(dummy, None, a), (1,))
assert_equal(np.modf(dummy, dummy, a), (1,))
assert_equal(np.modf(dummy, out=(a, None)), (0,))
assert_equal(np.modf(dummy, out=(a, dummy)), (0,))
assert_equal(np.modf(dummy, out=(None, a)), (1,))
assert_equal(np.modf(dummy, out=(dummy, a)), (1,))
assert_equal(np.modf(a, out=(dummy, a)), 0)
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings('always', '', DeprecationWarning)
assert_equal(np.modf(dummy, out=a), (0,))
assert_(w[0].category is DeprecationWarning)
assert_raises(ValueError, np.modf, dummy, out=(a,))
# 2 inputs, 1 output
assert_equal(np.add(a, dummy), 0)
assert_equal(np.add(dummy, a), 1)
assert_equal(np.add(dummy, dummy, a), (0,))
assert_equal(np.add(dummy, a, a), 1)
assert_equal(np.add(dummy, dummy, out=a), (0,))
assert_equal(np.add(dummy, dummy, out=(a,)), (0,))
assert_equal(np.add(a, dummy, out=a), 0)
def test_out_override(self):
# regression test for github bug 4753
class OutClass(np.ndarray):
def __array_ufunc__(self, ufunc, method, *inputs, **kw):
if 'out' in kw:
tmp_kw = kw.copy()
tmp_kw.pop('out')
func = getattr(ufunc, method)
kw['out'][0][...] = func(*inputs, **tmp_kw)
A = np.array([0]).view(OutClass)
B = np.array([5])
C = np.array([6])
np.multiply(C, B, A)
assert_equal(A[0], 30)
assert_(isinstance(A, OutClass))
A[0] = 0
np.multiply(C, B, out=A)
assert_equal(A[0], 30)
assert_(isinstance(A, OutClass))
def test_pow_override_with_errors(self):
# regression test for gh-9112
class PowerOnly(np.ndarray):
def __array_ufunc__(self, ufunc, method, *inputs, **kw):
if ufunc is not np.power:
raise NotImplementedError
return "POWER!"
# explicit cast to float, to ensure the fast power path is taken.
a = np.array(5., dtype=np.float64).view(PowerOnly)
assert_equal(a ** 2.5, "POWER!")
with assert_raises(NotImplementedError):
a ** 0.5
with assert_raises(NotImplementedError):
a ** 0
with assert_raises(NotImplementedError):
a ** 1
with assert_raises(NotImplementedError):
a ** -1
with assert_raises(NotImplementedError):
a ** 2
def test_pow_array_object_dtype(self):
# test pow on arrays of object dtype
class SomeClass(object):
def __init__(self, num=None):
self.num = num
# want to ensure a fast pow path is not taken
def __mul__(self, other):
raise AssertionError('__mul__ should not be called')
def __div__(self, other):
raise AssertionError('__div__ should not be called')
def __pow__(self, exp):
return SomeClass(num=self.num ** exp)
def __eq__(self, other):
if isinstance(other, SomeClass):
return self.num == other.num
__rpow__ = __pow__
def pow_for(exp, arr):
return np.array([x ** exp for x in arr])
obj_arr = np.array([SomeClass(1), SomeClass(2), SomeClass(3)])
assert_equal(obj_arr ** 0.5, pow_for(0.5, obj_arr))
assert_equal(obj_arr ** 0, pow_for(0, obj_arr))
assert_equal(obj_arr ** 1, pow_for(1, obj_arr))
assert_equal(obj_arr ** -1, pow_for(-1, obj_arr))
assert_equal(obj_arr ** 2, pow_for(2, obj_arr))
def test_pos_array_ufunc_override(self):
class A(np.ndarray):
def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
return getattr(ufunc, method)(*[i.view(np.ndarray) for
i in inputs], **kwargs)
tst = np.array('foo').view(A)
with assert_raises(TypeError):
+tst
class TestTemporaryElide(object):
# elision is only triggered on relatively large arrays
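    # (a temporary operand with refcount 1 may be reused in place instead of
    # allocating a new result array)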
def test_extension_incref_elide(self):
# test extension (e.g. cython) calling PyNumber_* slots without
# increasing the reference counts
#
# def incref_elide(a):
# d = input.copy() # refcount 1
# return d, d + d # PyNumber_Add without increasing refcount
from numpy.core._multiarray_tests import incref_elide
d = np.ones(100000)
orig, res = incref_elide(d)
d + d
        # the returned original must not have been modified by an in-place operation
assert_array_equal(orig, d)
assert_array_equal(res, d + d)
def test_extension_incref_elide_stack(self):
        # scanning whether the refcount == 1 object is on the python stack (to
        # check that we are called directly from python) is flawed, as the
        # object may still be above the stack pointer and we have no access to
        # the top of it
#
# def incref_elide_l(d):
# return l[4] + l[4] # PyNumber_Add without increasing refcount
from numpy.core._multiarray_tests import incref_elide_l
# padding with 1 makes sure the object on the stack is not overwritten
l = [1, 1, 1, 1, np.ones(100000)]
res = incref_elide_l(l)
        # the returned original must not have been modified by an in-place operation
assert_array_equal(l[4], np.ones(100000))
assert_array_equal(res, l[4] + l[4])
def test_temporary_with_cast(self):
# check that we don't elide into a temporary which would need casting
d = np.ones(200000, dtype=np.int64)
assert_equal(((d + d) + 2**222).dtype, np.dtype('O'))
r = ((d + d) / 2)
assert_equal(r.dtype, np.dtype('f8'))
r = np.true_divide((d + d), 2)
assert_equal(r.dtype, np.dtype('f8'))
r = ((d + d) / 2.)
assert_equal(r.dtype, np.dtype('f8'))
r = ((d + d) // 2)
assert_equal(r.dtype, np.dtype(np.int64))
# commutative elision into the astype result
f = np.ones(100000, dtype=np.float32)
assert_equal(((f + f) + f.astype(np.float64)).dtype, np.dtype('f8'))
# no elision into lower type
d = f.astype(np.float64)
assert_equal(((f + f) + d).dtype, d.dtype)
l = np.ones(100000, dtype=np.longdouble)
assert_equal(((d + d) + l).dtype, l.dtype)
# test unary abs with different output dtype
for dt in (np.complex64, np.complex128, np.clongdouble):
c = np.ones(100000, dtype=dt)
r = abs(c * 2.0)
assert_equal(r.dtype, np.dtype('f%d' % (c.itemsize // 2)))
def test_elide_broadcast(self):
# test no elision on broadcast to higher dimension
# only triggers elision code path in debug mode as triggering it in
# normal mode needs 256kb large matching dimension, so a lot of memory
d = np.ones((2000, 1), dtype=int)
b = np.ones((2000), dtype=bool)
r = (1 - d) + b
assert_equal(r, 1)
assert_equal(r.shape, (2000, 2000))
def test_elide_scalar(self):
# check inplace op does not create ndarray from scalars
a = np.bool_()
assert_(type(~(a & a)) is np.bool_)
def test_elide_scalar_readonly(self):
# The imaginary part of a real array is readonly. This needs to go
# through fast_scalar_power which is only called for powers of
# +1, -1, 0, 0.5, and 2, so use 2. Also need valid refcount for
# elision which can be gotten for the imaginary part of a real
# array. Should not error.
a = np.empty(100000, dtype=np.float64)
a.imag ** 2
def test_elide_readonly(self):
# don't try to elide readonly temporaries
r = np.asarray(np.broadcast_to(np.zeros(1), 100000).flat) * 0.0
assert_equal(r, 0)
def test_elide_updateifcopy(self):
a = np.ones(2**20)[::2]
b = a.flat.__array__() + 1
del b
assert_equal(a, 1)
class TestCAPI(object):
def test_IsPythonScalar(self):
from numpy.core._multiarray_tests import IsPythonScalar
assert_(IsPythonScalar(b'foobar'))
assert_(IsPythonScalar(1))
assert_(IsPythonScalar(2**80))
assert_(IsPythonScalar(2.))
assert_(IsPythonScalar("a"))
class TestSubscripting(object):
def test_test_zero_rank(self):
x = np.array([1, 2, 3])
assert_(isinstance(x[0], np.int_))
if sys.version_info[0] < 3:
assert_(isinstance(x[0], int))
assert_(type(x[0, ...]) is np.ndarray)
class TestPickling(object):
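    # Round-trips ndarrays through pickle: current protocols, protocol-5
    # out-of-band buffers, and legacy version 0/1 pickle payloads.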
def test_highest_available_pickle_protocol(self):
try:
import pickle5
except ImportError:
pickle5 = None
if sys.version_info[:2] >= (3, 8) or pickle5 is not None:
assert pickle.HIGHEST_PROTOCOL >= 5
else:
assert pickle.HIGHEST_PROTOCOL < 5
@pytest.mark.skipif(pickle.HIGHEST_PROTOCOL >= 5,
                        reason=('this tests the error messages when trying to '
                                'use protocol 5 although it is not available'))
def test_correct_protocol5_error_message(self):
array = np.arange(10)
if sys.version_info[:2] in ((3, 6), (3, 7)):
# For the specific case of python3.6 and 3.7, raise a clear import
# error about the pickle5 backport when trying to use protocol=5
# without the pickle5 package
with pytest.raises(ImportError):
array.__reduce_ex__(5)
elif sys.version_info[:2] < (3, 6):
            # when calling __reduce_ex__ explicitly with protocol=5 on python
            # versions older than 3.6, raise a ValueError saying that protocol
            # 5 is not available for this python version
with pytest.raises(ValueError):
array.__reduce_ex__(5)
def test_record_array_with_object_dtype(self):
my_object = object()
arr_with_object = np.array(
[(my_object, 1, 2.0)],
dtype=[('a', object), ('b', int), ('c', float)])
arr_without_object = np.array(
[('xxx', 1, 2.0)],
dtype=[('a', str), ('b', int), ('c', float)])
for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
depickled_arr_with_object = pickle.loads(
pickle.dumps(arr_with_object, protocol=proto))
depickled_arr_without_object = pickle.loads(
pickle.dumps(arr_without_object, protocol=proto))
assert_equal(arr_with_object.dtype,
depickled_arr_with_object.dtype)
assert_equal(arr_without_object.dtype,
depickled_arr_without_object.dtype)
@pytest.mark.skipif(pickle.HIGHEST_PROTOCOL < 5,
reason="requires pickle protocol 5")
def test_f_contiguous_array(self):
f_contiguous_array = np.array([[1, 2, 3], [4, 5, 6]], order='F')
buffers = []
# When using pickle protocol 5, Fortran-contiguous arrays can be
# serialized using out-of-band buffers
bytes_string = pickle.dumps(f_contiguous_array, protocol=5,
buffer_callback=buffers.append)
assert len(buffers) > 0
depickled_f_contiguous_array = pickle.loads(bytes_string,
buffers=buffers)
assert_equal(f_contiguous_array, depickled_f_contiguous_array)
def test_non_contiguous_array(self):
non_contiguous_array = np.arange(12).reshape(3, 4)[:, :2]
assert not non_contiguous_array.flags.c_contiguous
assert not non_contiguous_array.flags.f_contiguous
# make sure non-contiguous arrays can be pickled-depickled
# using any protocol
for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
depickled_non_contiguous_array = pickle.loads(
pickle.dumps(non_contiguous_array, protocol=proto))
assert_equal(non_contiguous_array, depickled_non_contiguous_array)
def test_roundtrip(self):
for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
carray = np.array([[2, 9], [7, 0], [3, 8]])
DATA = [
carray,
np.transpose(carray),
np.array([('xxx', 1, 2.0)], dtype=[('a', (str, 3)), ('b', int),
('c', float)])
]
refs = [weakref.ref(a) for a in DATA]
for a in DATA:
assert_equal(
a, pickle.loads(pickle.dumps(a, protocol=proto)),
err_msg="%r" % a)
del a, DATA, carray
gc.collect()
# check for reference leaks (gh-12793)
for ref in refs:
assert ref() is None
def _loads(self, obj):
if sys.version_info[0] >= 3:
return pickle.loads(obj, encoding='latin1')
else:
return pickle.loads(obj)
# version 0 pickles, using protocol=2 to pickle
# version 0 doesn't have a version field
def test_version0_int8(self):
s = b'\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\nndarray\nq\x02K\x00\x85U\x01b\x87Rq\x03(K\x04\x85cnumpy\ndtype\nq\x04U\x02i1K\x00K\x01\x87Rq\x05(U\x01|NNJ\xff\xff\xff\xffJ\xff\xff\xff\xfftb\x89U\x04\x01\x02\x03\x04tb.'
a = np.array([1, 2, 3, 4], dtype=np.int8)
p = self._loads(s)
assert_equal(a, p)
def test_version0_float32(self):
s = b'\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\nndarray\nq\x02K\x00\x85U\x01b\x87Rq\x03(K\x04\x85cnumpy\ndtype\nq\x04U\x02f4K\x00K\x01\x87Rq\x05(U\x01<NNJ\xff\xff\xff\xffJ\xff\xff\xff\xfftb\x89U\x10\x00\x00\x80?\x00\x00\x00@\x00\x00@@\x00\x00\x80@tb.'
a = np.array([1.0, 2.0, 3.0, 4.0], dtype=np.float32)
p = self._loads(s)
assert_equal(a, p)
def test_version0_object(self):
s = b'\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\nndarray\nq\x02K\x00\x85U\x01b\x87Rq\x03(K\x02\x85cnumpy\ndtype\nq\x04U\x02O8K\x00K\x01\x87Rq\x05(U\x01|NNJ\xff\xff\xff\xffJ\xff\xff\xff\xfftb\x89]q\x06(}q\x07U\x01aK\x01s}q\x08U\x01bK\x02setb.'
a = np.array([{'a': 1}, {'b': 2}])
p = self._loads(s)
assert_equal(a, p)
# version 1 pickles, using protocol=2 to pickle
def test_version1_int8(self):
s = b'\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\nndarray\nq\x02K\x00\x85U\x01b\x87Rq\x03(K\x01K\x04\x85cnumpy\ndtype\nq\x04U\x02i1K\x00K\x01\x87Rq\x05(K\x01U\x01|NNJ\xff\xff\xff\xffJ\xff\xff\xff\xfftb\x89U\x04\x01\x02\x03\x04tb.'
a = np.array([1, 2, 3, 4], dtype=np.int8)
p = self._loads(s)
assert_equal(a, p)
def test_version1_float32(self):
s = b'\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\nndarray\nq\x02K\x00\x85U\x01b\x87Rq\x03(K\x01K\x04\x85cnumpy\ndtype\nq\x04U\x02f4K\x00K\x01\x87Rq\x05(K\x01U\x01<NNJ\xff\xff\xff\xffJ\xff\xff\xff\xfftb\x89U\x10\x00\x00\x80?\x00\x00\x00@\x00\x00@@\x00\x00\x80@tb.'
a = np.array([1.0, 2.0, 3.0, 4.0], dtype=np.float32)
p = self._loads(s)
assert_equal(a, p)
def test_version1_object(self):
s = b'\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\nndarray\nq\x02K\x00\x85U\x01b\x87Rq\x03(K\x01K\x02\x85cnumpy\ndtype\nq\x04U\x02O8K\x00K\x01\x87Rq\x05(K\x01U\x01|NNJ\xff\xff\xff\xffJ\xff\xff\xff\xfftb\x89]q\x06(}q\x07U\x01aK\x01s}q\x08U\x01bK\x02setb.'
a = np.array([{'a': 1}, {'b': 2}])
p = self._loads(s)
assert_equal(a, p)
def test_subarray_int_shape(self):
s = b"cnumpy.core.multiarray\n_reconstruct\np0\n(cnumpy\nndarray\np1\n(I0\ntp2\nS'b'\np3\ntp4\nRp5\n(I1\n(I1\ntp6\ncnumpy\ndtype\np7\n(S'V6'\np8\nI0\nI1\ntp9\nRp10\n(I3\nS'|'\np11\nN(S'a'\np12\ng3\ntp13\n(dp14\ng12\n(g7\n(S'V4'\np15\nI0\nI1\ntp16\nRp17\n(I3\nS'|'\np18\n(g7\n(S'i1'\np19\nI0\nI1\ntp20\nRp21\n(I3\nS'|'\np22\nNNNI-1\nI-1\nI0\ntp23\nb(I2\nI2\ntp24\ntp25\nNNI4\nI1\nI0\ntp26\nbI0\ntp27\nsg3\n(g7\n(S'V2'\np28\nI0\nI1\ntp29\nRp30\n(I3\nS'|'\np31\n(g21\nI2\ntp32\nNNI2\nI1\nI0\ntp33\nbI4\ntp34\nsI6\nI1\nI0\ntp35\nbI00\nS'\\x01\\x01\\x01\\x01\\x01\\x02'\np36\ntp37\nb."
a = np.array([(1, (1, 2))], dtype=[('a', 'i1', (2, 2)), ('b', 'i1', 2)])
p = self._loads(s)
assert_equal(a, p)
class TestFancyIndexing(object):
def test_list(self):
x = np.ones((1, 1))
x[:, [0]] = 2.0
assert_array_equal(x, np.array([[2.0]]))
x = np.ones((1, 1, 1))
x[:, :, [0]] = 2.0
assert_array_equal(x, np.array([[[2.0]]]))
def test_tuple(self):
x = np.ones((1, 1))
x[:, (0,)] = 2.0
assert_array_equal(x, np.array([[2.0]]))
x = np.ones((1, 1, 1))
x[:, :, (0,)] = 2.0
assert_array_equal(x, np.array([[[2.0]]]))
def test_mask(self):
x = np.array([1, 2, 3, 4])
m = np.array([0, 1, 0, 0], bool)
assert_array_equal(x[m], np.array([2]))
def test_mask2(self):
x = np.array([[1, 2, 3, 4], [5, 6, 7, 8]])
m = np.array([0, 1], bool)
m2 = np.array([[0, 1, 0, 0], [1, 0, 0, 0]], bool)
m3 = np.array([[0, 1, 0, 0], [0, 0, 0, 0]], bool)
assert_array_equal(x[m], np.array([[5, 6, 7, 8]]))
assert_array_equal(x[m2], np.array([2, 5]))
assert_array_equal(x[m3], np.array([2]))
def test_assign_mask(self):
x = np.array([1, 2, 3, 4])
m = np.array([0, 1, 0, 0], bool)
x[m] = 5
assert_array_equal(x, np.array([1, 5, 3, 4]))
def test_assign_mask2(self):
xorig = np.array([[1, 2, 3, 4], [5, 6, 7, 8]])
m = np.array([0, 1], bool)
m2 = np.array([[0, 1, 0, 0], [1, 0, 0, 0]], bool)
m3 = np.array([[0, 1, 0, 0], [0, 0, 0, 0]], bool)
x = xorig.copy()
x[m] = 10
assert_array_equal(x, np.array([[1, 2, 3, 4], [10, 10, 10, 10]]))
x = xorig.copy()
x[m2] = 10
assert_array_equal(x, np.array([[1, 10, 3, 4], [10, 6, 7, 8]]))
x = xorig.copy()
x[m3] = 10
assert_array_equal(x, np.array([[1, 10, 3, 4], [5, 6, 7, 8]]))
class TestStringCompare(object):
def test_string(self):
g1 = np.array(["This", "is", "example"])
g2 = np.array(["This", "was", "example"])
assert_array_equal(g1 == g2, [g1[i] == g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 != g2, [g1[i] != g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 <= g2, [g1[i] <= g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 >= g2, [g1[i] >= g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 < g2, [g1[i] < g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 > g2, [g1[i] > g2[i] for i in [0, 1, 2]])
def test_mixed(self):
g1 = np.array(["spam", "spa", "spammer", "and eggs"])
g2 = "spam"
assert_array_equal(g1 == g2, [x == g2 for x in g1])
assert_array_equal(g1 != g2, [x != g2 for x in g1])
assert_array_equal(g1 < g2, [x < g2 for x in g1])
assert_array_equal(g1 > g2, [x > g2 for x in g1])
assert_array_equal(g1 <= g2, [x <= g2 for x in g1])
assert_array_equal(g1 >= g2, [x >= g2 for x in g1])
def test_unicode(self):
g1 = np.array([u"This", u"is", u"example"])
g2 = np.array([u"This", u"was", u"example"])
assert_array_equal(g1 == g2, [g1[i] == g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 != g2, [g1[i] != g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 <= g2, [g1[i] <= g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 >= g2, [g1[i] >= g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 < g2, [g1[i] < g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 > g2, [g1[i] > g2[i] for i in [0, 1, 2]])
class TestArgmax(object):
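    # (array, expected argmax index) pairs covering NaN, complex,
    # datetime/timedelta (including NaT) and bool inputs.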
nan_arr = [
([0, 1, 2, 3, np.nan], 4),
([0, 1, 2, np.nan, 3], 3),
([np.nan, 0, 1, 2, 3], 0),
([np.nan, 0, np.nan, 2, 3], 0),
([0, 1, 2, 3, complex(0, np.nan)], 4),
([0, 1, 2, 3, complex(np.nan, 0)], 4),
([0, 1, 2, complex(np.nan, 0), 3], 3),
([0, 1, 2, complex(0, np.nan), 3], 3),
([complex(0, np.nan), 0, 1, 2, 3], 0),
([complex(np.nan, np.nan), 0, 1, 2, 3], 0),
([complex(np.nan, 0), complex(np.nan, 2), complex(np.nan, 1)], 0),
([complex(np.nan, np.nan), complex(np.nan, 2), complex(np.nan, 1)], 0),
([complex(np.nan, 0), complex(np.nan, 2), complex(np.nan, np.nan)], 0),
([complex(0, 0), complex(0, 2), complex(0, 1)], 1),
([complex(1, 0), complex(0, 2), complex(0, 1)], 0),
([complex(1, 0), complex(0, 2), complex(1, 1)], 2),
([np.datetime64('1923-04-14T12:43:12'),
np.datetime64('1994-06-21T14:43:15'),
np.datetime64('2001-10-15T04:10:32'),
np.datetime64('1995-11-25T16:02:16'),
np.datetime64('2005-01-04T03:14:12'),
np.datetime64('2041-12-03T14:05:03')], 5),
([np.datetime64('1935-09-14T04:40:11'),
np.datetime64('1949-10-12T12:32:11'),
np.datetime64('2010-01-03T05:14:12'),
np.datetime64('2015-11-20T12:20:59'),
np.datetime64('1932-09-23T10:10:13'),
np.datetime64('2014-10-10T03:50:30')], 3),
# Assorted tests with NaTs
([np.datetime64('NaT'),
np.datetime64('NaT'),
np.datetime64('2010-01-03T05:14:12'),
np.datetime64('NaT'),
np.datetime64('2015-09-23T10:10:13'),
np.datetime64('1932-10-10T03:50:30')], 4),
([np.datetime64('2059-03-14T12:43:12'),
np.datetime64('1996-09-21T14:43:15'),
np.datetime64('NaT'),
np.datetime64('2022-12-25T16:02:16'),
np.datetime64('1963-10-04T03:14:12'),
np.datetime64('2013-05-08T18:15:23')], 0),
([np.timedelta64(2, 's'),
np.timedelta64(1, 's'),
np.timedelta64('NaT', 's'),
np.timedelta64(3, 's')], 3),
([np.timedelta64('NaT', 's')] * 3, 0),
([timedelta(days=5, seconds=14), timedelta(days=2, seconds=35),
timedelta(days=-1, seconds=23)], 0),
([timedelta(days=1, seconds=43), timedelta(days=10, seconds=5),
timedelta(days=5, seconds=14)], 1),
([timedelta(days=10, seconds=24), timedelta(days=10, seconds=5),
timedelta(days=10, seconds=43)], 2),
([False, False, False, False, True], 4),
([False, False, False, True, False], 3),
([True, False, False, False, False], 0),
([True, False, True, False, False], 0),
]
def test_all(self):
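        # argmax along axis i must select the same entries as max along axis i;
        # choose() gathers a's values at the argmax positions.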
a = np.random.normal(0, 1, (4, 5, 6, 7, 8))
for i in range(a.ndim):
amax = a.max(i)
aargmax = a.argmax(i)
axes = list(range(a.ndim))
axes.remove(i)
assert_(np.all(amax == aargmax.choose(*a.transpose(i,*axes))))
def test_combinations(self):
for arr, pos in self.nan_arr:
with suppress_warnings() as sup:
sup.filter(RuntimeWarning,
"invalid value encountered in reduce")
max_val = np.max(arr)
assert_equal(np.argmax(arr), pos, err_msg="%r" % arr)
assert_equal(arr[np.argmax(arr)], max_val, err_msg="%r" % arr)
def test_output_shape(self):
# see also gh-616
a = np.ones((10, 5))
# Check some simple shape mismatches
out = np.ones(11, dtype=np.int_)
assert_raises(ValueError, a.argmax, -1, out)
out = np.ones((2, 5), dtype=np.int_)
assert_raises(ValueError, a.argmax, -1, out)
        # these could possibly be relaxed (the previous cases used to be allowed)
out = np.ones((1, 10), dtype=np.int_)
assert_raises(ValueError, a.argmax, -1, out)
out = np.ones(10, dtype=np.int_)
a.argmax(-1, out=out)
assert_equal(out, a.argmax(-1))
def test_argmax_unicode(self):
d = np.zeros(6031, dtype='<U9')
d[5942] = "as"
assert_equal(d.argmax(), 5942)
def test_np_vs_ndarray(self):
# make sure both ndarray.argmax and numpy.argmax support out/axis args
a = np.random.normal(size=(2,3))
# check positional args
out1 = np.zeros(2, dtype=int)
out2 = np.zeros(2, dtype=int)
assert_equal(a.argmax(1, out1), np.argmax(a, 1, out2))
assert_equal(out1, out2)
# check keyword args
out1 = np.zeros(3, dtype=int)
out2 = np.zeros(3, dtype=int)
assert_equal(a.argmax(out=out1, axis=0), np.argmax(a, out=out2, axis=0))
assert_equal(out1, out2)
def test_object_argmax_with_NULLs(self):
# See gh-6032
a = np.empty(4, dtype='O')
ctypes.memset(a.ctypes.data, 0, a.nbytes)
assert_equal(a.argmax(), 0)
a[3] = 10
assert_equal(a.argmax(), 3)
a[1] = 30
assert_equal(a.argmax(), 1)
class TestArgmin(object):
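    # (array, expected argmin index) pairs mirroring the TestArgmax cases above.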
nan_arr = [
([0, 1, 2, 3, np.nan], 4),
([0, 1, 2, np.nan, 3], 3),
([np.nan, 0, 1, 2, 3], 0),
([np.nan, 0, np.nan, 2, 3], 0),
([0, 1, 2, 3, complex(0, np.nan)], 4),
([0, 1, 2, 3, complex(np.nan, 0)], 4),
([0, 1, 2, complex(np.nan, 0), 3], 3),
([0, 1, 2, complex(0, np.nan), 3], 3),
([complex(0, np.nan), 0, 1, 2, 3], 0),
([complex(np.nan, np.nan), 0, 1, 2, 3], 0),
([complex(np.nan, 0), complex(np.nan, 2), complex(np.nan, 1)], 0),
([complex(np.nan, np.nan), complex(np.nan, 2), complex(np.nan, 1)], 0),
([complex(np.nan, 0), complex(np.nan, 2), complex(np.nan, np.nan)], 0),
([complex(0, 0), complex(0, 2), complex(0, 1)], 0),
([complex(1, 0), complex(0, 2), complex(0, 1)], 2),
([complex(1, 0), complex(0, 2), complex(1, 1)], 1),
([np.datetime64('1923-04-14T12:43:12'),
np.datetime64('1994-06-21T14:43:15'),
np.datetime64('2001-10-15T04:10:32'),
np.datetime64('1995-11-25T16:02:16'),
np.datetime64('2005-01-04T03:14:12'),
np.datetime64('2041-12-03T14:05:03')], 0),
([np.datetime64('1935-09-14T04:40:11'),
np.datetime64('1949-10-12T12:32:11'),
np.datetime64('2010-01-03T05:14:12'),
np.datetime64('2014-11-20T12:20:59'),
np.datetime64('2015-09-23T10:10:13'),
np.datetime64('1932-10-10T03:50:30')], 5),
# Assorted tests with NaTs
([np.datetime64('NaT'),
np.datetime64('NaT'),
np.datetime64('2010-01-03T05:14:12'),
np.datetime64('NaT'),
np.datetime64('2015-09-23T10:10:13'),
np.datetime64('1932-10-10T03:50:30')], 5),
([np.datetime64('2059-03-14T12:43:12'),
np.datetime64('1996-09-21T14:43:15'),
np.datetime64('NaT'),
np.datetime64('2022-12-25T16:02:16'),
np.datetime64('1963-10-04T03:14:12'),
np.datetime64('2013-05-08T18:15:23')], 4),
([np.timedelta64(2, 's'),
np.timedelta64(1, 's'),
np.timedelta64('NaT', 's'),
np.timedelta64(3, 's')], 1),
([np.timedelta64('NaT', 's')] * 3, 0),
([timedelta(days=5, seconds=14), timedelta(days=2, seconds=35),
timedelta(days=-1, seconds=23)], 2),
([timedelta(days=1, seconds=43), timedelta(days=10, seconds=5),
timedelta(days=5, seconds=14)], 0),
([timedelta(days=10, seconds=24), timedelta(days=10, seconds=5),
timedelta(days=10, seconds=43)], 1),
([True, True, True, True, False], 4),
([True, True, True, False, True], 3),
([False, True, True, True, True], 0),
([False, True, False, True, True], 0),
]
def test_all(self):
a = np.random.normal(0, 1, (4, 5, 6, 7, 8))
for i in range(a.ndim):
amin = a.min(i)
aargmin = a.argmin(i)
axes = list(range(a.ndim))
axes.remove(i)
assert_(np.all(amin == aargmin.choose(*a.transpose(i,*axes))))
def test_combinations(self):
for arr, pos in self.nan_arr:
with suppress_warnings() as sup:
sup.filter(RuntimeWarning,
"invalid value encountered in reduce")
min_val = np.min(arr)
assert_equal(np.argmin(arr), pos, err_msg="%r" % arr)
assert_equal(arr[np.argmin(arr)], min_val, err_msg="%r" % arr)
def test_minimum_signed_integers(self):
a = np.array([1, -2**7, -2**7 + 1], dtype=np.int8)
assert_equal(np.argmin(a), 1)
a = np.array([1, -2**15, -2**15 + 1], dtype=np.int16)
assert_equal(np.argmin(a), 1)
a = np.array([1, -2**31, -2**31 + 1], dtype=np.int32)
assert_equal(np.argmin(a), 1)
a = np.array([1, -2**63, -2**63 + 1], dtype=np.int64)
assert_equal(np.argmin(a), 1)
def test_output_shape(self):
# see also gh-616
a = np.ones((10, 5))
# Check some simple shape mismatches
out = np.ones(11, dtype=np.int_)
assert_raises(ValueError, a.argmin, -1, out)
out = np.ones((2, 5), dtype=np.int_)
assert_raises(ValueError, a.argmin, -1, out)
        # these could possibly be relaxed (the previous cases used to be allowed)
out = np.ones((1, 10), dtype=np.int_)
assert_raises(ValueError, a.argmin, -1, out)
out = np.ones(10, dtype=np.int_)
a.argmin(-1, out=out)
assert_equal(out, a.argmin(-1))
def test_argmin_unicode(self):
d = np.ones(6031, dtype='<U9')
d[6001] = "0"
assert_equal(d.argmin(), 6001)
def test_np_vs_ndarray(self):
# make sure both ndarray.argmin and numpy.argmin support out/axis args
a = np.random.normal(size=(2, 3))
# check positional args
out1 = np.zeros(2, dtype=int)
out2 = np.ones(2, dtype=int)
assert_equal(a.argmin(1, out1), np.argmin(a, 1, out2))
assert_equal(out1, out2)
# check keyword args
out1 = np.zeros(3, dtype=int)
out2 = np.ones(3, dtype=int)
assert_equal(a.argmin(out=out1, axis=0), np.argmin(a, out=out2, axis=0))
assert_equal(out1, out2)
def test_object_argmin_with_NULLs(self):
# See gh-6032
a = np.empty(4, dtype='O')
ctypes.memset(a.ctypes.data, 0, a.nbytes)
assert_equal(a.argmin(), 0)
a[3] = 30
assert_equal(a.argmin(), 3)
a[1] = 10
assert_equal(a.argmin(), 1)
class TestMinMax(object):
def test_scalar(self):
assert_raises(np.AxisError, np.amax, 1, 1)
assert_raises(np.AxisError, np.amin, 1, 1)
assert_equal(np.amax(1, axis=0), 1)
assert_equal(np.amin(1, axis=0), 1)
assert_equal(np.amax(1, axis=None), 1)
assert_equal(np.amin(1, axis=None), 1)
def test_axis(self):
assert_raises(np.AxisError, np.amax, [1, 2, 3], 1000)
assert_equal(np.amax([[1, 2, 3]], axis=1), 3)
def test_datetime(self):
# NaTs are ignored
for dtype in ('m8[s]', 'm8[Y]'):
a = np.arange(10).astype(dtype)
a[3] = 'NaT'
assert_equal(np.amin(a), a[0])
assert_equal(np.amax(a), a[9])
a[0] = 'NaT'
assert_equal(np.amin(a), a[1])
assert_equal(np.amax(a), a[9])
a.fill('NaT')
assert_equal(np.amin(a), a[0])
assert_equal(np.amax(a), a[0])
class TestNewaxis(object):
def test_basic(self):
sk = np.array([0, -0.1, 0.1])
res = 250*sk[:, np.newaxis]
assert_almost_equal(res.ravel(), 250*sk)
class TestClip(object):
def _check_range(self, x, cmin, cmax):
assert_(np.all(x >= cmin))
assert_(np.all(x <= cmax))
def _clip_type(self, type_group, array_max,
clip_min, clip_max, inplace=False,
expected_min=None, expected_max=None):
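        # Clip random data for every dtype in `type_group` (native and swapped
        # byte order) and check the clipped values stay within
        # [expected_min, expected_max].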
if expected_min is None:
expected_min = clip_min
if expected_max is None:
expected_max = clip_max
for T in np.sctypes[type_group]:
if sys.byteorder == 'little':
byte_orders = ['=', '>']
else:
byte_orders = ['<', '=']
for byteorder in byte_orders:
dtype = np.dtype(T).newbyteorder(byteorder)
x = (np.random.random(1000) * array_max).astype(dtype)
if inplace:
x.clip(clip_min, clip_max, x)
else:
x = x.clip(clip_min, clip_max)
byteorder = '='
if x.dtype.byteorder == '|':
byteorder = '|'
assert_equal(x.dtype.byteorder, byteorder)
self._check_range(x, expected_min, expected_max)
return x
def test_basic(self):
for inplace in [False, True]:
self._clip_type(
'float', 1024, -12.8, 100.2, inplace=inplace)
self._clip_type(
'float', 1024, 0, 0, inplace=inplace)
self._clip_type(
'int', 1024, -120, 100.5, inplace=inplace)
self._clip_type(
'int', 1024, 0, 0, inplace=inplace)
self._clip_type(
'uint', 1024, 0, 0, inplace=inplace)
self._clip_type(
'uint', 1024, -120, 100, inplace=inplace, expected_min=0)
def test_record_array(self):
rec = np.array([(-5, 2.0, 3.0), (5.0, 4.0, 3.0)],
dtype=[('x', '<f8'), ('y', '<f8'), ('z', '<f8')])
y = rec['x'].clip(-0.3, 0.5)
self._check_range(y, -0.3, 0.5)
def test_max_or_min(self):
val = np.array([0, 1, 2, 3, 4, 5, 6, 7])
x = val.clip(3)
assert_(np.all(x >= 3))
x = val.clip(min=3)
assert_(np.all(x >= 3))
x = val.clip(max=4)
assert_(np.all(x <= 4))
def test_nan(self):
input_arr = np.array([-2., np.nan, 0.5, 3., 0.25, np.nan])
result = input_arr.clip(-1, 1)
expected = np.array([-1., np.nan, 0.5, 1., 0.25, np.nan])
assert_array_equal(result, expected)
class TestCompress(object):
def test_axis(self):
tgt = [[5, 6, 7, 8, 9]]
arr = np.arange(10).reshape(2, 5)
out = np.compress([0, 1], arr, axis=0)
assert_equal(out, tgt)
tgt = [[1, 3], [6, 8]]
out = np.compress([0, 1, 0, 1, 0], arr, axis=1)
assert_equal(out, tgt)
def test_truncate(self):
tgt = [[1], [6]]
arr = np.arange(10).reshape(2, 5)
out = np.compress([0, 1], arr, axis=1)
assert_equal(out, tgt)
def test_flatten(self):
arr = np.arange(10).reshape(2, 5)
out = np.compress([0, 1], arr)
assert_equal(out, 1)
class TestPutmask(object):
def tst_basic(self, x, T, mask, val):
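        # np.putmask writes `val` into every position of `x` where `mask` is
        # True, in place; the masked values must round-trip through dtype T.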
np.putmask(x, mask, val)
assert_equal(x[mask], T(val))
assert_equal(x.dtype, T)
def test_ip_types(self):
unchecked_types = [bytes, unicode, np.void, object]
x = np.random.random(1000)*100
mask = x < 40
for val in [-100, 0, 15]:
for types in np.sctypes.values():
for T in types:
if T not in unchecked_types:
self.tst_basic(x.copy().astype(T), T, mask, val)
def test_mask_size(self):
assert_raises(ValueError, np.putmask, np.array([1, 2, 3]), [True], 5)
@pytest.mark.parametrize('dtype', ('>i4', '<i4'))
def test_byteorder(self, dtype):
x = np.array([1, 2, 3], dtype)
np.putmask(x, [True, False, True], -1)
assert_array_equal(x, [-1, 2, -1])
def test_record_array(self):
# Note mixed byteorder.
rec = np.array([(-5, 2.0, 3.0), (5.0, 4.0, 3.0)],
dtype=[('x', '<f8'), ('y', '>f8'), ('z', '<f8')])
np.putmask(rec['x'], [True, False], 10)
assert_array_equal(rec['x'], [10, 5])
assert_array_equal(rec['y'], [2, 4])
assert_array_equal(rec['z'], [3, 3])
np.putmask(rec['y'], [True, False], 11)
assert_array_equal(rec['x'], [10, 5])
assert_array_equal(rec['y'], [11, 4])
assert_array_equal(rec['z'], [3, 3])
class TestTake(object):
def tst_basic(self, x):
ind = list(range(x.shape[0]))
assert_array_equal(x.take(ind, axis=0), x)
def test_ip_types(self):
unchecked_types = [bytes, unicode, np.void, object]
x = np.random.random(24)*100
x.shape = 2, 3, 4
for types in np.sctypes.values():
for T in types:
if T not in unchecked_types:
self.tst_basic(x.copy().astype(T))
def test_raise(self):
x = np.random.random(24)*100
x.shape = 2, 3, 4
assert_raises(IndexError, x.take, [0, 1, 2], axis=0)
assert_raises(IndexError, x.take, [-3], axis=0)
assert_array_equal(x.take([-1], axis=0)[0], x[1])
def test_clip(self):
x = np.random.random(24)*100
x.shape = 2, 3, 4
assert_array_equal(x.take([-1], axis=0, mode='clip')[0], x[0])
assert_array_equal(x.take([2], axis=0, mode='clip')[0], x[1])
def test_wrap(self):
x = np.random.random(24)*100
x.shape = 2, 3, 4
assert_array_equal(x.take([-1], axis=0, mode='wrap')[0], x[1])
assert_array_equal(x.take([2], axis=0, mode='wrap')[0], x[0])
assert_array_equal(x.take([3], axis=0, mode='wrap')[0], x[1])
@pytest.mark.parametrize('dtype', ('>i4', '<i4'))
def test_byteorder(self, dtype):
x = np.array([1, 2, 3], dtype)
assert_array_equal(x.take([0, 2, 1]), [1, 3, 2])
def test_record_array(self):
# Note mixed byteorder.
rec = np.array([(-5, 2.0, 3.0), (5.0, 4.0, 3.0)],
dtype=[('x', '<f8'), ('y', '>f8'), ('z', '<f8')])
rec1 = rec.take([1])
assert_(rec1['x'] == 5.0 and rec1['y'] == 4.0)
class TestLexsort(object):
def test_basic(self):
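        # np.lexsort sorts by the last key first, so `a` is the primary key and
        # `b` only breaks ties.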
a = [1, 2, 1, 3, 1, 5]
b = [0, 4, 5, 6, 2, 3]
idx = np.lexsort((b, a))
expected_idx = np.array([0, 4, 2, 1, 3, 5])
assert_array_equal(idx, expected_idx)
x = np.vstack((b, a))
idx = np.lexsort(x)
assert_array_equal(idx, expected_idx)
assert_array_equal(x[1][idx], np.sort(x[1]))
def test_datetime(self):
a = np.array([0,0,0], dtype='datetime64[D]')
b = np.array([2,1,0], dtype='datetime64[D]')
idx = np.lexsort((b, a))
expected_idx = np.array([2, 1, 0])
assert_array_equal(idx, expected_idx)
a = np.array([0,0,0], dtype='timedelta64[D]')
b = np.array([2,1,0], dtype='timedelta64[D]')
idx = np.lexsort((b, a))
expected_idx = np.array([2, 1, 0])
assert_array_equal(idx, expected_idx)
def test_object(self): # gh-6312
a = np.random.choice(10, 1000)
b = np.random.choice(['abc', 'xy', 'wz', 'efghi', 'qwst', 'x'], 1000)
for u in a, b:
left = np.lexsort((u.astype('O'),))
right = np.argsort(u, kind='mergesort')
assert_array_equal(left, right)
for u, v in (a, b), (b, a):
idx = np.lexsort((u, v))
assert_array_equal(idx, np.lexsort((u.astype('O'), v)))
assert_array_equal(idx, np.lexsort((u, v.astype('O'))))
u, v = np.array(u, dtype='object'), np.array(v, dtype='object')
assert_array_equal(idx, np.lexsort((u, v)))
def test_invalid_axis(self): # gh-7528
x = np.linspace(0., 1., 42*3).reshape(42, 3)
assert_raises(np.AxisError, np.lexsort, x, axis=2)
class TestIO(object):
"""Test tofile, fromfile, tobytes, and fromstring"""
def setup(self):
shape = (2, 4, 3)
rand = np.random.random
self.x = rand(shape) + rand(shape).astype(complex)*1j
self.x[0,:, 1] = [np.nan, np.inf, -np.inf, np.nan]
self.dtype = self.x.dtype
self.tempdir = tempfile.mkdtemp()
self.filename = tempfile.mktemp(dir=self.tempdir)
def teardown(self):
shutil.rmtree(self.tempdir)
def test_nofile(self):
# this should probably be supported as a file
# but for now test for proper errors
b = io.BytesIO()
assert_raises(IOError, np.fromfile, b, np.uint8, 80)
d = np.ones(7)
assert_raises(IOError, lambda x: x.tofile(b), d)
def test_bool_fromstring(self):
v = np.array([True, False, True, False], dtype=np.bool_)
y = np.fromstring('1 0 -2.3 0.0', sep=' ', dtype=np.bool_)
assert_array_equal(v, y)
def test_uint64_fromstring(self):
d = np.fromstring("9923372036854775807 104783749223640",
dtype=np.uint64, sep=' ')
e = np.array([9923372036854775807, 104783749223640], dtype=np.uint64)
assert_array_equal(d, e)
def test_int64_fromstring(self):
d = np.fromstring("-25041670086757 104783749223640",
dtype=np.int64, sep=' ')
e = np.array([-25041670086757, 104783749223640], dtype=np.int64)
assert_array_equal(d, e)
def test_empty_files_binary(self):
f = open(self.filename, 'w')
f.close()
y = np.fromfile(self.filename)
assert_(y.size == 0, "Array not empty")
def test_empty_files_text(self):
f = open(self.filename, 'w')
f.close()
y = np.fromfile(self.filename, sep=" ")
assert_(y.size == 0, "Array not empty")
def test_roundtrip_file(self):
f = open(self.filename, 'wb')
self.x.tofile(f)
f.close()
# NB. doesn't work with flush+seek, due to use of C stdio
f = open(self.filename, 'rb')
y = np.fromfile(f, dtype=self.dtype)
f.close()
assert_array_equal(y, self.x.flat)
def test_roundtrip_filename(self):
self.x.tofile(self.filename)
y = np.fromfile(self.filename, dtype=self.dtype)
assert_array_equal(y, self.x.flat)
def test_roundtrip_binary_str(self):
s = self.x.tobytes()
y = np.frombuffer(s, dtype=self.dtype)
assert_array_equal(y, self.x.flat)
s = self.x.tobytes('F')
y = np.frombuffer(s, dtype=self.dtype)
assert_array_equal(y, self.x.flatten('F'))
def test_roundtrip_str(self):
x = self.x.real.ravel()
s = "@".join(map(str, x))
y = np.fromstring(s, sep="@")
        # NB. converting through str loses precision
nan_mask = ~np.isfinite(x)
assert_array_equal(x[nan_mask], y[nan_mask])
assert_array_almost_equal(x[~nan_mask], y[~nan_mask], decimal=5)
def test_roundtrip_repr(self):
x = self.x.real.ravel()
s = "@".join(map(repr, x))
y = np.fromstring(s, sep="@")
assert_array_equal(x, y)
def test_unseekable_fromfile(self):
# gh-6246
self.x.tofile(self.filename)
def fail(*args, **kwargs):
raise IOError('Can not tell or seek')
with io.open(self.filename, 'rb', buffering=0) as f:
f.seek = fail
f.tell = fail
assert_raises(IOError, np.fromfile, f, dtype=self.dtype)
def test_io_open_unbuffered_fromfile(self):
# gh-6632
self.x.tofile(self.filename)
with io.open(self.filename, 'rb', buffering=0) as f:
y = np.fromfile(f, dtype=self.dtype)
assert_array_equal(y, self.x.flat)
def test_largish_file(self):
# check the fallocate path on files > 16MB
d = np.zeros(4 * 1024 ** 2)
d.tofile(self.filename)
assert_equal(os.path.getsize(self.filename), d.nbytes)
assert_array_equal(d, np.fromfile(self.filename))
# check offset
with open(self.filename, "r+b") as f:
f.seek(d.nbytes)
d.tofile(f)
assert_equal(os.path.getsize(self.filename), d.nbytes * 2)
# check append mode (gh-8329)
open(self.filename, "w").close() # delete file contents
with open(self.filename, "ab") as f:
d.tofile(f)
assert_array_equal(d, np.fromfile(self.filename))
with open(self.filename, "ab") as f:
d.tofile(f)
assert_equal(os.path.getsize(self.filename), d.nbytes * 2)
def test_io_open_buffered_fromfile(self):
# gh-6632
self.x.tofile(self.filename)
with io.open(self.filename, 'rb', buffering=-1) as f:
y = np.fromfile(f, dtype=self.dtype)
assert_array_equal(y, self.x.flat)
def test_file_position_after_fromfile(self):
# gh-4118
sizes = [io.DEFAULT_BUFFER_SIZE//8,
io.DEFAULT_BUFFER_SIZE,
io.DEFAULT_BUFFER_SIZE*8]
for size in sizes:
f = open(self.filename, 'wb')
f.seek(size-1)
f.write(b'\0')
f.close()
for mode in ['rb', 'r+b']:
err_msg = "%d %s" % (size, mode)
f = open(self.filename, mode)
f.read(2)
np.fromfile(f, dtype=np.float64, count=1)
pos = f.tell()
f.close()
assert_equal(pos, 10, err_msg=err_msg)
def test_file_position_after_tofile(self):
# gh-4118
sizes = [io.DEFAULT_BUFFER_SIZE//8,
io.DEFAULT_BUFFER_SIZE,
io.DEFAULT_BUFFER_SIZE*8]
for size in sizes:
err_msg = "%d" % (size,)
f = open(self.filename, 'wb')
f.seek(size-1)
f.write(b'\0')
f.seek(10)
f.write(b'12')
np.array([0], dtype=np.float64).tofile(f)
pos = f.tell()
f.close()
assert_equal(pos, 10 + 2 + 8, err_msg=err_msg)
f = open(self.filename, 'r+b')
f.read(2)
f.seek(0, 1) # seek between read&write required by ANSI C
np.array([0], dtype=np.float64).tofile(f)
pos = f.tell()
f.close()
assert_equal(pos, 10, err_msg=err_msg)
def test_load_object_array_fromfile(self):
# gh-12300
with open(self.filename, 'w') as f:
# Ensure we have a file with consistent contents
pass
with open(self.filename, 'rb') as f:
assert_raises_regex(ValueError, "Cannot read into object array",
np.fromfile, f, dtype=object)
assert_raises_regex(ValueError, "Cannot read into object array",
np.fromfile, self.filename, dtype=object)
def _check_from(self, s, value, **kw):
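        # Parse `s` both from memory (frombuffer/fromstring) and from a temp
        # file on disk (fromfile), comparing the result against `value`.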
if 'sep' not in kw:
y = np.frombuffer(s, **kw)
else:
y = np.fromstring(s, **kw)
assert_array_equal(y, value)
f = open(self.filename, 'wb')
f.write(s)
f.close()
y = np.fromfile(self.filename, **kw)
assert_array_equal(y, value)
def test_nan(self):
self._check_from(
b"nan +nan -nan NaN nan(foo) +NaN(BAR) -NAN(q_u_u_x_)",
[np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan],
sep=' ')
def test_inf(self):
self._check_from(
b"inf +inf -inf infinity -Infinity iNfInItY -inF",
[np.inf, np.inf, -np.inf, np.inf, -np.inf, np.inf, -np.inf],
sep=' ')
def test_numbers(self):
self._check_from(b"1.234 -1.234 .3 .3e55 -123133.1231e+133",
[1.234, -1.234, .3, .3e55, -123133.1231e+133], sep=' ')
def test_binary(self):
self._check_from(b'\x00\x00\x80?\x00\x00\x00@\x00\x00@@\x00\x00\x80@',
np.array([1, 2, 3, 4]),
dtype='<f4')
@pytest.mark.slow # takes > 1 minute on mechanical hard drive
def test_big_binary(self):
"""Test workarounds for 32-bit limited fwrite, fseek, and ftell
calls in windows. These normally would hang doing something like this.
See http://projects.scipy.org/numpy/ticket/1660"""
if sys.platform != 'win32':
return
try:
# before workarounds, only up to 2**32-1 worked
fourgbplus = 2**32 + 2**16
testbytes = np.arange(8, dtype=np.int8)
n = len(testbytes)
flike = tempfile.NamedTemporaryFile()
f = flike.file
np.tile(testbytes, fourgbplus // testbytes.nbytes).tofile(f)
flike.seek(0)
a = np.fromfile(f, dtype=np.int8)
flike.close()
assert_(len(a) == fourgbplus)
# check only start and end for speed:
assert_((a[:n] == testbytes).all())
assert_((a[-n:] == testbytes).all())
except (MemoryError, ValueError):
pass
def test_string(self):
self._check_from(b'1,2,3,4', [1., 2., 3., 4.], sep=',')
def test_counted_string(self):
self._check_from(b'1,2,3,4', [1., 2., 3., 4.], count=4, sep=',')
self._check_from(b'1,2,3,4', [1., 2., 3.], count=3, sep=',')
self._check_from(b'1,2,3,4', [1., 2., 3., 4.], count=-1, sep=',')
def test_string_with_ws(self):
self._check_from(b'1 2 3 4 ', [1, 2, 3, 4], dtype=int, sep=' ')
def test_counted_string_with_ws(self):
self._check_from(b'1 2 3 4 ', [1, 2, 3], count=3, dtype=int,
sep=' ')
def test_ascii(self):
self._check_from(b'1 , 2 , 3 , 4', [1., 2., 3., 4.], sep=',')
self._check_from(b'1,2,3,4', [1., 2., 3., 4.], dtype=float, sep=',')
def test_malformed(self):
self._check_from(b'1.234 1,234', [1.234, 1.], sep=' ')
def test_long_sep(self):
self._check_from(b'1_x_3_x_4_x_5', [1, 3, 4, 5], sep='_x_')
def test_dtype(self):
v = np.array([1, 2, 3, 4], dtype=np.int_)
self._check_from(b'1,2,3,4', v, sep=',', dtype=np.int_)
def test_dtype_bool(self):
# can't use _check_from because fromstring can't handle True/False
v = np.array([True, False, True, False], dtype=np.bool_)
s = b'1,0,-2.3,0'
f = open(self.filename, 'wb')
f.write(s)
f.close()
y = np.fromfile(self.filename, sep=',', dtype=np.bool_)
assert_(y.dtype == '?')
assert_array_equal(y, v)
def test_tofile_sep(self):
x = np.array([1.51, 2, 3.51, 4], dtype=float)
f = open(self.filename, 'w')
x.tofile(f, sep=',')
f.close()
f = open(self.filename, 'r')
s = f.read()
f.close()
#assert_equal(s, '1.51,2.0,3.51,4.0')
y = np.array([float(p) for p in s.split(',')])
assert_array_equal(x,y)
def test_tofile_format(self):
x = np.array([1.51, 2, 3.51, 4], dtype=float)
f = open(self.filename, 'w')
x.tofile(f, sep=',', format='%.2f')
f.close()
f = open(self.filename, 'r')
s = f.read()
f.close()
assert_equal(s, '1.51,2.00,3.51,4.00')
def test_locale(self):
with CommaDecimalPointLocale():
self.test_numbers()
self.test_nan()
self.test_inf()
self.test_counted_string()
self.test_ascii()
self.test_malformed()
self.test_tofile_sep()
self.test_tofile_format()
class TestFromBuffer(object):
@pytest.mark.parametrize('byteorder', ['<', '>'])
@pytest.mark.parametrize('dtype', [float, int, complex])
def test_basic(self, byteorder, dtype):
dt = np.dtype(dtype).newbyteorder(byteorder)
x = (np.random.random((4, 7)) * 5).astype(dt)
buf = x.tobytes()
assert_array_equal(np.frombuffer(buf, dtype=dt), x.flat)
def test_empty(self):
assert_array_equal(np.frombuffer(b''), np.array([]))
class TestFlat(object):
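    # setup() builds read-only `a` (contiguous) and `b` (strided view) plus
    # writeable counterparts `a0`/`b0` backed by the same data.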
def setup(self):
a0 = np.arange(20.0)
a = a0.reshape(4, 5)
a0.shape = (4, 5)
a.flags.writeable = False
self.a = a
self.b = a[::2, ::2]
self.a0 = a0
self.b0 = a0[::2, ::2]
def test_contiguous(self):
testpassed = False
try:
self.a.flat[12] = 100.0
except ValueError:
testpassed = True
assert_(testpassed)
assert_(self.a.flat[12] == 12.0)
def test_discontiguous(self):
testpassed = False
try:
self.b.flat[4] = 100.0
except ValueError:
testpassed = True
assert_(testpassed)
assert_(self.b.flat[4] == 12.0)
def test___array__(self):
c = self.a.flat.__array__()
d = self.b.flat.__array__()
e = self.a0.flat.__array__()
f = self.b0.flat.__array__()
assert_(c.flags.writeable is False)
assert_(d.flags.writeable is False)
# for 1.14 all are set to non-writeable on the way to replacing the
# UPDATEIFCOPY array returned for non-contiguous arrays.
assert_(e.flags.writeable is True)
assert_(f.flags.writeable is False)
with assert_warns(DeprecationWarning):
assert_(c.flags.updateifcopy is False)
with assert_warns(DeprecationWarning):
assert_(d.flags.updateifcopy is False)
with assert_warns(DeprecationWarning):
assert_(e.flags.updateifcopy is False)
with assert_warns(DeprecationWarning):
# UPDATEIFCOPY is removed.
assert_(f.flags.updateifcopy is False)
assert_(c.flags.writebackifcopy is False)
assert_(d.flags.writebackifcopy is False)
assert_(e.flags.writebackifcopy is False)
assert_(f.flags.writebackifcopy is False)
class TestResize(object):
def test_basic(self):
x = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
if IS_PYPY:
x.resize((5, 5), refcheck=False)
else:
x.resize((5, 5))
assert_array_equal(x.flat[:9],
np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]]).flat)
assert_array_equal(x[9:].flat, 0)
def test_check_reference(self):
x = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
y = x
assert_raises(ValueError, x.resize, (5, 1))
del y # avoid pyflakes unused variable warning.
def test_int_shape(self):
x = np.eye(3)
if IS_PYPY:
x.resize(3, refcheck=False)
else:
x.resize(3)
assert_array_equal(x, np.eye(3)[0,:])
def test_none_shape(self):
x = np.eye(3)
x.resize(None)
assert_array_equal(x, np.eye(3))
x.resize()
assert_array_equal(x, np.eye(3))
def test_0d_shape(self):
        # do it multiple times to test it does not break the alloc cache, gh-9216
for i in range(10):
x = np.empty((1,))
x.resize(())
assert_equal(x.shape, ())
assert_equal(x.size, 1)
x = np.empty(())
x.resize((1,))
assert_equal(x.shape, (1,))
assert_equal(x.size, 1)
def test_invalid_arguments(self):
assert_raises(TypeError, np.eye(3).resize, 'hi')
assert_raises(ValueError, np.eye(3).resize, -1)
assert_raises(TypeError, np.eye(3).resize, order=1)
assert_raises(TypeError, np.eye(3).resize, refcheck='hi')
def test_freeform_shape(self):
x = np.eye(3)
if IS_PYPY:
x.resize(3, 2, 1, refcheck=False)
else:
x.resize(3, 2, 1)
assert_(x.shape == (3, 2, 1))
def test_zeros_appended(self):
x = np.eye(3)
if IS_PYPY:
x.resize(2, 3, 3, refcheck=False)
else:
x.resize(2, 3, 3)
assert_array_equal(x[0], np.eye(3))
assert_array_equal(x[1], np.zeros((3, 3)))
def test_obj_obj(self):
# check memory is initialized on resize, gh-4857
a = np.ones(10, dtype=[('k', object, 2)])
if IS_PYPY:
a.resize(15, refcheck=False)
else:
a.resize(15,)
assert_equal(a.shape, (15,))
assert_array_equal(a['k'][-5:], 0)
assert_array_equal(a['k'][:-5], 1)
def test_empty_view(self):
# check that sizes containing a zero don't trigger a reallocate for
# already empty arrays
x = np.zeros((10, 0), int)
x_view = x[...]
x_view.resize((0, 10))
x_view.resize((0, 100))
def test_check_weakref(self):
x = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
xref = weakref.ref(x)
assert_raises(ValueError, x.resize, (5, 1))
del xref # avoid pyflakes unused variable warning.
class TestRecord(object):
def test_field_rename(self):
dt = np.dtype([('f', float), ('i', int)])
dt.names = ['p', 'q']
assert_equal(dt.names, ['p', 'q'])
def test_multiple_field_name_occurrence(self):
def test_dtype_init():
np.dtype([("A", "f8"), ("B", "f8"), ("A", "f8")])
# Error raised when multiple fields have the same name
assert_raises(ValueError, test_dtype_init)
@pytest.mark.skipif(sys.version_info[0] < 3, reason="Not Python 3")
def test_bytes_fields(self):
# Bytes are not allowed in field names and not recognized in titles
# on Py3
assert_raises(TypeError, np.dtype, [(b'a', int)])
assert_raises(TypeError, np.dtype, [(('b', b'a'), int)])
dt = np.dtype([((b'a', 'b'), int)])
assert_raises(TypeError, dt.__getitem__, b'a')
x = np.array([(1,), (2,), (3,)], dtype=dt)
assert_raises(IndexError, x.__getitem__, b'a')
y = x[0]
assert_raises(IndexError, y.__getitem__, b'a')
@pytest.mark.skipif(sys.version_info[0] < 3, reason="Not Python 3")
def test_multiple_field_name_unicode(self):
def test_dtype_unicode():
np.dtype([("\u20B9", "f8"), ("B", "f8"), ("\u20B9", "f8")])
# Error raised when multiple fields have the same name(unicode included)
assert_raises(ValueError, test_dtype_unicode)
@pytest.mark.skipif(sys.version_info[0] >= 3, reason="Not Python 2")
def test_unicode_field_titles(self):
# Unicode field titles are added to field dict on Py2
title = u'b'
dt = np.dtype([((title, 'a'), int)])
dt[title]
dt['a']
x = np.array([(1,), (2,), (3,)], dtype=dt)
x[title]
x['a']
y = x[0]
y[title]
y['a']
@pytest.mark.skipif(sys.version_info[0] >= 3, reason="Not Python 2")
def test_unicode_field_names(self):
# Unicode field names are converted to ascii on Python 2:
encodable_name = u'b'
assert_equal(np.dtype([(encodable_name, int)]).names[0], b'b')
assert_equal(np.dtype([(('a', encodable_name), int)]).names[0], b'b')
# But raises UnicodeEncodeError if it can't be encoded:
nonencodable_name = u'\uc3bc'
assert_raises(UnicodeEncodeError, np.dtype, [(nonencodable_name, int)])
assert_raises(UnicodeEncodeError, np.dtype, [(('a', nonencodable_name), int)])
def test_fromarrays_unicode(self):
# A single name string provided to fromarrays() is allowed to be unicode
# on both Python 2 and 3:
x = np.core.records.fromarrays([[0], [1]], names=u'a,b', formats=u'i4,i4')
assert_equal(x['a'][0], 0)
assert_equal(x['b'][0], 1)
def test_unicode_order(self):
# Test that we can sort with order as a unicode field name in both Python 2 and
# 3:
name = u'b'
x = np.array([1, 3, 2], dtype=[(name, int)])
x.sort(order=name)
assert_equal(x[u'b'], np.array([1, 2, 3]))
def test_field_names(self):
# Test unicode and 8-bit / byte strings can be used
a = np.zeros((1,), dtype=[('f1', 'i4'),
('f2', 'i4'),
('f3', [('sf1', 'i4')])])
is_py3 = sys.version_info[0] >= 3
if is_py3:
funcs = (str,)
# byte string indexing fails gracefully
assert_raises(IndexError, a.__setitem__, b'f1', 1)
assert_raises(IndexError, a.__getitem__, b'f1')
assert_raises(IndexError, a['f1'].__setitem__, b'sf1', 1)
assert_raises(IndexError, a['f1'].__getitem__, b'sf1')
else:
funcs = (str, unicode)
for func in funcs:
b = a.copy()
fn1 = func('f1')
b[fn1] = 1
assert_equal(b[fn1], 1)
fnn = func('not at all')
assert_raises(ValueError, b.__setitem__, fnn, 1)
assert_raises(ValueError, b.__getitem__, fnn)
b[0][fn1] = 2
assert_equal(b[fn1], 2)
# Subfield
assert_raises(ValueError, b[0].__setitem__, fnn, 1)
assert_raises(ValueError, b[0].__getitem__, fnn)
# Subfield
fn3 = func('f3')
sfn1 = func('sf1')
b[fn3][sfn1] = 1
assert_equal(b[fn3][sfn1], 1)
assert_raises(ValueError, b[fn3].__setitem__, fnn, 1)
assert_raises(ValueError, b[fn3].__getitem__, fnn)
# multiple subfields
fn2 = func('f2')
b[fn2] = 3
assert_equal(b[['f1', 'f2']][0].tolist(), (2, 3))
assert_equal(b[['f2', 'f1']][0].tolist(), (3, 2))
assert_equal(b[['f1', 'f3']][0].tolist(), (2, (1,)))
# non-ascii unicode field indexing is well behaved
if not is_py3:
pytest.skip('non ascii unicode field indexing skipped; '
'raises segfault on python 2.x')
else:
assert_raises(ValueError, a.__setitem__, u'\u03e0', 1)
assert_raises(ValueError, a.__getitem__, u'\u03e0')
def test_record_hash(self):
a = np.array([(1, 2), (1, 2)], dtype='i1,i2')
a.flags.writeable = False
b = np.array([(1, 2), (3, 4)], dtype=[('num1', 'i1'), ('num2', 'i2')])
b.flags.writeable = False
c = np.array([(1, 2), (3, 4)], dtype='i1,i2')
c.flags.writeable = False
assert_(hash(a[0]) == hash(a[1]))
assert_(hash(a[0]) == hash(b[0]))
assert_(hash(a[0]) != hash(b[1]))
assert_(hash(c[0]) == hash(a[0]) and c[0] == a[0])
def test_record_no_hash(self):
a = np.array([(1, 2), (1, 2)], dtype='i1,i2')
assert_raises(TypeError, hash, a[0])
def test_empty_structure_creation(self):
# make sure these do not raise errors (gh-5631)
np.array([()], dtype={'names': [], 'formats': [],
'offsets': [], 'itemsize': 12})
np.array([(), (), (), (), ()], dtype={'names': [], 'formats': [],
'offsets': [], 'itemsize': 12})
def test_multifield_indexing_view(self):
a = np.ones(3, dtype=[('a', 'i4'), ('b', 'f4'), ('c', 'u4')])
v = a[['a', 'c']]
assert_(v.base is a)
assert_(v.dtype == np.dtype({'names': ['a', 'c'],
'formats': ['i4', 'u4'],
'offsets': [0, 8]}))
v[:] = (4,5)
assert_equal(a[0].item(), (4, 1, 5))
class TestView(object):
def test_basic(self):
x = np.array([(1, 2, 3, 4), (5, 6, 7, 8)],
dtype=[('r', np.int8), ('g', np.int8),
('b', np.int8), ('a', np.int8)])
# We must be specific about the endianness here:
y = x.view(dtype='<i4')
# ... and again without the keyword.
z = x.view('<i4')
assert_array_equal(y, z)
assert_array_equal(y, [67305985, 134678021])
def _mean(a, **args):
return a.mean(**args)
def _var(a, **args):
return a.var(**args)
def _std(a, **args):
return a.std(**args)
class TestStats(object):
funcs = [_mean, _var, _std]
def setup(self):
np.random.seed(range(3))
self.rmat = np.random.random((4, 5))
self.cmat = self.rmat + 1j * self.rmat
self.omat = np.array([Decimal(repr(r)) for r in self.rmat.flat])
self.omat = self.omat.reshape(4, 5)
def test_python_type(self):
for x in (np.float16(1.), 1, 1., 1+0j):
assert_equal(np.mean([x]), 1.)
assert_equal(np.std([x]), 0.)
assert_equal(np.var([x]), 0.)
def test_keepdims(self):
mat = np.eye(3)
for f in self.funcs:
for axis in [0, 1]:
res = f(mat, axis=axis, keepdims=True)
assert_(res.ndim == mat.ndim)
assert_(res.shape[axis] == 1)
for axis in [None]:
res = f(mat, axis=axis, keepdims=True)
assert_(res.shape == (1, 1))
def test_out(self):
mat = np.eye(3)
for f in self.funcs:
out = np.zeros(3)
tgt = f(mat, axis=1)
res = f(mat, axis=1, out=out)
assert_almost_equal(res, out)
assert_almost_equal(res, tgt)
out = np.empty(2)
assert_raises(ValueError, f, mat, axis=1, out=out)
out = np.empty((2, 2))
assert_raises(ValueError, f, mat, axis=1, out=out)
def test_dtype_from_input(self):
icodes = np.typecodes['AllInteger']
fcodes = np.typecodes['AllFloat']
# object type
for f in self.funcs:
mat = np.array([[Decimal(1)]*3]*3)
tgt = mat.dtype.type
res = f(mat, axis=1).dtype.type
assert_(res is tgt)
# scalar case
res = type(f(mat, axis=None))
assert_(res is Decimal)
# integer types
for f in self.funcs:
for c in icodes:
mat = np.eye(3, dtype=c)
tgt = np.float64
res = f(mat, axis=1).dtype.type
assert_(res is tgt)
# scalar case
res = f(mat, axis=None).dtype.type
assert_(res is tgt)
# mean for float types
for f in [_mean]:
for c in fcodes:
mat = np.eye(3, dtype=c)
tgt = mat.dtype.type
res = f(mat, axis=1).dtype.type
assert_(res is tgt)
# scalar case
res = f(mat, axis=None).dtype.type
assert_(res is tgt)
# var, std for float types
for f in [_var, _std]:
for c in fcodes:
mat = np.eye(3, dtype=c)
# deal with complex types
tgt = mat.real.dtype.type
res = f(mat, axis=1).dtype.type
assert_(res is tgt)
# scalar case
res = f(mat, axis=None).dtype.type
assert_(res is tgt)
def test_dtype_from_dtype(self):
mat = np.eye(3)
# stats for integer types
# FIXME:
# this needs definition as there are lots places along the line
# where type casting may take place.
# for f in self.funcs:
# for c in np.typecodes['AllInteger']:
# tgt = np.dtype(c).type
# res = f(mat, axis=1, dtype=c).dtype.type
# assert_(res is tgt)
# # scalar case
# res = f(mat, axis=None, dtype=c).dtype.type
# assert_(res is tgt)
# stats for float types
for f in self.funcs:
for c in np.typecodes['AllFloat']:
tgt = np.dtype(c).type
res = f(mat, axis=1, dtype=c).dtype.type
assert_(res is tgt)
# scalar case
res = f(mat, axis=None, dtype=c).dtype.type
assert_(res is tgt)
def test_ddof(self):
for f in [_var]:
for ddof in range(3):
dim = self.rmat.shape[1]
tgt = f(self.rmat, axis=1) * dim
res = f(self.rmat, axis=1, ddof=ddof) * (dim - ddof)
for f in [_std]:
for ddof in range(3):
dim = self.rmat.shape[1]
tgt = f(self.rmat, axis=1) * np.sqrt(dim)
res = f(self.rmat, axis=1, ddof=ddof) * np.sqrt(dim - ddof)
assert_almost_equal(res, tgt)
assert_almost_equal(res, tgt)
def test_ddof_too_big(self):
dim = self.rmat.shape[1]
for f in [_var, _std]:
for ddof in range(dim, dim + 2):
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
res = f(self.rmat, axis=1, ddof=ddof)
assert_(not (res < 0).any())
assert_(len(w) > 0)
assert_(issubclass(w[0].category, RuntimeWarning))
def test_empty(self):
A = np.zeros((0, 3))
for f in self.funcs:
for axis in [0, None]:
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
assert_(np.isnan(f(A, axis=axis)).all())
assert_(len(w) > 0)
assert_(issubclass(w[0].category, RuntimeWarning))
for axis in [1]:
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
assert_equal(f(A, axis=axis), np.zeros([]))
def test_mean_values(self):
for mat in [self.rmat, self.cmat, self.omat]:
for axis in [0, 1]:
tgt = mat.sum(axis=axis)
res = _mean(mat, axis=axis) * mat.shape[axis]
assert_almost_equal(res, tgt)
for axis in [None]:
tgt = mat.sum(axis=axis)
res = _mean(mat, axis=axis) * np.prod(mat.shape)
assert_almost_equal(res, tgt)
def test_mean_float16(self):
        # This fails if the sum inside mean is done in float16 instead
# of float32.
assert_(_mean(np.ones(100000, dtype='float16')) == 1)
def test_var_values(self):
for mat in [self.rmat, self.cmat, self.omat]:
for axis in [0, 1, None]:
msqr = _mean(mat * mat.conj(), axis=axis)
mean = _mean(mat, axis=axis)
tgt = msqr - mean * mean.conjugate()
res = _var(mat, axis=axis)
assert_almost_equal(res, tgt)
def test_std_values(self):
for mat in [self.rmat, self.cmat, self.omat]:
for axis in [0, 1, None]:
tgt = np.sqrt(_var(mat, axis=axis))
res = _std(mat, axis=axis)
assert_almost_equal(res, tgt)
def test_subclass(self):
class TestArray(np.ndarray):
def __new__(cls, data, info):
result = np.array(data)
result = result.view(cls)
result.info = info
return result
def __array_finalize__(self, obj):
self.info = getattr(obj, "info", '')
dat = TestArray([[1, 2, 3, 4], [5, 6, 7, 8]], 'jubba')
res = dat.mean(1)
assert_(res.info == dat.info)
res = dat.std(1)
assert_(res.info == dat.info)
res = dat.var(1)
assert_(res.info == dat.info)
class TestVdot(object):
def test_basic(self):
dt_numeric = np.typecodes['AllFloat'] + np.typecodes['AllInteger']
dt_complex = np.typecodes['Complex']
# test real
a = np.eye(3)
for dt in dt_numeric + 'O':
b = a.astype(dt)
res = np.vdot(b, b)
assert_(np.isscalar(res))
assert_equal(np.vdot(b, b), 3)
# test complex
a = np.eye(3) * 1j
for dt in dt_complex + 'O':
b = a.astype(dt)
res = np.vdot(b, b)
assert_(np.isscalar(res))
assert_equal(np.vdot(b, b), 3)
# test boolean
b = np.eye(3, dtype=bool)
res = np.vdot(b, b)
assert_(np.isscalar(res))
assert_equal(np.vdot(b, b), True)
def test_vdot_array_order(self):
a = np.array([[1, 2], [3, 4]], order='C')
b = np.array([[1, 2], [3, 4]], order='F')
res = np.vdot(a, a)
# integer arrays are exact
assert_equal(np.vdot(a, b), res)
assert_equal(np.vdot(b, a), res)
assert_equal(np.vdot(b, b), res)
def test_vdot_uncontiguous(self):
for size in [2, 1000]:
# Different sizes match different branches in vdot.
a = np.zeros((size, 2, 2))
b = np.zeros((size, 2, 2))
a[:, 0, 0] = np.arange(size)
b[:, 0, 0] = np.arange(size) + 1
# Make a and b uncontiguous:
a = a[..., 0]
b = b[..., 0]
assert_equal(np.vdot(a, b),
np.vdot(a.flatten(), b.flatten()))
assert_equal(np.vdot(a, b.copy()),
np.vdot(a.flatten(), b.flatten()))
assert_equal(np.vdot(a.copy(), b),
np.vdot(a.flatten(), b.flatten()))
assert_equal(np.vdot(a.copy('F'), b),
np.vdot(a.flatten(), b.flatten()))
assert_equal(np.vdot(a, b.copy('F')),
np.vdot(a.flatten(), b.flatten()))
class TestDot(object):
def setup(self):
np.random.seed(128)
self.A = np.random.rand(4, 2)
self.b1 = np.random.rand(2, 1)
self.b2 = np.random.rand(2)
self.b3 = np.random.rand(1, 2)
self.b4 = np.random.rand(4)
self.N = 7
def test_dotmatmat(self):
A = self.A
res = np.dot(A.transpose(), A)
tgt = np.array([[1.45046013, 0.86323640],
[0.86323640, 0.84934569]])
assert_almost_equal(res, tgt, decimal=self.N)
def test_dotmatvec(self):
A, b1 = self.A, self.b1
res = np.dot(A, b1)
tgt = np.array([[0.32114320], [0.04889721],
[0.15696029], [0.33612621]])
assert_almost_equal(res, tgt, decimal=self.N)
def test_dotmatvec2(self):
A, b2 = self.A, self.b2
res = np.dot(A, b2)
tgt = np.array([0.29677940, 0.04518649, 0.14468333, 0.31039293])
assert_almost_equal(res, tgt, decimal=self.N)
def test_dotvecmat(self):
A, b4 = self.A, self.b4
res = np.dot(b4, A)
tgt = np.array([1.23495091, 1.12222648])
assert_almost_equal(res, tgt, decimal=self.N)
def test_dotvecmat2(self):
b3, A = self.b3, self.A
res = np.dot(b3, A.transpose())
tgt = np.array([[0.58793804, 0.08957460, 0.30605758, 0.62716383]])
assert_almost_equal(res, tgt, decimal=self.N)
def test_dotvecmat3(self):
A, b4 = self.A, self.b4
res = np.dot(A.transpose(), b4)
tgt = np.array([1.23495091, 1.12222648])
assert_almost_equal(res, tgt, decimal=self.N)
def test_dotvecvecouter(self):
b1, b3 = self.b1, self.b3
res = np.dot(b1, b3)
tgt = np.array([[0.20128610, 0.08400440], [0.07190947, 0.03001058]])
assert_almost_equal(res, tgt, decimal=self.N)
def test_dotvecvecinner(self):
b1, b3 = self.b1, self.b3
res = np.dot(b3, b1)
tgt = np.array([[ 0.23129668]])
assert_almost_equal(res, tgt, decimal=self.N)
def test_dotcolumnvect1(self):
b1 = np.ones((3, 1))
b2 = [5.3]
res = np.dot(b1, b2)
tgt = np.array([5.3, 5.3, 5.3])
assert_almost_equal(res, tgt, decimal=self.N)
def test_dotcolumnvect2(self):
b1 = np.ones((3, 1)).transpose()
b2 = [6.2]
res = np.dot(b2, b1)
tgt = np.array([6.2, 6.2, 6.2])
assert_almost_equal(res, tgt, decimal=self.N)
def test_dotvecscalar(self):
np.random.seed(100)
b1 = np.random.rand(1, 1)
b2 = np.random.rand(1, 4)
res = np.dot(b1, b2)
tgt = np.array([[0.15126730, 0.23068496, 0.45905553, 0.00256425]])
assert_almost_equal(res, tgt, decimal=self.N)
def test_dotvecscalar2(self):
np.random.seed(100)
b1 = np.random.rand(4, 1)
b2 = np.random.rand(1, 1)
res = np.dot(b1, b2)
tgt = np.array([[0.00256425],[0.00131359],[0.00200324],[ 0.00398638]])
assert_almost_equal(res, tgt, decimal=self.N)
def test_all(self):
dims = [(), (1,), (1, 1)]
dout = [(), (1,), (1, 1), (1,), (), (1,), (1, 1), (1,), (1, 1)]
for dim, (dim1, dim2) in zip(dout, itertools.product(dims, dims)):
b1 = np.zeros(dim1)
b2 = np.zeros(dim2)
res = np.dot(b1, b2)
tgt = np.zeros(dim)
assert_(res.shape == tgt.shape)
assert_almost_equal(res, tgt, decimal=self.N)
def test_vecobject(self):
class Vec(object):
def __init__(self, sequence=None):
if sequence is None:
sequence = []
self.array = np.array(sequence)
def __add__(self, other):
out = Vec()
out.array = self.array + other.array
return out
def __sub__(self, other):
out = Vec()
out.array = self.array - other.array
return out
def __mul__(self, other): # with scalar
out = Vec(self.array.copy())
out.array *= other
return out
def __rmul__(self, other):
return self*other
U_non_cont = np.transpose([[1., 1.], [1., 2.]])
U_cont = np.ascontiguousarray(U_non_cont)
x = np.array([Vec([1., 0.]), Vec([0., 1.])])
zeros = np.array([Vec([0., 0.]), Vec([0., 0.])])
zeros_test = np.dot(U_cont, x) - np.dot(U_non_cont, x)
assert_equal(zeros[0].array, zeros_test[0].array)
assert_equal(zeros[1].array, zeros_test[1].array)
def test_dot_2args(self):
from numpy.core.multiarray import dot
a = np.array([[1, 2], [3, 4]], dtype=float)
b = np.array([[1, 0], [1, 1]], dtype=float)
c = np.array([[3, 2], [7, 4]], dtype=float)
d = dot(a, b)
assert_allclose(c, d)
def test_dot_3args(self):
from numpy.core.multiarray import dot
np.random.seed(22)
f = np.random.random_sample((1024, 16))
v = np.random.random_sample((16, 32))
r = np.empty((1024, 32))
for i in range(12):
dot(f, v, r)
if HAS_REFCOUNT:
assert_equal(sys.getrefcount(r), 2)
r2 = dot(f, v, out=None)
assert_array_equal(r2, r)
assert_(r is dot(f, v, out=r))
v = v[:, 0].copy() # v.shape == (16,)
r = r[:, 0].copy() # r.shape == (1024,)
r2 = dot(f, v)
assert_(r is dot(f, v, r))
assert_array_equal(r2, r)
def test_dot_3args_errors(self):
from numpy.core.multiarray import dot
np.random.seed(22)
f = np.random.random_sample((1024, 16))
v = np.random.random_sample((16, 32))
r = np.empty((1024, 31))
assert_raises(ValueError, dot, f, v, r)
r = np.empty((1024,))
assert_raises(ValueError, dot, f, v, r)
r = np.empty((32,))
assert_raises(ValueError, dot, f, v, r)
r = np.empty((32, 1024))
assert_raises(ValueError, dot, f, v, r)
assert_raises(ValueError, dot, f, v, r.T)
r = np.empty((1024, 64))
assert_raises(ValueError, dot, f, v, r[:, ::2])
assert_raises(ValueError, dot, f, v, r[:, :32])
r = np.empty((1024, 32), dtype=np.float32)
assert_raises(ValueError, dot, f, v, r)
r = np.empty((1024, 32), dtype=int)
assert_raises(ValueError, dot, f, v, r)
def test_dot_array_order(self):
a = np.array([[1, 2], [3, 4]], order='C')
b = np.array([[1, 2], [3, 4]], order='F')
res = np.dot(a, a)
# integer arrays are exact
assert_equal(np.dot(a, b), res)
assert_equal(np.dot(b, a), res)
assert_equal(np.dot(b, b), res)
def test_accelerate_framework_sgemv_fix(self):
def aligned_array(shape, align, dtype, order='C'):
d = dtype(0)
N = np.prod(shape)
tmp = np.zeros(N * d.nbytes + align, dtype=np.uint8)
address = tmp.__array_interface__["data"][0]
for offset in range(align):
if (address + offset) % align == 0:
break
tmp = tmp[offset:offset+N*d.nbytes].view(dtype=dtype)
return tmp.reshape(shape, order=order)
def as_aligned(arr, align, dtype, order='C'):
aligned = aligned_array(arr.shape, align, dtype, order)
aligned[:] = arr[:]
return aligned
def assert_dot_close(A, X, desired):
assert_allclose(np.dot(A, X), desired, rtol=1e-5, atol=1e-7)
m = aligned_array(100, 15, np.float32)
s = aligned_array((100, 100), 15, np.float32)
np.dot(s, m) # this will always segfault if the bug is present
testdata = itertools.product((15,32), (10000,), (200,89), ('C','F'))
for align, m, n, a_order in testdata:
# Calculation in double precision
A_d = np.random.rand(m, n)
X_d = np.random.rand(n)
desired = np.dot(A_d, X_d)
# Calculation with aligned single precision
A_f = as_aligned(A_d, align, np.float32, order=a_order)
X_f = as_aligned(X_d, align, np.float32)
assert_dot_close(A_f, X_f, desired)
# Strided A rows
A_d_2 = A_d[::2]
desired = np.dot(A_d_2, X_d)
A_f_2 = A_f[::2]
assert_dot_close(A_f_2, X_f, desired)
# Strided A columns, strided X vector
A_d_22 = A_d_2[:, ::2]
X_d_2 = X_d[::2]
desired = np.dot(A_d_22, X_d_2)
A_f_22 = A_f_2[:, ::2]
X_f_2 = X_f[::2]
assert_dot_close(A_f_22, X_f_2, desired)
# Check the strides are as expected
if a_order == 'F':
assert_equal(A_f_22.strides, (8, 8 * m))
else:
assert_equal(A_f_22.strides, (8 * n, 8))
assert_equal(X_f_2.strides, (8,))
# Strides in A rows + cols only
X_f_2c = as_aligned(X_f_2, align, np.float32)
assert_dot_close(A_f_22, X_f_2c, desired)
# Strides just in A cols
A_d_12 = A_d[:, ::2]
desired = np.dot(A_d_12, X_d_2)
A_f_12 = A_f[:, ::2]
assert_dot_close(A_f_12, X_f_2c, desired)
# Strides in A cols and X
assert_dot_close(A_f_12, X_f_2, desired)
class MatmulCommon(object):
"""Common tests for '@' operator and numpy.matmul.
"""
# Should work with these types. Will want to add
# "O" at some point
types = "?bhilqBHILQefdgFDG"
def test_exceptions(self):
dims = [
((1,), (2,)), # mismatched vector vector
((2, 1,), (2,)), # mismatched matrix vector
((2,), (1, 2)), # mismatched vector matrix
((1, 2), (3, 1)), # mismatched matrix matrix
((1,), ()), # vector scalar
((), (1)), # scalar vector
((1, 1), ()), # matrix scalar
((), (1, 1)), # scalar matrix
((2, 2, 1), (3, 1, 2)), # cannot broadcast
]
for dt, (dm1, dm2) in itertools.product(self.types, dims):
a = np.ones(dm1, dtype=dt)
b = np.ones(dm2, dtype=dt)
assert_raises(ValueError, self.matmul, a, b)
def test_shapes(self):
dims = [
((1, 1), (2, 1, 1)), # broadcast first argument
((2, 1, 1), (1, 1)), # broadcast second argument
((2, 1, 1), (2, 1, 1)), # matrix stack sizes match
]
for dt, (dm1, dm2) in itertools.product(self.types, dims):
a = np.ones(dm1, dtype=dt)
b = np.ones(dm2, dtype=dt)
res = self.matmul(a, b)
assert_(res.shape == (2, 1, 1))
# vector vector returns scalars.
for dt in self.types:
a = np.ones((2,), dtype=dt)
b = np.ones((2,), dtype=dt)
c = self.matmul(a, b)
assert_(np.array(c).shape == ())
def test_result_types(self):
mat = np.ones((1,1))
vec = np.ones((1,))
for dt in self.types:
m = mat.astype(dt)
v = vec.astype(dt)
for arg in [(m, v), (v, m), (m, m)]:
res = self.matmul(*arg)
assert_(res.dtype == dt)
# vector vector returns scalars
res = self.matmul(v, v)
assert_(type(res) is np.dtype(dt).type)
def test_scalar_output(self):
vec1 = np.array([2])
vec2 = np.array([3, 4]).reshape(1, -1)
tgt = np.array([6, 8])
for dt in self.types[1:]:
v1 = vec1.astype(dt)
v2 = vec2.astype(dt)
res = self.matmul(v1, v2)
assert_equal(res, tgt)
res = self.matmul(v2.T, v1)
assert_equal(res, tgt)
# boolean type
vec = np.array([True, True], dtype='?').reshape(1, -1)
res = self.matmul(vec[:, 0], vec)
assert_equal(res, True)
def test_vector_vector_values(self):
vec1 = np.array([1, 2])
vec2 = np.array([3, 4]).reshape(-1, 1)
tgt1 = np.array([11])
tgt2 = np.array([[3, 6], [4, 8]])
for dt in self.types[1:]:
v1 = vec1.astype(dt)
v2 = vec2.astype(dt)
res = self.matmul(v1, v2)
assert_equal(res, tgt1)
# no broadcast, we must make v1 into a 2d ndarray
res = self.matmul(v2, v1.reshape(1, -1))
assert_equal(res, tgt2)
# boolean type
vec = np.array([True, True], dtype='?')
res = self.matmul(vec, vec)
assert_equal(res, True)
def test_vector_matrix_values(self):
vec = np.array([1, 2])
mat1 = np.array([[1, 2], [3, 4]])
mat2 = np.stack([mat1]*2, axis=0)
tgt1 = np.array([7, 10])
tgt2 = np.stack([tgt1]*2, axis=0)
for dt in self.types[1:]:
v = vec.astype(dt)
m1 = mat1.astype(dt)
m2 = mat2.astype(dt)
res = self.matmul(v, m1)
assert_equal(res, tgt1)
res = self.matmul(v, m2)
assert_equal(res, tgt2)
# boolean type
vec = np.array([True, False])
mat1 = np.array([[True, False], [False, True]])
mat2 = np.stack([mat1]*2, axis=0)
tgt1 = np.array([True, False])
tgt2 = np.stack([tgt1]*2, axis=0)
res = self.matmul(vec, mat1)
assert_equal(res, tgt1)
res = self.matmul(vec, mat2)
assert_equal(res, tgt2)
def test_matrix_vector_values(self):
vec = np.array([1, 2])
mat1 = np.array([[1, 2], [3, 4]])
mat2 = np.stack([mat1]*2, axis=0)
tgt1 = np.array([5, 11])
tgt2 = np.stack([tgt1]*2, axis=0)
for dt in self.types[1:]:
v = vec.astype(dt)
m1 = mat1.astype(dt)
m2 = mat2.astype(dt)
res = self.matmul(m1, v)
assert_equal(res, tgt1)
res = self.matmul(m2, v)
assert_equal(res, tgt2)
# boolean type
vec = np.array([True, False])
mat1 = np.array([[True, False], [False, True]])
mat2 = np.stack([mat1]*2, axis=0)
tgt1 = np.array([True, False])
tgt2 = np.stack([tgt1]*2, axis=0)
res = self.matmul(vec, mat1)
assert_equal(res, tgt1)
res = self.matmul(vec, mat2)
assert_equal(res, tgt2)
def test_matrix_matrix_values(self):
mat1 = np.array([[1, 2], [3, 4]])
mat2 = np.array([[1, 0], [1, 1]])
mat12 = np.stack([mat1, mat2], axis=0)
mat21 = np.stack([mat2, mat1], axis=0)
tgt11 = np.array([[7, 10], [15, 22]])
tgt12 = np.array([[3, 2], [7, 4]])
tgt21 = np.array([[1, 2], [4, 6]])
tgt12_21 = np.stack([tgt12, tgt21], axis=0)
tgt11_12 = np.stack((tgt11, tgt12), axis=0)
tgt11_21 = np.stack((tgt11, tgt21), axis=0)
for dt in self.types[1:]:
m1 = mat1.astype(dt)
m2 = mat2.astype(dt)
m12 = mat12.astype(dt)
m21 = mat21.astype(dt)
# matrix @ matrix
res = self.matmul(m1, m2)
assert_equal(res, tgt12)
res = self.matmul(m2, m1)
assert_equal(res, tgt21)
# stacked @ matrix
res = self.matmul(m12, m1)
assert_equal(res, tgt11_21)
# matrix @ stacked
res = self.matmul(m1, m12)
assert_equal(res, tgt11_12)
# stacked @ stacked
res = self.matmul(m12, m21)
assert_equal(res, tgt12_21)
# boolean type
m1 = np.array([[1, 1], [0, 0]], dtype=np.bool_)
m2 = np.array([[1, 0], [1, 1]], dtype=np.bool_)
m12 = np.stack([m1, m2], axis=0)
m21 = np.stack([m2, m1], axis=0)
tgt11 = m1
tgt12 = m1
tgt21 = np.array([[1, 1], [1, 1]], dtype=np.bool_)
tgt12_21 = np.stack([tgt12, tgt21], axis=0)
tgt11_12 = np.stack((tgt11, tgt12), axis=0)
tgt11_21 = np.stack((tgt11, tgt21), axis=0)
# matrix @ matrix
res = self.matmul(m1, m2)
assert_equal(res, tgt12)
res = self.matmul(m2, m1)
assert_equal(res, tgt21)
# stacked @ matrix
res = self.matmul(m12, m1)
assert_equal(res, tgt11_21)
# matrix @ stacked
res = self.matmul(m1, m12)
assert_equal(res, tgt11_12)
# stacked @ stacked
res = self.matmul(m12, m21)
assert_equal(res, tgt12_21)
class TestMatmul(MatmulCommon):
matmul = np.matmul
def test_out_arg(self):
a = np.ones((5, 2), dtype=float)
b = np.array([[1, 3], [5, 7]], dtype=float)
tgt = np.dot(a, b)
# test as positional argument
msg = "out positional argument"
out = np.zeros((5, 2), dtype=float)
self.matmul(a, b, out)
assert_array_equal(out, tgt, err_msg=msg)
# test as keyword argument
msg = "out keyword argument"
out = np.zeros((5, 2), dtype=float)
self.matmul(a, b, out=out)
assert_array_equal(out, tgt, err_msg=msg)
# test out with not allowed type cast (safe casting)
msg = "Cannot cast ufunc matmul output"
out = np.zeros((5, 2), dtype=np.int32)
assert_raises_regex(TypeError, msg, self.matmul, a, b, out=out)
# test out with type upcast to complex
out = np.zeros((5, 2), dtype=np.complex128)
c = self.matmul(a, b, out=out)
assert_(c is out)
with suppress_warnings() as sup:
sup.filter(np.ComplexWarning, '')
c = c.astype(tgt.dtype)
|
assert_array_equal(c, tgt)
|
numpy.testing.assert_array_equal
|
import os
import json
import numpy as np
from datetime import datetime
from threading import Thread, Lock
to_rad = np.pi / 180.0
to_deg = 180.0 / np.pi
steps = [32, 16, 8, 4, 2, 1]
class PointCloud:
def __init__(self, key, lon, lat, alt, value, time, epoch):
self.key = key
self.lon = lon
self.lat = lat
self.alt = alt
self.time = time
self.value = value
self.epoch = epoch
self.tasks = []
self.threads = []
for i in range(10):
self.threads.append(Thread(target=self.worker_function))
self.tileset_lock = Lock()
self.tileset_json = {
"asset": {
"version": "1.0",
"type": "Airborne Radar"
},
"root": {
"geometricError": 1000000,
"refine" : "REPLACE",
"boundingVolume": {
"region": [
float(np.min(lon)) * to_rad,
float(np.min(lat)) * to_rad,
float(np.max(lon)) * to_rad,
float(np.max(lat)) * to_rad,
                    float(np.min(alt)),  # region heights stay in metres; only the lon/lat bounds are radians
                    float(np.max(alt))
]
},
"children": []
},
"properties": {
"epoch": "{}Z".format(datetime.utcfromtimestamp(epoch).isoformat()),
"refined": []
}
}
def worker_function(self):
while len(self.tasks) > 0:
tile, start, end = self.tasks.pop()
print(tile, start, end)
self.generate(tile, start, end)
def start(self):
for t in self.threads:
t.start()
def join(self):
for t in self.threads:
t.join()
with open('{}/tileset.json'.format(self.key), mode='w+') as outfile:
json.dump(self.tileset_json, outfile)
def schedule_task(self, tile, start, end):
self.tasks.append((tile, start, end))
def generate(self, tile, start, end):
print(tile, start, end)
parent_tile = self.tileset_json["root"]
cartesian, offset, scale, cartographic, region = self.cartographic_to_cartesian(start, end)
value = self.value[start:end]
time = self.time[start:end]
epoch = int(
|
np.min(time)
|
numpy.min
|
from core.sequence_filler import SequenceFiller, fill_fn
import numpy as np
from scipy import interpolate, ndimage
import data_source.temperature_model.parser as parser
import data_source.temperature_model.proxy as proxy
import data_source.temperature_model.gfs_parser as gfs
from math import floor, ceil
import logging as log
HOUR = 3600
MODEL_PERIOD = 6 * HOUR
MODEL_LAG_H = 72 # assume NCEP/NCAR reanalysis data for that time back is always available
FORECAST_OVERLAP_H = 12
FORECAST_ALLOW_FUTURE = 4 * 24 * HOUR
MODEL_LAG = (MODEL_LAG_H * HOUR) // MODEL_PERIOD * MODEL_PERIOD
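# Worked example with the values above: (72 * 3600) // 21600 * 21600 = 259200 s,
# i.e. the lag is rounded down to a whole number of 6-hour model periods.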
MODEL_EPOCH =
|
np.datetime64('1948-01-01')
|
numpy.datetime64
|
"""
nitrogen.basis
--------------
Basis set functions including discrete-variable
representations (DVRs) and finite-basis representations
(FBRs). The main objects are the :class:`GriddedBasis` class
and its sub-classes :class:`GenericDVR` and :class:`NDBasis`.
See :doc:`tutorials/dvr` for a tutorial.
================================ ===================================
General gridded bases
======================================================================
:class:`GriddedBasis` General quadrature grid basis.
:class:`ConcatenatedBasis` Direct sum of :class:`GriddedBasis`
-------------------------------- -----------------------------------
**Discrete-variable representation bases**
----------------------------------------------------------------------
:class:`GenericDVR` Parent DVR basis class.
:class:`SimpleDVR` Simple one-dimensional DVRs.
:class:`Contracted` Contracted DVR.
-------------------------------- -----------------------------------
**FBR quadrature bases**
----------------------------------------------------------------------
:class:`NDBasis` Parent class for :math:`n`-d quadrature bases
:class:`SinCosBasis` A sine-cosine (real Fourier) basis.
:class:`LegendreLMCosBasis` Associated Legendre polynomials.
:class:`RealSphericalHBasis` Real spherical harmonics.
:class:`Real2DHOBasis` Two-dimensional harmonic oscillator.
:class:`RadialHOBasis` Radial HO basis in :math:`d` dimensions.
================================ ===================================
"""
# Import main module into name-space
from . import genericbasis
from .genericbasis import *
from . import dvr
from .dvr import *
from . import ndbasis
from .ndbasis import *
# Load submodules
from . import ops # DVR operators
__all__ = []
__all__ += genericbasis.__all__
__all__ += dvr.__all__
__all__ += ndbasis.__all__
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import cm
from skimage.measure import marching_cubes
from scipy import interpolate
def gridshape(dvrs):
"""
    Return the shape of the N-D grid formed by a list of DVRs.
Parameters
----------
dvrs : list
Each element is a GenericDVR object or a fixed-value
scalar.
Returns
-------
shape : tuple
The N-D DVR grid shape.
"""
shape = []
for i in range(len(dvrs)):
if isinstance(dvrs[i], dvr.GenericDVR):
# Grid coordinate
shape.append(dvrs[i].num)
else:
# Fixed coordinate
shape.append(1)
# Grid shape, including singleton fixed coordinates
shape = tuple(shape)
return shape
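# Illustrative sketch (hypothetical values): for
#   dvrs = [<a GenericDVR with num=10>, 1.5, <a GenericDVR with num=21>]
# gridshape(dvrs) would return (10, 1, 21) -- fixed scalar coordinates become singleton axes.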
def dvr2grid(dvrs):
"""
Create N-D grids from a list of DVRs.
Parameters
----------
dvrs : list
Each element is a GenericDVR object or a fixed-value
scalar.
Returns
-------
grid : ndarray
An (N+1)-D ndarray of the stacked meshgrids
"""
grids = []
vshape = []
for i in range(len(dvrs)):
if isinstance(dvrs[i], dvr.GenericDVR):
# Grid coordinate
grids.append(dvrs[i].grid)
vshape.append(dvrs[i].num)
else:
# Fixed coordinate
grids.append(dvrs[i]) # scalar value
vshape.append(1)
# Grid shape, including singleton fixed coordinates
vshape = tuple(vshape)
# Calculate the coordinate grids
Q = np.stack(np.meshgrid(*grids, indexing = 'ij'))
return Q
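# Note: Q has shape (len(dvrs),) + gridshape(dvrs); Q[i] is the i-th coordinate
# broadcast over the full direct-product grid (fixed scalars appear as singleton axes).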
def bases2grid(bases):
"""
Create direct product grids from a list of GriddedBasis objects
and scalars.
Parameters
----------
bases : list
Each element is a :class:`~nitrogen.genericbasis.GriddedBasis` object or a
fixed-value scalar.
Returns
-------
grid : ndarray
"""
grids = []
qshape = []
nq = 0
index_of_coord = []
for i,bas in enumerate(bases):
# if isinstance(bas, GenericDVR):
# grids.append(bas.grid)
# qshape.append(bas.num)
# nq += 1
# index_of_coord.append(i)
# elif isinstance(bas, NDBasis):
# for j in range(bas.nd) :
# grids.append(bas.qgrid[j])
# index_of_coord.append(i)
# qshape.append(bas.Nq)
# nq += bas.nd
# else:
# grids.append(bas)
# qshape.append(1)
# nq += 1
# index_of_coord.append(i)
# Use generic GriddedBasis interface
if np.isscalar(bas):
grids.append(bas) # The scalar value
qshape.append(1)
nq += 1
index_of_coord.append(i)
else:
# Assume GriddedBasis
for j in range(bas.nd):
grids.append(bas.gridpts[j])
index_of_coord.append(i)
qshape.append(bas.ng)
nq += bas.nd
Qi = []
for i in range(nq):
# The i**th coordinate
# spans the j**th index (axis)
j = index_of_coord[i]
newshape = [1]*len(qshape)
newshape[j] = qshape[j]
newshape = tuple(newshape)
gi = np.reshape(grids[i], newshape)
Qi.append(np.broadcast_to(gi, qshape))
Q = np.stack(Qi, axis = 0)
return Q
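# Note: each GriddedBasis contributes `nd` coordinate rows to Q that all share the same
# grid axis (of length `ng`), while scalar entries contribute one constant singleton axis.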
def plot(dvrs, fun, labels = None,
ls = 'k.-', mode2d = 'surface', isovalue = None):
"""
Plot a function over a 1-D or 2-D DVR grid
Parameters
----------
dvrs : list
List of GenericDVRs or fixed-value scalars.
fun : function or array
If function, f(Q) evaluates the vectorized grid function.
If an array, then fun is the same size of the return of
dvr2grid(dvrs).shape[1:]
labels : list of str, optional
Coordinate labels (including fixed).
ls : str, optional
1-D line spec.
mode2d: {'surface', 'contour'}, optional
2-D plot style.
isovalue : scalar or array_like
Isosurface value(s) for 3-D plot. If None (default), a
fixed fraction of the maximum absolute value will be used.
Returns
-------
fig, ax
Plot objects
"""
qgrid = dvr2grid(dvrs)
try: # Attempt to call fun as a function
ygrid = fun(qgrid)
except:
# If that fails, assume it is an ndarray grid
ygrid = fun.copy()
# Determine the non-singleton dimensions
idx = []
for i in range(len(ygrid.shape)):
if ygrid.shape[i] > 1:
idx.append(i)
ndim = len(idx)
if ndim < 1:
raise ValueError("There must be at least 1 non-singleton dimension")
elif ndim == 1:
#
# 1-D plot
#
fig = plt.figure()
x = qgrid[idx[0]].squeeze()
y = ygrid.squeeze()
plt.plot(x,y,ls)
ax = plt.gca()
if labels is not None:
plt.xlabel(labels[idx[0]])
elif ndim == 2:
#
# 2-D surface or contour plot
#
fig = plt.figure()
X = qgrid[idx[0]].squeeze()
Y = qgrid[idx[1]].squeeze()
Z = ygrid.squeeze()
if mode2d == 'surface':
ax = fig.add_subplot(111, projection='3d')
ax.plot_surface(X, Y, Z,
cmap = cm.coolwarm,
linewidth = 0,
rcount = Z.shape[0],
ccount = Z.shape[1])
elif mode2d == 'contour':
plt.contour(X, Y, Z, levels = 50)
ax = plt.gca()
else:
raise ValueError("Unexpected mode2d string")
if labels is not None:
ax.set_xlabel(labels[idx[0]])
ax.set_ylabel(labels[idx[1]])
elif ndim == 3:
#
# 3-D isosurface plot
#
#x, y, z = pi*np.mgrid[-1:1:31j, -1:1:31j, -1:1:31j]
#vol = cos(x) + cos(y) + cos(z)
X = qgrid[idx[0]].squeeze()
Y = qgrid[idx[1]].squeeze()
Z = qgrid[idx[2]].squeeze()
V = ygrid.squeeze() # Value field
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
if isovalue is None:
iso_val = np.array([ i * 0.2 * np.max(np.absolute(V)) for i in [-1., 1.]])
elif np.isscalar(isovalue):
iso_val = np.array([isovalue])
else:
iso_val = np.array(isovalue)
if len(iso_val) == 0:
raise ValueError("There must be at least one isovalue.")
elif len(iso_val) == 1:
color = ['b']
else:
min_val = np.min(iso_val)
max_val = np.max(iso_val)
P = (iso_val - min_val) / (max_val - min_val)
color = [ (1.0-p, 0.0, p) for p in P]
for i in range(len(iso_val)):
# Positive iso value
try:
verts, faces, _, _ = marching_cubes(V, iso_val[i], step_size = 1)
vxyz = [interpolate.interp1d(np.arange(dvrs[idx[i]].num), dvrs[idx[i]].grid)(verts[:,i]) for i in range(3)]
ax.plot_trisurf(vxyz[0], vxyz[1], faces, vxyz[2],
lw=1, color = color[i])
except:
pass
# marching_cubes will raise an error if
# the surface isovalue is not in the range.
# If this occurs, we will just not plot that
                # isosurface.
if labels is not None:
ax.set_xlabel(labels[idx[0]])
ax.set_ylabel(labels[idx[1]])
ax.set_zlabel(labels[idx[2]])
else:
raise ValueError("There are more than 3 dimensions.")
return fig, ax
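# Hedged usage sketch (the DVR objects and labels are illustrative only):
#   fig, ax = plot([q1_dvr, 0.0, q2_dvr], lambda Q: Q[0]**2 + Q[2]**2,
#                  labels=['q1', 'fixed', 'q2'])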
def transferDVR(grid_old, dvrs_old, dvrs_new):
"""
Interpolate a direct-product DVR expansion from one
set of DVR bases to another.
Parameters
----------
grid_old : ndarray
A grid of coefficients for the old DVR direct-product grid,
with shape dvr2grid(dvrs_old).shape[1:]
dvrs_old: list
A list of GenericDVRs and/or scalar values. This is the original
set of grids defining `grid_old`.
dvrs_new : list
A list of GenericDVRS and/or scalar values. This defines the
new direct product grid. Scalar elements in `dvrs_new`
must occur at the same position in `dvrs_old`, but their
values are ignored.
Returns
-------
grid_new : ndarray
An array with shape dvr2grid(dvrs_new).shape[1:] containing
the expansion coefficients for the function represented by
`grid_old`.
"""
grids_old = []
vshape_old = []
grids_new = []
vshape_new = []
nd = len(dvrs_old) # The number of dimensions, including singletons
if nd != len(dvrs_new):
raise ValueError("dvrs_old and dvrs_new must be the same length")
for i in range(nd):
if isinstance(dvrs_old[i], dvr.GenericDVR):
# Grid coordinate
grids_old.append(dvrs_old[i].grid)
vshape_old.append(dvrs_old[i].num)
if not isinstance(dvrs_new[i], dvr.GenericDVR):
raise TypeError("DVR vs. scalar mis-match")
else:
grids_new.append(dvrs_new[i].grid)
vshape_new.append(dvrs_new[i].num)
else:
# Fixed coordinate
grids_old.append(None) # None signifies non-active here
vshape_old.append(1)
grids_new.append(None) # None signifies non-active here
vshape_new.append(1)
# Grid shape, including singleton fixed coordinates
vshape_old = tuple(vshape_old)
vshape_new = tuple(vshape_new)
# Evaluate the original expansion on the grid points
# of the new expansion
eval_old_on_new = grid_old
for i in range(nd):
if grids_old[i] is None:
continue # This is a singleton/fixed dimension. Skip it
# For the i**th dimension
# 1) Evaluate the old DVR wavefunctions
# on the new DVR grid points
#
ti = dvrs_old[i].wfs(dvrs_new[i].grid) # Shape: (num_new, num_old)
#
# 2) Convert the i**th dimension from old coefficients to the value
# on the new grid points
eval_old_on_new = np.tensordot(ti, eval_old_on_new, axes = (1,i))
eval_old_on_new = np.moveaxis(eval_old_on_new, 0, i)
#
# eval_old_on_new should now be complete
# Calculate the weights of the new direct-product DVR on its own grid.
wgt_new = np.ones(vshape_new)
for i in range(nd):
if grids_new[i] is not None:
# Calculate the diagonal basis function values for this DVR
wi = np.diag(dvrs_new[i].wfs(dvrs_new[i].grid))
sh = [1 for i in range(len(grids_new))]
sh[i] = dvrs_new[i].num
sh = tuple(sh) # (1, 1, ... , num, ..., 1, 1)
# Broadcast these weights to the ND grid
wgt_new *= wi.reshape(sh)
# Finally, calculate the coefficients of the new DVR functions
grid_new = eval_old_on_new / wgt_new
return grid_new
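# In short (following the code above): the new coefficients are the old expansion evaluated
# at the new grid points, divided by the diagonal values of the new DVR functions,
#   grid_new = f_old(q_new) / w_new,  with  w_new[k] = phi_new_k(q_new_k).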
def _to_quad(bases, x, force_copy = False):
""" convert the mixed FBR representation
array to the quadrature array
"""
for i,b in enumerate(bases):
# i**th axis
if b is None or np.isscalar(b):
pass
else:
x = b.basis2grid(x,i)
if force_copy:
x = x.copy()
return x
def _to_fbr(bases, x, force_copy = False):
""" convert the quadrature array to
the mixed FBR representation array
"""
for i,b in enumerate(bases):
# i**th axis
if b is None or np.isscalar(b):
pass
else:
x = b.grid2basis(x,i)
if force_copy:
x = x.copy()
return x
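# Both helpers transform one axis at a time; entries that are None or scalars
# (fixed coordinates) are passed through unchanged.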
def calcRhoLogD(bases, Q):
"""
Calculate the logarithmic derivative
of the basis set volume integration
function.
Parameters
----------
bases : list
List of GriddedBasis and scalars.
Q : ndarray
The coordinate values. `Q[i]` is an array
for the i**th coordinate.
Returns
-------
rhotilde : ndarray
`rhotilde[i]` is the logarithmic derivative
of :math:`\\rho` with respect to the `i`th
**active** coordinate.
"""
rhotilde = []
k = 0
for b in bases:
if np.isscalar(b):
# An inactive coordinate
# No entry
k += 1
else:
# Assume a GriddedBasis
if b.wgtfun is None:
for i in range(b.nd):
rhotilde.append(
|
np.zeros(Q.shape[1:])
|
numpy.zeros
|
# -*- coding: UTF-8 -*-
# trial on the : Satomi machine
# Created by Ush on 2018/06/03
# Project name : class12_Spline curves
# Please contact CHIH, HSIN-CHING/D0631008 if you wish to refer to this source code.
# NOTE: no liability for any loss or damage from using this source code; use it at your own risk.
from __future__ import division
from pycallgraph import PyCallGraph
from pycallgraph.output import GraphvizOutput
import scipy.linalg as la
import numpy as np
import cmath
from rdp import rdp
# http://pycallgraph.readthedocs.io/en/master/examples/basic.html#source-code
from math import sqrt # call sqrt from cmath for complex number
from numpy import matrix
from scipy.integrate import odeint
from pylab import *
from scipy.interpolate import CubicSpline
# http://mropengate.blogspot.com/2015/04/cubic-spline-interpolation.html
class NCM12:
def __init__(self, A, choice):
"do something here"
@staticmethod
def Problem1(x, y):
# input SegmentVectors, and x,y data
# output the line points inside the Segments
resolution = np.arange(0, 3.1, 0.1)
X = np.linspace(x[0], x[1] - 0.1, 10)
h = x[1] - x[0]
ss0 = (y[0] * (x[1] - X) / h) + y[1] * (X - x[0]) / h
X = np.linspace(x[1], x[2] - 0.1, 10)
h = x[2] - x[1]
ss1 = (y[1] * (x[2] - X) / h) + y[2] * (X - x[1]) / h
X = np.linspace(x[2], x[3], 11)
h = x[3] - x[2]
ss2 = (y[2] * (x[3] - X) / h) + y[3] * (X - x[2]) / h
ss0 = np.append(ss0, ss1)
ss0 = np.append(ss0, ss2)
return x, y, resolution, ss0
@staticmethod
def LineDrawing(s0, s1, s2, x, y):
# input SegmentVectors, and x,y data
# output the line points inside the Segments
X = np.linspace(x[0], x[1] - 0.1, 10)
ss0 = s0[0] * (X - x[0]) ** 3 + s0[1] * (X - x[0]) ** 2 + s0[2] * (X - x[0]) + s0[3]
X = np.linspace(x[1], x[2] - 0.1, 10)
ss1 = s1[0] * (X - x[1]) ** 3 + s1[1] * (X - x[1]) ** 2 + s1[2] * (X - x[1]) + s1[3]
X = np.linspace(x[2], x[3], 11)
ss2 = s2[0] * (X - x[2]) ** 3 + s2[1] * (X - x[2]) ** 2 + s2[2] * (X - x[2]) + s2[3]
ss0 = np.append(ss0, ss1)
ss0 = np.append(ss0, ss2)
return ss0
@staticmethod
    # The derivative values at the two endpoints are specified, here denoted A and B respectively.
def Problem2_curve1_clamped_cubic(x, y, t_initial, t_end):
# input x, y
# output x, curve CS in cs, cs^(1), cs^(2)
# The first derivative at curves ends are zero
cs = CubicSpline(x, y, bc_type=((1, t_initial), (1, t_end)))
resolution = np.arange(0, 3.1, 0.1)
print(len(resolution))
# start the own code
d = []
u = [0]
        h = [1, 1, 1]  # this should use x[k+1]-x[k] in a real application
size = len(x)
for index in range(0, size - 1):
d = np.append(d, (y[index + 1] - y[index]) / h[index])
for index in range(0, size - 2):
u = np.append(u, 6 * (d[index + 1] - d[index]))
index, jndex = 0, 1
b = np.matrix([
[u[index + 1] - 3 * (d[index] - t_initial)],
[u[size - 2] - 3 * (t_end - d[size - 2])]
])
A = np.matrix([
[(3 / 2) * h[0] + 2 * h[1], h[1]],
[h[1], 2 * h[1] + (3 / 2) * h[1]]
])
xx = NCM07.choleski(A, b)
# table 5-8 clamped spline
m0 = ((3 / h[0]) * (d[0] - t_initial)) - (1 / 2) * xx[0]
m3 = (((3 / h[1]) * (t_end - d[2])) - ((1 / 2) * xx[1]))
# packing the m-list
m = [m0.item(0), xx.item(0), xx.item(1), m3.item(0)]
# original function -- create the function in piece-wise s0, s1 and s2
s0 = [(m[1] - m[0]) / (6 * h[0]), m[0] / 2, ((y[1] - y[0]) / h[0]) - h[0] * (2 * m[0] + m[1]) / 6, y[0]]
s1 = [(m[2] - m[1]) / (6 * h[1]), m[1] / 2, ((y[2] - y[1]) / h[1]) - h[1] * (2 * m[1] + m[2]) / 6, y[1]]
s2 = [(m[3] - m[2]) / (6 * h[2]), m[2] / 2, ((y[3] - y[2]) / h[2]) - h[2] * (2 * m[2] + m[3]) / 6, y[2]]
f0 = NCM12.LineDrawing(s0, s1, s2, x, y)
# first order derivate
s0_1df = [0, s0[0] * 3, s0[1] * 2, s0[2]]
s1_1df = [0, s1[0] * 3, s1[1] * 2, s1[2]]
s2_1df = [0, s2[0] * 3, s2[1] * 2, s2[2]]
f1 = NCM12.LineDrawing(s0_1df, s1_1df, s2_1df, x, y)
# second order derivate
s0_2df = [0, 0, s0_1df[1] * 2, s0_1df[2]]
s1_2df = [0, 0, s1_1df[1] * 2, s1_1df[2]]
s2_2df = [0, 0, s2_1df[1] * 2, s2_1df[2]]
# print("s0_2df, s1_2df, s2_2df :", s0_2df, s1_2df, s2_2df)
f2 = NCM12.LineDrawing(s0_2df, s1_2df, s2_2df, x, y)
return resolution, cs(resolution), cs(resolution, 1), cs(resolution, 2), f0, f1, f2
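    # The tuple returned above is (grid, scipy spline, scipy 1st derivative, scipy 2nd
    # derivative, own spline f0, own 1st derivative f1, own 2nd derivative f2).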
@staticmethod
    # The two ends are not subjected to any force that would bend them (natural spline).
def Problem2_curve2_natural(x, y, t_initial, t_end):
cs = CubicSpline(x, y, bc_type=((2, t_end), (2, t_initial)))
resolution = np.arange(0, 3.1, 0.1)
# start the own code
d = []
u = [0]
        h = [1, 1, 1]  # this should use x[k+1]-x[k] in a real application
size = len(x)
for index in range(0, size - 1):
d = np.append(d, (y[index + 1] - y[index]) / h[index])
for index in range(0, size - 2):
u = np.append(u, 6 * (d[index + 1] - d[index]))
b = np.matrix([
[u.item(1)],
[u.item(2)]
])
# Lemma 5-2
A = np.matrix([
[2 * (h[0] + h[1]), h[1]],
[h[size - 3], 2 * (h[size - 3] + h[size - 2])]
])
xx = NCM07.choleski(A, b)
# table 5-8 Natual spline
m0 = 0
m3 = 0
# packing the m-list
m = [m0, xx.item(0), xx.item(1), m3]
# original function ,create the function in piece-wise s0, s1 and s2
s0 = [(m[1] - m[0]) / (6 * h[0]), m[0] / 2, ((y[1] - y[0]) / h[0]) - h[0] * (2 * m[0] + m[1]) / 6, y[0]]
s1 = [(m[2] - m[1]) / (6 * h[1]), m[1] / 2, ((y[2] - y[1]) / h[1]) - h[1] * (2 * m[1] + m[2]) / 6, y[1]]
s2 = [(m[3] - m[2]) / (6 * h[2]), m[2] / 2, ((y[3] - y[2]) / h[2]) - h[2] * (2 * m[2] + m[3]) / 6, y[2]]
f0 = NCM12.LineDrawing(s0, s1, s2, x, y)
# first order derivate
s0_1df = [0, s0[0] * 3, s0[1] * 2, s0[2]]
s1_1df = [0, s1[0] * 3, s1[1] * 2, s1[2]]
s2_1df = [0, s2[0] * 3, s2[1] * 2, s2[2]]
f1 = NCM12.LineDrawing(s0_1df, s1_1df, s2_1df, x, y)
# second order derivate
s0_2df = [0, 0, s0_1df[1] * 2, s0_1df[2]]
s1_2df = [0, 0, s1_1df[1] * 2, s1_1df[2]]
s2_2df = [0, 0, s2_1df[1] * 2, s2_1df[2]]
f2 = NCM12.LineDrawing(s0_2df, s1_2df, s2_2df, x, y)
return resolution, cs(resolution), cs(resolution, 1), cs(resolution, 2), f0, f1, f2
@staticmethod
def Problem2_curve2_NotAKnot(x, y):
cs = CubicSpline(x, y)
resolution = np.arange(0, 3.1, 0.1)
# start the own code
d = []
u = [0]
        h = [1, 1, 1]  # this should use x[k+1]-x[k] in a real application
size = len(x)
for index in range(0, size - 1):
d = np.append(d, (y[index + 1] - y[index]) / h[index])
for index in range(0, size - 2):
u = np.append(u, 6 * (d[index + 1] - d[index]))
b = np.matrix([
[u.item(1)],
[u.item(2)]
])
# Lemma 5-3
A = np.matrix([
[3 * h[0] + 2 * h[1] + h[0] ** 2 / h[1], h[1] - h[0] ** 2 / h[1]],
[h[size - 3] - h[size - 2] ** 2 / h[size - 3],
2 * h[size - 3] + 3 * h[size - 2] + (h[size - 2] ** 2 / h[size - 3])]
])
xx = NCM07.choleski(A, b)
# table 5-8 (iii) Not-A-Knot spline
m0 = xx.item(0) - (h[0] / h[1]) * (xx.item(1) - xx.item(0))
m3 = xx.item(1) + (h[2] / h[1]) * (-xx.item(0) + xx.item(1))
# packing the m-list
m = [m0, xx.item(0), xx.item(1), m3]
# original function
s0 = [(m[1] - m[0]) / (6 * h[0]), m[0] / 2, ((y[1] - y[0]) / h[0]) - h[0] * (2 * m[0] + m[1]) / 6, y[0]]
s1 = [(m[2] - m[1]) / (6 * h[1]), m[1] / 2, ((y[2] - y[1]) / h[1]) - h[1] * (2 * m[1] + m[2]) / 6, y[1]]
s2 = [(m[3] - m[2]) / (6 * h[2]), m[2] / 2, ((y[3] - y[2]) / h[2]) - h[2] * (2 * m[2] + m[3]) / 6, y[2]]
f0 = NCM12.LineDrawing(s0, s1, s2, x, y)
# first order derivate
s0_1df = [0, s0[0] * 3, s0[1] * 2, s0[2]]
s1_1df = [0, s1[0] * 3, s1[1] * 2, s1[2]]
s2_1df = [0, s2[0] * 3, s2[1] * 2, s2[2]]
f1 = NCM12.LineDrawing(s0_1df, s1_1df, s2_1df, x, y)
# second order derivate
s0_2df = [0, 0, s0_1df[1] * 2, s0_1df[2]]
s1_2df = [0, 0, s1_1df[1] * 2, s1_1df[2]]
s2_2df = [0, 0, s2_1df[1] * 2, s2_1df[2]]
f2 = NCM12.LineDrawing(s0_2df, s1_2df, s2_2df, x, y)
return resolution, cs(resolution), cs(resolution, 1), cs(resolution, 2), f0, f1, f2
@staticmethod
def Problem2_curve2_ParabolicallyTerminate(x, y):
cs = CubicSpline(x, y)
resolution = np.arange(0, 3.1, 0.1)
# start the own code
d = []
u = [0]
        h = [1, 1, 1]  # this should use x[k+1]-x[k] in a real application
size = len(x)
print("size : ", size)
for index in range(0, size - 1):
d = np.append(d, (y[index + 1] - y[index]) / h[index])
for index in range(0, size - 2):
u = np.append(u, 6 * (d[index + 1] - d[index]))
b = np.matrix([
[u.item(1)],
[u.item(2)]
])
# Lemma 5-4
A = np.matrix([
[3 * h[0] + 2 * h[1], h[1]],
[h[size - 3], 2 * h[size - 3] + 3 * h[size - 2]]
])
xx = NCM07.choleski(A, b)
m0 = xx.item(0)
m3 = xx.item(1)
# packing the m-list
m = [m0, xx.item(0), xx.item(1), m3]
# original function
s0 = [(m[1] - m[0]) / (6 * h[0]), m[0] / 2, ((y[1] - y[0]) / h[0]) - h[0] * (2 * m[0] + m[1]) / 6, y[0]]
s1 = [(m[2] - m[1]) / (6 * h[1]), m[1] / 2, ((y[2] - y[1]) / h[1]) - h[1] * (2 * m[1] + m[2]) / 6, y[1]]
s2 = [(m[3] - m[2]) / (6 * h[2]), m[2] / 2, ((y[3] - y[2]) / h[2]) - h[2] * (2 * m[2] + m[3]) / 6, y[2]]
f0 = NCM12.LineDrawing(s0, s1, s2, x, y)
# first order derivate
s0_1df = [0, s0[0] * 3, s0[1] * 2, s0[2]]
s1_1df = [0, s1[0] * 3, s1[1] * 2, s1[2]]
s2_1df = [0, s2[0] * 3, s2[1] * 2, s2[2]]
f1 = NCM12.LineDrawing(s0_1df, s1_1df, s2_1df, x, y)
# second order derivate
s0_2df = [0, 0, s0_1df[1] * 2, s0_1df[2]]
s1_2df = [0, 0, s1_1df[1] * 2, s1_1df[2]]
s2_2df = [0, 0, s2_1df[1] * 2, s2_1df[2]]
f2 = NCM12.LineDrawing(s0_2df, s1_2df, s2_2df, x, y)
return resolution, f0, f1, f2
@staticmethod
def Problem2_curve2_EndpointCurvatureAdjustSpline(x, y, t_initial, t_end):
# cs = CubicSpline(x, y)
resolution = np.arange(0, 3.1, 0.1)
# start the own code
d = []
u = [0]
        h = [1, 1, 1]  # this should use x[k+1]-x[k] in a real application
size = len(x)
for index in range(0, size - 1):
d = np.append(d, (y[index + 1] - y[index]) / h[index])
for index in range(0, size - 2):
u = np.append(u, 6 * (d[index + 1] - d[index]))
# Lemma 5-5
b = np.matrix([
[u.item(1) - h[0] * t_initial],
[u.item(2) - h[size - 2] * t_end]
])
A = np.matrix([
[2 * (h[0] + h[1]), h[1]],
[h[size - 3], 2 * (h[size - 3] + h[size - 2])]
])
xx = NCM07.choleski(A, b)
# table 5-8 (v) Endpoint curvature adjust Spline
m0 = t_initial
m3 = t_end
# packing the m-list
m = [m0, xx.item(0), xx.item(1), m3]
# original function
s0 = [(m[1] - m[0]) / (6 * h[0]), m[0] / 2, ((y[1] - y[0]) / h[0]) - h[0] * (2 * m[0] + m[1]) / 6, y[0]]
s1 = [(m[2] - m[1]) / (6 * h[1]), m[1] / 2, ((y[2] - y[1]) / h[1]) - h[1] * (2 * m[1] + m[2]) / 6, y[1]]
s2 = [(m[3] - m[2]) / (6 * h[2]), m[2] / 2, ((y[3] - y[2]) / h[2]) - h[2] * (2 * m[2] + m[3]) / 6, y[2]]
f0 = NCM12.LineDrawing(s0, s1, s2, x, y)
# first order derivate
s0_1df = [0, s0[0] * 3, s0[1] * 2, s0[2]]
s1_1df = [0, s1[0] * 3, s1[1] * 2, s1[2]]
s2_1df = [0, s2[0] * 3, s2[1] * 2, s2[2]]
f1 = NCM12.LineDrawing(s0_1df, s1_1df, s2_1df, x, y)
# second order derivate
s0_2df = [0, 0, s0_1df[1] * 2, s0_1df[2]]
s1_2df = [0, 0, s1_1df[1] * 2, s1_1df[2]]
s2_2df = [0, 0, s2_1df[1] * 2, s2_1df[2]]
f2 = NCM12.LineDrawing(s0_2df, s1_2df, s2_2df, x, y)
return resolution, f0, f1, f2
class NCM11:
def __init__(self, A, choice):
"do something here"
@staticmethod
# https://zh.wikipedia.org/wiki/道格拉斯-普克算法
# http://52north.github.io/wps-profileregistry/generic/dp-line-generalization.html
# https://github.com/nramm/maskiton/blob/master/server/plugins/appion/pyami/douglaspeucker.py
def RDP_middle(Px, Py, EPS):
result_x = []
result_y = []
recResults1_X = []
recResults1_Y = []
recResults2_X = []
recResults2_Y = []
dmax, index = 0, 0
length = len(Py)
for i in range(1, length - 2):
d = NCM11.d(Px[0], Py[0], Px[i], Py[i], Px[length - 1], Py[length - 1])
if (d > dmax):
index = i
dmax = d
if (dmax >= EPS):
# Recursive call
recResults1_X, recResults1_Y = NCM11.RDP_middle(Px[: index + 1], Py[:index + 1], EPS)
recResults2_X, recResults2_Y = NCM11.RDP_middle(Px[index:], Py[index:], EPS)
# Build the result list
result_x = np.vstack((recResults1_X[:-1], recResults2_X))
result_y = np.vstack((recResults1_Y[:-1], recResults2_Y))
else:
result_x = np.vstack((Px[0], Px[-1]))
result_y = np.vstack((Py[0], Py[-1]))
return result_x, result_y
@staticmethod
# FMI : find middle index
def FMI(Py):
middle = float(len(Py)) / 2
if middle % 2 != 0:
middle = int(middle - 0.5)
return middle
@staticmethod
# input : P Polyline { P1, P2 ....Pn }, epsilon : offset
# output : list simplification algorithms
def rdp_Ramer_Douglas_Pecker(Px, Py, EPS):
# https://pypi.org/project/rdp/
# input : P Polyline { P1, P2 ....Pn }, epsilon : offset
# output : list simplification algorithms
result = rdp(np.column_stack((Px, Py)), epsilon=EPS)
return [row[0] for row in result], [row[1] for row in result]
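    # Minimal illustration of the rdp call above (assuming the rdp package is installed):
    # rdp(np.array([[0, 0], [1, 0.1], [2, 0]]), epsilon=0.5) keeps only the two endpoints,
    # because the middle point lies within epsilon of the straight line joining them.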
@staticmethod
def Standard_Deviation_Method(Px, Py, EPS):
result_x = []
result_y = []
MAF = []
x_start = Px[0]
y_start = Py[0]
max_samples = 3
EPS = EPS * 0.25
result_x = np.append(result_x, x_start)
result_y = np.append(result_y, y_start)
p_size = Py.shape[0]
for index in range(1, p_size - 1):
Pack1x = np.array([Px[index - 1], Px[index], Px[index + 1]])
SD1x = np.std(Pack1x)
Pack1y = np.array([Py[index - 1], Py[index], Py[index + 1]])
SD1y = np.std(Pack1y)
MAF = np.append(MAF, sqrt(SD1x ** 2 + SD1y ** 2))
Average = np.mean(MAF)
if len(MAF) == max_samples:
MAF = np.delete(MAF, 0)
print(index, sqrt(SD1x ** 2 + SD1y ** 2), Average)
if (sqrt(SD1x ** 2 + SD1y ** 2) - Average) > (EPS):
result_x = np.append(result_x, Px[index])
result_y = np.append(result_y, Py[index])
else:
pass
result_x = np.append(result_x, Px[p_size - 1])
result_y = np.append(result_y, Py[p_size - 1])
return result_x, result_y
@staticmethod
def Simplification_Perpendicular_Distance(Px, Py, epsilon):
# input : P Polyline { P1, P2 ....Pn }, epsilon : offset
# output : list simplification algorithms
result_x = []
result_y = []
x_start = Px[0]
y_start = Py[0]
result_x = np.append(result_x, x_start)
result_y = np.append(result_y, y_start)
p_size = Py.shape[0]
for index in range(1, p_size - 1):
x_target = Px[index]
y_target = Py[index]
x_end = Px[index + 1]
y_end = Py[index + 1]
d_result = NCM11.d(x_start, y_start, x_target, y_target, x_end, y_end)
if (d_result > epsilon): # keep the original data and save into output vector
result_x = np.append(result_x, Px[index])
result_y = np.append(result_y, Py[index])
x_start = Px[index] # load the next number
y_start = Py[index]
else: # skip the data
pass
# load the last data
result_x = np.append(result_x, Px[p_size - 1])
result_y =
|
np.append(result_y, Py[p_size - 1])
|
numpy.append
|
import cv2
import numpy as np
####################################
classFile = 'cocoNames/coco.names'
classNames=[]
WH_target = 320
modelConfig = "yolov3.cfg"
modelWeights= "yolov3.weights"
webcam = False
saveimg= True
image_file = 'me.jpeg'
#####################################
if webcam:
video = cv2.VideoCapture('/dev/video0')
assert(video.isOpened() == True)
with open(classFile, 'rt') as f:
classNames = f.read().rstrip('\n').split('\n')
# modelConfig = "yolov3-tiny.cfg"
# modelWeights= "yolov3-tiny.weights"
net = cv2.dnn.readNetFromDarknet(modelConfig, modelWeights)
net.setPreferableBackend(cv2.dnn.DNN_BACKEND_OPENCV)
net.setPreferableTarget(cv2.dnn.DNN_TARGET_CPU)
def findObject(outputs, img, confidenceThr = 0.5, nms_threshold=0.3):
hT, wT, cT = img.shape
bboxes = []
classIds = []
confs = []
for output in outputs:
for det in output:
scores = det[5:]
classId=
|
np.argmax(scores)
|
numpy.argmax
|
import nibabel as nb
import numpy as np
# function to load mesh geometry
def load_mesh_geometry(surf_mesh):
# returns coords, numbers of neighbours per vertex, and indices of neighbours
if isinstance(surf_mesh, str):
if (surf_mesh.endswith('orig') or surf_mesh.endswith('pial') or
surf_mesh.endswith('white') or surf_mesh.endswith('sphere') or
surf_mesh.endswith('inflated')):
coords, faces = nb.freesurfer.io.read_geometry(surf_mesh)
elif surf_mesh.endswith('gii'):
coords, faces = nb.gifti.read(surf_mesh).getArraysFromIntent(nb.nifti1.intent_codes['NIFTI_INTENT_POINTSET'])[0].data, \
nb.gifti.read(surf_mesh).getArraysFromIntent(nb.nifti1.intent_codes['NIFTI_INTENT_TRIANGLE'])[0].data
elif surf_mesh.endswith('vtk'):
coords, faces, _ = read_vtk(surf_mesh)
elif surf_mesh.endswith('ply'):
coords, faces = read_ply(surf_mesh)
elif surf_mesh.endswith('obj'):
coords, faces = read_obj(surf_mesh)
elif isinstance(surf_mesh, dict):
if ('faces' in surf_mesh and 'coords' in surf_mesh):
coords, faces = surf_mesh['coords'], surf_mesh['faces']
else:
raise ValueError('If surf_mesh is given as a dictionary it must '
'contain items with keys "coords" and "faces"')
else:
        raise ValueError('surf_mesh must be either a filename or a dictionary '
'containing items with keys "coords" and "faces"')
neighbours, counts = get_neighbours(faces)
return {'coords':coords,'neighbour_count':counts, 'neighbours':neighbours}
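# Hedged usage sketch (the filename is illustrative):
#   mesh = load_mesh_geometry('lh.pial')
#   mesh['coords'].shape        # (n_vertices, 3)
#   mesh['neighbour_count'][0]  # number of neighbours of vertex 0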
def get_neighbours(triangles):
"""Get neighbours from triangles"""
n_vert = np.max(triangles)+1
neighbours=[[] for i in range(n_vert)]
counts=[]
for tri in triangles:
neighbours[tri[0]].extend([tri[1],tri[2]])
neighbours[tri[2]].extend([tri[0],tri[1]])
neighbours[tri[1]].extend([tri[2],tri[0]])
#Get unique neighbours
for k in range(len(neighbours)):
neighbours[k]=f7(neighbours[k])
counts.append(len(neighbours[k]))
return neighbours, counts;
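# Small worked example: for a single triangle [[0, 1, 2]] this returns
# neighbours = [[1, 2], [2, 0], [0, 1]] and counts = [2, 2, 2].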
def f7(seq):
#returns uniques but in order to retain neighbour triangle relationship
seen = set()
seen_add = seen.add
return [x for x in seq if not (x in seen or seen_add(x))];
# function to load mesh data
def load_mesh_data(surf_data, gii_darray=0):
# if the input is a filename, load it
if isinstance(surf_data, str):
if (surf_data.endswith('nii') or surf_data.endswith('nii.gz') or
surf_data.endswith('mgz')):
data = np.squeeze(nb.load(surf_data).get_data())
elif (surf_data.endswith('curv') or surf_data.endswith('sulc') or
surf_data.endswith('thickness')):
data = nb.freesurfer.io.read_morph_data(surf_data)
elif surf_data.endswith('annot'):
data = nb.freesurfer.io.read_annot(surf_data)[0]
elif surf_data.endswith('label'):
data = nb.freesurfer.io.read_label(surf_data)
# check if this works with multiple indices (if dim(data)>1)
elif surf_data.endswith('gii'):
fulldata = nb.gifti.giftiio.read(surf_data)
n_vectors = len(fulldata.darrays)
if n_vectors == 1:
data = fulldata.darrays[gii_darray].data
else:
print("Multiple data files found, output will be matrix")
data = np.zeros([len(fulldata.darrays[gii_darray].data), n_vectors])
for gii_darray in range(n_vectors):
data[:,gii_darray] = fulldata.darrays[gii_darray].data
elif surf_data.endswith('vtk'):
_, _, data = read_vtk(surf_data)
elif surf_data.endswith('txt'):
data=np.loadtxt(surf_data)
else:
raise ValueError('Format of data file not recognized.')
elif isinstance(surf_data, np.ndarray):
data = np.squeeze(surf_data)
return data
## function to write mesh data
def save_mesh_data(fname, surf_data):
if isinstance(fname, str) and isinstance(surf_data,np.ndarray):
if (fname.endswith('curv') or fname.endswith('thickness') or
fname.endswith('sulc')):
nb.freesurfer.io.write_morph_data(fname,surf_data)
elif fname.endswith('txt'):
np.savetxt(fname,surf_data)
elif fname.endswith('vtk'):
if 'data' in surf_dict.keys():
write_vtk(fname,surf_dict['coords'],surf_dict['faces'],surf_dict['data'])
else:
write_vtk(fname,surf_dict['coords'],surf_dict['faces'])
elif fname.endswith('gii'):
print('please write lovely write gifti command')
elif fname.endswith('mgh'):
print('please write lovely write mgh command, or retry saving as .curv file')
else:
raise ValueError('fname must be a filename and surf_data must be a numpy array')
# function to read vtk files
# ideally use pyvtk, but it didn't work for our data, look into why
def read_vtk(file):
'''
Reads ASCII coded vtk files using pandas,
returning vertices, faces and data as three numpy arrays.
'''
import pandas as pd
import csv
# read full file while dropping empty lines
try:
vtk_df=pd.read_csv(file, header=None, engine='python')
except csv.Error:
        raise ValueError('This vtk file appears to be binary coded; currently only ASCII coded vtk files can be read')
vtk_df=vtk_df.dropna()
# extract number of vertices and faces
number_vertices=int(vtk_df[vtk_df[0].str.contains('POINTS')][0].iloc[0].split()[1])
number_faces=int(vtk_df[vtk_df[0].str.contains('POLYGONS')][0].iloc[0].split()[1])
# read vertices into df and array
start_vertices= (vtk_df[vtk_df[0].str.contains('POINTS')].index.tolist()[0])+1
vertex_df=pd.read_csv(file, skiprows=range(start_vertices), nrows=number_vertices, sep='\s*', header=None, engine='python')
if np.array(vertex_df).shape[1]==3:
vertex_array=
|
np.array(vertex_df)
|
numpy.array
|
#-------------------------------------------
# Source code for GP dataset creation
#
# By: <NAME>, <NAME>
#-------------------------------------------
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import AxesGrid
import copy, os, pickle, argparse, time
import george # main package used for generating GPs
from george.kernels import ExpSquaredKernel
from scipy import interpolate
# The folder where the GP samples will be saved.
sample_folder = 'gp-samples/'
cross_folder = 'gp-crosses/'
# Class for generating GP samples.
class generate_patch():
"""
This class generates a GP and returns a tuple of (sample, sample size).
Note that it is optimized for samples of size 1024, but can generate
any size sample with a little tweaking of the parameters.
'size' determines the size of the sample generated before upscaling.
The final size of the sample is size * upscale_factor.
    'corr_length' determines the rough diameter of the GP blobs
and can only be specified for the base GP (i.e. cannot be
simultaneously defined with a subpatch)
'subpatch' = None:
In this case, the generate function of the instance
runs the George package with the size and corr_length
specified.
'subpatch' = generate_patch instance:
In this case, the generate function fills a sample
        given by size uniformly with the given generate_patch
instance.
'envelope' = True multiplies the generated sample by a Gaussian
such that it falls off to zero near the edges. (This is
required for the base (subpatch) generator to get a
smooth sample)
    'wrap' = True wraps the sample on a torus, i.e. identifies the sides
of the sample near the edge. (Use this on the sampling instance,
not on the subpatch instance.)
'wrap_factor' = 1 +/- offset adjusts the toroidal wrapping so that the
sample remains uniform. To adjust this factor, turn test=True.
Then, if there is a gap in the generated toroidal sample,
increase wrap_factor. If there is an overlap, reduce. Such
that in the end it is uniform.
    'test' = True, if turned on for the big GP (not the subpatch), replaces the
        GP sample with a constant patch so we can adjust the wrap_factor and
        make sure everything is uniform.
"""
def __init__(self, size, corr_length = None, upscale_factor = 1, subpatch = None,
envelope = True, wrap = False, wrap_factor = 1, test = False):
assert type(subpatch)==generate_patch or subpatch==None,\
"'subpatch' must be either None or a generate_patch class instance."
assert subpatch==None or corr_length==None,\
"'corr_length' can only be defined for the base instance, i.e."\
+ " it cannot be specified with a subpatch instance simultaneaously."
self.wrap_factor = wrap_factor; self.size = size
self.corr_length = corr_length; self.subpatch = subpatch
self.upscale_factor = upscale_factor; self.wrap = wrap
self.test = test; self.envelope = envelope
        # Defining the boundary. This is used for toroidal wrapping.
# The numbers are related to the size of the Gaussian envelope.
if subpatch == None:
self.boundary = size / 5
# Effective size is the size that survives the Gaussian envelope.
self.effective_size = size - 2 * self.boundary
else:
self.boundary = subpatch.boundary * 4 * self.subpatch.upscale_factor
# The sample generating function.
def generate(self):
n_points = self.size
if self.subpatch == None: # i.e. for base instance.
            # Defining the scale for George for having corr_length ~ blob diameter.
scale = (self.corr_length/self.size)**2
kernel = ExpSquaredKernel(scale, ndim=2)
gp = george.GP(kernel, solver=george.HODLRSolver)
# Creating a grid of points for George input.
x_points = y_points = np.linspace(0, 1, n_points)
xx, yy = np.meshgrid(x_points, y_points)
indep = np.vstack((np.hstack(xx),np.hstack(yy))).T
if self.test:
patch = np.ones([n_points,n_points])/2
else:
# Calling on George to create the samples.
patch = gp.sample(indep).reshape((n_points,n_points))
# Using interpolating to upscale the result if requested.
if self.upscale_factor > 1:
f = interpolate.interp2d(x_points, y_points, patch, kind='cubic')
x_points = y_points = np.linspace(0, 1, np.int(n_points * self.upscale_factor))
patch = f(x_points, y_points)
# Creating and applying the Gaussian envelope. The coefficient
# in the exp (in this case 23), determines how big the envelope is.
if self.envelope:
envelope = np.exp(-23*((x_points.reshape(-1,1)-0.5)**2 + (y_points.reshape(1,-1)-0.5)**2))
patch = patch * envelope
return patch, self.size * self.upscale_factor
else: # i.e. subpatch is another instance.
# Initiating the sample
n_points = self.size
patch = np.zeros([n_points , n_points])
# Defining the upscaled full subpatch size
subpatch_size = np.int(self.subpatch.size * self.subpatch.upscale_factor)
# Figuring out how many subpatches we need to cover the sample (size / effective subpatch size)
subpatch_eff_size = np.int(self.subpatch.effective_size * self.subpatch.upscale_factor)
ratio = n_points / subpatch_eff_size
            factor = 5000 if self.test else 1 # If testing for uniformity, sample a LOT of patches.
            # The locations where the subpatch samples are to be placed (locs gives the top-left corner)
locs = np.random.randint(0, n_points - subpatch_size, [np.int(6 * ratio**2 * factor),2])
# Drawing the subpatch samples.
if self.test == False:
for loc_pair in locs:
patch[loc_pair[0]:loc_pair[0]+subpatch_size,
loc_pair[1]:loc_pair[1]+subpatch_size] += self.subpatch.generate()[0]
else:
for loc_pair in locs:
patch[loc_pair[0],loc_pair[1]]+=0.1 #If testing, just put 0.1
            # Toroidal wrapping
if self.wrap == True:
w = self.wrap_factor
patch[:np.int(w*self.boundary+0.5),:] += patch[-np.int(w*self.boundary+0.5):,:]
patch[-np.int(w*self.boundary+0.5):,:] = patch[:np.int(w*self.boundary+0.5),:]
patch[:,:np.int(w*self.boundary+0.5)] += patch[:,-np.int(w*self.boundary+0.5):]
patch[:,-np.int(w*self.boundary+0.5):] = patch[:,:np.int(w*self.boundary+0.5)]
# Upscaling using interpolation.
if self.upscale_factor > 1.0:
x_points = y_points =
|
np.linspace(0, 1, n_points)
|
numpy.linspace
|
import sys
import argparse
import matplotlib.pyplot as plt
import numpy as np
import pickle
def load_obj(name):
pkl_path = ""
with open(pkl_path + name + ".pkl", 'rb') as f:
return pickle.load(f)
def load_prun_obj(name):
pkl_path = ""
with open(pkl_path + name + ".pkl", 'rb') as f:
return pickle.load(f)
def result_plt(results, label):
# lists = sorted(results.items())
# x, y = zip(*lists)
plt.plot(results, label = label)
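# The accuracy/loss averaging in main() repeats the same stack-and-mean block
# for each algorithm; a small helper like this sketch (hypothetical, not used
# by the original script) makes the intent explicit: stack the per-experiment
# curves and average them round by round.
def mean_over_experiments(results_by_exp, n_exp, n_round):
    """Round-wise mean curve over `n_exp` experiments (hypothetical helper)."""
    flat = []
    for k in range(1, n_exp + 1):
        flat.extend(results_by_exp[k])
    curves = np.array([flat[n_round * i:n_round * (i + 1)] for i in range(n_exp)])
    return np.mean(curves, axis=0)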
matrices = ['acc', 'loss']
# labels = ['fedavg_5iid_5niid', 'fedavg_6iid_4niid', 'fedavg_2iid_8niid', 'fedavg_8iid_2niid' ]
# labels_prun = ['fedavg_5iid_5niid_prun', 'fedavg_6iid_4niid_prun', 'fedavg_8iid_2niid_prun']
labels = ['FedPNS', 'BN2', 'FedAvg' ]
labels_prun = ['iid', 'niid']
# iid_list = [5, 6, 2, 8]
# niid_list = [10 - x for x in iid_list]
iid_list = [10]
niid_list = [10]
prob_ratio = [0.1]
model = [ 'cnn']
num_exp = 5
num_exp_3 = 3
num_round = 200
def define_and_get_arguments(args=sys.argv[1:]):
parser = argparse.ArgumentParser(
description="Run figure plot"
)
parser.add_argument("--matrice", type=str, choices=matrices, default="acc", help = "result matrices")
parser.add_argument("--iid", type=int, default=5, help="number of nodes")
parser.add_argument("--training_rounds", type=int, default = 50, help= "number of training rounds")
args = parser.parse_args(args=args)
return args
def main():
args = define_and_get_arguments()
fedavg_data = {}
fedadp_data = {}
feddel_data = {}
remove_node = {}
# x = load_obj('fedbn2_cifar_cnn_5_exp4' )
# print(x[0])
for exp in range(1,num_exp+1):
fedadp_data[exp] = load_obj('fedsel_mnist_%s_1_exp%s_0.5_2.0_labeled' %(model[0], exp))
fedavg_data[exp] = load_obj('fedavg_mnist_%s_1_exp%s_0.5' %(model[0],exp))
feddel_data[exp] = load_obj('fedbn2_mnist_%s_1_exp%s_0.5' %(model[0], exp))
if args.matrice == "acc":
ax = plt.subplot(111)
overall_avg = []
for k in range(1,num_exp+1):
# print(fedadp_data[k][0])
overall_avg.extend(fedadp_data[k][0])
temp_adp = np.array([overall_avg[num_round*i:num_round*(i+1)] for i in range(num_exp)])
acc_adp = np.mean(temp_adp, axis=0)
# print(acc_adp)
ax.plot(list(range(num_round)), acc_adp, color='c', linewidth = '2',label = labels[0])
overall_avg = []
for k in range(1,num_exp+1):
# print(fedadp_data[k][0])
overall_avg.extend(fedavg_data[k][0])
temp_adp = np.array([overall_avg[num_round*i:num_round*(i+1)] for i in range(num_exp)])
acc_adp = np.mean(temp_adp, axis=0)
# print(acc_adp)
ax.plot(list(range(num_round)), acc_adp, '--',color='#F97306', linewidth = '2', label = labels[-1])
overall_avg = []
for k in range(1,num_exp+1):
# print(fedadp_data[k][0])
overall_avg.extend(feddel_data[k][0])
temp_adp = np.array([overall_avg[num_round*i:num_round*(i+1)] for i in range(num_exp)])
acc_adp = np.mean(temp_adp, axis=0)
ax.plot(list(range(num_round)), acc_adp, linewidth = '2', label = labels[1])
x1,x2,y1,y2 = plt.axis()
plt.axis((x1,x2,70,95))
# plt.gca().spines['right'].set_position(('data',0))
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
plt.ylabel('Test Accuracy', fontsize=13)
plt.xlabel('Communication Round', fontsize=13)
plt.legend(frameon=False, loc=7, prop={'size': 10})
elif args.matrice == "loss":
# overall_avg = []
# for k in range(1,num_exp+1):
# # print(fedadp_data[k][0])
# overall_avg.extend(feddel_data[k][2])
# temp_adp = np.array([overall_avg[num_round*i:num_round*(i+1)] for i in range(num_exp)])
# acc_adp = np.mean(temp_adp, axis=0)
# # print(acc_adp)
# result_plt(acc_adp, labels[1])
overall_avg = []
for k in range(1,num_exp+1):
# print(fedadp_data[k][0])
overall_avg.extend(fedadp_data[k][2])
temp_adp = np.array([overall_avg[num_round*i:num_round*(i+1)] for i in range(num_exp)])
acc_adp =
|
np.mean(temp_adp, axis=0)
|
numpy.mean
|
import numpy as np
from scipy import signal
from scipy.io import wavfile
from scipy.stats import linregress, kurtosis
from .process import make_filterbank, A_weighting
def revtime(ir_input, method='rt20', fs=48000, tmax=3.0):
'''
    Computes the reverberation time and the Schroeder integral from the impulse
    response stored in ir_input (numpy array or wav filename) using the 'rt30', 'rt20' or 'edt' method.
    It does so for each channel of the file.
    Note: by default it assumes a maximum decay of 3 seconds (not the RT, but the decay down to the noise floor),
    which is a reasonable assumption in most cases.
    Returns, in order:
    - the reverberation time(s) in seconds
    - the initial and final times between which the decay line was fitted
    - the initial and final levels in dB of the fitted decay line (useful for the rmax method, not implemented yet)
    - Schroeder integral(s) of the full impulse response
'''
if type(ir_input) is str:
fs, data = wavfile.read(ir_input + '.wav')
elif type(ir_input) is np.ndarray:
data = ir_input
else:
        raise TypeError('First argument must be the array returned by extractir or a filename')
if data.ndim == 1:
        data = data[:,np.newaxis] # the array must be 2D
nsamples, nchan = np.shape(data)
nmax = int(min(tmax*fs,nsamples))
schr = np.zeros((nchan,nmax))
snr = np.zeros((nchan,))
rt = np.zeros((nchan,))
t12 = np.zeros((nchan,2))
l12 = np.zeros((nchan,2))
rvalue = np.zeros((nchan,))
for n, ir in enumerate(data.T):
ns = np.mean(ir[nmax:-1][0]**2)
        stemp = np.flip(np.cumsum(ir[::-1]**2)) # Schroeder integral
stemp = stemp[:nmax]
mm = np.arange(nsamples,0,-1)*ns
n_ns = np.argmax(stemp<mm[:nmax]*np.sqrt(2))
xv = 10*np.log10(stemp)-10*np.log10(mm[:nmax])
xv = xv - np.amax(xv)
        tv = np.arange(nsamples)/fs # time array
        snr[n] = -xv[-1] # full decay range, i.e. the SNR
schr[n][:nmax] = xv
if method.lower() == 'rt30' and snr[n]>35:
            # Compute RT using the RT30 definition
pt1 = np.argmax(xv<-5)
pt2 = np.argmax(xv<-35)
elif method.lower() == 'rt20' and snr[n]>25:
            # Same, using the RT20 definition
pt1 = np.argmax(xv<-5)
pt2 = np.argmax(xv<-25)
elif method.lower() == 'rt15' and snr[n]>20:
            # Same, using the RT15 definition
pt1 = np.argmax(xv<-5)
pt2 = np.argmax(xv<-20)
elif method.lower() == 'edt' and snr[n]>10.5:
            # Early decay time (EDT) computation
pt1 = np.argmax(xv<-0.5)
pt2 = np.argmax(xv<-10.5)
else:
return rt, t12, l12, schr, snr, rvalue
slope, intercept, r_value, _, _ = linregress(tv[pt1:pt2], xv[pt1:pt2])
rt[n] = -(intercept + 60)/slope
t12[n] = tv[[pt1,pt2]]
l12[n] = intercept+slope*tv[[pt1,pt2]]
rvalue[n] = r_value
return rt, t12, l12, schr, snr, rvalue
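# Rough sanity-check sketch for revtime (values below are assumptions, not test
# data): an exponentially decaying noise burst whose level drops 60 dB in about
# 0.5 s should give rt close to 0.5 s with method='rt20'.
#   fs = 48000
#   t = np.arange(fs) / fs
#   ir = np.random.randn(fs) * 10 ** (-3 * t / 0.5)   # -60 dB per 0.5 s
#   rt, t12, l12, schr, snr, rvalue = revtime(ir, method='rt20', fs=fs)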
def clarity(ir_input, fs=48000, tmax = 3.0):
'''
    Computes the clarity values C80 and C50 and the centre time ts from the impulse response ir.
    Later on it should take the signal-to-noise ratio into account so as not to overestimate the reverberation.
'''
if type(ir_input) is str:
fs, data = wavfile.read(ir_input + '.wav')
elif type(ir_input) is np.ndarray:
data = ir_input
else:
        raise TypeError('First argument must be the array returned by extractir or a filename')
if data.ndim == 1:
        data = data[:,np.newaxis] # the array must be 2D
nsamples, nchan = np.shape(data)
nmax = int(min(tmax*fs,nsamples))
ndir = find_dir(data, pw=1.0,fs=fs)
c80 = np.zeros((nchan,))
c50 = np.zeros((nchan,))
ts = np.zeros((nchan,))
n80 = int(0.08*fs)
n50 = int(0.05*fs)
for n, ir in enumerate(data.T):
e50 = np.sum(np.square(ir[ndir[0,n]:ndir[0,n]+n50]))
e80 = np.sum(np.square(ir[ndir[0,n]:ndir[0,n]+n80]))
er = np.sum(np.square(ir[ndir[0,n]:nmax]))
etr = np.sum(np.multiply((np.arange(ndir[0,n],nmax)-ndir[0,n])/fs,np.square(ir[ndir[0,n]:nmax])))
c80[n] = 10*np.log10(e80/(er-e80))
c50[n] = 10*np.log10(e50/(er-e50))
ts[n] = 1000*etr/er
return c80, c50, ts
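# For reference, the indices computed above follow the usual definitions
# (worked example with assumed energies): C80 = 10*log10(E_early / E_late),
# where E_early is the energy in the first 80 ms after the direct sound and
# E_late the remaining energy; with E_early = 0.9 and E_late = 0.1 that gives
# C80 = 10*log10(9) ~ 9.5 dB. C50 uses a 50 ms split, and ts is the energy
# centroid of the impulse response expressed in milliseconds.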
def paracoustic(ir, method='rt20', bankname='fbank', tmax=3.0):
'''
    Computes the following acoustic parameters PER BAND, stored under the corresponding keys:
    Reverberation: 'rt30' (or whichever method is requested), 'edt'
    Clarity: 'c80', 'c50', 'ts'
    Signal-to-noise ratio: 'snr'
    Direct-to-reverberant ratio: 'dr'
    from the impulse response stored in ir (numpy array or wav filename).
    It does so for each channel of the file and for the filter bank stored in bankname (npz extension).
    Returns a dictionary with the following keys: nchan (number of channels), nbands (number of bands), fc (centre frequencies),
    rt20 (or rt30 or edt, an nbands x nchan array with the reverberation times); tfit, lfit, schr, rvalue are
    the outputs of revtime (see above) for each band. Band 0 is wideband (fc = 1).
'''
    # if bankname is None, do it wideband
    # give the option of not computing the A-weighting filter
try:
fbank = np.load(bankname + '.npz')
except:
print('Generating new filter bank ')
try:
fs, _ = wavfile.read(ir + '.wav')
except:
raise Exception('Cannot infer sample rate. Please provide wav file or filter bank with specified sample rate')
if (len(bankname.split('_')) > 1):
(noct,bwoct) = [int(ss) for ss in bankname.split('_')[-2:]]
make_filterbank(noct=noct,bwoct=bwoct,bankname=bankname,fs=fs)
else:
make_filterbank(bankname='fbank',fs=fs)
fbank = np.load(bankname + '.npz')
if type(ir) is str:
fs, data = wavfile.read(ir + '.wav')
if fs != fbank['fs']:
raise Exception('Inconsistent sample rate between audio file and filter bank')
elif type(ir) is np.ndarray:
data = ir
fs = fbank['fs']
print('Using sample rate from filter bank:' + str(fs))
else:
raise TypeError('Input must be ndarray or filename')
if data.ndim == 1:
data = data[:,np.newaxis] # el array debe ser 2D
nbands, _, _ = fbank['sos'].shape
# some stats
pstat = irstats(data, fs=fs)
tmixing = np.mean(pstat['mixing'][0,:])
tnoise = np.mean(pstat['tnoise'][0,:])
nsamples, nchan = np.shape(data)
nmax = int(min(tmax*fs,nsamples))
listofkeys = ['nchan','nbands','fc',method,'edt','tfit','lfit','schr','rvalue','snr','c80','c50','ts','dr']
pars = dict.fromkeys(listofkeys,0 )
pars['nchan'] = nchan
pars['nbands'] = nbands+2
pars['fc'] = [None]*pars['nbands']
pars[method] = np.zeros((pars['nbands'],pars['nchan']))
pars['edt'] = np.zeros((pars['nbands'],pars['nchan']))
pars['tfit'] = np.zeros((pars['nbands'],pars['nchan'],2))
pars['lfit'] =
|
np.zeros((pars['nbands'],pars['nchan'],2))
|
numpy.zeros
|
# plot curve
import numpy as np
import matplotlib.pyplot as plt
# read data
loss_list = ['loss_DQN.npy','loss_DDQN.npy','loss_dueling.npy']
q_list = ['q_DQN.npy','q_DDQN.npy','q_dueling.npy']
reward_list = ['reward_DQN.npy','reward_DDQN.npy','reward_dueling.npy']
try:
loss_DQN = np.load(loss_list[0])
loss_DDQN = np.load(loss_list[1])
loss_dueling = np.load(loss_list[2])
q_DQN = np.load(q_list[0])
q_DDQN = np.load(q_list[1])
q_dueling = np.load(q_list[2])
reward_DQN = np.load(reward_list[0])
reward_DDQN = np.load(reward_list[1])
reward_dueling = np.load(reward_list[2])
except:
print("load data failed!")
# moving average
def moving_average(a, n=3) :
ret =
|
np.cumsum(a, dtype=float)
|
numpy.cumsum
|
import numpy as np
import scipy.sparse as sp
from docarray.math.helper import minmax_normalize, update_rows_x_mat_best
def test_minmax_normalization_1d():
a = np.array([1, 2, 3])
np.testing.assert_almost_equal(minmax_normalize(a), [0, 0.5, 1])
a_normalized = minmax_normalize(a, (1, 0))
np.testing.assert_almost_equal(a_normalized, [1, 0.5, 0])
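# The expected values follow the usual min-max formula (worked example,
# assuming the helper maps to a target range t = (t_min, t_max)):
#   x' = t_min + (x - x.min()) / (x.max() - x.min()) * (t_max - t_min)
# so for a = [1, 2, 3] and t = (1, 0): x' = 1 - (x - 1) / 2 = [1, 0.5, 0].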
def test_minmax_normalization_2d():
a = np.array([[1, 2, 3], [3, 2, 1]])
np.testing.assert_almost_equal(minmax_normalize(a), [[0, 0.5, 1], [1, 0.5, 0]])
a_normalized = minmax_normalize(a, (1, 0))
np.testing.assert_almost_equal(a_normalized, [[1, 0.5, 0], [0, 0.5, 1]])
def test_minmax_normalization_sparse():
a = sp.csr_matrix([[1, 2, 3], [3, 2, 1]])
np.testing.assert_almost_equal(minmax_normalize(a), [[0, 0.5, 1], [1, 0.5, 0]])
a_normalized = minmax_normalize(a, (1, 0))
np.testing.assert_almost_equal(a_normalized, [[1, 0.5, 0], [0, 0.5, 1]])
def test_minmax_normalization_zero():
a = np.array([[1, 1, 1], [3, 3, 3]])
np.testing.assert_almost_equal(minmax_normalize(a), [[0, 0, 0], [0, 0, 0]])
a_normalized = minmax_normalize(a, (1, 0))
|
np.testing.assert_almost_equal(a_normalized, [[1, 1, 1], [1, 1, 1]])
|
numpy.testing.assert_almost_equal
|
import tensorflow as tf
import os
import numpy as np
from tqdm import tqdm
from tensorflow.examples.tutorials.mnist import input_data
import sys
import argparse
from ae_modules import Encoder, Decoder
tf.logging.set_verbosity(tf.logging.INFO)
if __name__=='__main__':
parser = argparse.ArgumentParser(description='BEGAN test')
parser.add_argument('--data_path', dest='path', default='fashion', type=str)
parser.add_argument('--hidden', dest='hidden', default=32, type=int, help='hidden size')
parser.add_argument('--conv-hidden', dest='conv_hidden', default=64, type=int, help='conv hidden size')
parser.add_argument('--gamma', dest='gamma', default=0.5, type=float, help='gamma value for equilibrium')
parser.add_argument('--lambda', dest='lambda_', default=0.001, type=float, help='lambda for control')
parser.add_argument('--lr', dest='lr', default=0.00001, type=float, help='start learning rate')
parser.add_argument('--batch', dest='batch', default=16, type=int, help='batch size')
parser.add_argument('--iter', dest='iter', default=1000000, type=int, help='num of iteration')
parser.add_argument('--gray', dest='gray', action='store_true', help='gray or color')
args = parser.parse_args()
model_folder = "./models/BEGAN_{}_gray_{}_{}_{}_{}_{}".format(args.path,
args.gray,
args.lambda_,
args.gamma,
args.hidden,
args.conv_hidden)
# data load
mnist = input_data.read_data_sets(args.path, one_hot=True)
# config
num_iter = args.iter
B = args.batch
h = args.hidden
n = args.conv_hidden
gamma_ = args.gamma
gamma = tf.constant(gamma_, dtype=tf.float32)
lambda_ = tf.Variable(args.lambda_,
dtype=tf.float32,
trainable=False)
starter_learning_rate = args.lr
k_initial = tf.constant(0, dtype=tf.float32)
global_step = tf.Variable(0, trainable=False)
lr = tf.train.exponential_decay(starter_learning_rate,
global_step,
100,
0.98,
staircase=True)
lr_ = tf.Variable(lr,
dtype=tf.float32,
trainable=False)
# placeholder
    # MNIST-specific preprocessing below...
mnist_images = tf.placeholder(tf.float32, (None, 28*28), name="RealImage")
if args.gray:
im = tf.reshape(mnist_images, [-1, 28, 28, 1])
else:
im = tf.reshape(mnist_images, [-1, 28, 28])
im = tf.stack([im, im, im],
axis=3)
im = tf.image.resize_nearest_neighbor(im, [32, 32])
real_images = 1 - im
#real_images = tf.placeholder(tf.float32, (None, 32, 32, 3), name="RealImage")
z = tf.placeholder(tf.float32, (None, h), name="z")
k_prev = tf.placeholder(tf.float32, [])
with tf.device("/gpu:0"):
# Real
latent_in_real, varE = Encoder(real_images, n, h, gray=args.gray)
restored_real, varD = Decoder(latent_in_real, n, h, name="D", gray=args.gray)
        varDisc = varE + varD # collect the Discriminator's variables
tf.summary.image("input_real", real_images)
tf.summary.image("output_real", restored_real)
# fake
fake_images, varGen = Decoder(z, n, h, name="G", gray=args.gray)
latent_in_fake, _ = Encoder(fake_images, n, h, reuse=True, gray=args.gray)
restored_fake, _ = Decoder(latent_in_fake, n, h, name="D", reuse=True, gray=args.gray)
tf.summary.image("input_fake", fake_images)
tf.summary.image("output_fake", restored_fake)
# real loss
L_x = tf.reduce_mean(tf.abs(real_images - restored_real))
tf.summary.scalar("Real Loss", L_x)
# fake loss
L_z = tf.reduce_mean(tf.abs(fake_images - restored_fake))
tf.summary.scalar("Fake Loss", L_z)
# Discriminator/Generator loss
L_D = L_x - k_prev * L_z
L_G = L_z
tf.summary.scalar("Discriminator Loss", L_D)
tf.summary.scalar("Generator Loss", L_G)
        # proportional control of the equilibrium term k
k_next = k_prev + lambda_*(gamma*L_x - L_z)
tf.summary.scalar("curr_K", k_prev)
tf.summary.scalar("next_K", k_next)
# convergence measure
M_global = L_x + tf.abs(gamma*L_x - L_z)
tf.summary.scalar("approx_convergence_measure", M_global)
summary = tf.summary.merge_all()
# gradient descent
opt_D = tf.train.AdamOptimizer(lr_)
opt_G = tf.train.AdamOptimizer(lr_)
        # Note: each loss must apply its gradients to a different set of variables!
train_op_D = opt_D.minimize(L_D, var_list=varDisc)
train_op_G = opt_G.minimize(L_G, global_step, var_list=varGen)
saver = tf.train.Saver()
with tf.Session(config=tf.ConfigProto(allow_soft_placement=True)) as sess:
sess.run(tf.global_variables_initializer())
train_writer = tf.summary.FileWriter(model_folder,
sess.graph)
k_t_ = sess.run(k_initial)
t = tqdm(range(num_iter), desc="training BEGAN")
for epoch in t:
#### real_image ####
batch_xs, _ = mnist.train.next_batch(B)
#### fake_image ####
Z =
|
np.random.uniform(-1, 1, B * h)
|
numpy.random.uniform
|
# ----------------------------------------------------------------------
# Data statistics module
# ----------------------------------------------------------------------
# This module's purpose is to compute all relevant statistics and
# other data manipulation in order to extract information.
# It does not plot anything. To do this, go to visualization module.
#
# TODO:
# DONE compute KS distance for dedt_min
# -compute time scaling
# DONE compute space scaling
# DONE compute mask for all boxes
# DONE compute time average for 3 days
# DONE compute power law from data
# DONE compute CDF with histogram
# DONE compute alpha (exponent)
# -verify that every things works.
#
# CANCELED compute lagrangian trajectories from eulerian ones
# ----------------------------------------------------------------------
from matplotlib import pyplot as plt
import numpy as np
import libs.selection as sel
from libs.constants import *
class Scale(sel.Data):
"""
    This class is a child class to sel.Data, and a parent class to vis.Arctic. Its main goal is to compute the desired statistics at each scale.
"""
def __init__(
self,
directory: str = None,
time: str = None,
expno: str = None,
datatype: str = None,
tolerance: float = 0.1,
resolution: int = None,
nx: int = None,
ny: int = None,
):
"""
Class attributes for Scale.
Args:
directory (str, optional): directory from which to take data. Defaults to None.
time (str, optional): starting time, format supported: yyyy-mm-dd-hh-mm. Defaults to None.
expno (str, optional): experience number is of format nn. Defaults to None.
            datatype (str, optional): data types currently supported are: ice concentration (A), ice thickness (h), ice velocity vector (u), ice temp (Ti) (needs tweaks for pcolor), and ice deformation (dedt). Defaults to None.
tolerance (float, optional): value at which dedt will be cut to get rid of high boundary values. Defaults to 0.1.
resolution (int, optional): spatial resolution of the domain of interest.
nx, ny (int, optional): number of cells in each direction.
"""
super(Scale, self).__init__(
directory=directory,
time=time,
expno=expno,
datatype=datatype,
tolerance=tolerance,
resolution=resolution,
nx=nx,
ny=ny,
)
def _box(self, scale: int, i: int, j: int) -> np.ndarray:
"""
Computes the mask for the box for a given scale, with a corner point (i ,j).
Args:
scale (int): scale in grid cell number (e.g. 2, 4, 8, etc).
i (int): position in ny of the corner of the box.
j (int): position in nx of the corner of the box.
Returns:
np.ndarray: mask of the box in a (ny, nx) grid.
"""
# boxes definition
if scale + i <= self.ny and scale + j <= self.nx:
indices = np.ix_(np.arange(scale) + i, np.arange(scale) + j)
elif scale + i > self.ny and scale + j > self.nx:
extra_i = scale + i - self.ny
extra_j = scale + j - self.nx
indices = np.ix_(
np.arange(scale - extra_i) + i, np.arange(scale - extra_j) + j
)
elif scale + i > self.ny and scale + j <= self.nx:
extra_i = scale + i - self.ny
indices = np.ix_(
np.arange(scale - extra_i) + i, np.arange(scale) + j
)
elif scale + i <= self.ny and scale + j > self.nx:
extra_j = scale + j - self.nx
indices = np.ix_(
np.arange(scale) + i, np.arange(scale - extra_j) + j
)
# create box by creating a mask of ones on the grid
box = np.full((self.ny, self.nx), 1, dtype=int)
box[indices] = 0
return box
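    # Mask convention of _box (illustrative 4x4 grid, scale 2, corner (0, 0)):
    # cells inside the box are 0 and cells outside are 1, so that assigning the
    # result to a numpy masked array hides everything outside the box:
    #   [[0, 0, 1, 1],
    #    [0, 0, 1, 1],
    #    [1, 1, 1, 1],
    #    [1, 1, 1, 1]]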
def _signal_to_noise(self):
pass
def _time_average(self, formated_data: np.ndarray, dt: str) -> np.ndarray:
"""
        Function that computes the time average over 3 days depending on the time discretization of the data.
Args:
formated_data (np.ndarray): array of size (ny, nx, nt) where each nt is a snapshot at a given time = time_ini + nt * dt
dt (str): time difference between two points of data.
Raises:
            SystemExit: if the time step is not 1 day, 1 hour, or a multiple of 1 hour in minutes
Returns:
np.ndarray: all the means of size (ny , nx, 86(87)) (because 86(87) periods between 02/01 and 31/03 1997(2008))
"""
# create list
dtlist = [int(n) for n in dt.split("-") if n.isdigit()]
# if dt is days
if dtlist[0] == 1:
data_time_mean = [
(formated_data[..., 3 * n : 3 * n + 3].mean(axis=-1))
for n in range(formated_data.shape[-1] // 3)
]
# if dt is hours
elif dtlist[1] != 0:
period_per_day = 24 // dtlist[1]
data_time_mean = [
formated_data[
..., period_per_day * 3 * n : period_per_day * (3 * n + 3)
].mean(axis=-1)
for n in range(
(formated_data.shape[-1] // period_per_day) // 3
)
]
# if dt is minutes (unlikely)
elif dtlist[2] != 0:
period_per_day = 24 * 60 // dtlist[2]
data_time_mean = [
formated_data[
..., period_per_day * 3 * n : period_per_day * (3 * n + 3)
].mean(axis=-1)
for n in range(
(formated_data.shape[-1] - 3 * period_per_day)
// period_per_day
+ 1
)
]
else:
raise SystemExit(
"Unsupported time delta. Supported are 1 day, a multiple of 24 hours, or any multiple or 60 minutes."
)
return np.stack(data_time_mean, axis=-1)
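    # Worked example for _time_average (assuming hourly snapshots, i.e. a dt
    # string parsed as [0, 1, 0]): period_per_day = 24 // 1 = 24, so each 3-day
    # mean pools 24 * 3 = 72 snapshots; a 90-day run of 2160 snapshots therefore
    # collapses to 30 means along the last axis.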
def spatial_mean_box(
self,
formated_data: np.ndarray,
scales: list,
dt: str = None,
time_end: str = None,
from_velocity: bool = 0,
choice: int = 0,
) -> np.ndarray:
"""
        Function that computes the length and deformation rate means over all boxes and all scales for all periods of 3 days.
Args:
formated_data (np.ndarray): array of size (ny, nx, nt) or (ny, nx, 2, nt) where each nt is a snapshot at a given time = time_ini + nt * dt
scales (list): all scales under study in km.
dt (str, optional): time difference between two points of data. Defaults to None for one snapshot.
time_end (str, optional): time of the last point in the data. Defaults to None for one snapshot.
            from_velocity (bool, optional): whether to compute deformations from the velocities. Defaults to 0. Only matters when using the time average.
choice (int, optional): when computing vel_to_def, choice for which deformation to compute.
Raises:
SystemExit: when input scale is smaller than or equal to resolution of the data.
Returns:
            data (np.ndarray): array of size (n_scales, nx * ny * 86(87), 2). First is number of scales, second is max number of boxes (if smaller, replace the rest by NaNs). Third are: 0: deformation mean, 1: length mean.
            visc (np.ndarray): array of size (n_scales, nx * ny * 86(87)). Same thing but for the viscosity instead of deformation + length.
"""
# check if time average preprocessing is necessary
if len(formated_data.shape) >= 3:
# load viscosities
visc_raw = self.multi_load(dt, time_end, datatype="viscosity")
# time average the data
formated_data = self._time_average(formated_data, dt)
formated_visc = self._time_average(visc_raw, dt)
if from_velocity:
# compute the derivatives and the deformations
du, dv = self._derivative(
formated_data[:, :, 0, :], formated_data[:, :, 1, :]
)
formated_data = self._deformation(du, dv, choice)
formated_visc = formated_visc[1:-1, 1:-1, :]
# computes all the areas
areas = np.ones_like(formated_data[..., 0])
# initialize data array where we will put our means
data = np.empty(
(len(scales), self.ny * self.nx * formated_data.shape[-1], 2)
)
visc = np.empty(
(len(scales), self.ny * self.nx * formated_visc.shape[-1])
)
# loop over all scales
scale_iter = 0
for scale_km_unit in scales:
# verify validity of scale
if scale_km_unit <= self.resolution:
scale_iter += 1
raise SystemExit(
"Scale is smaller than or equal to resolution. It's not implemented yet."
)
# convert km into grid cell units
scale_grid_unit = scale_km_unit // self.resolution
# total number of boxes
box_iter = 0
# loop over all periods of 3 days
for period_iter in range(formated_data.shape[-1]):
# loops over all possible boxes that are in the domain
for i in range(0, self.ny, scale_grid_unit // 2):
for j in range(0, self.nx, scale_grid_unit // 2):
# verify that box is big enough (for boundaries).
mask = self._box(scale_grid_unit, i, j)
counts = np.unique(mask, return_counts=True)[1][0]
if counts >= scale_grid_unit ** 2 / 2:
# define arrays for mask
masked_data = np.ma.asarray(
formated_data[..., period_iter]
)
masked_areas = np.ma.asarray(areas)
masked_visc = np.ma.asarray(
formated_visc[..., period_iter]
)
# mask data with box + invalid
masked_data.mask = mask
masked_data =
|
np.ma.masked_invalid(masked_data)
|
numpy.ma.masked_invalid
|
import numpy as np
import pytest
from psyneulink.components.component import ComponentError
from psyneulink.components.functions.function import FunctionError
from psyneulink.components.functions.function import ConstantIntegrator, Exponential, Linear, Logistic, Reduce, Reinforcement, SoftMax
from psyneulink.components.functions.function import ExponentialDist, GammaDist, NormalDist, UniformDist, WaldDist, UniformToNormalDist
from psyneulink.components.mechanisms.mechanism import MechanismError
from psyneulink.components.mechanisms.processing.transfermechanism import TransferError, TransferMechanism
from psyneulink.globals.utilities import UtilitiesError
from psyneulink.components.process import Process
from psyneulink.components.system import System
class TestTransferMechanismInputs:
# VALID INPUTS
def test_transfer_mech_inputs_list_of_ints(self):
T = TransferMechanism(
name='T',
default_variable=[0, 0, 0, 0],
smoothing_factor=1.0,
integrator_mode=True
)
val = T.execute([10, 10, 10, 10])
assert np.allclose(val, [[10.0, 10.0, 10.0, 10.0]])
assert len(T.size) == 1 and T.size[0] == 4 and isinstance(T.size[0], np.integer)
# this test assumes size is returned as a 1D array: if it's not, then several tests in this file must be changed
def test_transfer_mech_inputs_list_of_floats(self):
T = TransferMechanism(
name='T',
default_variable=[0, 0, 0, 0],
smoothing_factor=1.0,
integrator_mode=True
)
val = T.execute([10.0, 10.0, 10.0, 10.0])
assert np.allclose(val, [[10.0, 10.0, 10.0, 10.0]])
# def test_transfer_mech_inputs_list_of_fns(self):
#
# T = TransferMechanism(
# name='T',
# default_variable=[0, 0, 0, 0],
# integrator_mode=True
# )
# val = T.execute([Linear().execute(), NormalDist().execute(), Exponential().execute(), ExponentialDist().execute()])
# assert np.allclose(val, [[np.array([0.]), 0.4001572083672233, np.array([1.]), 0.7872011523172707]]
def test_transfer_mech_variable_3D_array(self):
T = TransferMechanism(
name='T',
default_variable=[[[0, 0, 0, 0]], [[1, 1, 1, 1]]],
integrator_mode=True
)
np.testing.assert_array_equal(T.instance_defaults.variable, np.array([[[0, 0, 0, 0]], [[1, 1, 1, 1]]]))
def test_transfer_mech_variable_none_size_none(self):
T = TransferMechanism(
name='T'
)
assert len(T.instance_defaults.variable) == 1 and T.instance_defaults.variable[0] == 0
def test_transfer_mech_inputs_list_of_strings(self):
with pytest.raises(UtilitiesError) as error_text:
T = TransferMechanism(
name='T',
default_variable=[0, 0, 0, 0],
integrator_mode=True
)
T.execute(["one", "two", "three", "four"])
assert "has non-numeric entries" in str(error_text.value)
def test_transfer_mech_inputs_mismatched_with_default_longer(self):
with pytest.raises(MechanismError) as error_text:
T = TransferMechanism(
name='T',
default_variable=[0, 0, 0, 0],
integrator_mode=True
)
T.execute([1, 2, 3, 4, 5])
assert "does not match required length" in str(error_text.value)
def test_transfer_mech_inputs_mismatched_with_default_shorter(self):
with pytest.raises(MechanismError) as error_text:
T = TransferMechanism(
name='T',
default_variable=[0, 0, 0, 0, 0, 0],
integrator_mode=True
)
T.execute([1, 2, 3, 4, 5])
assert "does not match required length" in str(error_text.value)
class TestTransferMechanismNoise:
def test_transfer_mech_array_var_float_noise(self):
T = TransferMechanism(
name='T',
default_variable=[0, 0, 0, 0],
function=Linear(),
noise=5.0,
smoothing_factor=1.0,
integrator_mode=True
)
val = T.execute([0, 0, 0, 0])
assert np.allclose(val, [[5.0, 5.0, 5.0, 5.0]])
def test_transfer_mech_array_var_normal_len_1_noise(self):
T = TransferMechanism(
name='T',
default_variable=[0, 0, 0, 0],
function=Linear(),
noise=NormalDist().function,
smoothing_factor=1.0,
integrator_mode=True
)
val = T.execute([0, 0, 0, 0])
assert np.allclose(val, [[0.41059850193837233, 0.144043571160878, 1.454273506962975, 0.7610377251469934]])
def test_transfer_mech_array_var_normal_array_noise(self):
T = TransferMechanism(
name='T',
default_variable=[0, 0, 0, 0],
function=Linear(),
noise=[NormalDist().function, NormalDist().function, NormalDist().function, NormalDist().function],
smoothing_factor=1.0,
integrator_mode=True
)
val = T.execute([0, 0, 0, 0])
expected = [0.7610377251469934, 0.12167501649282841, 0.44386323274542566, 0.33367432737426683]
for i in range(len(val[0])):
assert val[0][i] == expected[i]
def test_transfer_mech_array_var_normal_array_noise2(self):
T = TransferMechanism(
name='T',
default_variable=[0, 0, 0, 0],
function=Linear(),
noise=[5.0, 5.0, 5.0, 5.0],
smoothing_factor=1.0,
integrator_mode=True
)
val = T.execute([0, 0, 0, 0])
assert np.allclose(val, [[5.0, 5.0, 5.0, 5.0]])
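    # Note on the noise tests above: a float (or a list of floats) is added as a
    # fixed offset on every execution, while a distribution function (or a list
    # of functions) is called at execution time, so the added values are drawn
    # from the distribution (the exact expected numbers presumably rely on a
    # random seed fixed elsewhere in the test suite).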
def test_transfer_mech_mismatched_shape_noise(self):
with pytest.raises(MechanismError) as error_text:
T = TransferMechanism(
name='T',
default_variable=[0, 0],
function=Linear(),
noise=[5.0, 5.0, 5.0],
smoothing_factor=0.1,
integrator_mode=True
)
T.execute()
assert 'Noise parameter' in str(error_text.value) and "does not match default variable" in str(
error_text.value)
def test_transfer_mech_mismatched_shape_noise_2(self):
with pytest.raises(MechanismError) as error_text:
T = TransferMechanism(
name='T',
default_variable=[0, 0, 0],
function=Linear(),
noise=[5.0, 5.0],
smoothing_factor=0.1,
integrator_mode=True
)
T.execute()
assert 'Noise parameter' in str(error_text.value) and "does not match default variable" in str(error_text.value)
class TestDistributionFunctions:
def test_transfer_mech_normal_noise(self):
T = TransferMechanism(
name='T',
default_variable=[0, 0, 0, 0],
function=Linear(),
noise=NormalDist().function,
smoothing_factor=1.0,
integrator_mode=True
)
val = T.execute([0, 0, 0, 0])
assert np.allclose(val, [[0.41059850193837233, 0.144043571160878, 1.454273506962975, 0.7610377251469934]])
def test_transfer_mech_normal_noise_standard_dev_error(self):
with pytest.raises(FunctionError) as error_text:
standard_deviation = -2.0
T = TransferMechanism(
name="T",
default_variable=[0, 0, 0, 0],
function=Linear(),
noise=NormalDist(standard_dev=standard_deviation).function,
smoothing_factor=1.0,
integrator_mode=True
)
assert "The standard_dev parameter" in str(error_text) and "must be greater than zero" in str(error_text)
def test_transfer_mech_exponential_noise(self):
T = TransferMechanism(
name='T',
default_variable=[0, 0, 0, 0],
function=Linear(),
noise=ExponentialDist().function,
smoothing_factor=1.0,
integrator_mode=True
)
val = T.execute([0, 0, 0, 0])
assert np.allclose(val, [[0.4836021009022533, 1.5688961399691683, 0.7526741095365884, 0.8394328467388229]])
def test_transfer_mech_uniform_to_normal_noise(self):
try:
import scipy
T = TransferMechanism(
name='T',
default_variable=[0, 0, 0, 0],
function=Linear(),
noise=UniformToNormalDist().function,
smoothing_factor=1.0
)
np.random.seed(22)
val = T.execute([0, 0, 0, 0])
assert np.allclose(val, [[-0.81177443, -0.04593492, -0.20051725, 1.07665147]])
except:
with pytest.raises(FunctionError) as error_text:
T = TransferMechanism(
name='T',
default_variable=[0, 0, 0, 0],
function=Linear(),
noise=UniformToNormalDist().function,
smoothing_factor=1.0
)
assert "The UniformToNormalDist function requires the SciPy package." in str(error_text)
def test_transfer_mech_Uniform_noise(self):
T = TransferMechanism(
name='T',
default_variable=[0, 0, 0, 0],
function=Linear(),
noise=UniformDist().function,
smoothing_factor=1.0,
integrator_mode=True
)
val = T.execute([0, 0, 0, 0])
assert
|
np.allclose(val, [[0.3834415188257777, 0.7917250380826646, 0.5288949197529045, 0.5680445610939323]])
|
numpy.allclose
|
from __future__ import absolute_import
from __future__ import print_function
import sys
import glob
import time
import numpy as np
import pandas as pd
import os.path
import time
import datetime
import re
from keras.preprocessing import sequence
from keras.optimizers import SGD, RMSprop, Adagrad
from keras.utils import np_utils
from keras.models import Sequential, Graph, Model
from keras.models import model_from_json
from keras.layers import Input, merge, Flatten, Dense, Activation, Convolution1D, ZeroPadding1D
#from keras.layers.core import Dense, Dropout, Activation, TimeDistributedDense, Flatten, Reshape, Permute, Merge, Lambda
#from keras.layers.convolutional import Convolution1D, MaxPooling1D, Convolution2D, MaxPooling2D, UpSampling1D, UpSampling2D, ZeroPadding1D
from keras.layers.advanced_activations import ParametricSoftplus, SReLU
from keras.callbacks import ModelCheckpoint, Callback
import matplotlib.pyplot as plt
path = "./training_data_large/" # to make sure signal files are written in same directory as data files
def draw_model(model):
from IPython.display import SVG
from keras.utils.visualize_util import model_to_dot
from keras.utils.visualize_util import plot
#graph = to_graph(model, show_shape=True)
#graph.write_png("UFCNN_1.png")
SVG(model_to_dot(model).create(prog='dot', format='svg'))
plot(model, to_file='UFCNN_1.png')
def print_nodes_shapes(model):
for k, v in model.inputs.items():
print("{} : {} : {} : {}".format(k, type(v), v.input_shape, v.output_shape))
for k, v in model.nodes.items():
print("{} : {} : {} : {}".format(k, type(v), v.input_shape, v.output_shape))
for k, v in model.outputs.items():
print("{} : {} : {} : {}".format(k, type(v), v.input_shape, v.output_shape))
def print_layers_shapes(model):
for l in model.layers:
print("{} : {} : {}".format(type(l), l.input_shape, l.output_shape))
def save_neuralnet (model, model_name):
json_string = model.to_json()
open(path + model_name + '_architecture.json', 'w').write(json_string)
model.save_weights(path + model_name + '_weights.h5', overwrite=True)
yaml_string = model.to_yaml()
with open(path + model_name + '_data.yml', 'w') as outfile:
outfile.write( yaml_string)
def load_neuralnet(model_name):
"""
reading the model from disk - including all the trained weights and the complete model design (hyperparams, planes,..)
"""
arch_name = path + model_name + '_architecture.json'
weight_name = path + model_name + '_weights.h5'
if not os.path.isfile(arch_name) or not os.path.isfile(weight_name):
print("model_name given and file %s and/or %s not existing. Aborting." % (arch_name, weight_name))
sys.exit()
print("Loaded model: ",model_name)
model = model_from_json(open(arch_name).read())
model.load_weights(weight_name)
return model
def ufcnn_model_concat(sequence_length=5000,
features=1,
nb_filter=150,
filter_length=5,
output_dim=1,
optimizer='adagrad',
loss='mse',
regression = True,
class_mode=None,
activation="softplus",
init="lecun_uniform"):
#model = Graph()
#model.add_input(name='input', input_shape=(None, features))
main_input = Input(name='input', shape=(None, features))
#########################################################
#model.add_node(ZeroPadding1D(2), name='input_padding', input='input') # to avoid lookahead bias
input_padding = (ZeroPadding1D(2))(main_input) # to avoid lookahead bias
#########################################################
#model.add_node(Convolution1D(nb_filter=nb_filter, filter_length=filter_length, border_mode='valid', init=init, input_shape=(sequence_length, features)), name='conv1', input='input_padding')
#model.add_node(Activation(activation), name='relu1', input='conv1')
conv1 = Convolution1D(nb_filter=nb_filter, filter_length=filter_length, border_mode='valid', init=init, input_shape=(sequence_length, features))(input_padding)
relu1 = (Activation(activation))(conv1)
#########################################################
#model.add_node(Convolution1D(nb_filter=nb_filter, filter_length=filter_length, border_mode='same', init=init), name='conv2', input='relu1')
#model.add_node(Activation(activation), name='relu2', input='conv2')
conv2 = (Convolution1D(nb_filter=nb_filter, filter_length=filter_length, border_mode='same', init=init))(relu1)
relu2 = (Activation(activation))(conv2)
#########################################################
#model.add_node(Convolution1D(nb_filter=nb_filter, filter_length=filter_length, border_mode='same', init=init), name='conv3', input='relu2')
#model.add_node(Activation(activation), name='relu3', input='conv3')
conv3 = (Convolution1D(nb_filter=nb_filter, filter_length=filter_length, border_mode='same', init=init))(relu2)
relu3 = (Activation(activation))(conv3)
#########################################################
#model.add_node(Convolution1D(nb_filter=nb_filter, filter_length=filter_length, border_mode='same', init=init), name='conv4', input='relu3')
#model.add_node(Activation(activation), name='relu4', input='conv4')
conv4 = (Convolution1D(nb_filter=nb_filter, filter_length=filter_length, border_mode='same', init=init))(relu3)
relu4 = (Activation(activation))(conv4)
#########################################################
#model.add_node(Convolution1D(nb_filter=nb_filter, filter_length=filter_length, border_mode='same', init=init), name='conv5', input='relu4')
#model.add_node(Activation(activation), name='relu5', input='conv5')
conv5 = (Convolution1D(nb_filter=nb_filter, filter_length=filter_length, border_mode='same', init=init))(relu4)
relu5 = (Activation(activation))(conv5)
#########################################################
#model.add_node(Convolution1D(nb_filter=nb_filter,filter_length=filter_length, border_mode='same', init=init),
# name='conv6',
# inputs=['relu3', 'relu5'],
# merge_mode='concat', concat_axis=-1)
#model.add_node(Activation(activation), name='relu6', input='conv6')
conv6 = merge([relu3, relu5], mode='concat', concat_axis=1)
relu6 = (Activation(activation))(conv6)
#########################################################
#model.add_node(Convolution1D(nb_filter=nb_filter,filter_length=filter_length, border_mode='same', init=init),
# name='conv7',
# inputs=['relu2', 'relu6'],
# merge_mode='concat', concat_axis=-1)
#model.add_node(Activation(activation), name='relu7', input='conv7')
conv7 = merge([relu2, relu6], mode='concat', concat_axis=1)
relu7 = (Activation(activation))(conv7)
#########################################################
#model.add_node(Convolution1D(nb_filter=nb_filter,filter_length=filter_length, border_mode='same', init=init),
# name='conv8',
# inputs=['relu1', 'relu7'],
# merge_mode='concat', concat_axis=-1)
#model.add_node(Activation(activation), name='relu8', input='conv8')
conv8 = merge([relu1, relu7], mode='concat', concat_axis=1)
relu8 = (Activation(activation))(conv8)
#########################################################
if regression:
#########################################################
#model.add_node(Convolution1D(nb_filter=output_dim, filter_length=sequence_length, border_mode='same', init=init), name='conv9', input='relu8')
#model.add_output(name='output', input='conv9')
conv9 = Convolution1D(nb_filter=output_dim, filter_length=sequence_length, border_mode='same', init=init)(relu8)
output = conv9
#main_output = conv9.output
else:
#model.add_node(Convolution1D(nb_filter=output_dim, filter_length=sequence_length, border_mode='same', init=init), name='conv9', input='relu8')
#model.add_node(Activation('softmax'), name='activation', input='conv9')
#model.add_output(name='output', input='activation')
conv9 = Convolution1D(nb_filter=output_dim, filter_length=sequence_length, border_mode='same', init=init)(relu8)
activation = (Activation('softmax'))(conv9)
#main_output = activation.output
output = activation
#model.compile(optimizer=optimizer, loss={'output': loss})
model = Model(input=main_input, output=output)
model.compile(optimizer=optimizer, loss=loss)
return model
def ufcnn_model_deconv(sequence_length=5000,
features=4,
nb_filter=150,
filter_length=5,
output_dim=1,
optimizer='adagrad',
loss='mse',
regression = False,
class_mode=None,
activation="softplus",
init="lecun_uniform"):
#model = Graph()
#model.add_input(name='input', input_shape=(None, features))
main_input = Input(name='input', shape=(None, features))
#########################################################
#model.add_node(ZeroPadding1D(2), name='input_padding', input='input') # to avoid lookahead bias
input_padding = (ZeroPadding1D(2))(main_input) # to avoid lookahead bias
#########################################################
#model.add_node(Convolution1D(nb_filter=nb_filter, filter_length=filter_length, border_mode='valid', init=init, input_shape=(sequence_length, features)), name='conv1', input='input_padding')
#model.add_node(Activation(activation), name='relu1', input='conv1')
conv1 = Convolution1D(nb_filter=nb_filter, filter_length=filter_length, border_mode='valid', init=init, input_shape=(sequence_length, features))(input_padding)
relu1 = (Activation(activation))(conv1)
#########################################################
#model.add_node(Convolution1D(nb_filter=nb_filter, filter_length=filter_length, border_mode='same', init=init), name='conv2', input='relu1')
#model.add_node(Activation(activation), name='relu2', input='conv2')
conv2 = (Convolution1D(nb_filter=nb_filter, filter_length=filter_length, border_mode='same', init=init))(relu1)
relu2 = (Activation(activation))(conv2)
#########################################################
#model.add_node(Convolution1D(nb_filter=nb_filter, filter_length=filter_length, border_mode='same', init=init), name='conv3', input='relu2')
#model.add_node(Activation(activation), name='relu3', input='conv3')
conv3 = (Convolution1D(nb_filter=nb_filter, filter_length=filter_length, border_mode='same', init=init))(relu2)
relu3 = (Activation(activation))(conv3)
#########################################################
#model.add_node(Convolution1D(nb_filter=nb_filter, filter_length=filter_length, border_mode='same', init=init), name='conv4', input='relu3')
#model.add_node(Activation(activation), name='relu4', input='conv4')
conv4 = (Convolution1D(nb_filter=nb_filter, filter_length=filter_length, border_mode='same', init=init))(relu3)
relu4 = (Activation(activation))(conv4)
#########################################################
#model.add_node(Convolution1D(nb_filter=nb_filter, filter_length=filter_length, border_mode='same', init=init), name='conv5', input='relu4')
#model.add_node(Activation(activation), name='relu5', input='conv5')
conv5 = (Convolution1D(nb_filter=nb_filter, filter_length=filter_length, border_mode='same', init=init))(relu4)
relu5 = (Activation(activation))(conv5)
#########################################################
#model.add_node(Convolution1D(nb_filter=nb_filter,filter_length=filter_length, border_mode='same', init=init),
# name='conv6',
# inputs=['relu3', 'relu5'],
# merge_mode='concat', concat_axis=-1)
#model.add_node(Activation(activation), name='relu6', input='conv6')
conv6 = merge([relu3, relu5], mode='concat', concat_axis=1)
relu6 = (Activation(activation))(conv6)
#########################################################
#model.add_node(Convolution1D(nb_filter=nb_filter,filter_length=filter_length, border_mode='same', init=init),
# name='conv7',
# inputs=['relu2', 'relu6'],
# merge_mode='concat', concat_axis=-1)
#model.add_node(Activation(activation), name='relu7', input='conv7')
conv7 = merge([relu2, relu6], mode='concat', concat_axis=1)
relu7 = (Activation(activation))(conv7)
#########################################################
#model.add_node(Convolution1D(nb_filter=nb_filter,filter_length=filter_length, border_mode='same', init=init),
# name='conv8',
# inputs=['relu1', 'relu7'],
# merge_mode='concat', concat_axis=-1)
#model.add_node(Activation(activation), name='relu8', input='conv8')
conv8 = merge([relu1, relu7], mode='concat', concat_axis=1)
relu8 = (Activation(activation))(conv8)
#########################################################
if regression:
#########################################################
#model.add_node(Convolution1D(nb_filter=output_dim, filter_length=sequence_length, border_mode='same', init=init), name='conv9', input='relu8')
#model.add_output(name='output', input='conv9')
conv9 = Convolution1D(nb_filter=output_dim, filter_length=sequence_length, border_mode='same', init=init)(relu8)
output = conv9
#main_output = conv9.output
else:
#model.add_node(Convolution1D(nb_filter=output_dim, filter_length=sequence_length, border_mode='same', init=init), name='conv9', input='relu8')
#model.add_node(Activation('softmax'), name='activation', input='conv9')
#model.add_output(name='output', input='activation')
conv9 = Convolution1D(nb_filter=output_dim, filter_length=sequence_length, border_mode='same', init=init)(relu8)
activation = (Activation('softmax'))(conv9)
#main_output = activation.output
output = activation
#model.compile(optimizer=optimizer, loss={'output': loss})
model = Model(input=main_input, output=output)
model.compile(optimizer=optimizer, loss=loss)
return model
def ufcnn_model_seq(sequence_length=5000,
features=1,
nb_filter=150,
filter_length=5,
output_dim=1,
optimizer='adagrad',
loss='mse',
regression = True,
class_mode=None,
init="lecun_uniform"):
model = Sequential()
model.add(ZeroPadding1D(2, input_shape=(None, features)))
#########################################################
model.add(Convolution1D(nb_filter=nb_filter, filter_length=filter_length, border_mode='valid', init=init))
model.add(Activation('relu'))
model.add(Convolution1D(nb_filter=output_dim, filter_length=sequence_length, border_mode='same', init=init))
model.add(Activation('sigmoid'))
model.compile(optimizer=optimizer, loss=loss)
return model
def ufcnn_model(sequence_length=5000,
features=1,
nb_filter=150,
filter_length=5,
output_dim=1,
optimizer='adagrad',
loss='mse',
regression = True,
class_mode=None,
init="lecun_uniform",
mode='concat'):
if mode == 'concat':
return ufcnn_model_concat(sequence_length,
features,
nb_filter,
filter_length,
output_dim,
optimizer,
loss,
regression,
class_mode,
init)
else:
        raise NotImplementedError("Only mode='concat' is implemented.")
def gen_cosine_amp(amp=100, period=25, x0=0, xn=50000, step=1, k=0.0001):
"""Generates an absolute cosine time series with the amplitude
exponentially decreasing
Arguments:
amp: amplitude of the cosine function
period: period of the cosine function
x0: initial x of the time series
xn: final x of the time series
step: step of the time series discretization
k: exponential rate
Ernst 20160301 from https://github.com/fchollet/keras/blob/master/examples/stateful_lstm.py
as a first test for the ufcnn
"""
cos = np.zeros(((xn - x0) * step, 1, 1))
print("Cos. Shape",cos.shape)
for i in range(len(cos)):
idx = x0 + i * step
cos[i, 0, 0] = amp * np.cos(idx / (2 * np.pi * period))
cos[i, 0, 0] = cos[i, 0, 0] * np.exp(-k * idx)
return cos
def train_and_predict_regression(model, sequence_length=5000, batch_size=128, epochs=5):
lahead = 1
cos = gen_cosine_amp(xn = sequence_length * 100)
expected_output = np.zeros((len(cos), 1, 1))
for i in range(len(cos) - lahead):
expected_output[i, 0] = np.mean(cos[i + 1:i + lahead + 1])
print('Training')
for i in range(epochs):
print('Epoch', i, '/', epochs)
model.fit({'input': cos, 'output': expected_output},
verbose=1,
nb_epoch=1,
shuffle=False,
batch_size=batch_size)
print('Predicting')
predicted_output = model.predict({'input': cos,}, batch_size=batch_size)
return {'model': model, 'predicted_output': predicted_output, 'expected_output': expected_output}
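# Usage sketch for the cosine-regression experiment above (hyper-parameter
# values are illustrative assumptions, not tuned settings):
#   model = ufcnn_model(sequence_length=5000, features=1, nb_filter=150, filter_length=5)
#   results = train_and_predict_regression(model, sequence_length=5000, batch_size=128, epochs=5)
#   predicted = results['predicted_output']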
def treat_X_tradcom(mean):
""" treat some columns of the dataframe together when normalizing the dataframe:
col. 1, 2, 4 ... Mkt Price, Bid price, Ask Price
col 3 and 5 ... Ask & Bid price
"""
result = mean.copy()
#print("Result before max",result)
mkt = mean[1]
bid_px = mean[2]
ask_px = mean[4]
px_max=max(mkt,bid_px,ask_px)
result[1] = px_max
result[2] = px_max
result[4] = px_max
bid = mean[3]
ask = mean[5]
ba_max=max(bid,ask)
result[3] = ba_max
result[5] = ba_max
print("Result after max",result)
return result
def standardize_inputs(source, colgroups=None, mean=None, std=None):
"""
Standardize input features.
Groups of features could be listed in order to be standardized together.
source: Pandas.DataFrame or filename of csv file with features
colgroups: list of lists of groups of features to be standardized together (e.g. bid/ask price, bid/ask size)
returns Xdf ...Pandas.DataFrame, mean ...Pandas.DataFrame, std ...Pandas.DataFrame
"""
import itertools
import types
#if isinstance(source, types.StringTypes):
if isinstance(source, str):
Xdf = pd.read_csv(source, sep=" ", index_col = 0, header = None)
elif isinstance(source, pd.DataFrame):
Xdf = source
else:
raise TypeError
df = pd.DataFrame()
me = pd.DataFrame()
st = pd.DataFrame()
for colgroup in colgroups:
_df,_me,_st = standardize_columns(Xdf[colgroup])
# if mean & std are given, do not multiply with colgroup mean
if mean is not None and std is not None:
_df = Xdf[colgroup]
df = pd.concat([df, _df], axis=1)
me = pd.concat([me, _me])
st = pd.concat([st, _st])
print("In Group me")
print(me)
# _temp_list = list(itertools.chain.from_iterable(colgroups))
separate_features = [col for col in Xdf.columns if col not in list(itertools.chain.from_iterable(colgroups))]
if mean is None and std is None:
_me = Xdf[separate_features].mean()
_df = Xdf[separate_features].sub(_me)
_st = Xdf[separate_features].std()
_df = _df[separate_features].div(_st)
else:
_df = Xdf[separate_features]
df = pd.concat([df, _df], axis=1)
me = pd.concat([me, _me])
st = pd.concat([st, _st])
me = pd.Series(me[0])
st = pd.Series(st[0])
if mean is not None and std is not None:
df = df.sub(mean)
df = df.div(std)
return df, me, st
def standardize_columns(colgroup):
"""
Standardize group of columns together
colgroup: Pandas.DataFrame
returns: Pandas.DataFrames: Colum Group standardized, Mean of the colgroup, stddeviation of the colgroup
"""
_me = np.mean(colgroup.values.flatten())
centered = colgroup.sub(_me)
me = pd.DataFrame(np.full(len(colgroup.columns),_me), index=colgroup.columns)
_st = np.std(colgroup.values.flatten())
standardized = centered.div(_st)
st = pd.DataFrame(np.full(len(colgroup.columns),_st), index=colgroup.columns)
return standardized, me, st
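# Worked example for standardize_columns (assumed values): for a two-column
# group [[1, 3], [3, 5]] the flattened mean is 3 and the flattened std is
# sqrt(2), so both columns are shifted and scaled by the same constants, which
# preserves the relative level between the columns (unlike per-column scaling).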
def get_tradcom_normalization(filename, mean=None, std=None):
""" read in all X Data Frames and find mean and std of all columns...
"""
Xdf = pd.read_csv(filename, sep=" ", index_col = 0, header = None)
meanLoc = treat_X_tradcom(Xdf.mean())
print("Mean Loc")
print (meanLoc)
sys.stdout.flush()
if mean is None:
mean = meanLoc
mean = mean.to_frame().transpose()
meanDf=pd.concat([mean, meanLoc.to_frame().transpose()])
mean = meanDf.max()
print("Mean")
print (mean)
sys.stdout.flush()
stdLoc = treat_X_tradcom(Xdf.std())
print("Std Loc")
print (stdLoc)
sys.stdout.flush()
if std is None:
std = stdLoc
std = std.to_frame().transpose()
stdDf=pd.concat([std, stdLoc.to_frame().transpose()])
std = stdDf.max()
print("Std")
print (std)
sys.stdout.flush()
return(mean, std)
def prepare_tradcom_classification(training=True,
ret_type='df',
sequence_length=5000,
features_list=[1,2,3,4],
output_dim=3,
file_list=None,
mean=None,
std=None,
training_count=None):
"""
prepare the datasets for the trading competition. training determines which datasets will be read
returns: X and y: Pandas.DataFrames or np-Arrays storing the X - and y values for the fitting.
TODO: refactor - move file operations to separate functions, move stacking to function,
remove commented blocks and undesired print statements
"""
load_file = {'df': pd.read_pickle,
'stack': np.load,
'flat': np.load}
save_file = {'df': lambda filename, obj: obj.to_pickle(filename),
'stack': lambda filename, obj:
|
np.save(filename, obj)
|
numpy.save
|
'''
Test of steady-state module
'''
import multiprocessing
from distributed import Client, LocalCluster
import pytest
import numpy as np
import os
from ogusa import SS, utils, aggregates, household, execute, constants
from ogusa.parameters import Specifications
CUR_PATH = os.path.abspath(os.path.dirname(__file__))
NUM_WORKERS = min(multiprocessing.cpu_count(), 7)
@pytest.fixture(scope="module")
def dask_client():
cluster = LocalCluster(n_workers=NUM_WORKERS, threads_per_worker=2)
client = Client(cluster)
yield client
# teardown
client.close()
cluster.close()
input_tuple = utils.safe_read_pickle(
os.path.join(CUR_PATH, 'test_io_data', 'SS_fsolve_inputs.pkl'))
guesses_in, params = input_tuple
params = params + (None, 1)
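# Append placeholders for client (None) and num_workers (1) so the tuple matches the unpacking below.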
(bssmat, nssmat, chi_params, ss_params, income_tax_params,
iterative_params, small_open_params, client, num_workers) = params
p1 = Specifications()
(p1.J, p1.S, p1.T, p1.BW, p1.beta, p1.sigma, p1.alpha, p1.gamma, p1.epsilon,
Z, p1.delta, p1.ltilde, p1.nu, p1.g_y, p1.g_n_ss, tau_payroll,
tau_bq, p1.rho, p1.omega_SS, p1.budget_balance, alpha_T,
p1.debt_ratio_ss, tau_b, delta_tau, lambdas, imm_rates, p1.e,
retire, p1.mean_income_data, h_wealth, p_wealth, m_wealth,
p1.b_ellipse, p1.upsilon) = ss_params
p1.eta = (p1.omega_SS.reshape(p1.S, 1) *
p1.lambdas.reshape(1, p1.J)).reshape(1, p1.S, p1.J)
p1.Z = np.ones(p1.T + p1.S) * Z
p1.tau_bq = np.ones(p1.T + p1.S) * 0.0
p1.tau_payroll = np.ones(p1.T + p1.S) * tau_payroll
p1.alpha_T = np.ones(p1.T + p1.S) * alpha_T
p1.tau_b = np.ones(p1.T + p1.S) * tau_b
p1.delta_tau = np.ones(p1.T + p1.S) * delta_tau
p1.h_wealth = np.ones(p1.T + p1.S) * h_wealth
p1.p_wealth = np.ones(p1.T + p1.S) * p_wealth
p1.m_wealth = np.ones(p1.T + p1.S) * m_wealth
p1.retire = (np.ones(p1.T + p1.S) * retire).astype(int)
p1.lambdas = lambdas.reshape(p1.J, 1)
p1.imm_rates = imm_rates.reshape(1, p1.S)
p1.tax_func_type = 'DEP'
p1.zeta_K = np.array([0.0])
p1.zeta_D = np.array([0.0])
p1.initial_foreign_debt_ratio = 0.0
p1.r_gov_shift = np.array([0.0])
p1.start_year = 2019
p1.baseline = False
p1.baseline = True
p1.analytical_mtrs, etr_params, mtrx_params, mtry_params =\
income_tax_params
p1.etr_params = np.transpose(etr_params.reshape(
p1.S, 1, etr_params.shape[-1]), (1, 0, 2))
p1.mtrx_params = np.transpose(mtrx_params.reshape(
p1.S, 1, mtrx_params.shape[-1]), (1, 0, 2))
p1.mtry_params = np.transpose(mtry_params.reshape(
p1.S, 1, mtry_params.shape[-1]), (1, 0, 2))
p1.maxiter, p1.mindist_SS = iterative_params
p1.chi_b, p1.chi_n = chi_params
small_open, firm_r, hh_r = small_open_params
p1.world_int_rate = np.ones(p1.T + p1.S) * firm_r
p1.num_workers = 1
BQ1 = np.ones((p1.J)) * 0.00019646295986015257
guesses1 = [guesses_in[0]] + list(BQ1) + [guesses_in[1]] + [guesses_in[2]]
args1 = (bssmat, nssmat, None, None, p1, client)
expected1 = np.array([0.06858352869423862, 0.0157424466869841,
0.020615373965602958, 0.02225725864386594,
0.01359148091834126, 0.01604345296066714,
0.018393166562212734, 0.0033730256425707566,
-0.07014671511880782, 0.05424969771042221])
input_tuple = utils.safe_read_pickle(
os.path.join(CUR_PATH, 'test_io_data', 'SS_fsolve_reform_inputs.pkl'))
guesses_in2, params = input_tuple
params = params + (None, 1)
(bssmat, nssmat, chi_params, ss_params, income_tax_params,
iterative_params, factor, small_open_params, client,
num_workers) = params
p2 = Specifications()
(p2.J, p2.S, p2.T, p2.BW, p2.beta, p2.sigma, p2.alpha, p2.gamma, p2.epsilon,
Z, p2.delta, p2.ltilde, p2.nu, p2.g_y, p2.g_n_ss, tau_payroll,
tau_bq, p2.rho, p2.omega_SS, p2.budget_balance, alpha_T,
p2.debt_ratio_ss, tau_b, delta_tau, lambdas, imm_rates, p2.e,
retire, p2.mean_income_data, h_wealth, p_wealth, m_wealth,
p2.b_ellipse, p2.upsilon) = ss_params
p2.eta = (p2.omega_SS.reshape(p2.S, 1) *
p2.lambdas.reshape(1, p2.J)).reshape(1, p2.S, p2.J)
p2.Z = np.ones(p2.T + p2.S) * Z
p2.tau_bq = np.ones(p2.T + p2.S) * 0.0
p2.tau_payroll = np.ones(p2.T + p2.S) * tau_payroll
p2.alpha_T = np.ones(p2.T + p2.S) * alpha_T
p2.tau_b = np.ones(p2.T + p2.S) * tau_b
p2.delta_tau = np.ones(p2.T + p2.S) * delta_tau
p2.h_wealth = np.ones(p2.T + p2.S) * h_wealth
p2.p_wealth = np.ones(p2.T + p2.S) * p_wealth
p2.m_wealth = np.ones(p2.T + p2.S) * m_wealth
p2.retire = (np.ones(p2.T + p2.S) * retire).astype(int)
p2.lambdas = lambdas.reshape(p2.J, 1)
p2.imm_rates = imm_rates.reshape(1, p2.S)
p2.tax_func_type = 'DEP'
p2.zeta_K = np.array([0.0])
p2.zeta_D = np.array([0.0])
p2.initial_foreign_debt_ratio = 0.0
p2.r_gov_shift = np.array([0.0])
p2.start_year = 2019
p2.baseline = False
p2.analytical_mtrs, etr_params, mtrx_params, mtry_params =\
income_tax_params
p2.etr_params = np.transpose(etr_params.reshape(
p2.S, 1, etr_params.shape[-1]), (1, 0, 2))
p2.mtrx_params = np.transpose(mtrx_params.reshape(
p2.S, 1, mtrx_params.shape[-1]), (1, 0, 2))
p2.mtry_params = np.transpose(mtry_params.reshape(
p2.S, 1, mtry_params.shape[-1]), (1, 0, 2))
p2.maxiter, p2.mindist_SS = iterative_params
p2.chi_b, p2.chi_n = chi_params
small_open, firm_r, hh_r = small_open_params
p2.world_int_rate = np.ones(p2.T + p2.S) * firm_r
p2.num_workers = 1
BQ2 = np.ones((p2.J)) * 0.00019646295986015257
guesses2 = [guesses_in2[0]] + list(BQ2) + [guesses_in2[1]]
args2 = (bssmat, nssmat, None, factor, p2, client)
expected2 = np.array([0.016757343762877415, 0.01435509375160598,
0.019450554513959047, 0.020767620498430173,
0.012363834824786278, 0.014583252714123543,
0.01716246184210253, 0.003106382567096101,
0.0016798428580572025])
input_tuple = utils.safe_read_pickle(
os.path.join(
CUR_PATH,
'test_io_data', 'SS_fsolve_reform_baselinespend_inputs.pkl'))
guesses_in3, params = input_tuple
params = params + (None, 1)
(bssmat, nssmat, TR_ss, chi_params, ss_params, income_tax_params,
iterative_params, factor_ss, small_open_params, client,
num_workers) = params
p3 = Specifications()
(p3.J, p3.S, p3.T, p3.BW, p3.beta, p3.sigma, p3.alpha, p3.gamma, p3.epsilon,
Z, p3.delta, p3.ltilde, p3.nu, p3.g_y, p3.g_n_ss, tau_payroll,
tau_bq, p3.rho, p3.omega_SS, p3.budget_balance, alpha_T,
p3.debt_ratio_ss, tau_b, delta_tau, lambdas, imm_rates, p3.e,
retire, p3.mean_income_data, h_wealth, p_wealth, m_wealth,
p3.b_ellipse, p3.upsilon) = ss_params
p3.eta = (p3.omega_SS.reshape(p3.S, 1) *
p3.lambdas.reshape(1, p3.J)).reshape(1, p3.S, p3.J)
p3.Z = np.ones(p3.T + p3.S) * Z
p3.tau_bq = np.ones(p3.T + p3.S) * 0.0
p3.tau_payroll = np.ones(p3.T + p3.S) * tau_payroll
p3.alpha_T = np.ones(p3.T + p3.S) * alpha_T
p3.tau_b = np.ones(p3.T + p3.S) * tau_b
p3.delta_tau = np.ones(p3.T + p3.S) * delta_tau
p3.h_wealth = np.ones(p3.T + p3.S) * h_wealth
p3.p_wealth = np.ones(p3.T + p3.S) * p_wealth
p3.m_wealth = np.ones(p3.T + p3.S) * m_wealth
p3.retire = (np.ones(p3.T + p3.S) * retire).astype(int)
p3.lambdas = lambdas.reshape(p3.J, 1)
p3.imm_rates = imm_rates.reshape(1, p3.S)
p3.tax_func_type = 'DEP'
p3.zeta_K = np.array([0.0])
p3.zeta_D = np.array([0.0])
p3.initial_foreign_debt_ratio = 0.0
p3.r_gov_shift = np.array([0.0])
p3.start_year = 2019
p3.baseline = False
p3.baseline_spending = True
p3.analytical_mtrs, etr_params, mtrx_params, mtry_params =\
income_tax_params
p3.etr_params = np.transpose(etr_params.reshape(
p3.S, 1, etr_params.shape[-1]), (1, 0, 2))
p3.mtrx_params = np.transpose(mtrx_params.reshape(
p3.S, 1, mtrx_params.shape[-1]), (1, 0, 2))
p3.mtry_params = np.transpose(mtry_params.reshape(
p3.S, 1, mtry_params.shape[-1]), (1, 0, 2))
p3.maxiter, p3.mindist_SS = iterative_params
p3.chi_b, p3.chi_n = chi_params
small_open, firm_r, hh_r = small_open_params
p3.world_int_rate = np.ones(p3.T + p3.S) * firm_r
p3.num_workers = 1
BQ3 = np.ones((p3.J)) * 0.00019646295986015257
guesses3 = [guesses_in3[0]] + list(BQ3) + [guesses_in3[1]]
args3 = (bssmat, nssmat, TR_ss, factor_ss, p3, client)
expected3 = np.array([0.016757345515050044, 0.014355093775301265,
0.019450554545951612, 0.020767620470159415,
0.01236383484523906, 0.014583252738190352,
0.01716246187036924, 0.0031063825724743474,
0.018664915456857223])
input_tuple = utils.safe_read_pickle(
os.path.join(CUR_PATH, 'test_io_data', 'SS_fsolve_inputs.pkl'))
guesses_in, params = input_tuple
params = params + (None, 1)
(bssmat, nssmat, chi_params, ss_params, income_tax_params,
iterative_params, small_open_params, client, num_workers) = params
p4 = Specifications()
new_param_values = {
'zeta_D': [0.4],
'zeta_K': [0.2]
}
p4.update_specifications(new_param_values)
(p4.J, p4.S, p4.T, p4.BW, p4.beta, p4.sigma, p4.alpha, p4.gamma, p4.epsilon,
Z, p4.delta, p4.ltilde, p4.nu, p4.g_y, p4.g_n_ss, tau_payroll,
tau_bq, p4.rho, p4.omega_SS, p4.budget_balance, alpha_T,
p4.debt_ratio_ss, tau_b, delta_tau, lambdas, imm_rates, p4.e,
retire, p4.mean_income_data, h_wealth, p_wealth, m_wealth,
p4.b_ellipse, p4.upsilon) = ss_params
p4.eta = (p4.omega_SS.reshape(p4.S, 1) *
p4.lambdas.reshape(1, p4.J)).reshape(1, p4.S, p4.J)
p4.Z = np.ones(p4.T + p4.S) * Z
p4.tau_bq = np.ones(p4.T + p4.S) * 0.0
p4.tau_payroll = np.ones(p4.T + p4.S) * tau_payroll
p4.alpha_T = np.ones(p4.T + p4.S) * alpha_T
p4.tau_b = np.ones(p4.T + p4.S) * tau_b
p4.delta_tau = np.ones(p4.T + p4.S) * delta_tau
p4.h_wealth = np.ones(p4.T + p4.S) * h_wealth
p4.p_wealth = np.ones(p4.T + p4.S) * p_wealth
p4.m_wealth = np.ones(p4.T + p4.S) * m_wealth
p4.retire = (np.ones(p4.T + p4.S) * retire).astype(int)
p4.lambdas = lambdas.reshape(p4.J, 1)
p4.imm_rates = imm_rates.reshape(1, p4.S)
p4.tax_func_type = 'DEP'
p4.baseline = True
p4.analytical_mtrs, etr_params, mtrx_params, mtry_params =\
income_tax_params
p4.etr_params = np.transpose(etr_params.reshape(
p4.S, 1, etr_params.shape[-1]), (1, 0, 2))
p4.mtrx_params = np.transpose(mtrx_params.reshape(
p4.S, 1, mtrx_params.shape[-1]), (1, 0, 2))
p4.mtry_params = np.transpose(mtry_params.reshape(
p4.S, 1, mtry_params.shape[-1]), (1, 0, 2))
p4.maxiter, p4.mindist_SS = iterative_params
p4.chi_b, p4.chi_n = chi_params
p4.num_workers = 1
BQ4 = np.ones((p4.J)) * 0.00019646295986015257
guesses4 = [guesses_in[0]] + list(BQ4) + [guesses_in[1]] + [guesses_in[2]]
args4 = (bssmat, nssmat, None, None, p4, client)
expected4 = np.array([0.028883118596741857, 0.014511613659907734,
0.019044550115699707, 0.02065761642516883,
0.012627889727738099, 0.014940813299332474,
0.016999514675696315, 0.0030878921261591253,
-0.06125508233576064, 0.06697984483743183])
input_tuple = utils.safe_read_pickle(
os.path.join(CUR_PATH, 'test_io_data', 'SS_fsolve_inputs.pkl'))
guesses_in, params = input_tuple
params = params + (None, 1)
(bssmat, nssmat, chi_params, ss_params, income_tax_params,
iterative_params, small_open_params, client, num_workers) = params
p5 = Specifications()
(p5.J, p5.S, p5.T, p5.BW, p5.beta, p5.sigma, p5.alpha, p5.gamma, p5.epsilon,
Z, p5.delta, p5.ltilde, p5.nu, p5.g_y, p5.g_n_ss, tau_payroll,
tau_bq, p5.rho, p5.omega_SS, p5.budget_balance, alpha_T,
p5.debt_ratio_ss, tau_b, delta_tau, lambdas, imm_rates, p5.e,
retire, p5.mean_income_data, h_wealth, p_wealth, m_wealth,
p5.b_ellipse, p5.upsilon) = ss_params
p5.eta = (p5.omega_SS.reshape(p5.S, 1) *
p5.lambdas.reshape(1, p5.J)).reshape(1, p5.S, p5.J)
p5.zeta_K = np.ones(p5.T + p5.S) * 1.0
p5.world_int_rate = np.ones(p5.T + p5.S) * 0.05
p5.Z = np.ones(p5.T + p5.S) * Z
p5.tau_bq = np.ones(p5.T + p5.S) * 0.0
p5.tau_payroll =
|
np.ones(p5.T + p5.S)
|
numpy.ones
|
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import os, pickle, tqdm
import json
import numpy as np
import pandas as pd
import multiprocessing as mp
from .registry import METRIC
from .base import BaseMetric
from .ActivityNet import ANETproposal
from paddlevideo.utils import get_logger
logger = get_logger("paddlevideo")
def iou_with_anchors(anchors_min, anchors_max, box_min, box_max):
"""Compute jaccard score between a box and the anchors.
"""
len_anchors = anchors_max - anchors_min
int_xmin = np.maximum(anchors_min, box_min)
int_xmax = np.minimum(anchors_max, box_max)
inter_len = np.maximum(int_xmax - int_xmin, 0.)
union_len = len_anchors - inter_len + box_max - box_min
jaccard = np.divide(inter_len, union_len)
return jaccard
def boundary_choose(score_list):
"""Choose start and end boundary from score.
"""
max_score = max(score_list)
mask_high = (score_list > max_score * 0.5)
score_list = list(score_list)
score_middle = np.array([0.0] + score_list + [0.0])
score_front = np.array([0.0, 0.0] + score_list)
score_back = np.array(score_list + [0.0, 0.0])
mask_peak = ((score_middle > score_front) & (score_middle > score_back))
mask_peak = mask_peak[1:-1]
mask = (mask_high | mask_peak).astype('float32')
return mask
def soft_nms(df, alpha, t1, t2):
'''
df: proposals generated by network;
alpha: alpha value of Gaussian decaying function;
t1, t2: threshold for soft nms.
'''
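    # Greedy soft-NMS: repeatedly pick the highest-scoring proposal and decay the
    # scores of strongly overlapping proposals with a Gaussian penalty on their IoU.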
df = df.sort_values(by="score", ascending=False)
tstart = list(df.xmin.values[:])
tend = list(df.xmax.values[:])
tscore = list(df.score.values[:])
rstart = []
rend = []
rscore = []
while len(tscore) > 1 and len(rscore) < 101:
max_index = tscore.index(max(tscore))
tmp_iou_list = iou_with_anchors(np.array(tstart), np.array(tend),
tstart[max_index], tend[max_index])
for idx in range(0, len(tscore)):
if idx != max_index:
tmp_iou = tmp_iou_list[idx]
tmp_width = tend[max_index] - tstart[max_index]
if tmp_iou > t1 + (t2 - t1) * tmp_width:
tscore[idx] = tscore[idx] * np.exp(
-np.square(tmp_iou) / alpha)
rstart.append(tstart[max_index])
rend.append(tend[max_index])
rscore.append(tscore[max_index])
tstart.pop(max_index)
tend.pop(max_index)
tscore.pop(max_index)
newDf = pd.DataFrame()
newDf['score'] = rscore
newDf['xmin'] = rstart
newDf['xmax'] = rend
return newDf
def soft_nms_for_merging(proposal_dict, alpha=0.4, t1=0.55, t2=0.9, dscale=4):
'''
proposal_dict: proposals generated by network;
alpha: alpha value of Gaussian decaying function;
t1, t2: threshold for soft nms.
'''
#df = df.sort_values(by="score", ascending=False)
sorted_proposal = sorted(proposal_dict, key=lambda x:x["score"], reverse=True)
tstart = []
tend = []
tscore = []
for pp in sorted_proposal:
tstart.append(pp["segment"][0])
tend.append(pp["segment"][1])
tscore.append(pp["score"])
rstart = []
rend = []
rscore = []
while len(tscore) > 1 and len(rscore) < 101:
max_index = tscore.index(max(tscore))
tmp_iou_list = iou_with_anchors(np.array(tstart), np.array(tend),
tstart[max_index], tend[max_index])
for idx in range(0, len(tscore)):
if idx != max_index:
tmp_iou = tmp_iou_list[idx]
tmp_width = (tend[max_index] - tstart[max_index])/dscale
if tmp_iou > t1 + (t2 - t1) * tmp_width:
tscore[idx] = tscore[idx] * np.exp(
-np.square(tmp_iou) / alpha)
rstart.append(tstart[max_index])
rend.append(tend[max_index])
rscore.append(tscore[max_index])
tstart.pop(max_index)
tend.pop(max_index)
tscore.pop(max_index)
new_proposal = []
for i in range(len(rscore)):
pp = {}
pp['score'] = round(rscore[i], 2)
pp["segment"] = [round(rstart[i], 2), round(rend[i], 2)]
new_proposal.append(pp)
return new_proposal
@METRIC.register
class BMNMetric(BaseMetric):
"""
Metrics for BMN. Two Stages in this metric:
(1) Get test results using trained model, results will be saved in BMNMetric.result_path;
(2) Calculate metrics using results file from stage (1).
"""
def __init__(self,
data_size,
batch_size,
tscale,
dscale,
file_path,
ground_truth_filename,
subset,
output_path,
result_path,
get_metrics=True,
log_interval=100,
to_merge=False):
"""
Init for BMN metrics.
Params:
get_metrics: whether to calculate AR@N and AUC metrics or not, default True.
"""
super().__init__(data_size, batch_size, log_interval)
assert self.batch_size == 1, " Now we just support batch_size==1 test"
assert self.world_size == 1, " Now we just support single-card test"
self.tscale = tscale
self.dscale = dscale
self.file_path = file_path
self.ground_truth_filename = ground_truth_filename
self.subset = subset
self.output_path = output_path
self.result_path = result_path
self.get_metrics = get_metrics
self.to_merge = to_merge
if not os.path.isdir(self.output_path):
os.makedirs(self.output_path)
if not os.path.isdir(self.result_path):
os.makedirs(self.result_path)
self.video_dict, self.video_list = self.get_dataset_dict(
self.file_path, self.subset)
def get_dataset_dict(self, file_path, subset):
annos = json.load(open(file_path))
video_dict = {}
for video_name in annos.keys():
video_subset = annos[video_name]["subset"]
if subset in video_subset:
video_dict[video_name] = annos[video_name]
video_list = list(video_dict.keys())
video_list.sort()
return video_dict, video_list
def update(self, batch_id, data, outputs):
"""update metrics during each iter
"""
fid = data[4].numpy()
pred_bm, pred_start, pred_end = outputs
pred_bm = pred_bm.numpy()
pred_start = pred_start[0].numpy()
pred_end = pred_end[0].numpy()
snippet_xmins = [1.0 / self.tscale * i for i in range(self.tscale)]
snippet_xmaxs = [
1.0 / self.tscale * i for i in range(1, self.tscale + 1)
]
cols = ["xmin", "xmax", "score"]
video_name = self.video_list[fid[0]]
pred_bm = pred_bm[0, 0, :, :] * pred_bm[0, 1, :, :]
start_mask = boundary_choose(pred_start)
start_mask[0] = 1.
end_mask = boundary_choose(pred_end)
end_mask[-1] = 1.
score_vector_list = []
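        # Enumerate every (duration, start) cell of the boundary-matching map and keep
        # proposals whose start and end snippets are both boundary candidates.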
for idx in range(self.dscale):
for jdx in range(self.tscale):
start_index = jdx
end_index = start_index + idx
if end_index < self.tscale and start_mask[
start_index] == 1 and end_mask[end_index] == 1:
xmin = snippet_xmins[start_index]
xmax = snippet_xmaxs[end_index]
xmin_score = pred_start[start_index]
xmax_score = pred_end[end_index]
bm_score = pred_bm[idx, jdx]
conf_score = xmin_score * xmax_score * bm_score
score_vector_list.append([xmin, xmax, conf_score])
score_vector_list = np.stack(score_vector_list)
video_df = pd.DataFrame(score_vector_list, columns=cols)
video_df.to_csv(os.path.join(self.output_path, "%s.csv" % video_name),
index=False)
if batch_id % self.log_interval == 0:
logger.info("Processing................ batch {}".format(batch_id))
def accumulate(self):
"""accumulate metrics when finished all iters.
"""
# check clip index of each video
#Stage1
self.bmn_post_processing(self.video_dict, self.subset, self.output_path,
self.result_path)
if self.get_metrics:
result_path = os.path.join(self.result_path, "bmn_results_validation.json")
if self.to_merge:
merged_result_path = os.path.join(self.result_path, "bmn_merged_results_validation.json")
self.merging_output_per_video(self.tscale, self.ground_truth_filename,
result_path,
merged_result_path)
result_path = merged_result_path
logger.info("[TEST] calculate metrics...")
#Stage2
uniform_average_nr_proposals_valid, uniform_average_recall_valid, uniform_recall_valid = self.cal_metrics(
self.ground_truth_filename,
result_path,
max_avg_nr_proposals=100,
tiou_thresholds=
|
np.linspace(0.5, 0.9, 9)
|
numpy.linspace
|
"""Calculate integrals following an isentropic circuit
Calculate length and circulation around the isentropic circuit.
Calculate area/volume/mass/pv substance of the enclosed area of the isentropic
circuit.
"""
import datetime
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.path import Path
import iris
from iris.analysis import SUM, cartography
from irise import convert, plot
from irise.constants import Omega
from pylagranto import trajectory
a = 6378100
base_time = datetime.datetime(1970, 1, 1)
base_time_units = "hours since 1970-01-01 00:00:00"
def calc_circulation(case, dtheta=1, region="outflow"):
if region == "outflow":
tr = trajectory.load(case.data_path / "isentropic_trajectories_backward.pkl") + \
trajectory.load(case.data_path / "isentropic_trajectories_forward.pkl")
elif region == "inflow":
tr = trajectory.load(case.data_path / "isentropic_trajectories_from_inflow_forward.pkl")
ntra, nt, ndim = tr.data.shape
theta = np.unique(tr["air_potential_temperature"])
theta = theta[np.where(theta != -1000)]
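    # -1000 flags missing data for trajectories that have left the domain, so drop that value.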
time = [(t - base_time).total_seconds() // 3600 for t in tr.times]
time = iris.coords.DimCoord(
points=time,
standard_name="time",
units=base_time_units,
)
theta_levels = iris.coords.DimCoord(
points=theta,
standard_name="air_potential_temperature",
units="K",
)
circ = iris.cube.Cube(
data=np.empty([len(theta), nt]),
long_name="circulation",
units="m2 s-1",
dim_coords_and_dims=[(theta_levels, 0), (time, 1)]
)
circ_r = circ.copy()
circ_r.rename("relative_circulation")
circ_p = circ.copy()
circ_p.rename("planetary_circulation")
results = iris.cube.CubeList([circ, circ_r, circ_p])
# Repeat for all theta levels
for j, theta_level in enumerate(theta):
tr_theta = tr.select("air_potential_temperature", "==", theta_level,
time=[case.outflow_lead_time])
results_intermediate = iris.cube.CubeList()
for n in range(nt):
lon = tr_theta.x[:-2, n]
lat = tr_theta.y[:-2, n]
alt = tr_theta['altitude'][:-2, n]
u = tr_theta['x_wind'][:-2, n]
v = tr_theta['y_wind'][:-2, n]
w = tr_theta['upward_air_velocity'][:-2, n]
# Integrals are invalid once trajectories leave the domain but we don't
# want to stop the script so put NaNs in the output instead
if (alt == -1000).any():
circ_r.data[j, n], circ_p.data[j, n], circ.data[j, n] = \
np.nan, np.nan, np.nan
else:
circ_r.data[j, n], circ_p.data[j, n], circ.data[j, n] = \
circuit_integrals(u, v, w, lon, lat, alt)
# Calculate enclosed area integrals
try:
cubes = iris.load(
case.filename_theta(tr_theta.times[n], tracer_files=["c"]),
iris.Constraint(time=tr_theta.times[n])
)
print("{}K: {}".format(theta_level, tr_theta.times[n]))
except OSError:
print(str(tr_theta.times[n]) + " not available")
break
# Remove duplicate altitudes
z = cubes.extract("altitude")
cubes.remove(z[0])
dlambda = np.deg2rad(np.diff(z[1].coord("longitude").points).mean())
dphi = np.deg2rad(np.diff(z[1].coord("latitude").points).mean())
integrals = mass_integrals(cubes, lon, lat, theta_level, dtheta, dlambda, dphi)
for icube in integrals:
icube.attributes = {}
# Set integrals to NaN if trajectories have left the domain
if (alt == -1000).any():
icube.data = np.nan
results_intermediate.append(icube)
for cube in results_intermediate.merge():
results.append(cube)
iris.save(results.merge(), str(case.data_path / "circulation_{}.nc".format(region)))
return
def circuit_integrals(u, v, w, lon, lat, z):
"""
Args:
        u (numpy.ndarray): x_wind at each point on the circuit
        v (numpy.ndarray): y_wind at each point on the circuit
        w (numpy.ndarray): upward_air_velocity at each point on the circuit
        lon (numpy.ndarray): Longitude of each point (degrees)
        lat (numpy.ndarray): Latitude of each point (degrees)
        z (numpy.ndarray): Altitude of each point (m)
    Returns:
        tuple: Relative, planetary and absolute circulation around the circuit
    """
# Convert to radians
lon = np.deg2rad(lon)
lat = np.deg2rad(lat)
# u_planetary = \Omega r cos(lat)
u_planetary = Omega.data * (a + z) * np.cos(lat)
# Integrate dl around the circuit of trajectories
dx, dy, dz = [], [], []
for n in range(len(u)):
# Allow a complete loop (back to zero at the end)
np1 = (n + 1) % len(u)
# dx = r cos(lat) dlon
dx.append((a + z[n]) * np.cos(lat[n]) * 0.5 * (lon[np1] - lon[n - 1]))
# dy = r dlat
dy.append((a + z[n]) * 0.5 * (lat[np1] - lat[n - 1]))
# dz is independent of grid rotation
dz.append(0.5 * (z[np1] - z[n - 1]))
dx =
|
np.array(dx)
|
numpy.array
|
import os
import tensorflow as tf
import random
import numpy as np
from itertools import cycle, islice
import matplotlib.pyplot as plt
from config.config import Config
from network.dataset.image_preprocessing import resize_image, norm_image, norm_mask
from network.dataset.image_loading import load_image, load_mask_from_img, load_weights
from network.dataset.image_transformations import contrast_stretch_image, histo_equalize_image
from network.dataset.tf_image_augementation import data_augmentation_wgt, data_augmentation
def reshape_for_weighting(cfg, tensor_image, tensor_mask, tensor_weights):
# Function to include weights for loss weighting in input
return (tensor_image, tensor_weights), tensor_mask
#####################################################
# Plotting a batch (image, target or prediction) #
#####################################################
def plot_batch(batch):
"""
Plots the batch.
Consists of 2 subplots if the batch does not contain weights.
Consists of 3 subplots if the batch contains weights.
    The subplots are ordered column-wise:
    First column: Images, RGB colormap
    Second column: Masks, standard colormap (0 for background, 1 for foreground)
    Third column: Weights, standard colormap (e.g. close to 0 for background, over 10 for pixels in between cells)
    Parameters:
    -----------
    batch: list of arrays:
        [images, masks or predictions, weights]
each array has the shape (batch size, img height, img width, channels)
"""
img_batch = batch[0]
msk_batch = batch[1]
#print(img_batch.shape)
#print(msk_batch.shape)
img_count = img_batch.shape[0]
# Create output arrays
# 1) Output image
# third dimension with 3 channels since its RGB
output_img = np.zeros(
(img_batch.shape[1] * img_count, img_batch.shape[2], img_batch.shape[3]))
row = 0
for image_id in range(img_count):
image = img_batch[image_id, :, :, :]
#print(image.shape)
output_img[row * img_batch.shape[1]:(row + 1) * img_batch.shape[1], :, :] = image
row += 1
# 2) Masks or predictions
output_msk = np.zeros((msk_batch.shape[1] * img_count, msk_batch.shape[2] * msk_batch.shape[3]))
#print("output_msk", output_msk.shape)
row = 0
for image_id in range(img_count):
for j in range(msk_batch.shape[3]):
mask_ch = msk_batch[image_id, :, :, j]
mask_ch = mask_ch
mask_ch = mask_ch.astype(int)
#mask_ch = np.stack((mask_ch, mask_ch, mask_ch), axis=-1)
#print(mask_ch.shape)
#print(msk_batch.shape)
#print(row * msk_batch.shape[1])
#print((row + 1) * msk_batch.shape[1])
#print(j * msk_batch.shape[2])
#print((j + 1) * msk_batch.shape[2])
output_msk[row * msk_batch.shape[1]:(row + 1) * msk_batch.shape[1],
j * msk_batch.shape[2]:(j + 1) * msk_batch.shape[2]] = mask_ch
row += 1
if len(batch) == 3:
# Generator yields img, masks and weights
#print('Info: Generator yields img, masks, weights.')
wgt_batch = batch[2]
#print('Weights shape:', wgt_batch.shape)
# 3) Weights
output_wgt = np.zeros((wgt_batch.shape[1] * img_count, wgt_batch.shape[2] * wgt_batch.shape[3]))
row = 0
for image_id in range(img_count):
for k in range(wgt_batch.shape[3]):
wgt_ch = wgt_batch[image_id, :, :, k]
wgt_ch = wgt_ch # * (255 / wgt_ch.max())
# wgt_ch = wgt_ch.astype(int)
#wgt_ch = np.stack((wgt_ch, wgt_ch, wgt_ch), axis=-1)
output_wgt[row * wgt_batch.shape[1]:(row + 1) * wgt_batch.shape[1],
k * wgt_batch.shape[2]:(k + 1) * wgt_batch.shape[2]] = wgt_ch
row += 1
# Plot
fig, (ax1, ax2, ax3) = plt.subplots(1, 3, gridspec_kw={'width_ratios': [1, msk_batch.shape[3],
wgt_batch.shape[3]]})
# Plot weights
pos = ax3.imshow(output_wgt)
ax3.set_axis_off()
ax3.set_title('Weights', fontsize=15)
else:
# Generator yields img and masks.
print('Info: Generator yields img, masks.')
fig, (ax1, ax2) = plt.subplots(1, 2, gridspec_kw={'width_ratios': [1, msk_batch.shape[3]]})
# Plot images
pos = ax1.imshow(output_img)
ax1.set_axis_off()
ax1.set_title('Images', fontsize=15)
# Plot masks / predictions
pos = ax2.imshow(output_msk)
ax2.set_axis_off()
ax2.set_title('Masks / Predictions', fontsize=15)
plt.show()
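# Example usage (hypothetical shapes): a batch of two 256x256 RGB images with
# single-channel masks and weights is plotted as three columns:
#   plot_batch([imgs, msks, wgts])   # imgs: (2, 256, 256, 3), msks/wgts: (2, 256, 256, 1)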
#####################################################
# Functions to load one random batch for inference #
#####################################################
def find_msk_paths(cfg, image_path):
filename = os.path.split(image_path)[1]
foldername = os.path.split(image_path)[0]
foldernam = os.path.split(foldername)[0]
maskfoldername = os.path.join(foldernam, 'masks')
msk_paths = []
if cfg.SEGMENTATION_TASK == 'glomerulus':
msk_paths.append(os.path.join(maskfoldername, filename[:-4] + cfg.MASK_SUFFIXES[0]))
elif cfg.SEGMENTATION_TASK == 'podocytes':
msk_paths.append(os.path.join(maskfoldername, filename[:-4] + cfg.MASK_SUFFIXES[1]))
elif cfg.SEGMENTATION_TASK == 'all':
for suffix in cfg.MASK_SUFFIXES:
msk_paths.append(os.path.join(maskfoldername, filename[:-4] + suffix))
else:
        raise ValueError('cfg.SEGMENTATION_TASK does not match any of the implemented values: ',
cfg.SEGMENTATION_TASK)
return msk_paths
def find_weight_paths(cfg, image_path):
filename = os.path.split(image_path)[1]
foldername = os.path.split(image_path)[0]
foldernam = os.path.split(foldername)[0]
weightsfoldername = os.path.join(foldernam, 'weights')
wgt_paths = []
if cfg.SEGMENTATION_TASK == 'glomerulus':
wgt_paths.append(os.path.join(weightsfoldername, filename[:-4] + cfg.WEIGHTS_SUFFIXES[0]))
elif cfg.SEGMENTATION_TASK == 'podocytes':
wgt_paths.append(os.path.join(weightsfoldername, filename[:-4] + cfg.WEIGHTS_SUFFIXES[1]))
elif cfg.SEGMENTATION_TASK == 'all':
for suffix in cfg.WEIGHTS_SUFFIXES:
wgt_paths.append(os.path.join(weightsfoldername, filename[:-4] + suffix))
else:
        raise ValueError('cfg.SEGMENTATION_TASK does not match any of the implemented values: ',
cfg.SEGMENTATION_TASK)
return wgt_paths
def batch_data(array1, array2, multichannel):
"""
Gets arrays as input and concatenates them at axis 0.
Parameters:
-----------
array1: array (2d or 3d)
array2: array (2d or 3d)
multichannel: bool
True: Batch will have 4 dimensions
False: Batch will have 3 dimensions
Return:
-------
batched array of shape (batch_size, array_shape)
"""
# Check if input arrays are valid for this function
# 2 and 3 dimensions are allowed for multichannel=False
# 3 and 4 dimensions are allowed for multichannel=True
if multichannel:
if len(array1.shape) < 3 or len(array2.shape) < 3 or len(array1.shape) > 4 or len(array2.shape) > 4:
raise ValueError('The multilayer input array shapes do not match this function',
array1.shape, array2.shape)
else:
if len(array1.shape) < 2 or len(array2.shape) < 2 or len(array1.shape) > 3 or len(array2.shape) > 3:
raise ValueError('The input array shapes do not match this function',
array1.shape, array2.shape)
# Check if one array has another shape than the other
# Make it then the same
if len(array1.shape) < len(array2.shape):
array1 = np.expand_dims(array1, axis=0)
elif len(array1.shape) > len(array2.shape):
array2 = np.expand_dims(array2, axis=0)
# For images with multiple channels
if multichannel:
# case 1: (x,y,c), (x,y,c)
if len(array1.shape) == 3:
array1 = np.expand_dims(array1, axis=0)
array2 = np.expand_dims(array2, axis=0)
batched_array = np.concatenate((array1, array2), axis=0)
# case 2: (b,x,y,c), (1,x,y,c)
else:
batched_array =
|
np.concatenate((array1, array2), axis=0)
|
numpy.concatenate
|
import tensorflow as tf
import tensorflow_probability as tfp
import numpy as np
from collections import deque
import random
from tqdm import tqdm
import gym
from gym import spaces
from gym.utils import seeding
import math
import cv2
#import pantilthat
import time
import signal
import time
import readchar
MAXDIST = np.sqrt(2)*255
class Actor:
def __init__(self, state_dim, action_dim, action_bound, std_bound):
self.state_dim = state_dim
self.action_dim = action_dim
self.action_bound = action_bound
self.std_bound = std_bound
self.model = self.create_model()
self.opt = tf.keras.optimizers.Adam(learning_rate=0.002)
def create_model(self,layers=1):
def rescale(a):
return a*tf.constant(self.action_bound)
state_input = tf.keras.layers.Input((self.state_dim,))
dense = tf.keras.layers.Dense(5, activation='relu')(state_input)
for l in range(1,layers-1):
dense = tf.keras.layers.Dense(10, activation='relu')(dense)
out_mu_1 = tf.keras.layers.Dense(self.action_dim, activation='tanh')(dense)
mu_output_1 = tf.keras.layers.Lambda(lambda x: rescale(x))(out_mu_1)
std_output_1 = tf.keras.layers.Dense(self.action_dim, activation='softplus')(dense)
out_mu_2 = tf.keras.layers.Dense(self.action_dim, activation='tanh')(dense)
mu_output_2 = tf.keras.layers.Lambda(lambda x: rescale(x))(out_mu_2)
std_output_2 = tf.keras.layers.Dense(self.action_dim, activation='softplus')(dense)
return tf.keras.models.Model(state_input, [mu_output_1, std_output_1, mu_output_2, std_output_2])
def get_action(self, state):
state = np.reshape(state, [1, self.state_dim])
mu1, std1, mu2, std2 = self.model.predict(state)
mu1, std1 = mu1[0], std1[0]
mu2, std2 = mu2[0], std2[0]
action1 = np.random.normal(mu1, std1, size=self.action_dim)
action2 = np.random.normal(mu2, std2, size=self.action_dim)
return [action1, action2]
def compute_loss(self, mu, std, actions, advantages):
dist = tfp.distributions.Normal(loc=mu, scale=std)
loss_policy = (-dist.log_prob(value=actions) * advantages + 0.002*dist.entropy())
return tf.reduce_sum(loss_policy)
def train(self, states, actions, advantages):
with tf.GradientTape() as tape:
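            # NOTE: create_model() returns four tensors (mu/std for two action heads),
            # so this two-value unpack assumes a single-head model.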
mu, std = self.model(states, training=True)
loss = self.compute_loss(mu, std, actions, advantages)
grads = tape.gradient(loss, self.model.trainable_variables)
self.opt.apply_gradients(zip(grads, self.model.trainable_variables))
return loss
class Critic:
def __init__(self, state_dim):
self.state_dim = state_dim
self.model = self.create_model()
self.opt = tf.keras.optimizers.Adam(learning_rate=0.002)
def create_model(self):
state_input = tf.keras.layers.Input((self.state_dim,))
dense_1 = tf.keras.layers.Dense(5, activation='relu')(state_input)
dense_2 = tf.keras.layers.Dense(10, activation='relu')(dense_1)
v = tf.keras.layers.Dense(1, activation='linear')(dense_2)
return tf.keras.models.Model(state_input, v)
def compute_loss(self, v_pred, td_targets):
mse = tf.keras.losses.MeanSquaredError()
return mse(td_targets, v_pred)
@tf.function
def train(self, states, td_targets):
with tf.GradientTape() as tape:
v_pred = self.model(states, training=True)
assert v_pred.shape == td_targets.shape
loss = self.compute_loss(v_pred, tf.stop_gradient(td_targets))
grads = tape.gradient(loss, self.model.trainable_variables)
self.opt.apply_gradients(zip(grads, self.model.trainable_variables))
return loss
class A2CAgent:
def __init__(self, observation_space_shape, action_space_shape, action_space_high):
self.state_dim = observation_space_shape
self.action_dim = action_space_shape
self.action_bound = action_space_high
self.std_bound = [1e-3, 1.0]
self.gamma = 0.99
self.actor = Actor(self.state_dim, self.action_dim,
self.action_bound, self.std_bound)
self.critic = Critic(self.state_dim)
def td_target(self, reward, next_state, done):
if done:
return reward
v_value = self.critic.model.predict(
np.reshape(next_state, [1, self.state_dim]))
return np.reshape(reward + self.gamma * v_value[0], [1, 1])
def advantage(self, td_targets, baselines):
return td_targets - baselines
def list_to_batch(self, list):
batch = list[0]
for elem in list[1:]:
batch = np.append(batch, elem, axis=0)
return batch
class Tracker(gym.Env):
def __init__(self,Agent):
self.min_action = -90.0
self.max_action = 90.0
self.min_position = 0.0
self.max_position = 255.0
self.goal_position = (
0.0
)
self.low_state = np.array(
[self.min_position, self.min_position, self.min_position, self.min_position], dtype=np.float32
)
self.high_state = np.array(
[self.max_position, self.max_position, self.max_position ,self.max_position], dtype=np.float32
)
self.low_action = np.array(
[self.min_action], dtype=np.float32
)
self.high_action = np.array(
[self.max_action], dtype=np.float32
)
self.viewer = None
self.action_space = spaces.Box(
low=self.min_action, high=self.max_action, shape=(1,), dtype=np.float32
)
self.observation_space = spaces.Box(
low=self.low_state, high=self.high_state, shape=(4,), dtype=np.float32
)
self.seed()
self.reset()
self.servo = self.Servo(Agent,
observation_space_shape=self.observation_space.shape[0],
action_space_shape=self.action_space.shape[0],
action_space_high=self.action_space.high[0])
def seed(self, seed=None):
self.np_random, seed = seeding.np_random(seed)
return [seed]
def initialize_state(self):
self.state=
|
np.array([0,0,0,0])
|
numpy.array
|
##### MODEL AND DATA LOADING
import torch
import torch.utils.data
import torchvision.transforms as transforms
import torchvision.datasets as datasets
from torch.autograd import Variable
import numpy as np
import matplotlib.pyplot as plt
import cv2
from PIL import Image
import re
import numpy as np
import os
import copy
from skimage.transform import resize
from helpers import makedir, find_high_activation_crop
import model
import push
import train_and_test as tnt
import save
from log import create_logger
from preprocess import mean, std, preprocess_input_function, undo_preprocess_input_function
import argparse
import pandas as pd
import ast
import png
k=3
# specify the test image to be analyzed
parser = argparse.ArgumentParser()
parser.add_argument('-test_img_name', nargs=1, type=str, default='0')
parser.add_argument('-test_img_dir', nargs=1, type=str, default='0')
parser.add_argument('-test_img_label', nargs=1, type=int, default='-1')
parser.add_argument('-test_model_dir', nargs=1, type=str, default='0')
parser.add_argument('-test_model_name', nargs=1, type=str, default='0')
args = parser.parse_args()
test_image_dir = args.test_img_dir[0]
test_image_name = args.test_img_name[0] #'DP_AJOD_196544.npy' # 'DP_AAPR_R_MLO_3#0.npy' #
test_image_label = args.test_img_label[0]
test_image_path = os.path.join(test_image_dir, test_image_name)
# load the model
check_test_accu = False
load_model_dir = args.test_model_dir[0] #'/usr/xtmp/mammo/alina_saved_models/vgg16/finer_1118_top2percent_randseed=1234/'
load_model_name = args.test_model_name[0] # '100_9push0.9258.pth'
#if load_model_dir[-1] == '/':
# model_base_architecture = load_model_dir.split('/')[-3]
# experiment_run = load_model_dir.split('/')[-2]
#else:
# model_base_architecture = load_model_dir.split('/')[-2]
# experiment_run = load_model_dir.split('/')[-1]
model_base_architecture = load_model_dir.split('/')[-3]
experiment_run = '/'.join(load_model_dir.split('/')[-2:])
save_analysis_path = os.path.join(load_model_dir, test_image_name)
makedir(save_analysis_path)
print(save_analysis_path)
log, logclose = create_logger(log_filename=os.path.join(save_analysis_path, 'local_analysis.log')) #logger fails on Colab
# def log(string_here):
# print(string_here)
# def logclose():
# pass
load_model_path = os.path.join(load_model_dir, load_model_name)
epoch_number_str = re.search(r'\d+', load_model_name).group(0)
start_epoch_number = int(epoch_number_str)
log('load model from ' + load_model_path)
log('model base architecture: ' + model_base_architecture)
log('experiment run: ' + experiment_run)
ppnet = torch.load(load_model_path)
ppnet = ppnet.cuda()
ppnet_multi = torch.nn.DataParallel(ppnet)
img_size = ppnet_multi.module.img_size
prototype_shape = ppnet.prototype_shape
max_dist = prototype_shape[1] * prototype_shape[2] * prototype_shape[3]
class_specific = True
normalize = transforms.Normalize(mean=mean,
std=std)
# load the test data and check test accuracy
from settings import test_dir
if check_test_accu:
test_batch_size = 100
test_dataset = datasets.ImageFolder(
test_dir,
transforms.Compose([
transforms.Resize(size=(img_size, img_size)),
transforms.ToTensor(),
normalize,
]))
test_loader = torch.utils.data.DataLoader(
test_dataset, batch_size=test_batch_size, shuffle=True,
num_workers=4, pin_memory=False)
log('test set size: {0}'.format(len(test_loader.dataset)))
accu = tnt.test(model=ppnet_multi, dataloader=test_loader,
class_specific=class_specific, log=print)
##### SANITY CHECK
# confirm prototype class identity
load_img_dir = os.path.join(load_model_dir, 'img')
prototype_info = np.load(os.path.join(load_img_dir, 'epoch-'+epoch_number_str, 'bb'+epoch_number_str+'.npy'))
prototype_img_identity = prototype_info[:, -1]
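# the last column of the saved prototype info ('bb' file) holds each prototype's class identity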
num_classes = len(set(prototype_img_identity))
log('Prototypes are chosen from ' + str(len(set(prototype_img_identity))) + ' number of classes.')
log('Their class identities are: ' + str(prototype_img_identity))
# confirm prototype connects most strongly to its own class
prototype_max_connection = torch.argmax(ppnet.last_layer.weight, dim=0)
prototype_max_connection = prototype_max_connection.cpu().numpy()
if np.sum(prototype_max_connection == prototype_img_identity) == ppnet.num_prototypes:
log('All prototypes connect most strongly to their respective classes.')
else:
log('WARNING: Not all prototypes connect most strongly to their respective classes.')
##### HELPER FUNCTIONS FOR PLOTTING
def save_preprocessed_img(fname, preprocessed_imgs, index=0):
img_copy = copy.deepcopy(preprocessed_imgs[index:index+1])
undo_preprocessed_img = undo_preprocess_input_function(img_copy)
print('image index {0} in batch'.format(index))
undo_preprocessed_img = undo_preprocessed_img[0]
undo_preprocessed_img = undo_preprocessed_img.detach().cpu().numpy()
undo_preprocessed_img = np.transpose(undo_preprocessed_img, [1,2,0])
plt.imsave(fname, undo_preprocessed_img)
return undo_preprocessed_img
def save_prototype(fname, epoch, index):
p_img = plt.imread(os.path.join(load_img_dir, 'epoch-'+str(epoch), 'prototype-img'+str(index)+'.png'))
#plt.axis('off')
plt.imsave(fname, p_img)
def save_prototype_self_activation(fname, epoch, index):
p_img = plt.imread(os.path.join(load_img_dir, 'epoch-'+str(epoch),
'prototype-img-original_with_self_act'+str(index)+'.png'))
#plt.axis('off')
plt.imsave(fname, p_img)
def save_prototype_original_img_with_bbox(fname, epoch, index,
bbox_height_start, bbox_height_end,
bbox_width_start, bbox_width_end, color=(0, 255, 255)):
p_img_bgr = cv2.imread(os.path.join(load_img_dir, 'epoch-'+str(epoch), 'prototype-img-original'+str(index)+'.png'))
cv2.rectangle(p_img_bgr, (bbox_width_start, bbox_height_start), (bbox_width_end-1, bbox_height_end-1),
color, thickness=2)
p_img_rgb = p_img_bgr[...,::-1]
p_img_rgb = np.float32(p_img_rgb) / 255
#plt.imshow(p_img_rgb)
#plt.axis('off')
plt.imsave(fname, p_img_rgb)
def save_prototype_full_size(fname, epoch, index,
color=(0, 255, 255)):
p_img_bgr = cv2.imread(os.path.join(load_img_dir, 'epoch-'+str(epoch), 'prototype-img-original'+str(index)+'.png'))
p_img_rgb = p_img_bgr[...,::-1]
p_img_rgb = np.float32(p_img_rgb) / 255
#plt.imshow(p_img_rgb)
#plt.axis('off')
plt.imsave(fname, p_img_rgb)
def imsave_with_bbox(fname, img_rgb, bbox_height_start, bbox_height_end,
bbox_width_start, bbox_width_end, color=(0, 255, 255)):
img_bgr_uint8 = cv2.cvtColor(np.uint8(255*img_rgb), cv2.COLOR_RGB2BGR)
cv2.rectangle(img_bgr_uint8, (bbox_width_start, bbox_height_start), (bbox_width_end-1, bbox_height_end-1),
color, thickness=2)
img_rgb_uint8 = img_bgr_uint8[...,::-1]
img_rgb_float = np.float32(img_rgb_uint8) / 255
#plt.imshow(img_rgb_float)
#plt.axis('off')
plt.imsave(fname, img_rgb_float)
# load the test image and forward it through the network
preprocess = transforms.Compose([
transforms.Resize((img_size,img_size)),
transforms.ToTensor(),
normalize
])
img_pil = Image.open(test_image_path)
img_tensor = preprocess(img_pil)
img_variable = Variable(img_tensor.unsqueeze(0))
images_test = img_variable.cuda()
labels_test = torch.tensor([test_image_label])
logits, min_distances = ppnet_multi(images_test)
conv_output, distances = ppnet.push_forward(images_test)
prototype_activations = ppnet.distance_2_similarity(min_distances)
prototype_activation_patterns = ppnet.distance_2_similarity(distances)
if ppnet.prototype_activation_function == 'linear':
prototype_activations = prototype_activations + max_dist
prototype_activation_patterns = prototype_activation_patterns + max_dist
tables = []
for i in range(logits.size(0)):
tables.append((torch.argmax(logits, dim=1)[i].item(), labels_test[i].item()))
log(str(i) + ' ' + str(tables[-1]))
idx = 0
predicted_cls = tables[idx][0]
correct_cls = test_image_label#tables[idx][1]
log('Predicted: ' + str(predicted_cls))
log('Actual: ' + str(correct_cls))
original_img = save_preprocessed_img(os.path.join(save_analysis_path, 'original_img.png'),
images_test, idx)
##### MOST ACTIVATED (NEAREST) 10 PROTOTYPES OF THIS IMAGE
makedir(os.path.join(save_analysis_path, 'most_activated_prototypes'))
max_act = 0
log('Most activated 5 prototypes of this image:')
array_act, sorted_indices_act = torch.sort(prototype_activations[idx])
for i in range(1,6):
log('top {0} activated prototype for this image:'.format(i))
save_prototype(os.path.join(save_analysis_path, 'most_activated_prototypes',
'top-%d_activated_prototype.png' % i),
start_epoch_number, sorted_indices_act[-i].item())
save_prototype_full_size(fname=os.path.join(save_analysis_path, 'most_activated_prototypes',
'top-%d_activated_prototype_full_size.png' % i),
epoch=start_epoch_number,
index=sorted_indices_act[-i].item(),
color=(0, 255, 255))
save_prototype_original_img_with_bbox(fname=os.path.join(save_analysis_path, 'most_activated_prototypes',
'top-%d_activated_prototype_in_original_pimg.png' % i),
epoch=start_epoch_number,
index=sorted_indices_act[-i].item(),
bbox_height_start=prototype_info[sorted_indices_act[-i].item()][1],
bbox_height_end=prototype_info[sorted_indices_act[-i].item()][2],
bbox_width_start=prototype_info[sorted_indices_act[-i].item()][3],
bbox_width_end=prototype_info[sorted_indices_act[-i].item()][4],
color=(0, 255, 255))
save_prototype_self_activation(os.path.join(save_analysis_path, 'most_activated_prototypes',
'top-%d_activated_prototype_self_act.png' % i),
start_epoch_number, sorted_indices_act[-i].item())
log('prototype index: {0}'.format(sorted_indices_act[-i].item()))
log('prototype class identity: {0}'.format(prototype_img_identity[sorted_indices_act[-i].item()]))
if prototype_max_connection[sorted_indices_act[-i].item()] != prototype_img_identity[sorted_indices_act[-i].item()]:
log('prototype connection identity: {0}'.format(prototype_max_connection[sorted_indices_act[-i].item()]))
log('activation value (similarity score): {0}'.format(array_act[-i]))
f = open(save_analysis_path + '/most_activated_prototypes/' + 'top-' + str(i) + '_activated_prototype.txt', "w")
f.write('similarity: {0:.3f}\n'.format(array_act[-i].item()))
f.write('last layer connection with predicted class: {0} \n'.format(ppnet.last_layer.weight[predicted_cls][sorted_indices_act[-i].item()]))
f.write('proto index:')
f.write(str(sorted_indices_act[-i].item()) + '\n')
for class_id_ in range(num_classes):
f.write(f'proto connection to class {class_id_}:')
f.write(str(ppnet.last_layer.weight[class_id_][sorted_indices_act[-i].item()]) + '\n')
f.close()
log('last layer connection with predicted class: {0}'.format(ppnet.last_layer.weight[predicted_cls][sorted_indices_act[-i].item()]))
activation_pattern = prototype_activation_patterns[idx][sorted_indices_act[-i].item()].detach().cpu().numpy()
upsampled_activation_pattern = cv2.resize(activation_pattern, dsize=(img_size, img_size),
interpolation=cv2.INTER_CUBIC)
# show the most highly activated patch of the image by this prototype
high_act_patch_indices = find_high_activation_crop(upsampled_activation_pattern)
high_act_patch = original_img[high_act_patch_indices[0]:high_act_patch_indices[1],
high_act_patch_indices[2]:high_act_patch_indices[3], :]
log('most highly activated patch of the chosen image by this prototype:'
+ str(os.path.join(save_analysis_path, 'most_activated_prototypes',
'most_highly_activated_patch_by_top-%d_prototype.png' % i)))
#plt.axis('off')
plt.imsave(os.path.join(save_analysis_path, 'most_activated_prototypes',
'most_highly_activated_patch_by_top-%d_prototype.png' % i),
high_act_patch)
log('most highly activated patch by this prototype shown in the original image:'
+ str(os.path.join(save_analysis_path, 'most_activated_prototypes',
'most_highly_activated_patch_in_original_img_by_top-%d_prototype.png' % i)))
imsave_with_bbox(fname=os.path.join(save_analysis_path, 'most_activated_prototypes',
'most_highly_activated_patch_in_original_img_by_top-%d_prototype.png' % i),
img_rgb=original_img,
bbox_height_start=high_act_patch_indices[0],
bbox_height_end=high_act_patch_indices[1],
bbox_width_start=high_act_patch_indices[2],
bbox_width_end=high_act_patch_indices[3], color=(0, 255, 255))
# show the image overlayed with prototype activation map
rescaled_activation_pattern = upsampled_activation_pattern - np.amin(upsampled_activation_pattern)
rescaled_activation_pattern = rescaled_activation_pattern / np.amax(rescaled_activation_pattern)
heatmap = cv2.applyColorMap(np.uint8(255*rescaled_activation_pattern), cv2.COLORMAP_JET)
heatmap = np.float32(heatmap) / 255
heatmap = heatmap[...,::-1]
overlayed_img = 0.5 * original_img + 0.3 * heatmap
log('prototype activation map of the chosen image:' + str(os.path.join(save_analysis_path, 'most_activated_prototypes',
'prototype_activation_map_by_top-%d_prototype.png' % i)))
#plt.axis('off')
plt.imsave(os.path.join(save_analysis_path, 'most_activated_prototypes',
'prototype_activation_map_by_top-%d_prototype.png' % i),
overlayed_img)
# show the image overlayed with different normalized prototype activation map
rescaled_activation_pattern = upsampled_activation_pattern - np.amin(upsampled_activation_pattern)
    # track the maximum activation of any prototype on this image (valid because prototypes are visited in descending activation order; the max must be taken on the rescaled pattern)
if np.amax(rescaled_activation_pattern) > max_act:
max_act = np.amax(rescaled_activation_pattern)
rescaled_activation_pattern = rescaled_activation_pattern / max_act
heatmap = cv2.applyColorMap(np.uint8(255*rescaled_activation_pattern), cv2.COLORMAP_JET)
heatmap = np.float32(heatmap) / 255
heatmap = heatmap[...,::-1]
overlayed_img = 0.5 * original_img + 0.3 * heatmap
#plt.axis('off')
log('normalized prototype activation map of the chosen image:'
+ str(os.path.join(save_analysis_path, 'most_activated_prototypes',
'prototype_activation_map_by_top-%d_prototype_normed.png' % i)))
plt.imsave(os.path.join(save_analysis_path, 'most_activated_prototypes',
'prototype_activation_map_by_top-%d_prototype_normed.png' % i),
overlayed_img)
log('--------------------------------------------------------------')
log('***************************************************************')
log('***************************************************************')
##### PROTOTYPES FROM TOP-k CLASSES
log('Prototypes from top-%d classes:' % k)
topk_logits, topk_classes = torch.topk(logits[idx], k=k)
for i,c in enumerate(topk_classes.detach().cpu().numpy()):
makedir(os.path.join(save_analysis_path, 'top-%d_class_prototypes' % (i+1)))
log('top %d predicted class: %d' % (i+1, c))
log('logit of the class: %f' % topk_logits[i])
class_prototype_indices = np.nonzero(ppnet.prototype_class_identity.detach().cpu().numpy()[:, c])[0]
class_prototype_activations = prototype_activations[idx][class_prototype_indices]
_, sorted_indices_cls_act = torch.sort(class_prototype_activations)
prototype_cnt = 1
for j in reversed(sorted_indices_cls_act.detach().cpu().numpy()):
prototype_index = class_prototype_indices[j]
save_prototype(os.path.join(save_analysis_path, 'top-%d_class_prototypes' % (i+1),
'top-%d_activated_prototype.png' % prototype_cnt),
start_epoch_number,
prototype_index)
save_prototype_full_size(fname=os.path.join(save_analysis_path, 'top-%d_class_prototypes' % (i+1),
'top-%d_activated_prototype_full_size.png' % prototype_cnt),
epoch=start_epoch_number,
index=prototype_index,
color=(0, 255, 255))
save_prototype_original_img_with_bbox(fname=os.path.join(save_analysis_path, 'top-%d_class_prototypes' % (i+1),
'top-%d_activated_prototype_in_original_pimg.png' % prototype_cnt),
epoch=start_epoch_number,
index=prototype_index,
bbox_height_start=prototype_info[prototype_index][1],
bbox_height_end=prototype_info[prototype_index][2],
bbox_width_start=prototype_info[prototype_index][3],
bbox_width_end=prototype_info[prototype_index][4],
color=(0, 255, 255))
save_prototype_self_activation(os.path.join(save_analysis_path,
'top-%d_class_prototypes' % (i+1),
'top-%d_activated_prototype_self_act.png' % prototype_cnt),
start_epoch_number,
prototype_index)
log('prototype index: {0}'.format(prototype_index))
log('prototype class identity: {0}'.format(prototype_img_identity[prototype_index]))
if prototype_max_connection[prototype_index] != prototype_img_identity[prototype_index]:
log('prototype connection identity: {0}'.format(prototype_max_connection[prototype_index]))
log('activation value (similarity score): {0}'.format(prototype_activations[idx][prototype_index]))
log('last layer connection: {0}'.format(ppnet.last_layer.weight[c][prototype_index]))
activation_pattern = prototype_activation_patterns[idx][prototype_index].detach().cpu().numpy()
upsampled_activation_pattern = cv2.resize(activation_pattern, dsize=(img_size, img_size),
interpolation=cv2.INTER_CUBIC)
# logging
f = open(save_analysis_path + '/top-' + str(i+1) + '_class_prototypes/' + 'top-' + str(prototype_cnt) + '_activated_prototype.txt', "w")
f.write('similarity: {0:.3f}\n'.format(prototype_activations[idx][prototype_index]))
f.write('last layer connection: {0:.3f}\n'.format(ppnet.last_layer.weight[c][prototype_index]))
f.write('proto index: ' + str(prototype_index) + '\n')
for class_id_ in range(num_classes):
f.write(f'proto connection to class {class_id_}:')
f.write(str(ppnet.last_layer.weight[class_id_][prototype_index]) + '\n')
f.close()
# show the most highly activated patch of the image by this prototype
high_act_patch_indices = find_high_activation_crop(upsampled_activation_pattern)
high_act_patch = original_img[high_act_patch_indices[0]:high_act_patch_indices[1],
high_act_patch_indices[2]:high_act_patch_indices[3], :]
log('most highly activated patch of the chosen image by this prototype:' +
str(os.path.join(save_analysis_path, 'top-%d_class_prototypes' % (i+1),
'most_highly_activated_patch_by_top-%d_prototype.png' % prototype_cnt)))
#plt.axis('off')
plt.imsave(os.path.join(save_analysis_path, 'top-%d_class_prototypes' % (i+1),
'most_highly_activated_patch_by_top-%d_prototype.png' % prototype_cnt),
high_act_patch)
log('most highly activated patch by this prototype shown in the original image:'
+ str(os.path.join(save_analysis_path, 'top-%d_class_prototypes' % (i+1),
'most_highly_activated_patch_in_original_img_by_top-%d_prototype.png' % prototype_cnt)))
imsave_with_bbox(fname=os.path.join(save_analysis_path, 'top-%d_class_prototypes' % (i+1),
'most_highly_activated_patch_in_original_img_by_top-%d_prototype.png' % prototype_cnt),
img_rgb=original_img,
bbox_height_start=high_act_patch_indices[0],
bbox_height_end=high_act_patch_indices[1],
bbox_width_start=high_act_patch_indices[2],
bbox_width_end=high_act_patch_indices[3], color=(0, 255, 255))
# show the image overlayed with prototype activation map
rescaled_activation_pattern = upsampled_activation_pattern - np.amin(upsampled_activation_pattern)
rescaled_activation_pattern = rescaled_activation_pattern /
|
np.amax(rescaled_activation_pattern)
|
numpy.amax
|
"""
(c) RIKEN 2015. All rights reserved.
Author: <NAME>
This software is released under the new BSD License; see LICENSE.
"""
"""
NOTE on unit cell constraints determination:
XDS doesn't handle "real" rhombohedral space group (right?).
So, No need to support R3 or R32. They are handled as H3 or H32, maybe.
"""
import re
import numpy
import collections
from cctbx import sgtbx
def rotations_to_missetting_angles(vals):
a = numpy.array(vals)
t = numpy.deg2rad(numpy.linalg.norm(a))
u = a / numpy.linalg.norm(a)
ct, st = numpy.cos(t), numpy.sin(t)
ux, uy, uz = u
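    # Rodrigues' rotation formula: rotation by angle t about the unit axis u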
    R = numpy.matrix([[ct+ux*ux*(1-ct), ux*uy*(1-ct)-uz*st, ux*uz*(1-ct)+uy*st],
                      [uy*ux*(1-ct)+uz*st, ct+uy*uy*(1-ct), uy*uz*(1-ct)-ux*st],
                      [uz*ux*(1-ct)-uy*st, uz*uy*(1-ct)+ux*st, ct+uz*uz*(1-ct)]
                      ])
phi = numpy.zeros(3) # missetting angles
if 1. - numpy.abs(R[2,0]) < 0.0000001:
phi[0] = 0.
phi[1] =
|
numpy.sin(-R[2,0])
|
numpy.sin
|
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
df_train = pd.read_csv(r'D:\Users\Michelle\AppData\Local\Programs\Python\Python36\data\train.csv')
df_test = pd.read_csv(r'D:\Users\Michelle\AppData\Local\Programs\Python\Python36\data\test.csv')
def mean(numbers):
return sum(numbers) / float(len(numbers))
def variance(numbers, mean):
return sum([abs(x-mean)**2 for x in numbers])
def covariance(x_train,x_mean, y_train, y_mean):
ln = len(x_train)
cov = 0.0
for i in range(ln):
cov += ((x_train[i] - x_mean) * (y_train[i] - y_mean))
return cov
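# Note: variance() and covariance() omit the 1/n factor; it cancels in the slope m = cov/var.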
def coefficients():
m = covariance(x_test,x_mean, y_test, y_mean) / variance(x_test, x_mean)
b = y_mean - (m*x_mean)
return [m,b]
def simple_linear_regression():
prediction = []
m, c = coefficients()
for test in x_test:
y_pred = m*test[0] + c
prediction.append(y_pred)
return prediction
print(df_train.head())
print(df_test.head())
print(df_train.shape)
print(df_test.shape)
x_train = df_train['x']
y_train = df_train['y']
x_test = df_test['x']
y_test = df_test['y']
#print(x_train.head())
#print(y_train.head())
#print(x_test.head())
#print(y_test.head())
x_train =
|
np.array(x_train)
|
numpy.array
|
"""
Generates plots / figures when run as a script.
Plot files are placed in the :file:`plots` directory.
By default, simply running ``python -m src.plots`` generates **ALL** plots,
which may not be desired. Instead, one can pass a list of plots to generate:
``python -m src.plots plot1 plot2 ...``. The full list of plots is shown in
the usage information ``python -m src.plots --help``.
Typing can be reduced by using shell brace expansion, e.g. ``python -m
src.plots observables_{design,posterior}`` for both ``observables_design`` and
``observables_posterior``. In addition, plots may be given as paths to plot
filenames, which enables shell globbing, e.g. ``python -m src.plots
plots/observables_*``.
In the code, each plot is generated by a function tagged with the ``@plot``
decorator.
"""
from collections import OrderedDict, Counter
import itertools
import logging
import multiprocessing
from pathlib import Path
import pickle
import subprocess
import tempfile
import warnings
import h5py
import hsluv
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
from matplotlib import lines
from matplotlib import patches
from matplotlib import ticker
from matplotlib import cm
from matplotlib.colors import ListedColormap, LinearSegmentedColormap
from scipy import special
from scipy.interpolate import PchipInterpolator, UnivariateSpline, interp2d
from scipy.optimize import curve_fit
from sklearn.decomposition import PCA
from sklearn.gaussian_process import GaussianProcessRegressor as GPR
from sklearn.gaussian_process import kernels
from sklearn.model_selection import KFold
from . import cachedir, workdir, systems, parse_system, expt, model, mcmc
from .design import Design
from .emulator import Emulator, emulators
# golden ratio
aspect = 1/1.618
# font sizes
fontsize = dict(
large=11,
normal=10,
small=9,
tiny=8,
)
# new tableau colors
# https://www.tableau.com/about/blog/2016/7/colors-upgrade-tableau-10-56782
colors = OrderedDict([
('blue', '#4e79a7'),
('orange', '#f28e2b'),
('green', '#59a14f'),
('red', '#e15759'),
('cyan', '#76b7b2'),
('purple', '#b07aa1'),
('brown', '#9c755f'),
('yellow', '#edc948'),
('pink', '#ff9da7'),
('gray', '#bab0ac')
])
offblack = '.15'
plt.rcdefaults()
plt.rcParams.update({
'font.family': 'sans-serif',
'font.sans-serif': ['Lato'],
'mathtext.fontset': 'custom',
'mathtext.default': 'it',
'mathtext.rm': 'sans',
'mathtext.cal': 'sans',
'font.size': fontsize['normal'],
'legend.fontsize': fontsize['normal'],
'axes.labelsize': fontsize['normal'],
'axes.titlesize': fontsize['large'],
'xtick.labelsize': fontsize['small'],
'ytick.labelsize': fontsize['small'],
'font.weight': 400,
'axes.labelweight': 400,
'axes.titleweight': 400,
'axes.prop_cycle': plt.cycler('color', list(colors.values())),
'lines.linewidth': .8,
'lines.markersize': 3,
'lines.markeredgewidth': 0,
'patch.linewidth': .8,
'hatch.linewidth': .8,
'axes.linewidth': .6,
'xtick.major.width': .6,
'ytick.major.width': .6,
'xtick.minor.width': .4,
'ytick.minor.width': .4,
'xtick.major.size': 3.,
'ytick.major.size': 3.,
'xtick.minor.size': 2.,
'ytick.minor.size': 2.,
'xtick.major.pad': 3.5,
'ytick.major.pad': 3.5,
'axes.labelpad': 4.,
'axes.formatter.limits': (-5, 5),
'axes.spines.top': False,
'axes.spines.right': False,
'text.color': offblack,
'axes.edgecolor': offblack,
'axes.labelcolor': offblack,
'xtick.color': offblack,
'ytick.color': offblack,
'legend.frameon': False,
'image.cmap': 'Blues',
'image.interpolation': 'none',
})
plotdir = workdir / 'plots'
plotdir.mkdir(exist_ok=True)
plot_functions = {}
def plot(f):
"""
Plot function decorator. Calls the function, does several generic tasks,
and saves the figure as the function name.
"""
def wrapper(*args, **kwargs):
logging.info('generating plot: %s', f.__name__)
f(*args, **kwargs)
fig = plt.gcf()
plotfile = plotdir / '{}.pdf'.format(f.__name__)
fig.savefig(str(plotfile))
logging.info('wrote %s', plotfile)
plt.close(fig)
plot_functions[f.__name__] = wrapper
return wrapper
def figsize(relwidth=1, aspect=.618, refwidth=6.5):
"""
Return figure dimensions from a relative width (to a reference width) and
aspect ratio (default: 1/golden ratio).
"""
width = relwidth * refwidth
return width, width*aspect
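# Illustrative note (not part of the original module): figsize(.5) returns
# (3.25, 3.25*.618), i.e. half the 6.5 inch reference width at the default
# golden-ratio aspect.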
def set_tight(fig=None, **kwargs):
"""
Set tight_layout with a better default pad.
"""
if fig is None:
fig = plt.gcf()
kwargs.setdefault('pad', .1)
fig.set_tight_layout(kwargs)
def auto_ticks(ax, axis='both', minor=False, **kwargs):
"""
Convenient interface to matplotlib.ticker locators.
"""
axis_list = []
if axis in {'x', 'both'}:
axis_list.append(ax.xaxis)
if axis in {'y', 'both'}:
axis_list.append(ax.yaxis)
for axis in axis_list:
axis.get_major_locator().set_params(**kwargs)
if minor:
axis.set_minor_locator(ticker.AutoMinorLocator(minor))
def cmap_to_alpha(cmap=plt.cm.inferno, fraction=.2):
"""
Fade the bottom of a colormap to white.
Currently only works with ListedColormap objects, of which the new cmaps
(inferno, magma, plasma, viridis) are examples.
"""
try:
colors = cmap.colors.copy()
n = int(fraction*len(colors))
for i, rgb in enumerate(colors[:n]):
colors[i] = rgb + [i/n]
return type(cmap)(colors, cmap.name + '_mod')
except AttributeError:
cmin = 25 if cmap == plt.cm.Oranges_r else 0
colors = [list(cmap(n/256)) for n in range(cmin, 200)]
n = int(fraction*len(colors))
for i, rgb in enumerate(colors[:n]):
rgb[-1] = i/n
colors[i] = rgb
return ListedColormap(colors, cmap.name + '_mod')
def format_system(system):
"""
Format a system string into a display name, e.g.:
>>> format_system('PbPb2760')
'Pb-Pb 2.76 TeV'
>>> format_system('AuAu200')
'Au-Au 200 GeV'
"""
proj, energy = parse_system(system)
if energy > 1000:
energy /= 1000
prefix = 'T'
else:
prefix = 'G'
return '{} {} {}eV'.format('-'.join(proj), energy, prefix)
def darken(color_hex, amount=.5):
"""
Darken a color by the given amount in HSLuv space.
"""
    # currently a no-op: the HSLuv darkening below is disabled
    return color_hex
#H, S, L = hsluv.hex_to_hsluv(color_hex)
#return hsluv.hsluv_to_rgb((H, S, (1 - amount)*L))
def obs_color_hsluv(obs, subobs):
"""
Return a nice color for the given observable in HSLuv space.
Use obs_color() to obtain an RGB color.
"""
if obs in {'dNch_deta', 'pT_fluct'}:
return 250, 90, 55
if obs == 'mean_pT':
return 230, 90, 65
if obs == 'dET_deta':
return 10, 65, 55
if obs in {'iden_dN_dy', 'iden_mean_pT'}:
return dict(
charged=(250, 90, 55),
pion=(210, 85, 70),
kaon=(130, 88, 68),
proton=(30, 90, 62),
)[subobs]
if obs == 'vnk':
return {
(2, 2): (250, 90, 65),
(3, 2): (150, 90, 67),
(4, 2): (20, 90, 62),
(2, 4): (310, 70, 50),
}[subobs]
raise ValueError('unknown observable: {} {}'.format(obs, subobs))
def obs_color(obs, subobs):
"""
Return a nice color for the given observable.
"""
return hsluv.hsluv_to_rgb(obs_color_hsluv(obs, subobs))
def obs_label(obs, subobs, differentials=False, full_cumulants=False):
"""
Return a formatted label for the given observable.
"""
if obs.startswith('d') and obs.endswith('_deta'):
return (r'$d{}/d\eta$' if differentials else '${}$').format(
{'Nch': r'N_\mathrm{ch}', 'ET': r'E_T'}[obs[1:-5]])
id_parts_labels = {
'dN_dy': '$dN_{}/dy$' if differentials else '$N_{}$',
'mean_pT': r'$\langle p_T^{} \rangle$'
}
if obs in id_parts_labels:
return id_parts_labels[obs].format(
            {'pion': r'\pi', 'kaon': 'K', 'proton': 'p', None: '{}'}[subobs]
)
if obs == 'pT_fluct':
return r'$\delta p_T/\langle p_T \rangle$'
if obs == 'vnk':
n, k = subobs
return '$v_{}{}$'.format(
n,
(r'\{' + str(k) + r'\}') if full_cumulants else ''
)
def _observables_plots():
"""
Metadata for observables plots.
"""
def id_parts_plots(obs):
return [(obs, species, dict(label=label)) for species, label in [
('pion', '$\pi$'), ('kaon', '$K$'), ('proton', '$p$')
]]
return [
dict(
title='Yields',
xlabel=dict(
pPb5020='Centrality %',
PbPb5020='Centrality %',
),
ylabel=(
r'$dN_\mathrm{ch}/d\eta,\ dN/dy,\ dE_T/d\eta\ [\mathrm{GeV}]$'
),
xlim=dict(
pPb5020=(0, 60),
PbPb5020=(0, 80),
),
ylim=(1e2, 1e5),
yscale='log',
height_ratio=1.5,
subplots=[
('dNch_deta', None, dict(label=r'$N_\mathrm{ch}^{\times 25}$', scale=25)),
('dET_deta', None, dict(label=r'$E_T^{\times 5}$', scale=5)),
*id_parts_plots('iden_dN_dy')
]
),
dict(
title='Mean $p_T$',
xlabel=dict(
pPb5020=r'$n_\mathrm{ch} / \langle n_\mathrm{ch} \rangle$',
PbPb5020='Centrality %'
),
ylabel=r'$\langle p_T \rangle$ [GeV]',
xlim=dict(
pPb5020=(1, 6),
PbPb5020=(0, 80),
),
ylim=(0, 1.5),
subplots=[
('mean_pT', None, dict(label='ch')),
*id_parts_plots('iden_mean_pT')
]
),
dict(
title='Mean $p_T$ fluctuations',
xlabel=dict(
pPb5020=r'$n_\mathrm{ch} / \langle n_\mathrm{ch} \rangle$',
PbPb5020='Centrality %'
),
ylabel=r'$\delta p_T/\langle p_T \rangle$',
xlim=dict(
pPb5020=(1, 6),
PbPb5020=(0, 80),
),
ylim=(0, 0.05),
subplots=[('pT_fluct', None, dict())]
),
dict(
title='Flow cumulants',
xlabel=dict(
pPb5020=(r'$n_\mathrm{trk}^\mathrm{offline}/'
r'\langle n_\mathrm{trk}^\mathrm{offline} \rangle$'),
PbPb5020='Centrality %',
),
ylabel=r'$v_n\{2\}$',
xlim=dict(
pPb5020=(1, 6),
PbPb5020=(0, 80),
),
ylim=(0, .15),
subplots=[
('vnk', (n, 2), dict(label='$v_{}$'.format(n)))
for n in [2, 3, 4]
]
)
]
def _observables(posterior=False):
"""
Model observables at all design points or drawn from the posterior with
experimental data points.
"""
plots = _observables_plots()
plots.pop(2)
plot_fixes = [
dict(
ylabel=r'$dN_\mathrm{ch}/d\eta$',
ylim=(1, 1e4),
height_ratio=1,
subplots=[
('dNch_deta', None, dict(label=r'$N_\mathrm{ch}$', scale=1))
]
),
dict(
subplots=[('mean_pT', None, dict(label='ch'))]
),
dict()
]
for a, b in zip(plots, plot_fixes):
for k, v in b.items():
a[k] = v
fig, axes = plt.subplots(
nrows=len(plots), ncols=len(systems),
figsize=figsize(.8, aspect=1.1),
)
if posterior:
samples = mcmc.Chain().samples(100)
for (plot, system), ax in zip(itertools.product(plots, systems), axes.flat):
for obs, subobs, opts in plot['subplots']:
color = obs_color(obs, subobs)
scale = opts.get('scale')
try:
model_data = model.data[system][obs][subobs]
x = model_data['x']
Y = (samples[system][obs][subobs]
if posterior else model_data['Y'])
except KeyError:
continue
if scale is not None:
Y = Y*scale
for y in Y:
ax.plot(x, y, color=color, alpha=.2, lw=.3)
try:
dset = expt.data[system][obs][subobs]
except KeyError:
print(system, obs, subobs, 'not found')
pass
else:
x = dset['x']
y = dset['y']
yerr = np.sqrt(sum(
e**2 for e in dset['yerr'].values()
))
if scale is not None:
y = y*scale
yerr = yerr*scale
ax.errorbar(
x, y, yerr=yerr, fmt='o',
capsize=0, mfc='.25', mec='.25', mew=.2, zorder=1000
)
xmin, xmax = plot['xlim'][system]
ax.text(
x[-1] + .03*(xmax - xmin), y[-1], opts['label'],
color=darken(color), ha='left', va='center'
)
auto_ticks(ax, 'x', nbins=4, minor=2)
if plot.get('yscale') == 'log':
ax.set_yscale('log')
ax.minorticks_off()
else:
auto_ticks(ax, 'y', nbins=4, minor=2)
if ax.is_first_row():
ax.set_title(format_system(system), va='top')
if ax.is_first_col():
ax.set_ylabel(plot['ylabel'])
else:
ax.set_yticklabels([])
if ax.is_last_col():
ax.text(
1.02, .5, plot['title'],
transform=ax.transAxes, ha='left', va='center',
size=plt.rcParams['axes.labelsize'], rotation=-90
)
ax.set_xlabel(plot['xlabel'][system])
ax.set_xlim(*plot['xlim'][system])
ax.set_ylim(plot['ylim'])
set_tight(fig, rect=[0, 0, .97, 1])
@plot
def observables_design():
_observables(posterior=False)
@plot
def observables_posterior():
_observables(posterior=True)
def observables(system):
"""
Model observables at all design points or drawn from the posterior with
experimental data points.
"""
plots = _observables_plots()
plots.pop(2)
plot_fixes = [
dict(
title=r'Yields $dN_\mathrm{ch}/d\eta$',
ylabel=r'$dN_\mathrm{ch}/d\eta$',
ylim=(1e1, 1e4) if system == 'PbPb5020' else (1, 1e3),
height_ratio=1,
subplots=[
('dNch_deta', None, dict(label=''))
]
),
dict(
title=r'Mean $p_T$ [GeV]',
subplots=[('mean_pT', None, dict(label=''))]),
dict(
title=r'Flow cumulants $v_n\{2\}$',
)
]
for a, b in zip(plots, plot_fixes):
for k, v in b.items():
a[k] = v
fig, axes = plt.subplots(
nrows=2, ncols=len(plots),
figsize=figsize(1, aspect=.6),
)
title = dict(
pPb5020=r'$p$-Pb 5.02 TeV',
PbPb5020=r'Pb-Pb 5.02 TeV',
)
for (posterior, plot), ax in zip(
itertools.product([False, True], plots), axes.flat):
if posterior:
samples = mcmc.Chain().samples(100)
for obs, subobs, opts in plot['subplots']:
color = obs_color(obs, subobs)
scale = opts.get('scale')
try:
model_data = model.data[system][obs][subobs]
x = model_data['x']
Y = (samples[system][obs][subobs]
if posterior else model_data['Y'])
except KeyError:
continue
if scale is not None:
Y = Y*scale
for y in Y:
alpha = .2 if system == 'pPb5020' else 0.1
ax.plot(x, y, color=color, alpha=alpha, lw=.3)
try:
dset = expt.data[system][obs][subobs]
except KeyError:
print(system, obs, subobs, 'not found')
pass
else:
x = dset['x']
y = dset['y']
yerr = np.sqrt(sum(
e**2 for e in dset['yerr'].values()
))
if scale is not None:
y = y*scale
yerr = yerr*scale
ax.errorbar(
x, y, yerr=yerr, fmt='o',
capsize=0, mfc='.25', mec='.25', mew=.2, zorder=1000
)
xmin, xmax = plot['xlim'][system]
ax.text(
x[-1] + .03*(xmax - xmin), y[-1], opts['label'],
color=darken(color), ha='left', va='center'
)
ax.set_xlim(*plot['xlim'][system])
ax.set_ylim(plot['ylim'])
auto_ticks(ax, 'x', nbins=4, minor=2)
auto_ticks(ax, 'y', nbins=4, minor=2)
if plot.get('yscale') == 'log':
ax.set_yscale('log')
ax.minorticks_off()
if ax.is_first_row():
ax.set_title(plot['title'], fontsize=fontsize['normal'])
ax.set_xticklabels([])
if ax.is_first_col():
ax.set_ylabel('Training data')
else:
ax.set_xlabel(plot['xlabel'][system])
if ax.is_first_col():
ax.set_ylabel('Posterior samples')
ax.annotate(title[system], xy=(.1, .1),
xycoords='axes fraction',ha='left', va='bottom')
set_tight()
@plot
def observables_ppb():
observables('pPb5020')
@plot
def observables_pbpb():
observables('PbPb5020')
@plot
def observables_map():
"""
Model observables and ratio to experiment at the maximum a posteriori
(MAP) estimate.
"""
systems = ['pPb5020', 'PbPb5020']
plots = _observables_plots()
ylim = {
'Yields': (1e-1, 1e5),
'Mean $p_T$': (0, 1.7),
'Mean $p_T$ fluctuations': (0, 0.05),
'Flow cumulants': (0, .12),
}
for n, p in enumerate(plots):
p['ylim'] = ylim[p['title']]
if p['title'] == 'Flow cumulants':
move_index = n
p.update(
ylabel=r'$v_n\{k\}$',
subplots=[
                ('vnk', nk, dict(label=r'$v_{}\{{{}\}}$'.format(*nk)))
for nk in [(2, 2), (2, 4), (3, 2), (4, 2)]
],
legend=True
)
fig = plt.figure(figsize=figsize(0.88, 1.5))
yields, mean_pT, mean_pT_fluct, flows = [
gridspec.GridSpecFromSubplotSpec(
2, 2, gs, height_ratios=[5, 1] if n == 0 else [3, 1],
hspace=0.1, wspace=.14
) for n, gs in enumerate(
gridspec.GridSpec(4, 1, height_ratios=[6, 4, 4, 4])
)
]
gridspecs = [yields, mean_pT, mean_pT_fluct, flows]
rows = zip(plots, gridspecs)
for nrow, (plot, gs) in enumerate(rows):
axes = [fig.add_subplot(ax) for ax in gs]
cols = zip(systems, axes[:2], axes[2:])
for ncol, (system, ax, ratio_ax) in enumerate(cols):
for obs, subobs, opts in plot['subplots']:
color = obs_color(obs, subobs)
scale = opts.get('scale')
try:
x = model.map_data[system][obs][subobs]['x']
y = model.map_data[system][obs][subobs]['Y']
except KeyError:
continue
if scale is not None:
y = y*scale
ax.plot(x, y, color=color)
if 'label' in opts:
xmin, xmax = plot['xlim'][system]
ax.text(
x[-1] + .03*(xmax - xmin), y[-1], opts['label'],
color=darken(color), ha='left', va='center'
)
try:
dset = expt.data[system][obs][subobs]
except KeyError:
continue
x = dset['x']
yexp = dset['y']
yerr = dset['yerr']
yerrstat = yerr.get('stat')
yerrsys = yerr.get('sys', yerr.get('sum'))
if scale is not None:
yexp = yexp*scale
if yerrstat is not None:
yerrstat = yerrstat*scale
if yerrsys is not None:
yerrsys = yerrsys*scale
ax.errorbar(
x, yexp, yerr=yerrstat, fmt='o',
capsize=0, mfc='.25', mec='.25', mew=.2, zorder=1000
)
ax.fill_between(
x, yexp - yerrsys, yexp + yerrsys,
color='.9', zorder=-10
)
ratio_ax.plot(x, y/yexp, color=color)
# main axes
if plot.get('yscale') == 'log':
ax.set_yscale('log')
ax.minorticks_off()
else:
auto_ticks(ax, 'y', nbins=4, minor=2)
ax.set_xticklabels([])
ax.set_xlim(*plot['xlim'][system])
ax.set_ylim(plot['ylim'])
if nrow == 0:
ax.set_title(format_system(system))
if ncol == 0:
ax.set_ylabel(plot['ylabel'])
ratio_ax.set_ylabel('Ratio')
else:
ax.text(
1.02, .5, plot['title'],
transform=ax.transAxes, ha='left', va='center',
size=plt.rcParams['axes.labelsize'], rotation=-90
)
ax.set_yticklabels([])
ratio_ax.set_yticklabels([])
# ratio axes
ratio_ax.axhline(1, lw=.5, color='0.5', zorder=-100)
ratio_ax.axhspan(0.9, 1.1, color='0.95', zorder=-200)
ratio_ax.set_xlim(plot['xlim'][system])
ratio_ax.set_ylim(0.8, 1.2)
ratio_ax.set_xlabel(plot['xlabel'][system])
ratio_ax.set_yticks(np.arange(80, 121, 20)/100)
ratio_ax.get_yticklabels()[0].set_verticalalignment('bottom')
ratio_ax.get_yticklabels()[-1].set_verticalalignment('top')
set_tight(fig, h_pad=.5, rect=[0, 0, .97, 1])
#@plot
def find_map():
"""
Find the maximum a posteriori (MAP) point and compare emulator predictions
to experimental data.
"""
from scipy.optimize import minimize
chain = mcmc.Chain()
fixed_params = {
'parton_number': 6,
'Tswitch': 0.151,
}
opt_params = [k for k in chain.keys if k not in fixed_params]
def full_x(x):
x = dict(zip(opt_params, x), **fixed_params)
return [x[k] for k in chain.keys]
res = minimize(
lambda x: -chain.log_posterior(full_x(x))[0],
x0=np.median(chain.load(*opt_params, thin=1), axis=0),
tol=1e-8,
bounds=[
(a + 1e-6*(b - a), b - 1e-6*(b - a))
for (a, b), k in zip(chain.range, chain.keys)
if k in opt_params
]
)
logging.debug('optimization result:\n%s', res)
width = max(map(len, chain.keys)) + 2
logging.info(
'MAP params:\n%s',
'\n'.join(
k.ljust(width) + str(x) for k, x in zip(chain.keys, full_x(res.x))
)
)
pred = chain._predict(np.atleast_2d(full_x(res.x)))
plots = _observables_plots()
ylim = {
'Yields': (1e2, 1e5),
'Mean $p_T$': (0, 1.7),
'Flow cumulants': (0, .12),
}
for p in plots:
p['ylim'] = ylim[p['title']]
fig = plt.figure(figsize=figsize(1, 1.5))
gs = gridspec.GridSpec(6, 2, height_ratios=[3, 1, 3, 1, 3, 1])
axes = []
ratio_axes = []
nrow = len(plots)
ncol = len(systems)
for row, col in itertools.product(range(0, 2*nrow, 2), range(ncol)):
ax = fig.add_subplot(gs[row, col])
ratio_ax = fig.add_subplot(gs[row + 1, col], sharex=ax)
axes.append(ax)
ratio_axes.append(ratio_ax)
for (plot, system), ax, ratio_ax in zip(
itertools.product(plots, systems), axes, ratio_axes
):
for obs, subobs, opts in plot['subplots']:
color = obs_color(obs, subobs)
scale = opts.get('scale')
try:
x = model.data[system][obs][subobs]['x']
except KeyError:
continue
y = pred[system][obs][subobs][0]
if scale is not None:
y = y*scale
ax.plot(x, y, color=color)
if 'label' in opts and obs == 'vnk':
ax.text(
x[-1] + (.1 if system == 'pPb5020' else 3), y[-1],
opts['label'],
color=darken(color), ha='left', va='center'
)
try:
dset = expt.data[system][obs][subobs]
except KeyError:
continue
x = dset['x']
yexp = dset['y']
yerr = dset['yerr']
yerrstat = yerr.get('stat')
yerrsys = yerr.get('sys', yerr.get('sum'))
if scale is not None:
yexp = yexp*scale
if yerrstat is not None:
yerrstat = yerrstat*scale
if yerrsys is not None:
yerrsys = yerrsys*scale
ax.errorbar(
x, yexp, yerr=yerrstat, fmt='o', ms=1.7,
capsize=0, color='.25', zorder=1000
)
ax.fill_between(
x, yexp - yerrsys, yexp + yerrsys,
color='.9', zorder=-10
)
ratio_ax.plot(x, y/yexp, color=color)
if plot.get('yscale') == 'log':
ax.set_yscale('log')
ax.minorticks_off()
else:
auto_ticks(ax, 'y', nbins=4, minor=2)
ax.set_xticklabels([])
ax.set_ylim(plot['ylim'])
if ax.is_first_row():
ax.set_title(format_system(system))
if ax.is_first_col():
ax.set_ylabel(plot['ylabel'])
ratio_ax.set_ylabel('Ratio')
if ax.is_last_col():
ax.text(
1.08, .5, plot['title'],
transform=ax.transAxes, ha='left', va='center',
size=plt.rcParams['axes.labelsize'], rotation=-90
)
ratio_ax.axhline(1, lw=.5, color='0.5', zorder=-100)
ratio_ax.axhspan(0.9, 1.1, color='0.95', zorder=-200)
        ratio_ax.set_xlabel(plot['xlabel'][system])
ratio_ax.set_ylim(0.8, 1.2)
ratio_ax.set_yticks(np.arange(80, 121, 20)/100)
set_tight(fig, rect=(0, 0, .95, 1))
@plot
def flow_corr():
"""
Symmetric cumulants SC(m, n) at the MAP point compared to experiment.
"""
fig, axes = plt.subplots(
figsize=figsize(0.5, 1.2), sharex=True,
nrows=2, gridspec_kw=dict(height_ratios=[4, 5])
)
observables = ['sc', 'sc_normed']
ylims = [(-2.5e-6, 2.5e-6), (-.9, .8)]
labels = ['(4,2)', '(3,2)']
system = 'PbPb5020'
def label(*mn, normed=False):
fmt = r'\mathrm{{SC}}({0}, {1})'
if normed:
fmt += r'/\langle v_{0}^2 \rangle\langle v_{1}^2 \rangle'
return fmt.format(*mn).join('$$')
for obs, ylim, ax in zip(observables, ylims, axes.flat):
for (mn, cmap), lbl in zip([((4, 2), 'Blues'), ((3, 2), 'Oranges')], labels):
x = model.map_data[system][obs][mn]['x']
y = model.map_data[system][obs][mn]['Y']
ax.plot(x, y, color=getattr(plt.cm, cmap)(.7))
ax.text(1.02*x[-1], y[-1], lbl, va='center', ha='left')
ax.axhline(
0, color='.5', lw=plt.rcParams['xtick.major.width'],
zorder=-100
)
ax.set_xlim(0, 80)
ax.set_ylim(*ylim)
auto_ticks(ax, nbins=7, minor=2)
if ax.is_first_col():
ax.set_ylabel(label('m', 'n', normed='normed' in obs))
if ax.is_first_row():
ax.set_title('Pb-Pb 5.02 TeV')
else:
ax.set_xlabel('Centrality %')
# MAP estimate for Pb-Pb collisions at 5.02 TeV, calibrated to Pb-Pb
# data at 2.76 and 5.02 TeV using a model without nucleon substructure.
# symmetric cumulants
SC = np.array([
[2.5e+00, 5.8591e-09, 5.9204e-09],
[7.5e+00, 2.1582e-08, -2.1367e-08],
[1.5e+01, 1.2228e-07, -1.3942e-07],
[2.5e+01, 4.3989e-07, -5.4267e-07],
[3.5e+01, 9.4414e-07, -1.0677e-06],
[4.5e+01, 1.4138e-06, -1.4616e-06],
[5.5e+01, 1.4456e-06, -1.2317e-06],
[6.5e+01, 7.3726e-07, -3.3222e-07],
])
# normalized symmetric cumulants
NSC = np.array([
[2.5e+00, 7.3202e-02, 2.1091e-02],
[7.5e+00, 7.6282e-02, -2.0918e-02],
[1.5e+01, 1.5216e-01, -4.7261e-02],
[2.5e+01, 2.4814e-01, -8.6423e-02],
[3.5e+01, 3.4423e-01, -1.1640e-01],
[4.5e+01, 4.5614e-01, -1.4251e-01],
[5.5e+01, 6.1072e-01, -1.5021e-01],
])
for ax, obs in zip(axes, [SC, NSC]):
x, y42, y32 = obs.T
ax.plot(x, y42, color=plt.cm.Blues(.7), linestyle='dashed')
ax.plot(x, y32, color=plt.cm.Oranges(.7), linestyle='dashed')
solid_line = lines.Line2D([], [], color=offblack)
dashed_line = lines.Line2D([], [], linestyle='dashed', color=offblack)
handles = [solid_line, dashed_line]
labels = ["p-Pb, Pb-Pb 5.02 TeV", "Pb-Pb 2.76, 5.02 TeV"]
plt.legend(handles, labels, loc=8, title='Bayesian calibration on:')
set_tight(fig)
def format_ci(samples, ci=.9):
"""
Compute the median and a credible interval for an array of samples and
return a TeX-formatted string.
"""
cil, cih = mcmc.credible_interval(samples, ci=ci)
median = np.median(samples)
ul = median - cil
uh = cih - median
# decide precision for formatting numbers
# this is NOT general but it works for the present data
if abs(median) < .05 or (uh + ul) < abs(median) < .5:
precision = 3
elif abs(median) < 5:
precision = 2
else:
precision = 1
fmt = str(precision).join(['{:#.', 'f}'])
return ''.join([
'$', fmt.format(median),
'_{-', fmt.format(ul), '}',
'^{+', fmt.format(uh), '}$'
])
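# Illustrative usage (a sketch, not part of the original module): given a 1-D
# array of posterior samples, format_ci returns a TeX string of the form
# '$median_{-lower}^{+upper}$', e.g.
#   format_ci(mcmc.Chain().load('etas_min').ravel())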
def _posterior(
params=None, ignore=None,
scale=1, pad_subplots=-.1, rect_r=1, rect_t=.99,
cmap=None
):
"""
Triangle plot of posterior marginal and joint distributions.
"""
chain = mcmc.Chain()
if params is None and ignore is None:
params = set(chain.keys)
elif params is not None:
params = set(params)
elif ignore is not None:
params = set(chain.keys) - set(ignore)
keys, labels, ranges = map(list, zip(*(
i for i in zip(chain.keys, chain.labels, chain.range)
if i[0] in params
)))
ndim = len(params)
data = chain.load(*keys).T
cmap = plt.get_cmap(cmap)
cmap.set_bad('white')
line_color = cmap(.8)
fill_color = cmap(.5, alpha=.1)
fig, axes = plt.subplots(
nrows=ndim, ncols=ndim,
sharex='col', sharey='row',
figsize=figsize(.15*scale*ndim, aspect=1)
)
for samples, key, lim, ax in zip(data, keys, ranges, axes.diagonal()):
counts, edges = np.histogram(samples, bins=50, range=lim)
x = (edges[1:] + edges[:-1]) / 2
y = .85 * (lim[1] - lim[0]) * counts / counts.max() + lim[0]
# smooth histogram with monotonic cubic interpolation
interp = PchipInterpolator(x, y)
x = np.linspace(x[0], x[-1], 10*x.size)
y = interp(x)
ax.plot(x, y, linewidth=1, color=line_color)
ax.fill_between(x, lim[0], y, color=fill_color, zorder=-10)
ax.set_xlim(lim)
ax.set_ylim(lim)
if key == 'dmin3':
samples = samples**(1/3)
ax.annotate(
format_ci(samples), (.62, .92), xycoords='axes fraction',
ha='center', va='bottom', fontsize=fontsize['large']
)
for ny, nx in zip(*np.tril_indices_from(axes, k=-1)):
axes[ny][nx].hist2d(
data[nx], data[ny], bins=100,
range=(ranges[nx], ranges[ny]),
cmap=cmap, cmin=1
)
axes[nx][ny].set_axis_off()
for ax in axes.flat:
ax.tick_params(length=2/3*plt.rcParams['xtick.major.size'])
for key, label, axb, axl in zip(keys, labels, axes[-1], axes[:, 0]):
for axis in [axb.xaxis, axl.yaxis]:
axis.set_label_text(
label.replace(r'\ [', '$\n$['),
)
axis.set_tick_params(labelsize=fontsize['tiny'])
if key == 'dmin3':
ticks = [0., 1.2, 1.5, 1.7]
axis.set_ticklabels(list(map(str, ticks)))
axis.set_ticks([t**3 for t in ticks])
else:
axis.set_major_locator(ticker.LinearLocator(3))
if axis.axis_name == 'x' and any(
len(str(round(x, 5))) > 4 for x in axis.get_ticklocs()
):
for t in axis.get_ticklabels():
t.set_rotation(30)
axb.get_xticklabels()[0].set_horizontalalignment('left')
axb.get_xticklabels()[-1].set_horizontalalignment('right')
axl.get_yticklabels()[0].set_verticalalignment('bottom')
axl.get_yticklabels()[-1].set_verticalalignment('top')
set_tight(
fig, pad=0, w_pad=pad_subplots, h_pad=pad_subplots,
rect=(0, 0, rect_r, rect_t)
)
@plot
def posterior():
_posterior(ignore={'etas_hrg'})
@plot
def posterior_shear():
_posterior(
scale=1.35, pad_subplots=.1, rect_t=.97,
params={'etas_min', 'etas_slope', 'etas_crv'}
)
@plot
def posterior_bulk():
_posterior(
scale=1.35, pad_subplots=.1, rect_t=.97,
params={'zetas_max', 'zetas_width', 'zetas_t0'}
)
@plot
def posterior_p():
"""
Distribution of trento p parameter with annotations for other models.
"""
plt.figure(figsize=figsize(.5, .5))
ax = plt.axes()
data = mcmc.Chain().load('trento_p').ravel()
counts, edges = np.histogram(data, bins=50)
x = (edges[1:] + edges[:-1]) / 2
y = counts / counts.max()
interp = PchipInterpolator(x, y)
x = np.linspace(x[0], x[-1], 10*x.size)
y = interp(x)
ax.plot(x, y, color=plt.cm.Blues(0.8))
ax.fill_between(x, y, color=plt.cm.Blues(0.15), zorder=-10)
ax.set_xlabel('$p$')
for spine in ax.spines.values():
spine.set_visible(False)
for label, x, err in [
('KLN', -.67, .01),
('EKRT /\nIP-Glasma', 0, .1),
('Wounded\nnucleon', 1, None),
]:
args = ([x], [0], 'o') if err is None else ([x - err, x + err], [0, 0])
ax.plot(*args, lw=4, ms=4, color=offblack, alpha=.58, clip_on=False)
if label.startswith('EKRT'):
x -= .29
ax.text(x, .05, label, va='bottom', ha='center')
ax.text(.1, .8, format_ci(data))
ax.set_xticks(np.arange(-10, 11, 5)/10)
ax.set_xticks(np.arange(-75, 76, 50)/100, minor=True)
xm = 1.2
ax.set_xlim(-xm, xm)
ax.add_artist(
patches.FancyArrowPatch(
(-xm, 0), (xm, 0),
linewidth=plt.rcParams['axes.linewidth'],
arrowstyle=patches.ArrowStyle.CurveFilledAB(
head_length=3, head_width=1.5
),
facecolor=offblack, edgecolor=offblack,
clip_on=False, zorder=100
)
)
ax.set_yticks([])
ax.set_ylim(0, 1.01*y.max())
set_tight(pad=0)
def posterior_parameter(parameter, label, xticks, bins=50):
"""
Marginal distribution of a single parameter.
"""
plt.figure(figsize=figsize(.5, .75))
ax = plt.axes()
data = mcmc.Chain().load(parameter).ravel()
counts, edges = np.histogram(data, bins=bins)
x = (edges[1:] + edges[:-1]) / 2
y = counts / counts.max()
interp = PchipInterpolator(x, y)
x = np.linspace(x[0], x[-1], 100*x.size)
y = interp(x)
ax.plot(x, y, color=plt.cm.Blues(0.8))
ax.fill_between(x, y, color=plt.cm.Blues(0.15), zorder=-10)
ax.spines['left'].set_visible(False)
ax.set_xlabel(label)
ax.set_xticks(xticks)
ax.set_yticks([])
ax.set_ylim(0, 1.01*y.max())
set_tight(pad=0)
@plot
def posterior_parton_number():
"""
Posterior distribution on the number of constituents.
"""
plt.figure(figsize=figsize(.5, .75))
ax = plt.axes()
data = mcmc.Chain().load('parton_number').ravel()
counts, edges = np.histogram(data, bins=50)
x = (edges[1:] + edges[:-1]) / 2
y = counts / counts.max()
spline = UnivariateSpline(x, y, s=1e-3)
x = np.linspace(data.min(), data.max(), 1000)
y = spline(x)
plt.plot([x[0], x[0]], [0, y[0]], linestyle='dashed',
color=offblack, clip_on=False)
plt.plot([x[-1], x[-1]], [0, y[-1]], linestyle='dashed',
color=offblack, clip_on=False)
ax.plot(x, y, color=plt.cm.Blues(0.8))
ax.fill_between(x, y, color=plt.cm.Blues(0.15), zorder=-10)
ax.spines['left'].set_visible(False)
ax.set_xlabel('Constituent number $n_c$')
ax.set_xticks([1, 3, 5, 7, 9])
ax.set_yticks([])
ax.set_ylim(0, 1.01*y.max())
set_tight(pad=0)
@plot
def posterior_freestreaming():
posterior_parameter(
'tau_fs',
'Free streaming time [fm/$c$]',
[.1, .8, 1.5]
)
def _region(ax, name, chain, cmap=plt.cm.Blues, legend=False, title=False):
"""
Visual estimate (posterior median and credible region) of
temperature-dependent shear or bulk viscosity.
"""
var, keys, function, ymax = dict(
shear=(
'eta',
['min', 'slope', 'crv'],
lambda T, m, s, c: m + s*(T - Tc)*(T/Tc)**c,
.4
),
bulk=(
'zeta',
['max', 'width', 't0'],
lambda T, m, w, T0: m / (1 + ((T - T0)/w)**2),
.08
),
)[name]
Tmin, Tmax = .150, .300
Tc = .154
samples = chain.load(
*['{}s_{}'.format(var, k) for k in keys], thin=1
)
T = np.linspace(Tc if name == 'shear' else Tmin, Tmax, 1000)
ax.plot(
T, function(T, *np.median(samples, axis=0)),
color=cmap(.75), label='Posterior median'
)
Tsparse = np.linspace(T[0], T[-1], 25)
intervals = [
PchipInterpolator(Tsparse, y)(T)
for y in np.array([
mcmc.credible_interval(function(t, *samples.T))
for t in Tsparse
]).T
]
ax.fill_between(
T, *intervals,
color=cmap(.3), label='90% credible region'
)
ax.set_xlim(Tmin, Tmax)
ax.set_ylim(0, ymax)
auto_ticks(ax, nbins=5)
ax.xaxis.set_major_formatter(
ticker.FuncFormatter(lambda x, pos: int(1000*x))
)
ax.set_xlabel('Temperature [MeV]')
ax.set_ylabel(r'$\{}/s$'.format(var))
if title:
ax.set_title(name.capitalize() + ' viscosity')
if legend:
ax.legend(loc=legend if isinstance(legend, str) else 'best')
if name == 'shear':
ax.axhline(
1/(4*np.pi),
color='.5', linewidth=plt.rcParams['ytick.major.width']
)
ax.text(Tmax, .07, r'$1/4\pi$', va='top', ha='right', color='.3')
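# For reference, the parametrizations evaluated above (restating the lambdas,
# not new assumptions):
#   shear: eta/s(T)  = etas_min + etas_slope*(T - Tc)*(T/Tc)**etas_crv
#   bulk:  zeta/s(T) = zetas_max / (1 + ((T - zetas_t0)/zetas_width)**2)
# with Tc = 0.154 GeV; the 90% credible band comes from evaluating these
# functions over the posterior samples at each temperature.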
@plot
def region_shear():
"""
Region plot for eta/s.
"""
chain = mcmc.Chain()
fig, ax = plt.subplots(figsize=figsize(.5, .65))
_region(ax, 'shear', chain, legend='upper left')
set_tight(fig)
@plot
def region_bulk():
"""
Region plot for zeta/s.
"""
chain = mcmc.Chain()
fig, ax = plt.subplots(figsize=figsize(.5, .65))
_region(ax, 'bulk', chain, legend='upper right')
set_tight(fig)
@plot
def region_shear_bulk(cmap=plt.cm.Blues):
"""
Visual estimates (posterior median and credible region) of the
temperature-dependent shear and bulk viscosity.
"""
fig, axes = plt.subplots(ncols=2, figsize=figsize(1, .4))
ax_shear, ax_bulk = axes
Tmin, Tmax = .150, .300
Tc = .154
prj_path = Path('/home/morelandjs/research/chains/jonah_nature.hdf')
energies = (mcmc.Chain(prj_path), plt.cm.Blues, .6, 'Pb-Pb 2.76, 5.02 TeV')
nuclei = (mcmc.Chain(), plt.cm.Oranges, .25, 'p-Pb, Pb-Pb 5.02 TeV')
handles = []
for zorder, (chain, cmap, darkness, label) in enumerate([nuclei, energies]):
for (name, var, keys, function, ymax), ax in zip([
('shear', 'eta', ['min', 'slope', 'crv'],
lambda T, m, s, c: m + s*(T - Tc)*(T/Tc)**c,
.4),
('bulk', 'zeta', ['max', 'width', 't0'],
lambda T, m, w, T0: m / (1 + ((T - T0)/w)**2),
.08)
], axes):
samples = chain.load(*['{}s_{}'.format(var, k) for k in keys], thin=1)
T = np.linspace(Tc if name == 'shear' else Tmin, Tmax, 1000)
ax.plot(
T, function(T, *np.median(samples, axis=0)),
color=cmap(.75), label='Posterior median', zorder=zorder
)
Tsparse = np.linspace(T[0], T[-1], 25)
intervals = [
PchipInterpolator(Tsparse, y)(T)
for y in np.array([
mcmc.credible_interval(function(t, *samples.T))
for t in Tsparse
]).T
]
ax.fill_between(
T, *intervals,
color=cmap(darkness), label='90% credible region',
alpha=.6 if zorder == 1 else 1, lw=0, zorder=zorder
)
ax.set_xlim(Tmin, Tmax)
ax.set_ylim(0, ymax)
auto_ticks(ax, nbins=5)
ax.xaxis.set_major_formatter(
ticker.FuncFormatter(lambda x, pos: int(1000*x))
)
ax.set_xlabel('Temperature [MeV]')
ax.set_ylabel(r'$\{}/s$'.format(var))
ax.set_title(name.capitalize() + ' viscosity')
if name == 'shear':
ax.axhline(
1/(4*np.pi),
color='.5', linewidth=plt.rcParams['ytick.major.width']
)
ax.text(Tmax, .07, r'$1/4\pi$', va='top', ha='right', color='.3')
line = lines.Line2D([], [], color=cmap(.8))
band = patches.Patch(color=cmap(.3))
handles.append((band, line))
line = lines.Line2D([], [], color=offblack, label='Posterior median')
band = patches.Patch(color='.85', label='90% credible region')
ax_shear.legend(handles=[band, line], loc='upper left')
labels = ["p-Pb, Pb-Pb 5.02 TeV", "Pb-Pb 2.76, 5.02 TeV"]
ax_bulk.legend(
handles, labels, loc='upper right', markerfirst=False,
bbox_to_anchor=(1, 1.05), title='Bayesian calibration on:'
)
set_tight(w_pad=.2)
region_style = dict(color='.93', zorder=-100)
Tc = .154
@plot
def design():
"""
    Projection of a Latin hypercube (LH) design into two dimensions.
"""
fig = plt.figure(figsize=figsize(.5, 1))
ratio = 5
gs = plt.GridSpec(ratio + 1, ratio + 1)
ax_j = fig.add_subplot(gs[1:, :-1])
ax_x = fig.add_subplot(gs[0, :-1], sharex=ax_j)
ax_y = fig.add_subplot(gs[1:, -1], sharey=ax_j)
d = Design(systems[0])
keys = ('etas_min', 'etas_slope')
indices = tuple(d.keys.index(k) for k in keys)
x, y = (d.array[:, i] for i in indices)
ax_j.plot(x, y, 'o', color=plt.cm.Blues(0.75), mec='white', mew=.3)
hist_kw = dict(bins=30, color=plt.cm.Blues(0.4), edgecolor='white', lw=.5)
ax_x.hist(x, **hist_kw)
ax_y.hist(y, orientation='horizontal', **hist_kw)
for ax in fig.axes:
ax.tick_params(top=False, right=False)
spines = ['top', 'right']
if ax is ax_x:
spines += ['left']
elif ax is ax_y:
spines += ['bottom']
for spine in spines:
ax.spines[spine].set_visible(False)
for ax_name in 'xaxis', 'yaxis':
getattr(ax, ax_name).set_ticks_position('none')
auto_ticks(ax_j)
for ax in ax_x, ax_y:
ax.tick_params(labelbottom=False, labelleft=False)
for i, xy in zip(indices, 'xy'):
for f, l in [('lim', d.range), ('label', d.labels)]:
getattr(ax_j, 'set_{}{}'.format(xy, f))(l[i])
set_tight(fig)
@plot
def gp():
"""
Conditioning a Gaussian process.
"""
fig, axes = plt.subplots(
figsize=figsize(.5, 2*aspect),
nrows=2, sharex='col'
)
def dummy_optimizer(obj_func, initial_theta, bounds):
return initial_theta, 0.
gp = GPR(1.*kernels.RBF(.8), optimizer=dummy_optimizer)
def sample_y(*args, **kwargs):
with warnings.catch_warnings():
warnings.simplefilter('ignore', RuntimeWarning)
return gp.sample_y(*args, **kwargs)
x = np.linspace(0, 5, 1000)
X = x[:, np.newaxis]
x_train = np.linspace(.5, 4.5, 4)
X_train = x_train[:, np.newaxis]
for title, ax in zip(['Random functions', 'Conditioned on data'], axes):
if title.startswith('Conditioned'):
y = sample_y(X_train, random_state=23158).squeeze()
y -= .5*(y.max() + y.min())
gp.fit(X_train, y)
training_data, = plt.plot(x_train, y, 'o', color='.3', zorder=50)
for s, c in zip(
sample_y(X, n_samples=4, random_state=34576).T,
['Blues', 'Greens', 'Oranges', 'Purples']
):
ax.plot(x, s, color=getattr(plt.cm, c)(.6))
mean, std = gp.predict(X, return_std=True)
std = ax.fill_between(x, mean - std, mean + std, color='.92')
mean, = ax.plot(x, mean, color='.42', dashes=(3.5, 1.5))
ax.set_ylim(-2, 2)
ax.set_ylabel('Output')
auto_ticks(ax)
ax.set_title(title, y=.9)
ax.set_xlabel('Input')
ax.legend(*zip(*[
(mean, 'Mean prediction'),
(std, 'Uncertainty'),
(training_data, 'Training data'),
]), loc='lower left')
set_tight(fig, h_pad=1)
@plot
def pca():
fig = plt.figure(figsize=figsize(.5, aspect=1))
ratio = 5
gs = plt.GridSpec(ratio + 1, ratio + 1)
ax_j = fig.add_subplot(gs[1:, :-1])
ax_x = fig.add_subplot(gs[0, :-1], sharex=ax_j)
ax_y = fig.add_subplot(gs[1:, -1], sharey=ax_j)
x, y = (
model.data['PbPb5020'][obs][subobs]['Y'][:, 3]
for obs, subobs in [('dNch_deta', None), ('vnk', (2, 2))]
)
xlabel = r'$dN_\mathrm{ch}/d\eta$'
ylabel = r'$v_2\{2\}$'
xlim = .5, 6
ylim = -6.5, 0
cmap = plt.cm.Blues
ax_j.plot(x, y, 'o', color=cmap(.75), mec='white', mew=.25, zorder=10)
for d, ax, orientation in [(x, ax_x, 'vertical'), (y, ax_y, 'horizontal')]:
ax.hist(
d, bins=20,
orientation=orientation, color=cmap(.4), edgecolor='white'
)
xy = np.column_stack([x, y])
xymean = xy.mean(axis=0)
xystd = xy.std(axis=0)
xy -= xymean
xy /= xystd
pca = PCA().fit(xy)
pc = (
6 * xystd *
pca.explained_variance_ratio_[:, np.newaxis] *
pca.components_
)
for w, p in zip(pca.explained_variance_ratio_, pc):
if np.all(p < 0):
p *= -1
ax_j.annotate(
'', xymean + p, xymean, zorder=20,
arrowprops=dict(
arrowstyle='->', shrinkA=0, shrinkB=0,
color=offblack, lw=.7
)
)
ax_j.text(
*(xymean + p + (.8, .002)*np.sign(p)), s='{:.0f}%'.format(100*w),
color=offblack, ha='center', va='top' if p[1] < 0 else 'bottom',
zorder=20
)
for ax in fig.axes:
ax.tick_params(top=False, right=False)
spines = ['top', 'right']
if ax is ax_x:
spines += ['left']
elif ax is ax_y:
spines += ['bottom']
for spine in spines:
ax.spines[spine].set_visible(False)
for ax_name in 'xaxis', 'yaxis':
getattr(ax, ax_name).set_ticks_position('none')
for ax in ax_x, ax_y:
ax.tick_params(labelbottom=False, labelleft=False)
auto_ticks(ax_j, nbins=5, prune='upper')
ax_j.set_xlim(xlim)
ax_j.set_ylim(ylim)
ax_j.set_xlabel(xlabel)
ax_j.set_ylabel(ylabel)
set_tight(pad=.1, h_pad=.3, w_pad=.3)
default_system = 'PbPb5020'
@plot
def pca_vectors_variance(system=default_system):
"""
PCA vectors and explained variance.
"""
fig, axes = plt.subplots(
figsize=figsize(1.2, aspect=.4),
ncols=2, gridspec_kw=dict(width_ratios=[5, 1])
)
emu = emulators[system]
pca = emu.pca
ax = axes[0]
for n, (pc, var) in enumerate(zip(
pca.components_[:3], pca.explained_variance_ratio_
), start=1):
ax.plot(pc, 'o', label='PC {} ({:.0f}%)'.format(n, 100*var))
ax.axhline(
0,
color='.5', linewidth=plt.rcParams['ytick.major.width'],
zorder=-100
)
x = -.5
ticks = []
ticklabels = []
for obs, subobslist in emu.pPb5020:
for subobs in subobslist:
i = model.data[system][obs][subobs]['Y'].shape[1]
ticks.append(x + .5*i)
ticklabels.append(obs_label(obs, subobs))
x += i
ax.set_xticks(ticks)
ax.set_xticklabels(ticklabels)
ax.tick_params(
'x',
bottom=False, labelbottom=False,
labeltop=True, pad=1
)
for t in ax.get_xticklabels():
t.set_verticalalignment('baseline')
ax.set_ylabel('PCA coefficient', labelpad=1)
auto_ticks(ax, 'y', nbins=4, minor=2)
ax.legend(loc='best', handletextpad=0)
ax = axes[1]
npc = 10
ax.plot(
np.arange(1, 1 + npc),
pca.explained_variance_ratio_.cumsum()[:npc],
'-o',
)
ax.set_xlim(.5, npc + .5)
ax.set_ylim(0, 1)
majorticks = [1, 4, 7, 10]
ax.set_xticks(majorticks)
ax.set_xticks(sorted(set(range(1, npc)) - set(majorticks)), minor=True)
auto_ticks(ax, 'y', nbins=5, minor=2)
ax.xaxis.set_ticks_position('top')
ax.set_xlabel('Number of PC')
ax.set_ylabel('Cumulative explained variance fraction')
ax.xaxis.set_ticks_position('top')
ax.xaxis.set_label_position('top')
for ax in axes:
for s in ax.spines.values():
s.set_visible(True)
set_tight(w_pad=.5)
def boxplot(
ax, percentiles, x=0, y=0, box_width=1,
line_width=plt.rcParams['lines.linewidth'],
color=(0, 0, 0), alpha=.6, zorder=10, vert=True
):
"""
Draw a minimal boxplot.
`percentiles` must be a np.array of five numbers:
whisker_low, quartile_1, median, quartile_3, whisker_high
"""
pl, q1, q2, q3, ph = percentiles + y
# IQR box
ax.add_patch(patches.Rectangle(
xy=(x - .5*box_width, q1),
width=box_width, height=(q3 - q1),
color=color, alpha=alpha, lw=0, zorder=zorder
))
# median line
ax.plot(
[x - .5*box_width, x + .5*box_width], 2*[q2],
lw=line_width, solid_capstyle='butt', color=color,
zorder=zorder + 1
)
# whisker lines
for y in [[q1, pl], [q3, ph]]:
ax.plot(
2*[x], y, lw=line_width, solid_capstyle='butt',
color=color, alpha=alpha, zorder=zorder
)
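# Minimal usage sketch (illustrative, not part of the original module):
#   fig, ax = plt.subplots()
#   z = np.random.standard_normal(1000)
#   boxplot(ax, np.percentile(z, [10, 25, 50, 75, 90]), x=0, box_width=.5)
# draws the IQR box, median line, and 10-90% whiskers centered at x = 0.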
def validation_data(system, n_splits=20):
"""
Partition the design into training and test data using K-fold
cross validation. Train the emulator on each fold (subset of the design)
and return the emulator prediction (mean and cov) for each fold as a list.
"""
design = Design(system)
kf = KFold(n_splits=n_splits)
npc = {'pPb5020': 4, 'PbPb5020': 8}[system]
mean_folds = []
cov_folds = []
cachefile = Path(cachedir, 'validation', '{}.pkl'.format(system))
cachefile.parent.mkdir(parents=True, exist_ok=True)
if cachefile.exists():
return pickle.load(cachefile.open(mode='rb'))
for train_index, test_index in kf.split(design.array):
test_points, train_points = [
[design.points[index] for index in indices]
for indices in (test_index, train_index)
]
emu = Emulator(system, exclude_points=test_points, npc=npc)
test_mean, test_cov = emu.predict(
design.array[test_index], return_cov=True
)
mean_folds.append(test_mean)
cov_folds.append(test_cov)
pickle.dump((mean_folds, cov_folds), cachefile.open(mode='wb'))
return mean_folds, cov_folds
def validation_all(system):
"""
Emulator validation: normalized residuals and RMS error for each
observable.
"""
fig, (ax_box, ax_rms) = plt.subplots(
nrows=2, figsize=figsize(1, aspect=.4),
gridspec_kw=dict(height_ratios=[1.5, 1])
)
index = 1
ticks = []
ticklabels = []
plots = _observables_plots()
model_data = model.data[system]
mean_folds, cov_folds = validation_data(system)
for plot in plots:
for (obs, subobs, opts) in plot['subplots']:
color = obs_color(obs, subobs)
try:
# model data
Y = model_data[obs][subobs]['Y']
# emulator predictions
Y_ = np.concatenate(
[mean[obs][subobs] for mean in mean_folds], axis=0
)
S_ = np.concatenate(
[np.sqrt(cov[(obs, subobs), (obs, subobs)].T.diagonal())
for cov in cov_folds], axis=0
)
except KeyError:
continue
Z = (Y_ - Y)/S_
for i, percentiles in enumerate(
np.percentile(Z, [10, 25, 50, 75, 90], axis=0).T,
start=index
):
boxplot(ax_box, percentiles, x=i, box_width=.8, color=color)
Ymin, Ymax = np.percentile(Y, (.5, 99.5))
Yerr = (Y_ - Y)/(Ymax - Ymin)
rms = 100*np.sqrt(np.square(Yerr).mean(axis=0))
ax_rms.plot(
np.arange(index, index + rms.size), rms, 'o', color=color
)
ticks.append(.5*(index + i))
ticklabels.append(obs_label(obs, subobs))
index = i + 2
ax_box.set_xticks(ticks)
ax_box.tick_params(axis='x', pad=-6)
ax_box.set_xticklabels(ticklabels)
ax_box.tick_params('x', bottom=False, labelsize=plt.rcParams['font.size'])
ax_box.set_ylim(-2.25, 2.25)
ax_box.set_ylabel(r'Normalized residuals')
nuclei, roots = parse_system(system)
ax_box.set_title('{}-{} {} TeV'.format(*nuclei, roots/1000))
q, p = np.sqrt(2) * special.erfinv(2*np.array([.75, .90]) - 1)
ax_box.axhspan(-q, q, color='.85', zorder=-20)
for s in [-1, 0, 1]:
ax_box.axhline(s*p, color='.5', zorder=-10)
ax_q = ax_box.twinx()
ax_q.set_ylim(ax_box.get_ylim())
ax_q.set_yticks([-p, -q, 0, q, p])
ax_q.set_yticklabels([10, 25, 50, 75, 90])
ax_q.tick_params('y', right=False)
ax_q.set_ylabel(
'Normal quantiles',
fontdict=dict(rotation=-90),
labelpad=4*plt.rcParams['axes.labelpad']
)
ax_rms.set_xticks([])
ax_rms.set_yticks(np.arange(0, 21, 5))
ax_rms.set_ylim(0, 20)
ax_rms.set_ylabel('Frac. error')
for y in ax_rms.get_yticks():
ax_rms.axhline(y, color='.5', zorder=-10)
for ax in fig.axes:
ax.set_xlim(0, index - 1)
ax.spines['bottom'].set_visible(False)
@plot
def validation_pPb5020():
validation_all('pPb5020')
@plot
def validation_PbPb5020():
validation_all('PbPb5020')
@plot
def validation_example(
system='PbPb5020',
obs='dNch_deta', subobs=None,
label=r'$dN_\mathrm{ch}/d\eta$',
cent=(20, 30)
):
"""
Example of emulator validation for a single observable. Scatterplot of
model calculations vs emulator predictions with histogram and boxplot of
normalized residuals.
"""
fig, axes = plt.subplots(
ncols=2, figsize=figsize(.8, aspect=.6),
gridspec_kw=dict(width_ratios=[3, 1])
)
ax_scatter, ax_hist = axes
# model data
model_data = model.data[system]
vdata = model_data[obs][subobs]
cent_slc = (slice(None), vdata['cent'].index(cent))
y = vdata['Y'][cent_slc]
# emulator predictions
mean_folds, cov_folds = validation_data(system)
y_ = np.concatenate(
[mean[obs][subobs][cent_slc] for mean in mean_folds], axis=0
)
std_ = np.concatenate(
[np.sqrt(cov[(obs, subobs), (obs, subobs)].T.diagonal()[cent_slc])
for cov in cov_folds], axis=0
)
color = obs_color(obs, subobs)
alpha = .6
ax_scatter.set_aspect('equal')
ax_scatter.errorbar(
y_, y, xerr=std_,
fmt='o', mew=.2, mec='white',
color=color, alpha=alpha
)
dy = .03*y.ptp()
x = [y.min() - dy, y.max() + dy]
ax_scatter.plot(x, x, color='.4')
ax_scatter.set_xlabel('Emulator prediction')
ax_scatter.set_ylabel('Model calculation')
ax_scatter.text(
        .04, .96, '{} {}–{}%'.format(label, *cent),
horizontalalignment='left', verticalalignment='top',
transform=ax_scatter.transAxes
)
zmax = 3.5
zrange = (-zmax, zmax)
z = (y_ - y)/std_
ax_hist.hist(
z, bins=30, range=zrange, density=True, histtype='stepfilled',
orientation='horizontal', color=color, alpha=alpha
)
x = np.linspace(-zmax, zmax, 1000)
ax_hist.plot(np.exp(-.5*x*x)/np.sqrt(2*np.pi), x, color='.25')
box_x = .75
box_width = .1
boxplot(
ax_hist, np.percentile(z, [10, 25, 50, 75, 90]),
x=box_x, box_width=box_width,
line_width=2*plt.rcParams['lines.linewidth'],
color=color, alpha=alpha
)
guide_width = 2.5*box_width
q, p = np.sqrt(2) * special.erfinv(2*np.array([.75, .90]) - 1)
ax_hist.add_patch(patches.Rectangle(
xy=(box_x - .5*guide_width, -q),
width=guide_width, height=2*q,
color='.85', zorder=-20
))
for s in [-1, 0, 1]:
ax_hist.plot(
[box_x - .5*guide_width, box_x + .5*guide_width], 2*[s*p],
color='.5', zorder=-10
)
ax_hist.set_ylim(zrange)
ax_hist.spines['bottom'].set_visible(False)
ax_hist.tick_params('x', bottom=False, labelbottom=False)
ax_hist.set_ylabel('Normalized residuals')
ax_q = ax_hist.twinx()
ax_q.spines['bottom'].set_visible(False)
ax_q.set_ylim(ax_hist.get_ylim())
ax_q.set_yticks([-p, -q, 0, q, p])
ax_q.set_yticklabels([10, 25, 50, 75, 90])
ax_q.tick_params('y', right=False)
ax_q.set_ylabel(
'Normal quantiles',
fontdict=dict(rotation=-90),
labelpad=4*plt.rcParams['axes.labelpad']
)
set_tight(fig, rect=[.02, 0, 1, 1])
@plot
def validation_example_vert(
system='PbPb5020',
obs='dNch_deta', subobs=None,
label=r'$dN_\mathrm{ch}/d\eta$',
cent=(20, 30)
):
"""
Example of emulator validation for a single observable. Scatterplot of
model calculations vs emulator predictions with histogram and boxplot of
normalized residuals.
"""
fig, axes = plt.subplots(
nrows=2, figsize=figsize(.5, aspect=1.3),
gridspec_kw=dict(height_ratios=[1, 3]),
)
ax_hist, ax_scatter = axes
# model data
model_data = model.data[system]
vdata = model_data[obs][subobs]
cent_slc = (slice(None), vdata['cent'].index(cent))
y = vdata['Y'][cent_slc]
# emulator predictions
mean_folds, cov_folds = validation_data(system)
y_ = np.concatenate(
[mean[obs][subobs][cent_slc] for mean in mean_folds], axis=0
)
std_ = np.concatenate(
[np.sqrt(cov[(obs, subobs), (obs, subobs)].T.diagonal()[cent_slc])
for cov in cov_folds], axis=0
)
color = obs_color(obs, subobs)
alpha = .6
ax_scatter.set_aspect('equal')
ax_scatter.errorbar(
y_, y, xerr=std_,
fmt='o', mew=.2, mec='white',
color=color, alpha=alpha
)
dy = .03*y.ptp()
x = [y.min() - dy, y.max() + dy]
ax_scatter.plot(x, x, color='.4')
ax_scatter.set_xlabel('Emulator prediction')
ax_scatter.set_ylabel('Model calculation')
ax_scatter.text(
.96, .06, '{} {}–{}%'.format(label, *cent),
horizontalalignment='right', verticalalignment='bottom',
transform=ax_scatter.transAxes
)
zmax = 3.5
zrange = (-zmax, zmax)
z = (y_ - y)/std_
ax_hist.hist(
z, bins=30, range=zrange, density=True, histtype='stepfilled',
color=color, alpha=alpha
)
x = np.linspace(-zmax, zmax, 1000)
ax_hist.plot(x, np.exp(-.5*x*x)/np.sqrt(2*np.pi), color='.25')
box_y = .75
box_width = .1
# percentiles
pl, q1, q2, q3, ph = np.percentile(z, [10, 25, 50, 75, 90])
# IQR box
ax_hist.add_patch(patches.Rectangle(
xy=(q1, box_y - .5*box_width),
width=(q3 - q1), height=box_width,
color=color, alpha=alpha, lw=0, zorder=1
))
# median line
ax_hist.plot(
2*[q2], [box_y - .5*box_width, box_y + .5*box_width],
lw=2*plt.rcParams['lines.linewidth'], solid_capstyle='butt',
color=color, zorder=2
)
# whisker lines
for x in [[pl, q1], [q3, ph]]:
ax_hist.plot(
x, 2*[box_y], lw=2*plt.rcParams['lines.linewidth'],
solid_capstyle='butt', color=color, alpha=alpha, zorder=1
)
guide_width = 2.5*box_width
q, p = np.sqrt(2) * special.erfinv(2*np.array([.75, .90]) - 1)
ax_hist.add_patch(patches.Rectangle(
xy=(-q, box_y - .5*guide_width),
width=2*q, height=guide_width,
color='.85', zorder=-20
))
for s in [-1, 0, 1]:
ax_hist.plot(
2*[s*p], [box_y - .5*guide_width, box_y + .5*guide_width],
color='.5', zorder=-10
)
ax_hist.set_xlim(zrange)
ax_hist.spines['left'].set_visible(False)
ax_hist.tick_params('y', left=False, labelleft=False)
ax_hist.set_xlabel('Normalized residuals')
ax_q = ax_hist.twiny()
ax_q.spines['left'].set_visible(False)
ax_q.set_xlim(ax_hist.get_xlim())
ax_q.set_xticks([-p, -q, 0, q, p])
ax_q.set_xticklabels([10, 25, 50, 75, 90])
ax_q.tick_params('x', top=False)
ax_q.set_xlabel(
'Normal quantiles',
labelpad=2*plt.rcParams['axes.labelpad']
)
set_tight(fig)
@plot
def correlation_matrices(system=default_system):
"""
Correlation (normalized covariance) matrices for model and experiment.
"""
chain = mcmc.Chain()
emu = emulators[system]
emu_slices = [
(obs, subobs, slc)
for obs, subobs_slc in emu._slices.items()
for subobs, slc in subobs_slc.items()
]
design = Design(system)
X = np.random.uniform(design.min, design.max).reshape(1, -1)
emu_cov = emu.predict(X, return_cov=True)[1].array[0]
fig, axes = plt.subplots(
ncols=3, figsize=figsize(1, .47),
gridspec_kw=dict(width_ratios=[1, 1, .02])
)
for (cov, slices, title), ax in zip([
(emu_cov, emu_slices, 'Model (emulator)'),
(chain._expt_cov[system], chain._slices[system], 'Experiment'),
], axes):
s = np.sqrt(cov.diagonal())
img = ax.imshow(
cov / np.outer(s, s), vmin=-1, vmax=1,
interpolation='nearest', cmap='RdBu'
)
ticks = []
ticklabels = []
for obs, subobs, slc in slices:
ticks.append(.5*(slc.start + slc.stop - 1))
ticklabels.append(obs_label(obs, subobs))
ax.set_xticks(ticks)
ax.set_yticks(ticks)
ax.set_xticklabels(ticklabels)
ax.set_yticklabels(ticklabels)
ax.set_title(title, y=1.05)
ax.tick_params(
bottom=False, top=False, left=False, right=False,
labelbottom=False, labeltop=True,
pad=0
)
for s in ax.spines.values():
s.set_visible(False)
axes[0].tick_params(labelleft=True)
for t in axes[1].get_yticklabels():
t.set_horizontalalignment('center')
t.set_x(-.05)
cax = axes[-1]
fig.colorbar(img, cax=cax, ticks=[-1, -.5, 0, .5, 1])
cax.set_aspect(40)
cax.yaxis.set_ticks_position('left')
cax.set_title('Corr', y=1.02, fontsize=fontsize['normal'])
set_tight(fig, rect=(0, 0, 1, .96))
@plot
def diag_pca(system=default_system):
"""
Diagnostic: histograms of principal components and scatterplots of pairs.
"""
    Y = [g.y_train_ for g in emulators[system].gps]
n = len(Y)
ymax = np.ceil(max(np.fabs(y).max() for y in Y))
lim = (-ymax, ymax)
fig, axes = plt.subplots(nrows=n, ncols=n, figsize=2*(n,))
for y, ax in zip(Y, axes.diagonal()):
ax.hist(y, bins=30)
ax.set_xlim(lim)
for ny, nx in zip(*np.tril_indices_from(axes, k=-1)):
ax = axes[ny][nx]
ax.scatter(Y[nx], Y[ny])
ax.set_xlim(lim)
ax.set_ylim(lim)
axes[nx][ny].set_axis_off()
for i in range(n):
label = 'PC {}'.format(i)
axes[-1][i].set_xlabel(label)
axes[i][0].set_ylabel(label)
def _diag_emu(system=default_system, pcs=None, params=None, label_all=True):
"""
Diagnostic: plots of each principal component vs each input parameter,
overlaid by emulator predictions at several points in design space.
"""
gps = emulators[system].gps
pcs = (
range(len(gps)) if pcs is None else
[p if p >= 0 else (len(gps) + p) for p in pcs]
)
nrows = len(pcs)
design = Design(system)
if params is None:
params = design.keys
ncols = len(params)
fig, axes = plt.subplots(
nrows=nrows, ncols=ncols,
figsize=figsize((.5 if label_all else .375)*ncols, .62*nrows/ncols),
sharex=(False if label_all else 'col'),
sharey=(False if label_all else 'row')
)
ymax = np.ceil(2*max(np.fabs(gps[pc].y_train_).max() for pc in pcs))/2
ylim = (-ymax, ymax)
tmax = int(ymax)
yticksmajor = [-tmax, 0, tmax]
yticksminor = list(range(-tmax + 1, 0)) + list(range(1, tmax))
for pc, row in zip(pcs, axes):
gp = gps[pc]
y = gp.y_train_
for param, ax in zip(params, row):
i = design.keys.index(param)
x = gp.X_train_[:, i]
ax.plot(
x, y, 'o',
markersize=.4*plt.rcParams['lines.markersize'],
color='.7',
zorder=-30
)
xlim = design.range[i]
x = np.linspace(xlim[0], xlim[1], 100)
X = np.empty((x.size, gp.X_train_.shape[1]))
for r, c in [(.2, 'purple'), (.5, 'blue'), (.8, 'green')]:
X[:] = r*design.min + (1 - r)*design.max
X[:, i] = x
mean, std = gp.predict(X, return_std=True)
color = colors[c]
ax.plot(
x, mean,
linewidth=.8*plt.rcParams['lines.linewidth'],
color=color,
zorder=-10
)
ax.fill_between(
x, mean - std, mean + std,
lw=0, color=color, alpha=.3, zorder=-20
)
if param == 'parton_number':
xlim = (0, 10)
ax.set_xlim(xlim)
ax.set_ylim(ylim)
auto_ticks(ax, 'x', nbins=3, minor=2)
ax.set_yticks(yticksmajor)
ax.set_yticks(yticksminor, minor=True)
if label_all or ax.is_last_row():
ax.set_xlabel(design.labels[i])
if label_all or ax.is_first_col():
ax.set_ylabel('PC {}'.format(pc + 1))
ax.set_ylim(-4, 4)
set_tight(fig, w_pad=.5, h_pad=.25)
@plot
def diag_emu_all():
_diag_emu()
@plot
def diag_emu_partial():
_diag_emu(
pcs=[0, 2, -1],
params=['trento_p', 'tau_fs', 'etas_min'],
label_all=False
)
#@plot
def grid_error():
"""
    Scatter plot of observables calculated on a grid with grid scale = 0.2
    against the same observables calculated on a grid with grid scale = 0.1.
"""
obs_list = [
('dNch_deta', r'$dN_\mathrm{ch}/d\eta$'),
('mean_pT', r'mean $p_T$ [GeV]'),
('v2', r'$|Q_2|/M$'),
('v3', r'$|Q_3|/M$'),
]
fig, axes = plt.subplots(ncols=4, nrows=1, figsize=figsize(1, aspect=.3))
fine_design_points, coarse_design_points = (
[Path(
'/var/phy/project/nukeserv/jsm55',
'hic-events/qm18-grid-scale',
'grid-scale-{}'.format(gs),
'events/{}.dat'.format(p)
) for p in Design(default_system).points if int(p) != 228]
for gs in (.1, .2)
)
fine_events, coarse_events = [
[ev for p, ev in model.ModelData(default_system, *design_points).design_events]
for design_points in (fine_design_points, coarse_design_points)
]
def obs(event, name):
flow = event['flow']['cms']
TINY = 1e-12
return dict(
dNch_deta=event['dNch_deta'],
mean_pT=event['mean_pT']['pT'],
v2=np.absolute(flow['Qn'][1])/(flow['N'] + TINY),
v3=np.absolute(flow['Qn'][2])/(flow['N'] + TINY),
)[name]
for ax, (name, label) in zip(axes.flat, obs_list):
x = [obs(ev, name) for ev in itertools.chain(*fine_events)]
y = [obs(ev, name) for ev in itertools.chain(*coarse_events)]
ax.scatter(x, y, s=15, edgecolors='white', linewidths=.2)
xy_max = np.nanmax(
|
np.append(x, y)
|
numpy.append
|
import math
import itertools
import numpy as np
import pytest
import arim
import arim.geometry as g
DATASET_1 = dict(
# set1:
points1=g.Points.from_xyz(
        np.array([0, 1, 1], dtype=float),
        np.array([0, 0, 0], dtype=float),
        np.array([1, 0, 2], dtype=float),
"Points1",
),
# set 2:
points2=g.Points.from_xyz(
        np.array([0, 1, 2], dtype=float),
        np.array([0, -1, -2], dtype=float),
        np.array([0, 0, 1], dtype=float),
"Points2",
),
)
def test_are_points_aligned():
n = 10
z = np.arange(n, dtype=np.float64)
theta = np.deg2rad(30)
def make_points():
p = g.Points.from_xyz(
z * np.cos(theta), z * np.sin(theta), np.zeros((n,), dtype=np.float64)
)
return p
points = make_points()
are_aligned = g.are_points_aligned(points)
assert are_aligned
points = make_points()
points.x[0] = 666
are_aligned = g.are_points_aligned(points)
assert not are_aligned
points = make_points()
points.y[-1] -= 0.01
are_aligned = g.are_points_aligned(points)
assert not are_aligned
points = make_points()
are_aligned = g.are_points_aligned(g.Points(points[0:1]))
assert are_aligned # one point is always aligned
points = make_points()
are_aligned = g.are_points_aligned(g.Points(points[0:2]))
assert are_aligned # two points are always aligned
def test_rotations():
"""
Test rotation_matrix_x, rotation_matrix_y and rotation_matrix_z
"""
theta = np.deg2rad(30)
identity = np.identity(3)
# Check that rotations in one side and on the other side give the identity
(rot_x, rot_y, rot_z) = (
g.rotation_matrix_x,
g.rotation_matrix_y,
g.rotation_matrix_z,
)
for rot in (rot_x, rot_y, rot_z):
assert np.allclose(identity, rot(theta) @ rot(-theta))
assert np.allclose(identity, rot(-theta) @ rot(theta))
# Check the rotations of 90° are correct
v = np.array((1, 2, 3), dtype=float)
v_x = np.array((1, -3, 2), dtype=float)
v_y = np.array((3, 2, -1), dtype=float)
v_z = np.array((-2, 1, 3), dtype=float)
phi = np.pi / 2
assert np.allclose(v_x, rot_x(phi) @ v)
assert np.allclose(v_y, rot_y(phi) @ v)
assert np.allclose(v_z, rot_z(phi) @ v)
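def test_rotation_composition():
    # Hedged extra check (not in the original suite): rotations about a single
    # axis compose additively, so Rz(a) @ Rz(b) == Rz(a + b).
    a, b = 0.2, 0.3
    assert np.allclose(
        g.rotation_matrix_z(a) @ g.rotation_matrix_z(b),
        g.rotation_matrix_z(a + b),
    )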
def test_norm2():
assert np.isclose(g.norm2(0.0, 0.0, 2.0), 2.0)
assert np.isclose(g.norm2(np.cos(0.3), np.sin(0.3), 0.0), 1.0)
x = np.array([np.cos(0.3), 1.0])
y = np.array([np.sin(0.3), 0.0])
z = np.array([0.0, 2.0])
assert np.allclose(g.norm2(x, y, z), [1.0, np.sqrt(5.0)])
# using out:
    out = np.array(0.0, dtype=float)
out1 = g.norm2(0.0, 0.0, 2.0, out=out)
assert out is out1
assert np.isclose(out, 2.0)
def test_is_orthonormal():
assert g.is_orthonormal(np.identity(3))
assert g.is_orthonormal_direct(np.identity(3))
assert not (g.is_orthonormal(2.0 * np.identity(3)))
assert not (g.is_orthonormal_direct(2.0 * np.identity(3)))
a = np.array([[0.0, 0.0, 1.0], [1.0, 0.0, 0.0], [0.0, 1.0, 0.0]])
assert g.is_orthonormal(a)
assert g.is_orthonormal_direct(a)
a = np.array([[0.0, 0.0, 1.0], [1.0, 0.0, 0.0], [0.0, -1.0, 0.0]])
assert g.is_orthonormal(a)
assert not (g.is_orthonormal_direct(a))
@pytest.mark.parametrize("shape", [(), (5,), (5, 4), (5, 6, 7)])
def test_norm2_2d(shape):
x = np.random.uniform(size=shape)
y = np.random.uniform(size=shape)
z = np.zeros(shape)
# without out
out_2d = g.norm2_2d(x, y)
out_3d = g.norm2(x, y, z)
assert np.allclose(out_2d, out_3d, rtol=0.0)
    # using out:
buf_2d = np.zeros(shape)
buf_3d = np.zeros(shape)
out_2d = g.norm2_2d(x, y, out=buf_2d)
out_3d = g.norm2(x, y, z, out=buf_3d)
assert buf_2d is out_2d
assert buf_3d is out_3d
assert np.allclose(out_2d, out_3d, rtol=0.0)
_ISOMETRY_2D_DATA = [
(
np.array([0.0, 0.0]),
np.array([0.0, 1.0]),
np.array([2.0, 0.0]),
np.array([2.0, 1.0]),
),
(
np.array([66.0, 0.0]),
np.array([66.0, 1.0]),
np.array([77.0, 0.0]),
np.array([77 + np.cos(30), np.sin(30)]),
),
(
np.array([66.0, 0.0]),
np.array([66.0, 1.0]),
np.array([77.0, 0.0]),
np.array([77 + np.cos(-30), np.sin(-30)]),
),
]
@pytest.mark.parametrize("points", _ISOMETRY_2D_DATA)
def test_direct_isometry_2d(points):
A, B, Ap, Bp = points
M, P = g.direct_isometry_2d(A, B, Ap, Bp)
assert np.allclose(M @ A + P, Ap)
assert np.allclose(M @ B + P, Bp)
# Check barycentres
k1 = 0.3
k2 = 0.7
bary = k1 * A + k2 * B
assert np.allclose(M @ bary + P, k1 * Ap + k2 * Bp)
# Do we preserve the orientation?
rot90 = np.array([[0.0, -1.0], [1.0, 0.0]])
C = rot90 @ (B - A) + A
Cp2 = rot90 @ (Bp - Ap) + Ap
assert np.allclose(M @ C + P, Cp2)
_ISOMETRY_3D_DATA = [
(
np.asarray((10.0, 0.0, 0.0)),
np.asarray((1.0, 0.0, 0)),
np.asarray((0.0, 1.0, 0.0)),
np.asarray((0.0, 0.0, 66.0)),
np.asarray((np.cos(0.3), np.sin(0.3), 0)),
np.asarray((-np.sin(0.3), np.cos(0.3), 0)),
),
(
np.asarray((10.0, 0.0, 0.0)),
np.asarray((1.0, 0.0, 0)),
np.asarray((0.0, 0.0, 1.0)),
np.asarray((0.0, 0.0, 66.0)),
np.asarray((np.cos(0.3), np.sin(0.3), 0)),
np.asarray((-np.sin(0.3), np.cos(0.3), 0)),
),
(
np.asarray((10.0, 11.0, 12.0)),
np.asarray((0.0, 1.0, 0)),
np.asarray((0.0, 0.0, 1.0)),
np.asarray((22.0, 21.0, 20.0)),
np.asarray((np.cos(0.3), np.sin(0.3), 0)),
np.asarray((-np.sin(0.3), np.cos(0.3), 0)),
),
]
@pytest.mark.parametrize("points", _ISOMETRY_3D_DATA)
def test_direct_isometry_3d(points):
A, i_hat, j_hat, B, u_hat, v_hat = points
k_hat = np.cross(i_hat, j_hat)
w_hat = np.cross(u_hat, v_hat)
M, P = g.direct_isometry_3d(A, i_hat, j_hat, B, u_hat, v_hat)
# M is orthogonal
assert np.allclose(M @ M.T, np.identity(3))
assert np.allclose(M @ i_hat, u_hat)
assert np.allclose(M @ j_hat, v_hat)
assert np.allclose(M @ np.cross(i_hat, j_hat), np.cross(M @ i_hat, M @ j_hat))
k1, k2, k3 = np.random.uniform(size=3)
Q1 = A + k1 * i_hat + k2 * j_hat + k3 * k_hat
Q2 = B + k1 * u_hat + k2 * v_hat + k3 * w_hat
assert np.allclose(M @ Q1 + P, Q2)
def test_grid():
xmin = -10e-3
xmax = 10e-3
dx = 1e-3
ymin = 3e-3
ymax = 3e-3
dy = 1e-3
zmin = -10e-3
zmax = 0
dz = 1e-3
grid = g.Grid(xmin, xmax, ymin, ymax, zmin, zmax, (dx, dy, dz))
assert grid.shape == (grid.numx, grid.numy, grid.numz)
assert len(grid.xvect) == 21
assert grid.xmin == xmin
assert grid.xmax == xmax
assert np.isclose(grid.dx, dx)
assert grid.x.shape == (grid.numx, grid.numy, grid.numz)
assert len(grid.yvect) == 1
assert grid.ymin == ymin
assert grid.ymax == ymax
assert grid.dy is None
assert grid.y.shape == (grid.numx, grid.numy, grid.numz)
assert len(grid.zvect) == 11
assert grid.zmin == zmin
assert grid.zmax == zmax
assert np.isclose(grid.dz, dz)
assert grid.z.shape == (grid.numx, grid.numy, grid.numz)
assert grid.numpoints == 21 * 1 * 11
points = grid.to_1d_points()
assert isinstance(points, g.Points)
assert len(points) == grid.numpoints
grid_p = grid.to_oriented_points()
assert grid_p.points.shape == (grid.numpoints,)
assert grid_p.orientations.shape == (grid.numpoints, 3)
match = grid.points_in_rectbox(xmax=-9.5e-3)
assert match.sum() == 1 * grid.numy * grid.numz
@pytest.mark.parametrize("pixel_size", [0.5, 0.6, 0.7, 0.8, 0.9, 1.0])
def test_grid_centred(pixel_size):
centre_x = 15.0
centre_y = 0.0
centre_z = 10.0
size_x = 5.0
size_y = 0.0
size_z = 10.0
grid = g.Grid.grid_centred_at_point(
centre_x, centre_y, centre_z, size_x, size_y, size_z, pixel_size
)
np.testing.assert_allclose(grid.xmin, 12.5)
np.testing.assert_allclose(grid.xmax, 17.5)
np.testing.assert_allclose(grid.yvect, [0.0])
np.testing.assert_allclose(grid.zmin, 5.0)
np.testing.assert_allclose(grid.zmax, 15.0)
assert grid.dy is None
assert grid.numx % 2 == 1
assert grid.numy == 1
assert grid.numz % 2 == 1
idx = grid.closest_point(centre_x, centre_y, centre_z)
np.testing.assert_allclose(
[grid.x.flat[idx], grid.y.flat[idx], grid.z.flat[idx]],
[centre_x, centre_y, centre_z],
)
@pytest.mark.parametrize("pixel_size", [0.5, 0.6, 0.7, 0.8, 0.9, 1.0])
def test_grid_clone(pixel_size):
xmin = -10e-3
xmax = 10e-3
ymin = 3e-3
ymax = 3e-3
zmin = -10e-3
zmax = 0
orig_grid = g.Grid(xmin, xmax, ymin, ymax, zmin, zmax, pixel_size)
grid = g.Grid(
orig_grid.xmin,
orig_grid.xmax,
orig_grid.ymin,
orig_grid.ymax,
orig_grid.zmin,
orig_grid.zmax,
(orig_grid.dx, orig_grid.dy, orig_grid.dz),
)
assert orig_grid.numx == grid.numx
assert orig_grid.numy == grid.numy
assert orig_grid.numz == grid.numz
# shape, name, size
points_parameters = [
((), "TestPoints", 1),
((5,), "TestPoints", 5),
((5, 6), "TestPoints", 30),
((), None, 1),
((5,), None, 5),
((5, 6), None, 30),
]
points_parameters_ids = [
"one_named",
"vect_named",
"matrix_named",
"one_unnamed",
"vect_unnamed",
"matrix_unnamed",
]
def test_aspoints():
points = g.aspoints([1.0, 2.0, 3.0])
assert points.shape == ()
assert points.x == 1.0
assert points.y == 2.0
assert points.z == 3.0
class TestPoints:
@pytest.fixture(scope="class", params=points_parameters, ids=points_parameters_ids)
def points(self, request):
"""fixture points"""
shape, name, size = request.param
points, _ = self.make_points(shape, name, size)
return points
@staticmethod
def make_points(shape, name, size):
        coords = np.arange(size * 3, dtype=np.float64).reshape((*shape, 3))
raw_coords = np.copy(coords)
points = g.Points(coords, name)
return points, raw_coords
@pytest.mark.parametrize(
"shape, name, size", points_parameters, ids=points_parameters_ids
)
def test_points_basics(self, shape, name, size):
"""
        Test basic attributes/properties of Points.
"""
points, raw_coords = self.make_points(shape, name, size)
assert points.shape == shape
assert points.ndim == len(shape)
assert points.x.shape == shape
assert points.y.shape == shape
assert points.z.shape == shape
np.testing.assert_allclose(points.coords, raw_coords)
np.testing.assert_allclose(points.x, raw_coords[..., 0])
np.testing.assert_allclose(points.y, raw_coords[..., 1])
np.testing.assert_allclose(points.z, raw_coords[..., 2])
assert points.size == size
assert points.name == name
if len(shape) == 0:
with pytest.raises(TypeError):
len(points)
else:
assert len(points) == shape[0]
for idx, p in points.enumerate():
np.testing.assert_allclose(p, (points.x[idx], points.y[idx], points.z[idx]))
# test __getitem__:
np.testing.assert_allclose(
points[idx], (points.x[idx], points.y[idx], points.z[idx])
)
# test iterator
for idx, p in zip(np.ndindex(shape), points):
np.testing.assert_allclose(p, (points.x[idx], points.y[idx], points.z[idx]))
assert len(list(iter(points))) == size
# Test hashability
d = {points: "toto"}
# Test str/rep
str(points)
repr(points)
@pytest.mark.parametrize(
"shape, name, size", points_parameters, ids=points_parameters_ids
)
def test_points_from_xyz(self, shape, name, size):
points, raw_coords = self.make_points(shape, name, size)
points2 = g.Points.from_xyz(points.x, points.y, points.z)
np.testing.assert_allclose(points2.coords, points.coords)
np.testing.assert_allclose(points2.x, points.x)
np.testing.assert_allclose(points2.y, points.y)
np.testing.assert_allclose(points2.z, points.z)
def test_spherical_coordinates(self):
"""
Cf. https://commons.wikimedia.org/wiki/File:3D_Spherical.svg on 2016-03-16
"""
# x, y, z
points = g.Points(
np.array(
[
[5.0, 0.0, 0.0],
[-5.0, 0.0, 0.0],
[0.0, 6.0, 0.0],
[0.0, -6.0, 0.0],
[0.0, 0.0, 7.0],
[0.0, 0.0, -7.0],
]
)
)
out = points.spherical_coordinates()
# r, theta, phi
expected = np.array(
[
[5.0, np.pi / 2, 0.0],
[5.0, np.pi / 2, np.pi],
[6.0, np.pi / 2, np.pi / 2],
[6.0, np.pi / 2, -np.pi / 2],
[7.0, 0.0, 0.0],
[7.0, np.pi, 0.0],
]
)
assert np.allclose(out.r, expected[:, 0])
assert np.allclose(out.theta, expected[:, 1])
assert np.allclose(out.phi, expected[:, 2])
@pytest.mark.parametrize(
"shape, name, size", points_parameters, ids=points_parameters_ids
)
def test_are_points_close(self, shape, name, size):
"""test geometry.are_points.close for different shapes"""
points, raw_coords = self.make_points(shape, name, size)
points2 = g.Points(raw_coords)
assert g.are_points_close(points, points)
assert g.are_points_close(points, points2)
try:
points2.x[0] += 666.0
except IndexError:
points2.x[()] += 666.0 # special case ndim=0
assert not (g.are_points_close(points, points2))
# use different shape
assert not (g.are_points_close(points, g.Points([1, 2, 3])))
@pytest.mark.parametrize(
"shape, shape_directions",
[[(), ()], [(5,), ()], [(5,), (5,)], [(5, 6), ()], [(5, 6), (5, 6)]],
ids=["one_one", "vect_one", "vect_vect", "mat_one", "mat_mat"],
)
def test_points_translate(self, shape, shape_directions):
"""
Test Points.translate for different shapes of Points and directions.
"""
coords = np.random.uniform(size=((*shape, 3)))
directions = np.random.uniform(size=((*shape_directions, 3)))
points = g.Points(coords.copy())
out_points = points.translate(directions.copy())
assert out_points.shape == shape, "the translated points have a different shape"
assert isinstance(out_points, g.Points)
idx_set = set()
for ((idx, in_p), out_p, idx_direction) in itertools.zip_longest(
points.enumerate(), out_points, np.ndindex(shape_directions), fillvalue=()
):
idx_set.add(idx)
assert in_p.shape == out_p.shape == directions[idx_direction].shape == (3,)
expected = in_p + directions[idx_direction]
np.testing.assert_allclose(
out_p,
expected,
err_msg="translation failed for idx={} and idx_direction={}".format(
idx, idx_direction
),
)
assert len(idx_set) == points.size, "The test does not check all points"
        # this should leave all points invariant:
out_points = points.translate(np.array((0.0, 0.0, 0.0)))
assert g.are_points_close(
points, out_points
), "all points must be invariant (no translation)"
def test_norm2(self, points):
"""test Points.norm2"""
norm = points.norm2()
assert norm.shape == points.shape
for (idx, p) in points.enumerate():
x, y, z = p
np.testing.assert_allclose(norm[idx], math.sqrt(x * x + y * y + z * z))
def test_rotate_one_rotation(self, points):
"""Test Points.rotate() with one rotation for all points.
"""
rot = g.rotation_matrix_ypr(*np.deg2rad([10, 20, 30]))
# Case 1a: centre is None
out_points = points.rotate(rot, centre=None)
assert out_points.shape == points.shape
for ((idx, p_in), p_out) in zip(points.enumerate(), out_points):
expected = rot @ p_in
np.testing.assert_allclose(
p_out, expected, err_msg="rotation failed for idx={}".format(idx)
)
# Case 1b: centre is [0., 0., 0.] (should give the same answers)
out_points_b = points.rotate(rot, centre=np.array((0.0, 0.0, 0.0)))
assert g.are_points_close(out_points, out_points_b)
# Case 2: centre is not trivial
centre =
|
np.array((66.0, 77.0, 88.0))
|
numpy.array
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import numpy as np
import copy
import utils
import measures as ms
def normal(nHyperplanes,nDimensions):
"""
Returns a set of hyperplanes with random orientations. nHyperplanes is
    the number of hyperplanes to return, and nDimensions the number of
dimensions of the space.
The hyperplanes are simply generated by setting their coordinates to
random values following a normal distribution.
"""
return np.random.normal(0,10,(nHyperplanes,nDimensions+1))
def doublePoint(nHyperplanes,nDimensions,distrib):
"""
Returns a set of hyperplanes with random orientations. nHyperplanes is
    the number of hyperplanes to return, and nDimensions the number of
    dimensions of the space.
    Here, for each hyperplane, nDimensions random points are generated
    following the distribution distrib, and the unique hyperplane passing
    through all these points is kept.
"""
hyperplanes = []
for k in range(nHyperplanes):
points = np.array([utils.f(nDimensions,distrib,dataset) for n in range(nDimensions)])
hyperplanes.append( utils.hyperplaneFromPoints(points) )
return np.array(hyperplanes)
def poolSelect(nHyperplanes, nDimensions, pCentroids, pMeasure, poolSize, distrib, m,initType='doublePoint'):
'''
Initialize hyperplanes by generating a pool of poolSize random
configurations and selecting the one with lowest measure of distortion.
'''
for k in range(poolSize):
if initType == 'normal':
hps = normal(nHyperplanes,nDimensions)
elif initType == 'doublePoint':
hps = doublePoint(nHyperplanes,nDimensions,distrib)
else:
print("ERROR! invalid initialization type")
e = ms.measure(m,hps,pCentroids,pMeasure,distrib)
if k < 1:
minDistortion = e
minConfig = hps
else:
if minDistortion >= e:
minDistortion = e
minConfig = hps
return minConfig
def genetic(nHyperplanes, nDimensions, pCentroids, pMeasureInit, distrib, m,
nConfigs, pGenetic, crossover, mutation, order='dissimilarity', selection='rank',
initType='doublePoint', mutationPercentage=50):
'''
Generates a partially optimized configuration of hyperplanes, with the
    goal of having an approximately equal repartition of the input
    distribution over the regions.
Here one hyperplane = one gene. At every iteration, one half of the old
configs is kept and used to generate the next generation by crossover.
-nConfigs is the number of configurations to generate and cross
-pGenetic is the number of iterations
-order is the type of ordering used to order hyperplanes before crossover
-crossover is the number of crossing points in the crossover operations
-mutation is the mutation method and intensity
    -selection is the selection method used to choose the configs that are
    reproduced
'''
print('start initialisation (genetic)')
configs = []
measures = []
geneticMeasureEvolution = []
# Step 1: generate random configurations
for k in range(nConfigs):
if initType == 'normal':
config = normal(nHyperplanes,nDimensions)
elif initType == 'doublePoint':
config = doublePoint(nHyperplanes,nDimensions,distrib)
else:
print("ERROR! invalid initialization type")
configs.append(config)
print('finished generating random configurations')
for k in range(pGenetic):
pMeasure = (k+1)*pMeasureInit
print('genetic: iteration '+str(k+1)+' of '+str(pGenetic))
measures = [ms.measure(m, config, pCentroids, pMeasure, distrib) for config in configs]
geneticMeasureEvolution.append( np.min(measures) )
# Step 2: selecting configs to reproduce
configs, measures = select(selection, configs, measures, percentageToKeep=80)
# Step 3: crossing configurations
newConfigs = cross(nDimensions, distrib, crossover, copy.deepcopy(configs), order, outputSize=nConfigs)
configs = np.concatenate((configs,newConfigs),axis=0)
# Step 4: mutation
if mutationPercentage == 100:
configs = mutateAll(mutation,configs)
else:
measures = [ms.measure(m, config, pCentroids, pMeasure, distrib) for config in configs]
configs = mutate(mutation, configs, measures, mutationPercentage)
# Step 5: return the best config
measures = [ms.measure(m, config, pCentroids, pMeasure, distrib) for config in configs]
print('final: ',np.min(measures))
print('end initialisation')
return configs[ np.argmin(measures) ], geneticMeasureEvolution
#print(genetic(3, 2, 1000, 10000, 'gaussian', 'mse',10, 5, 1, 1)) #test
## Genetic algorithm subfunctions
def select(selection, configs, measures, percentageToKeep=50):
'''
Returns the selected configurations that are kept for the next
generation.
    percentageToKeep is the percentage of the total number of configurations
    that will be kept.
'''
n = int(len(configs)*percentageToKeep/100)
if selection == 'rank':
# sort by distortion measure and keep the lowest
configs = [x for _,x in sorted(zip(measures,configs))]
measures = sorted(measures)
return configs[:n+1], measures[:n+1]
elif selection == 'random':
return configs[:n+1], measures[:n+1]
else:
        raise ValueError('unknown selection method: ' + str(selection))
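# Worked example for rank selection (illustrative numbers only): with 4 configs
# whose measures are [3.0, 1.0, 4.0, 2.0] and percentageToKeep=50, n = 2, the
# configs are reordered by increasing distortion and the [:n+1] slice keeps the
# three lowest-distortion configurations (measures 1.0, 2.0 and 3.0).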
def cross(nDimensions, distrib, crossover, configs, order, outputSize='default'):
'''
Crosses the configs 'configs', repeating the operation 'outputSize' times,
with 'crossover' crossing points each time.
Hyperplanes can be ordered before the crossing.
'''
newGen = [] # next generation
if outputSize == 'default':
outputSize = len(configs)
if order == 'distanceToDistribCenter':
distribCenter = utils.distribCenter(nDimensions, distrib)
for k in range(len(configs)):
config = configs[k]
ranks = [ utils.distancePoint2Hp(distribCenter,hp) for hp in config ]
#order hyperplanes according to ranks
ordConfig = [hp for _,hp in sorted(zip(ranks,config))]
configs[k] = ordConfig
for k in range(outputSize):
# select 2 configs to cross
i,j = np.random.randint(len(configs)),np.random.randint(len(configs))
if order == 'distanceToDistribCenter' or order == 'noOrder':
            crosspoints = np.random.randint(len(configs[0]), size=crossover)  # choose crossing points
newConfig = []
useI = True # whether to include i or j genes
for l in range(len(configs[0])):
if useI:
newConfig.append(configs[i][l])
else:
newConfig.append(configs[j][l])
if l in crosspoints:
useI = not useI
elif order == 'dissimilarity':
dissimilarities, hpPairs = [], [] # list to store dissimilarity values and associated hyperplane pairs
for k in range(1,len(configs[i])):
for l in range(k):
dissimilarities.append(utils.dissimilarityHps(configs[j][l], configs[i][k], distrib))
hpPairs.append([k,l])
hpPairs = [hpPair for _,hpPair in sorted(zip(dissimilarities,hpPairs))]
newConfig = configs[i]
for pair in hpPairs[:int(len(hpPairs)/2)]: #swap the most similar half of hyperplane pairs
newConfig[pair[0]] = configs[j][pair[1]]
elif order == 'completeRandom':
newConfig = []
for l in range(len(configs[0])):
nextHp = configs[i][l] if np.random.uniform() > 0.5 else configs[j][l]
newConfig.append(nextHp)
newGen.append(newConfig)
return newGen
def mutateAll(mutation, configs):
'''Applies a random mutation to all configs'''
configs *= np.random.normal(1.,0.2,
|
np.shape(configs)
|
numpy.shape
|
"""
Code verification using the benchmark of a rod under uniformly distributed load.
"""
import numpy as np
from fenics import Mesh
from dynamic import initialise_results, run_dynamic
from axes_world import one_by_two, fontsize
# =============================================================================
# Verification
u0 =
|
np.array([1., 0., 0.])
|
numpy.array
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# miscellaneous
from os import path
import sys
# numpy and scikit-learn
import numpy as np
from sklearn.metrics import roc_auc_score
# pytorch
import torch
import torch.nn as nn
from torch.nn.parameter import Parameter
# In the synthetic experiment we generate the output vectors z of the embedding
# layer directly; therefore we create a custom TBSM rather than instantiating
# an existing general model.
# Synthetic experiment code
# It generates time-series data in D dimensions
# with the property that the binary label has some dependency
# on the coupling between time-series components in pairs of dimensions.
def synthetic_experiment():
N, Nt, D, T = 50000, 5000, 5, 10
auc_results = np.empty((0, 5), np.float32)
def generate_data(N, high):
        H = np.random.uniform(low=-1.0, high=high, size=N * D * T).reshape(N, T, D)
        w = np.random.uniform(low=-1.0, high=high, size=N * D).reshape(N, 1, D)
return H, w
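    # H holds N random time series of length T in D dimensions; w holds one
    # random D-dimensional projection vector per sample (shape (N, 1, D)).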
for K in range(0, 31, 10):
print("num q terms: ", K)
# ----- train set ------
H, w = generate_data(N, 1.0)
wt =
|
np.transpose(w, (0, 2, 1))
|
numpy.transpose
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Jun 5 16:41:20 2020
@author: petrapoklukar
"""
import numpy as np
import h5py
import os
import gin.tf
@gin.configurable("split_train_and_validation_per_model",
blacklist=["dataset_name", "model_name"])
def create_split_train_and_validation_per_model(dataset_name,
model_name,
random_seed=gin.REQUIRED,
unit_labels=False):
""" Randomly splits the model split into smaller datasets of different
sizes.
Args:
filename: name of the file to split further
"""
if model_name:
model_name = '_{0}_{1}'.format(model_name, str(random_seed))
random_state = np.random.RandomState(random_seed)
SHAPES3D_PATH = os.path.join(
os.environ.get("DISENTANGLEMENT_LIB_DATA", "."), "3dshapes",
dataset_name + ".h5")
dataset_split = h5py.File(SHAPES3D_PATH, 'r')
print(dataset_split.keys())
images_split = dataset_split['images'][()]
labels_split = dataset_split['labels'][()]
indices_split = dataset_split['indices'][()]
dataset_size = len(images_split)
ims = np.array(images_split)
labs = np.array(labels_split)
inds = np.array(indices_split)
if unit_labels:
labels_min = np.array([0., 0., 0., 0.75, 0., -30.])
labels_max = np.array([0.9, 0.9, 0.9, 1.25, 3., 30.])
labels_split = (labels_split - labels_min)/(labels_max - labels_min)
assert(np.min(labels_split) == 0 and np.max(labels_split) == 1)
all_local_indices = random_state.choice(dataset_size, dataset_size, replace=False)
random_state.shuffle(all_local_indices)
splitratio = int(dataset_size * 0.85)
train_local_indices = all_local_indices[:splitratio]
test_local_indices = all_local_indices[splitratio:]
print('Writing files')
for indices, split in list(zip([train_local_indices, test_local_indices],
['_train', '_valid'])):
SPLIT_SHAPES3D_PATH = os.path.join(
os.environ.get("DISENTANGLEMENT_LIB_DATA", "."), "3dshapes",
dataset_name + model_name + split + ".h5")
assert(ims[indices].shape[0] == indices.shape[0])
assert(labs[indices].shape[0] == indices.shape[0])
assert(inds[indices].shape[0] == indices.shape[0])
hf = h5py.File(SPLIT_SHAPES3D_PATH, 'w')
hf.create_dataset('images', data=ims[indices])
hf.create_dataset('labels', data=labs[indices])
hf.create_dataset('indices', data=inds[indices])
hf.close()
dataset_split.close()
@gin.configurable("split_train_and_validation",
blacklist=["dataset_name", "model_name"])
def create_split_train_and_validation(dataset_name,
model_name,
random_seed=gin.REQUIRED,
unit_labels=False):
""" Randomly splits the dataset split into train and validation
splits.
  Args:
    dataset_name: name of the .h5 dataset split to divide further
    model_name: unused (deleted inside the function)
    random_seed: seed controlling the random train/validation split
    unit_labels: if True, rescale the labels to the [0, 1] interval
"""
del model_name
random_state = np.random.RandomState(random_seed)
SHAPES3D_PATH = os.path.join(
os.environ.get("DISENTANGLEMENT_LIB_DATA", "."), "3dshapes",
dataset_name + ".h5")
dataset_split = h5py.File(SHAPES3D_PATH, 'r')
print(dataset_split.keys())
images_split = dataset_split['images'][()]
labels_split = dataset_split['labels'][()]
indices_split = dataset_split['indices'][()]
dataset_size = len(images_split)
ims = np.array(images_split)
labs =
|
np.array(labels_split)
|
numpy.array
|
import numpy as np
from numpy import concatenate as cat
from scipy.linalg import toeplitz
from scipy.sparse import csr_matrix
from matlab_functions import row_ismember, interp1
from SurfStatEdg import py_SurfStatEdg
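# "Protected" arccos: |x| is clamped to 1 so that small floating-point
# overshoots in the computations below do not produce NaNs.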
def pacos(x):
return np.arccos( np.minimum(np.abs(x),1) * np.sign(x) )
def py_SurfStatResels(slm, mask=None):
""" Resels of surface or volume data inside a mask.
Parameters
----------
slm : a dictionary with keys 'lat' or 'tri' and, optionally, 'resl'.
slm['lat'] : a 3D numpy array of 1's and 0's.
slm['tri'] : a 2D numpy array of shape (t, 3).
Contains triangles of a surface. slm['tri'].max() is equal to the
number of vertices.
slm['resl'] : a 2D numpy array of shape (e, k).
Sum over observations of squares of differences of normalized
residuals along each edge.
mask : a 1D numpy array of shape (v), dtype 'bool'.
v must be equal to int(slm['tri'].max()).
Contains 1's and 0's (1's are included and 0's are excluded).
Returns
-------
resels : a 2D numpy array of shape (1, (D+1)).
Array of 0,...,D dimensional resels of the mask, EC of the mask
if slm['resl'] is not given.
reselspvert : a 1D numpy array of shape (v).
Array of D-dimensional resels per mask vertex.
edg : a 2D numpy array of shape (e, 2).
Array of edge indices.
"""
if 'tri' in slm:
# Get unique edges. Subtract 1 from edges to conform to Python's
# counting from 0 - RV
tri = np.sort(slm['tri'])-1
edg = np.unique(np.vstack((tri[:,(0,1)], tri[:,(0,2)],
tri[:,(1,2)])),axis=0)
# If no mask is provided, create one with all included vertices set to
# 1. If mask is provided, simply grab the number of vertices from mask.
if mask is None:
v = np.amax(edg)+1
mask = np.full(v,False)
mask[edg-1] = True
else:
#if np.ndim(mask) > 1:
#mask = np.squeeze(mask)
#if mask.shape[0] > 1:
# mask = mask.T
v = mask.size
## Compute the Lipschitz–Killing curvatures (LKC)
m = np.sum(mask)
if 'resl' in slm:
lkc = np.zeros((3,3))
else:
lkc = np.zeros((1,3))
lkc[0,0] = m
# LKC of edges
maskedg = np.all(mask[edg],axis=1)
lkc[0,1] = np.sum(maskedg)
if 'resl' in slm:
r1 = np.mean(np.sqrt(slm['resl'][maskedg,:]),axis=1)
lkc[1,1] = np.sum(r1)
# LKC of triangles
# Made an adjustment from the MATLAB implementation:
# The reselspvert computation is included in the if-statement.
# MATLAB errors when the if statement is false as variable r2 is not
# defined during the computation of reselspvert. - RV
masktri = np.all(mask[tri],1)
lkc[0,2] = np.sum(masktri)
if 'resl' in slm:
loc = row_ismember(tri[masktri,:][:,[0,1]], edg)
l12 = slm['resl'][loc,:]
loc = row_ismember(tri[masktri,:][:,[0,2]], edg)
l13 = slm['resl'][loc,:]
loc = row_ismember(tri[masktri,:][:,[1,2]], edg)
l23 = slm['resl'][loc,:]
a = np.fmax(4*l12*l13-(l12+l13-l23)**2,0)
r2 = np.mean(np.sqrt(a),axis=1)/4
lkc[1,2] = np.sum(np.mean(np.sqrt(l12) +
np.sqrt(l13)+np.sqrt(l23),axis=1))/2
lkc[2,2] = np.nansum(r2,axis=0)
# Compute resels per mask vertex
reselspvert = np.zeros(v)
for j in range(0,3):
reselspvert = reselspvert + \
np.bincount(tri[masktri,j], weights=r2, minlength=v)
D = 2
reselspvert = reselspvert / (D+1) / np.sqrt(4*np.log(2)) ** D
else:
reselspvert = None
if 'lat' in slm:
edg = py_SurfStatEdg(slm)
# The lattice is filled with 5 alternating tetrahedra per cube
I, J, K = np.shape(slm['lat'])
IJ = I*J
i, j = np.meshgrid(range(1,I+1),range(1,J+1))
i = np.squeeze(np.reshape(i,(-1,1)))
j = np.squeeze(np.reshape(j,(-1,1)))
c1 = np.argwhere((((i+j)%2)==0) & (i < I) & (j < J))
c2 = np.argwhere((((i+j)%2)==0) & (i > 1) & (j < J))
c11 = np.argwhere((((i+j)%2)==0) & (i == I) & (j < J))
c21 = np.argwhere((((i+j)%2)==0) & (i == I) & (j > 1))
c12 = np.argwhere((((i+j)%2)==0) & (i < I) & (j == J))
c22 = np.argwhere((((i+j)%2)==0) & (i > 1) & (j == J))
# outcome is 1 lower than MATLAB due to 0-1 counting difference. - RV
d1 = np.argwhere((((i+j)%2)==1) & (i < I) & (j < J))+IJ
d2 = np.argwhere((((i+j)%2)==1) & (i > 1) & (j < J))+IJ
tri1 = cat((
cat((c1, c1+1, c1+1+I),axis=1),
cat((c1, c1+I, c1+1+I),axis=1),
cat((c2-1, c2, c2-1+I),axis=1),
cat((c2, c2-1+I, c2+I),axis=1)),
axis=0)
tri2= cat((
cat((c1, c1+1, c1+1+IJ),axis=1),
cat((c1, c1+IJ, c1+1+IJ),axis=1),
cat((c1, c1+I, c1+I+IJ),axis=1),
cat((c1, c1+IJ, c1+I+IJ),axis=1),
cat((c1, c1+1+I, c1+1+IJ),axis=1),
cat((c1, c1+1+I, c1+I+IJ),axis=1),
cat((c1, c1+1+IJ, c1+I+IJ),axis=1),
cat((c1+1+I, c1+1+IJ, c1+I+IJ),axis=1),
cat((c2-1, c2, c2-1+IJ),axis=1),
cat((c2, c2-1+IJ, c2+IJ),axis=1),
cat((c2-1, c2-1+I, c2-1+IJ),axis=1),
cat((c2-1+I, c2-1+IJ, c2-1+I+IJ),axis=1),
cat((c2, c2-1+I, c2+I+IJ),axis=1),
cat((c2, c2-1+IJ, c2+I+IJ),axis=1),
cat((c2, c2-1+I, c2-1+IJ),axis=1),
cat((c2-1+I, c2-1+IJ, c2+I+IJ),axis=1),
cat((c11, c11+I, c11+I+IJ),axis=1),
cat((c11, c11+IJ, c11+I+IJ),axis=1),
cat((c21-I, c21, c21-I+IJ),axis=1),
cat((c21, c21-I+IJ, c21+IJ),axis=1),
cat((c12, c12+1, c12+1+IJ),axis=1),
cat((c12, c12+IJ, c12+1+IJ),axis=1),
cat((c22-1, c22, c22-1+IJ),axis=1),
cat((c22, c22-1+IJ, c22+IJ),axis=1)),
axis=0)
tri3 = cat((
cat((d1, d1+1, d1+1+I),axis=1),
cat((d1, d1+I, d1+1+I),axis=1),
cat((d2-1, d2, d2-1+I),axis=1),
cat((d2, d2-1+I, d2+I),axis=1)),
axis=0)
tet1 = cat((
cat((c1, c1+1, c1+1+I, c1+1+IJ),axis=1),
cat((c1, c1+I, c1+1+I, c1+I+IJ),axis=1),
cat((c1, c1+1+I, c1+1+IJ, c1+I+IJ),axis=1),
cat((c1, c1+IJ, c1+1+IJ, c1+I+IJ),axis=1),
cat((c1+1+I, c1+1+IJ, c1+I+IJ, c1+1+I+IJ),axis=1),
cat((c2-1, c2, c2-1+I, c2-1+IJ),axis=1),
cat((c2, c2-1+I, c2+I, c2+I+IJ),axis=1),
cat((c2, c2-1+I, c2-1+IJ, c2+I+IJ),axis=1),
cat((c2, c2-1+IJ, c2+IJ, c2+I+IJ),axis=1),
cat((c2-1+I, c2-1+IJ, c2-1+I+IJ, c2+I+IJ),axis=1)),
axis=0)
        v = int(np.round(np.sum(slm['lat'])))
if mask is None:
mask = np.ones(v,dtype=bool)
reselspvert = np.zeros(v)
vs = np.cumsum(np.squeeze(np.sum(np.sum(slm['lat'],axis=0),axis=0)))
vs = cat((np.zeros(1),vs,np.expand_dims(vs[K-1],axis=0)),axis=0)
vs = vs.astype(int)
es = 0
lat = np.zeros((I,J,2))
lat[:,:,0] = slm['lat'][:,:,0]
lkc = np.zeros((4,4))
for k in range(0,K):
f = (k+1) % 2
if k < (K-1):
lat[:,:,f] = slm['lat'][:,:,k+1]
else:
lat[:,:,f] = np.zeros((I,J))
vid = (np.cumsum(lat.flatten('F')) * np.reshape(lat.T,-1)).astype(int)
if f:
edg1 = edg[np.logical_and(edg[:,0]>(vs[k]-1), \
edg[:,0] <= (vs[k+1]-1)),:]-vs[k]
edg2 = edg[np.logical_and(edg[:,0] > (vs[k]-1), \
edg[:,1] <= (vs[k+2]-1)),:]-vs[k]
# Added a -1 - RV
tri = cat((vid[tri1[np.all(np.reshape(lat.flatten('F')[tri1], \
tri1.shape),1),:]],
vid[tri2[np.all(np.reshape(lat.flatten('F')[tri2], \
tri2.shape),1),:]]),
axis=0)-1
mask1 = mask[np.arange(vs[k],vs[k+2])]
else:
edg1 = cat((
edg[np.logical_and(edg[:,0] > (vs[k]-1), edg[:,1] <= \
(vs[k+1]-1)), :] - vs[k] + vs[k+2] - vs[k+1],
cat((
np.expand_dims(edg[np.logical_and(edg[:,0] <= \
(vs[k+1]-1), \
edg[:,1] > \
(vs[k+1]-1)), 1] \
- vs[k+1],axis=1),
np.expand_dims(edg[np.logical_and(edg[:,0] <= \
(vs[k+1]-1), \
edg[:,1] > \
(vs[k+1]-1)), 0] \
- vs[k] + vs[k+2] \
- vs[k+1],axis=1)),
axis=1)),
axis=0)
edg2 = cat((edg1, edg[np.logical_and(edg[:,0] > (vs[k+1]-1), \
edg[:,1] <= (vs[k+2]-1)),:] - vs[k+1]), axis=0)
# Added a -1 - RV
tri = cat((vid[tri3[np.all(lat.flatten('F')[tri3],axis=1),:]],
vid[tri2[np.all(lat.flatten('F')[tri2],axis=1),:]]),
axis=0)-1
mask1 = cat((
mask[np.arange(vs[k+1], vs[k+2])],
mask[np.arange(vs[k], vs[k+1])]))
# Added a -1 -RV
tet = vid[tet1[np.all(lat.flatten('F')[tet1],axis=1),:]]-1
m1 = np.max(edg2[:,0])
ue = edg2[:,0] + m1 * (edg2[:,1]-1)
e = edg2.shape[0]
ae = np.arange(0,e)
if e < 2 ** 31:
sparsedg = csr_matrix((ae,(ue,np.zeros(ue.shape,dtype=int))),
                    dtype=int)
sparsedg.eliminate_zeros()
##
lkc1 = np.zeros((4,4))
lkc1[0,0] = np.sum(mask[np.arange(vs[k],vs[k+1])])
## LKC of edges
maskedg = np.all(mask1[edg1],axis=1)
lkc1[0,1] = np.sum(maskedg)
if 'resl' in slm:
r1 = np.mean(np.sqrt(slm['resl'][np.argwhere(maskedg)+es,:]),
axis=1)
lkc1[1,1] = np.sum(r1)
## LKC of triangles
masktri = np.all(mask1[tri],axis=1).flatten()
lkc1[0,2] = np.sum(masktri)
if 'resl' in slm:
if all(masktri == False):
# Set these variables to empty arrays to match the MATLAB
# implementation.
lkc1[1,2] = 0
lkc1[2,2] = 0
else:
if e < 2 ** 31:
l12 = slm['resl'][sparsedg[tri[masktri,0] + m1 * \
(tri[masktri,1]-1), 0].toarray() + es, :]
l13 = slm['resl'][sparsedg[tri[masktri,0] + m1 * \
(tri[masktri,2]-1), 0].toarray() + es, :]
l23 = slm['resl'][sparsedg[tri[masktri,1] + m1 * \
(tri[masktri,2]-1), 0].toarray() + es, :]
else:
l12 = slm['resl'][interp1(ue,ae,tri[masktri,0] + m1 * \
(tri[masktri,1] - 1),kind='nearest') + es, :]
l13 = slm['resl'][interp1(ue,ae,tri[masktri,0] + m1 * \
(tri[masktri,2] - 1),kind='nearest') + es, :]
l23 = slm['resl'][interp1(ue,ae,tri[masktri,1] + m1 * \
(tri[masktri,2] - 1),kind='nearest') + es, :]
a = np.fmax(4 * l12 * l13 - (l12+l13-l23) ** 2, 0)
r2 = np.mean(np.sqrt(a),axis=1)/4
lkc1[1,2] = np.sum(np.mean(np.sqrt(l12) + np.sqrt(l13) +
np.sqrt(l23),axis=1))/2
lkc1[2,2] = np.sum(r2)
# The following if-statement has nargout >=2 in MATLAB,
# but there's no Python equivalent so ignore that. - RV
if K == 1:
for j in range(0,3):
if f:
v1 = tri[masktri,j] + vs[k]
else:
v1 = tri[masktri,j] + vs[k+1]
v1 = v1 - int(vs > vs[k+2]) * (vs[k+2]-vs[k])
reselspvert += np.bincount(v1, r2, v)
## LKC of tetrahedra
masktet = np.all(mask1[tet],axis=1).flatten()
lkc1[0,3] = np.sum(masktet)
if 'resl' in slm and k < (K-1):
if e < 2 ** 31:
l12 = slm['resl'][(sparsedg[tet[masktet,0] + m1 * \
(tet[masktet,1]-1),0].toarray() + es).tolist(), :]
l13 = slm['resl'][(sparsedg[tet[masktet,0] + m1 * \
(tet[masktet,2]-1),0].toarray() + es).tolist(), :]
l23 = slm['resl'][(sparsedg[tet[masktet,1] + m1 * \
(tet[masktet,2]-1),0].toarray() + es).tolist(), :]
l14 = slm['resl'][(sparsedg[tet[masktet,0] + m1 * \
(tet[masktet,3]-1),0].toarray() + es).tolist(), :]
l24 = slm['resl'][(sparsedg[tet[masktet,1] + m1 * \
(tet[masktet,3]-1),0].toarray() + es).tolist(), :]
l34 = slm['resl'][(sparsedg[tet[masktet,2] + m1 * \
(tet[masktet,3]-1),0].toarray() + es).tolist(), :]
else:
l12 = slm['resl'][interp1(ue,ae,tet[masktet,0] + m1 * \
(tet[masktet,1]-1),kind='nearest')+es,:]
l13 = slm['resl'][interp1(ue,ae,tet[masktet,0] + m1 * \
(tet[masktet,2]-1),kind='nearest')+es,:]
l23 = slm['resl'][interp1(ue,ae,tet[masktet,1] + m1 * \
(tet[masktet,2]-1),kind='nearest')+es,:]
l14 = slm['resl'][interp1(ue,ae,tet[masktet,0] + m1 * \
(tet[masktet,3]-1),kind='nearest')+es,:]
l24 = slm['resl'][interp1(ue,ae,tet[masktet,1] + m1 * \
(tet[masktet,3]-1),kind='nearest')+es,:]
l34 = slm['resl'][interp1(ue,ae,tet[masktet,2] + m1 * \
(tet[masktet,3]-1),kind='nearest')+es,:]
a4 = np.fmax(4 * l12 * l13 - (l12 + l13 -l23) ** 2, 0)
a3 = np.fmax(4 * l12 * l14 - (l12 + l14 -l24) ** 2, 0)
a2 = np.fmax(4 * l13 * l14 - (l13 + l14 -l34) ** 2, 0)
a1 = np.fmax(4 * l23 * l24 - (l23 + l24 -l34) ** 2, 0)
d12 = 4 * l12 * l34 - (l13 + l24 - l23 - l14) ** 2
d13 = 4 * l13 * l24 - (l12 + l34 - l23 - l14) ** 2
d14 = 4 * l14 * l23 - (l12 + l34 - l24 - l13) ** 2
h = np.logical_or(a1 <= 0, a2 <= 0)
delta12 = np.sum(np.mean(np.sqrt(l34) * pacos((d12-a1-a2) / \
np.sqrt(a1 * a2 + h) / 2 * (1-h) + h),axis=1))
h = np.logical_or(a1 <= 0, a3 <= 0)
delta13 = np.sum(np.mean(np.sqrt(l24) * pacos((d13-a1-a3) / \
np.sqrt(a1 * a3 + h) / 2 * (1-h) + h),axis=1))
h = np.logical_or(a1 <= 0, a4 <= 0)
delta14 = np.sum(np.mean(np.sqrt(l23) * pacos((d14-a1-a4) / \
np.sqrt(a1 * a4 + h) / 2 * (1-h) + h),axis=1))
h = np.logical_or(a2 <= 0, a3 <= 0)
delta23 = np.sum(np.mean(np.sqrt(l14) * pacos((d14-a2-a3) / \
np.sqrt(a2 * a3 + h) / 2 * (1-h) + h),axis=1))
h = np.logical_or(a2 <= 0, a4 <= 0)
delta24 = np.sum(np.mean(np.sqrt(l13) * pacos((d13-a2-a4) / \
np.sqrt(a2 * a4 + h) / 2 * (1-h) + h),axis=1))
h = np.logical_or(a3 <= 0, a4 <= 0)
delta34 = np.sum(np.mean(np.sqrt(l12) * pacos((d12-a3-a4) / \
|
np.sqrt(a3 * a4 + h)
|
numpy.sqrt
|
import tensorflow as tf
import numpy as np
from scipy import signal
from scipy.ndimage import gaussian_filter
from PIL import Image, ImageDraw
import random
import glob, os
import csv
from multiprocessing import Pool
import subprocess
import time
width = 512
height = 512
scale = 2
np.random.seed(os.getpid() + int(time.time()))
random.seed(os.getpid() + int(time.time()))
class BaseData:
def __init__(self):
self.load_idmap()
def load_idmap(self):
self.glyph_id = {}
self.glyphs = {}
self.glyph_type = {}
self.glyph_id[''] = 0
self.glyphs[0] = ''
with open(os.path.join('data','codepoints.csv'),'r') as f:
reader = csv.reader(f)
for row in reader:
codehex = row[1]
if len(codehex) > 7:
code = eval('"' + ''.join(['\\u' + codehex[i*4:i*4+4] for i in range(len(codehex) // 4)]) + '"')
else:
code = chr(int(codehex, 16))
i = int.from_bytes(code.encode('utf-32le'), 'little')
self.glyph_id[code] = i
self.glyphs[i] = code
with open(os.path.join('data','id_map.csv'),'r') as f:
reader = csv.reader(f)
for row in reader:
code = bytes.fromhex(row[2]).decode()
if code in self.glyph_id:
k = self.glyph_id[code]
else:
i = int.from_bytes(code.encode('utf-32le'), 'little')
self.glyph_id[code] = i
self.glyphs[i] = code
k = i
self.glyph_type[k] = int(row[3])
self.id_count = len(self.glyph_id)
def sub_load(args):
exe = os.path.join('data','load_font','load_font.exe')
if not os.path.exists(exe):
exe = os.path.join('data','load_font','load_font')
proc = subprocess.Popen([
exe,
args[0],
'128',
], stdin=subprocess.PIPE, stdout=subprocess.PIPE)
ret = {}
for c in args[1]:
if len(c) == 1:
charbuf = c.encode("utf-32-le")
proc.stdin.write(charbuf[:4])
proc.stdin.flush()
result = proc.stdout.read(32)
code = result[:4]
rows = int.from_bytes(result[4:8], 'little')
width = int.from_bytes(result[8:12], 'little')
boundingWidth = int.from_bytes(result[12:16], 'little', signed=True)
boundingHeight = int.from_bytes(result[16:20], 'little', signed=True)
horiBearingX = int.from_bytes(result[20:24], 'little', signed=True)
horiBearingY = int.from_bytes(result[24:28], 'little', signed=True)
horiAdvance = int.from_bytes(result[28:32], 'little', signed=True)
if rows * width == 0:
continue
assert(charbuf == code)
boundingWidth = boundingWidth / 64
boundingHeight = boundingHeight / 64
horiBearingX = horiBearingX / 64
horiBearingY = horiBearingY / 64
horiAdvance = horiAdvance / 64
buffer = proc.stdout.read(rows*width)
img = np.frombuffer(buffer, dtype='ubyte').reshape(rows,width)
value = {
'horizontal': {
'rows': rows,
'width': width,
'boundingWidth': boundingWidth,
'boundingHeight': boundingHeight,
'horiBearingX': horiBearingX,
'horiBearingY': horiBearingY,
'horiAdvance': horiAdvance,
'image': img,
}
}
result = proc.stdout.read(28)
rows = int.from_bytes(result[:4], 'little')
width = int.from_bytes(result[4:8], 'little')
boundingWidth = int.from_bytes(result[8:12], 'little', signed=True)
boundingHeight = int.from_bytes(result[12:16], 'little', signed=True)
vertBearingX = int.from_bytes(result[16:20], 'little', signed=True)
vertBearingY = int.from_bytes(result[20:24], 'little', signed=True)
vertAdvance = int.from_bytes(result[24:28], 'little', signed=True)
boundingWidth = boundingWidth / 64
boundingHeight = boundingHeight / 64
vertBearingX = vertBearingX / 64
vertBearingY = vertBearingY / 64
vertAdvance = vertAdvance / 64
buffer = proc.stdout.read(rows*width)
img = np.frombuffer(buffer, dtype='ubyte').reshape(rows,width)
value['vertical'] = {
'rows': rows,
'width': width,
'boundingWidth': boundingWidth,
'boundingHeight': boundingHeight,
'vertBearingX': vertBearingX,
'vertBearingY': vertBearingY,
'vertAdvance': vertAdvance,
'image': img,
}
ret[(args[0],c)] = value
else:
pass
proc.stdin.close()
return ret
def sub_load_image(path):
dirnames = glob.glob(os.path.join(path, '*'))
ret = {}
for d in dirnames:
c_code = os.path.basename(d)
char = str(bytes.fromhex(c_code), 'utf-8')
count = 0
for f in glob.glob(os.path.join(d, '*.png')):
rawim = np.asarray(Image.open(f).convert('L'))
ylines = np.any(rawim < 255, axis=1)
content = np.where(ylines)[0]
rows = content[-1] - content[0] + 1
horiBearingY = 128 - 16 - content[0]
vertBearingY = content[0] - 16
y = content[0]
xlines = np.any(rawim < 255, axis=0)
content = np.where(xlines)[0]
width = content[-1] - content[0] + 1
horiBearingX = content[0] - 16
vertBearingX = content[0] - 64
x = content[0]
if rows == 0 or width == 0:
continue
img = 255 - rawim[y:y+rows,x:x+width]
ret[('hand%06d'%count,char)] = {
'horizontal': {
'rows': rows,
'width': width,
'boundingWidth': width,
'boundingHeight': rows,
'horiBearingX': horiBearingX,
'horiBearingY': horiBearingY,
'horiAdvance': 96.0,
'image': img,
},
'vertical': {
'rows': rows,
'width': width,
'boundingWidth': width,
'boundingHeight': rows,
'vertBearingX': horiBearingX,
'vertBearingY': horiBearingY,
'vertAdvance': 96.0,
'image': img,
}
}
count += 1
vert_imgs = glob.glob(os.path.join(d, 'vert', '*.png'))
if 0 < len(vert_imgs) <= count:
for i in range(count):
f = vert_imgs[i % len(vert_imgs)]
rawim = np.asarray(Image.open(f).convert('L'))
ylines = np.any(rawim < 255, axis=1)
content = np.where(ylines)[0]
rows = content[-1] - content[0] + 1
horiBearingY = 128 - 16 - content[0]
vertBearingY = content[0] - 16
y = content[0]
xlines = np.any(rawim < 255, axis=0)
content = np.where(xlines)[0]
width = content[-1] - content[0] + 1
horiBearingX = content[0] - 16
vertBearingX = content[0] - 64
x = content[0]
if rows == 0 or width == 0:
continue
img = 255 - rawim[y:y+rows,x:x+width]
ret[('hand%06d'%i,char)]['vertical'] = {
'rows': rows,
'width': width,
'boundingWidth': width,
'boundingHeight': rows,
'vertBearingX': horiBearingX,
'vertBearingY': horiBearingY,
'vertAdvance': 96.0,
'image': img,
}
elif 0 < len(vert_imgs):
vcount = 0
for f in vert_imgs:
rawim = np.asarray(Image.open(f).convert('L'))
ylines = np.any(rawim < 255, axis=1)
content = np.where(ylines)[0]
rows = content[-1] - content[0] + 1
horiBearingY = 128 - 16 - content[0]
vertBearingY = content[0] - 16
y = content[0]
xlines = np.any(rawim < 255, axis=0)
content = np.where(xlines)[0]
width = content[-1] - content[0] + 1
horiBearingX = content[0] - 16
vertBearingX = content[0] - 64
x = content[0]
if rows == 0 or width == 0:
continue
img = 255 - rawim[y:y+rows,x:x+width]
ret[('hand%06d'%vcount,char)] = {
'horizontal': ret[('hand%06d'%(vcount % count),char)]['horizontal'],
'vertical': {
'rows': rows,
'width': width,
'boundingWidth': width,
'boundingHeight': rows,
'vertBearingX': vertBearingY,
'vertBearingY': vertBearingX,
'vertAdvance': 96.0,
'image': img,
}
}
vcount += 1
return ret
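# Anisotropic 2D Gaussian kernel: outer product of two 1D Gaussian windows of
# length kernlen, with standard deviation xstd horizontally (columns) and
# ystd vertically (rows).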
def gaussian_kernel(kernlen=7, xstd=1., ystd=1.):
    gkern1dx = signal.windows.gaussian(kernlen, std=xstd).reshape(kernlen, 1)
    gkern1dy = signal.windows.gaussian(kernlen, std=ystd).reshape(kernlen, 1)
gkern2d = np.outer(gkern1dy, gkern1dx)
return gkern2d
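# Random photometric augmentation: with probability ~0.25 apply a Gaussian blur,
# with probability ~0.25 apply an unsharp-mask style sharpening
# (images + gain * (images - blurred)), otherwise return the images unchanged.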
def apply_random_filter(images):
p = np.random.uniform(0., 1.)
if p < 0.25:
sigma = np.random.uniform(0., 1.75)
return gaussian_filter(images, sigma=sigma)
if p < 0.5:
sigma = np.random.uniform(0., 6.)
gauss = gaussian_filter(images, sigma=sigma)
gain = np.random.uniform(0., 5.)
return (1 + gain) * images - gain * gauss
return images
def is_Font_match(font, target):
if target.startswith('hand'):
return font.startswith('hand')
else:
return font == target
class FontData(BaseData):
def __init__(self):
super().__init__()
self.img_cache = {}
print('loading handwrite image')
self.img_cache.update(sub_load_image(os.path.join('data','handwritten')))
print('loading enfont')
enfont_files = sorted(glob.glob(os.path.join('data','enfont','*.ttf')) + glob.glob(os.path.join('data','enfont','*.otf')))
en_glyphs = [self.glyphs[key] for key in self.glyphs.keys() if self.glyph_type.get(key,-1) in [0,1,2,6]]
items = [(f, en_glyphs) for f in enfont_files]
total = len(enfont_files)
with Pool() as pool:
progress = tf.keras.utils.Progbar(total, unit_name='item')
dicts = pool.imap_unordered(sub_load, items)
for dictitem in dicts:
self.img_cache.update(dictitem)
progress.add(1)
print('loading jpfont')
jpfont_files = sorted(glob.glob(os.path.join('data','jpfont','*.ttf')) + glob.glob(os.path.join('data','jpfont','*.otf')))
items = [(f, list(self.glyphs.values())) for f in jpfont_files]
total = len(jpfont_files)
with Pool() as pool:
progress = tf.keras.utils.Progbar(total, unit_name='item')
dicts = pool.imap_unordered(sub_load, items)
for dictitem in dicts:
self.img_cache.update(dictitem)
progress.add(1)
type_count_max = max([self.glyph_type[k] for k in self.glyph_type]) + 1
for key in self.img_cache:
i = self.glyph_id[key[1]]
if i not in self.glyph_type:
self.glyph_type[i] = type_count_max
type_count_max = max([self.glyph_type[k] for k in self.glyph_type]) + 1
gtype_count = [0 for _ in range(type_count_max)]
type_count = [0 for _ in range(type_count_max)]
for key in self.img_cache:
t = self.glyph_type[self.glyph_id[key[1]]]
type_count[t] += 1
for k in self.glyph_type:
gtype_count[self.glyph_type[k]] += 1
self.image_keys = list(self.img_cache.keys())
self.test_keys = self.get_test_keys()
self.train_keys = self.get_train_keys()
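        # Per-glyph-type sampling weights: each entry below is divided by the
        # number of cached images of that glyph type, so that random.choices
        # draws the types in roughly the stated proportions.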
# 0 1 2 3 4 5 6 7 8 9 10 11
self.prob_map = [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.1, 1.0]
self.prob_map = [p/t for p,t in zip(self.prob_map, type_count)]
self.random_probs_train = [self.prob_map[self.glyph_type[self.glyph_id[key[1]]]] for key in self.train_keys]
self.random_probs_test = [self.prob_map[self.glyph_type[self.glyph_id[key[1]]]] for key in self.test_keys]
# 0 1 2 3 4 5 6 7 8 9 10 11
self.prob_map_kanji = [0, 0, 0, 0, 0, 1.0, 0, 0, 1.0, 1.0, 0.5, 0]
self.prob_map_kanji = [p/t for p,t in zip(self.prob_map_kanji, type_count)]
self.kanji_probs_train = [self.prob_map_kanji[self.glyph_type[self.glyph_id[key[1]]]] for key in self.train_keys]
self.kanji_probs_test = [self.prob_map_kanji[self.glyph_type[self.glyph_id[key[1]]]] for key in self.test_keys]
# 0 1 2 3 4 5 6 7 8 9 10 11
self.prob_map_num = [1.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
self.prob_map_num = [p/t for p,t in zip(self.prob_map_num, type_count)]
self.num_probs_train = [self.prob_map_num[self.glyph_type[self.glyph_id[key[1]]]] for key in self.train_keys]
self.num_probs_test = [self.prob_map_num[self.glyph_type[self.glyph_id[key[1]]]] for key in self.test_keys]
# 0 1 2 3 4 5 6 7 8 9 10 11
self.prob_map_alpha = [0, 1.0, 1.0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
self.prob_map_alpha = [p/t for p,t in zip(self.prob_map_alpha, type_count)]
self.alpha_probs_train = [self.prob_map_alpha[self.glyph_type[self.glyph_id[key[1]]]] for key in self.train_keys]
self.alpha_probs_test = [self.prob_map_alpha[self.glyph_type[self.glyph_id[key[1]]]] for key in self.test_keys]
# 0 1 2 3 4 5 6 7 8 9 10 11
self.prob_map_hira = [0, 0, 0, 1.0, 0, 0, 0, 0, 0, 0, 0, 0]
self.prob_map_hira = [p/t for p,t in zip(self.prob_map_hira, type_count)]
self.hira_probs_train = [self.prob_map_hira[self.glyph_type[self.glyph_id[key[1]]]] for key in self.train_keys]
self.hira_probs_test = [self.prob_map_hira[self.glyph_type[self.glyph_id[key[1]]]] for key in self.test_keys]
self.train_keys_num = [x for x in self.train_keys if self.glyph_type[self.glyph_id[x[1]]] == 0]
self.train_num_fonts = list(set([key[0] for key in self.train_keys_num]))
self.test_keys_num = [x for x in self.test_keys if self.glyph_type[self.glyph_id[x[1]]] == 0]
self.test_num_fonts = list(set([key[0] for key in self.test_keys_num]))
self.train_keys_capital = [x for x in self.train_keys if self.glyph_type[self.glyph_id[x[1]]] == 1]
self.train_capital_fonts = list(set([key[0] for key in self.train_keys_capital]))
self.test_keys_capital = [x for x in self.test_keys if self.glyph_type[self.glyph_id[x[1]]] == 1]
self.test_capital_fonts = list(set([key[0] for key in self.test_keys_capital]))
self.train_keys_small = [x for x in self.train_keys if self.glyph_type[self.glyph_id[x[1]]] == 2]
self.train_small_fonts = list(set([key[0] for key in self.train_keys_small]))
self.test_keys_small = [x for x in self.test_keys if self.glyph_type[self.glyph_id[x[1]]] == 2]
self.test_small_fonts = list(set([key[0] for key in self.test_keys_small]))
self.train_keys_alpha = [x for x in self.train_keys if self.glyph_type[self.glyph_id[x[1]]] in [0,1,2,6]]
self.train_alpha_fonts = list(set([key[0] for key in self.train_keys_alpha]))
self.test_keys_alpha = [x for x in self.test_keys if self.glyph_type[self.glyph_id[x[1]]] in [0,1,2,6]]
self.test_alpha_fonts = list(set([key[0] for key in self.test_keys_alpha]))
self.train_keys_jp = [x for x in self.train_keys if self.glyph_type[self.glyph_id[x[1]]] in [3,4,5,7,8,9]]
self.test_keys_jp = [x for x in self.test_keys if self.glyph_type[self.glyph_id[x[1]]] in [3,4,5,7,8,9]]
self.train_jp_fonts = list(set([key[0] for key in self.train_keys_jp]))
p_sum = sum([0 if '.' in f else 1 for f in self.train_jp_fonts])
self.train_jp_fonts_p = [1. if '.' in f else 1/p_sum for f in self.train_jp_fonts]
self.test_jp_fonts = list(set([key[0] for key in self.test_keys_jp]))
p_sum = sum([0 if '.' in f else 1 for f in self.test_jp_fonts])
self.test_jp_fonts_p = [1. if '.' in f else 1/p_sum for f in self.test_jp_fonts]
self.train_keys_hira = [x for x in self.train_keys if self.glyph_type[self.glyph_id[x[1]]] in [3,4]]
self.test_keys_hira = [x for x in self.test_keys if self.glyph_type[self.glyph_id[x[1]]] in [3,4]]
self.train_hira_fonts = list(set([key[0] for key in self.train_keys_hira]))
p_sum = sum([0 if '.' in f else 1 for f in self.train_hira_fonts])
self.train_hira_fonts_p = [1. if '.' in f else 1/p_sum for f in self.train_hira_fonts]
self.test_hira_fonts = list(set([key[0] for key in self.test_keys_hira]))
p_sum = sum([0 if '.' in f else 1 for f in self.test_hira_fonts])
self.test_hira_fonts_p = [1. if '.' in f else 1/p_sum for f in self.test_hira_fonts]
self.train_keys_jpnum = [x for x in self.train_keys if (self.glyph_type[self.glyph_id[x[1]]] in [0,3,4,5,7]) and (x[0] in self.train_jp_fonts)]
self.test_keys_jpnum = [x for x in self.test_keys if (self.glyph_type[self.glyph_id[x[1]]] in [0,3,4,5,7]) and (x[0] in self.test_jp_fonts)]
self.train_jpnum_fonts = list(set([key[0] for key in self.train_keys_jpnum]))
self.train_jpnum_fonts_p = [1. if '.' in f else 0. for f in self.train_jpnum_fonts]
self.test_jpnum_fonts = list(set([key[0] for key in self.test_keys_jpnum]))
self.test_jpnum_fonts_p = [1. if '.' in f else 0. for f in self.test_jpnum_fonts]
self.prob_map_clustering = [
gtype_count[0] / type_count[0],
gtype_count[1] / type_count[1],
gtype_count[2] / type_count[2],
gtype_count[3] / type_count[3],
gtype_count[4] / type_count[4],
gtype_count[5] / type_count[5],
gtype_count[6] / type_count[6],
0.,
0.,
0.,
0.,
0.
]
self.random_background = glob.glob(os.path.join('data','background','*'))
self.max_std = 8.0
self.min_ker = 4
def get_test_keys(self):
def fontname(fontpath):
return os.path.splitext(os.path.basename(fontpath))[0]
keys = self.image_keys
test_keys = [k for k in keys if fontname(k[0]).startswith('Noto')]
return test_keys
def get_train_keys(self):
def fontname(fontpath):
return os.path.splitext(os.path.basename(fontpath))[0]
keys = self.image_keys
train_keys = [k for k in keys if not fontname(k[0]).startswith('Noto')]
return train_keys
def load_background_images(self):
def remove_transparency(im, bg_colour=(255, 255, 255)):
# Only process if image has transparency (http://stackoverflow.com/a/1963146)
if im.mode in ('RGBA', 'LA') or (im.mode == 'P' and 'transparency' in im.info):
# Need to convert to RGBA if LA format due to a bug in PIL (http://stackoverflow.com/a/1963146)
alpha = im.convert('RGBA').getchannel('A')
# Create a new background image of our matt color.
# Must be RGBA because paste requires both images have the same format
# (http://stackoverflow.com/a/8720632 and http://stackoverflow.com/a/9459208)
bg = Image.new("RGBA", im.size, bg_colour + (255,))
bg.paste(im, mask=alpha)
return bg
else:
return im
im_file = random.choice(self.random_background)
im = Image.open(im_file)
im = remove_transparency(im).convert('RGB')
scale_min = max(width / im.width, height / im.height)
scale_max = max(scale_min + 0.5, 1.5)
s = np.random.uniform(scale_min, scale_max)
im = im.resize((int(im.width * s)+1, int(im.height * s)+1))
x1 = np.random.randint(0, im.width - width)
y1 = np.random.randint(0, im.height - height)
im_crop = im.crop((x1, y1, x1 + width, y1 + height))
img = np.asarray(im_crop).astype(np.float32)
img = img / 128. - 1.
if np.random.uniform() < 0.5:
img = img[::-1,:,:]
if np.random.uniform() < 0.5:
img = img[:,::-1,:]
brightness = np.random.uniform(-1.0, 1.0)
brightness = np.array([brightness,brightness,brightness])
img += brightness[None,None,:]
contrast = np.random.uniform(0.2, 1.8)
contrast = np.array([contrast,contrast,contrast])
img = img * contrast[None,None,:]
img = np.clip(img, -1.0, 1.0)
return img
def tateyokotext_images(self, keys, fonts, font_p):
max_count = 256
angle_max = 15.0
min_pixel = 16
max_pixel = 100
text_size = random.randint(min_pixel, max_pixel)
images = np.zeros([height, width], dtype=np.float32)
keymap = np.zeros([height // scale, width // scale], dtype=np.float32)
xsizes = np.zeros([height // scale, width // scale], dtype=np.float32)
ysizes = np.zeros([height // scale, width // scale], dtype=np.float32)
offsetx = np.zeros([height // scale, width // scale], dtype=np.float32)
offsety = np.zeros([height // scale, width // scale], dtype=np.float32)
lines = np.zeros([height // scale, width // scale], dtype=np.float32)
lines = Image.fromarray(lines)
draw = ImageDraw.Draw(lines)
seps = np.zeros([height // scale, width // scale], dtype=np.float32)
ids = np.zeros([height // scale, width // scale], dtype=np.int32)
angle = angle_max * np.random.normal() / 180 * np.pi
angle = np.clip(angle, -np.pi, np.pi)
pad_x = np.random.normal() * width / 20
pad_y = np.random.normal() * height / 20
select_font = random.choices(fonts, k=1, weights=font_p)[0]
probs = [1. if is_Font_match(key[0], select_font) else 0. for key in keys]
selection = [key for key in random.choices(keys, k=max_count, weights=probs)]
probs2 = [1. if is_Font_match(key[0], select_font) and self.glyph_type[self.glyph_id[key[1]]] == 0 else 0. for key in keys]
selection2 = [key for key in random.choices(keys, k=max_count*2, weights=probs2)]
base_line = width - text_size // 2
line_space = int(text_size * random.uniform(1.05, 2.0))
line_start = 0
line_end = 0
isnum = -1
i = 0
for key in selection:
if isnum < 0 or isnum > 1:
if np.random.uniform() < 0.1:
isnum = 0
else:
isnum = -1
if isnum < 0:
item = self.img_cache[key]['vertical']
if item['width'] * item['rows'] == 0:
continue
w = item['width'] / 128 * text_size
h = item['rows'] / 128 * text_size
vertBearingX = item['vertBearingX'] / 128 * text_size
vertBearingY = item['vertBearingY'] / 128 * text_size
vertAdvance = item['vertAdvance'] / 128 * text_size
horiBearingX = 0
else:
item = self.img_cache[key]['vertical']
if item['width'] * item['rows'] == 0:
continue
key = selection2[i]
item = self.img_cache[key]['horizontal']
if item['width'] * item['rows'] == 0:
continue
w = item['width'] / 128 * text_size
h = item['rows'] / 128 * text_size
horiBearingY = item['horiBearingY'] / 128 * text_size
horiBearingX = item['horiBearingX'] / 128 * text_size
vertBearingX = -text_size * 0.5
vertBearingY = 0
vertAdvance = text_size
if line_end + vertAdvance >= height:
draw.line(((base_line // scale, line_start // scale),
(base_line // scale, line_end // scale)), fill=255, width=3)
base_line -= line_space
if base_line - text_size / 2 < 0:
break
line_start = 0
line_end = 0
if isnum >= 0:
t = (line_end + vertBearingY + text_size * 0.75 - horiBearingY) / height
else:
t = (line_end + vertBearingY) / height
if isnum > 0:
l = (base_line + horiBearingX) / width
else:
l = (base_line + vertBearingX + horiBearingX) / width
w = w / width
h = h / height
cx = l + w / 2
cy = t + h / 2
kernel_size = max(self.min_ker, int(max(w, h) / (2 * scale) * width))
std_x = min(self.max_std, max(self.min_ker, w / (2 * scale) * width) / 3)
std_y = min(self.max_std, max(self.min_ker, h / (2 * scale) * height) / 3)
center_kernel = gaussian_kernel(kernlen=kernel_size*2+1, xstd=std_x, ystd=std_y)
center_xmin = int(cx / scale * width) - kernel_size
center_xmax = int(cx / scale * width) + kernel_size + 1
center_ymin = int(cy / scale * height) - kernel_size
center_ymax = int(cy / scale * height) + kernel_size + 1
padx1 = max(0, 0 - center_xmin)
padx2 = max(0, center_xmax - width // scale)
pady1 = max(0, 0 - center_ymin)
pady2 = max(0, center_ymax - height // scale)
center_xmin += padx1
center_xmax -= padx2
center_ymin += pady1
center_ymax -= pady2
ker = kernel_size * 2 + 1
if center_ymax - center_ymin > 1 and center_xmax - center_xmin > 1:
keymap[center_ymin:center_ymax, center_xmin:center_xmax] = np.maximum(keymap[center_ymin:center_ymax, center_xmin:center_xmax], center_kernel[pady1:ker-pady2,padx1:ker-padx2])
size_xmin = np.clip(int((cx - w/2) * width / scale), 0, width // scale)
size_xmax = np.clip(int((cx + w/2) * width / scale) + 1, 0, width // scale)
size_ymin = np.clip(int((cy - h/2) * height / scale), 0, height // scale)
size_ymax = np.clip(int((cy + h/2) * height / scale) + 1, 0, height // scale)
size_mapx, size_mapy = np.meshgrid(np.arange(size_xmin, size_xmax) - cx * width / scale, np.arange(size_ymin, size_ymax) - cy * height / scale)
size_map = size_mapx ** 2 / max(w/2 * width / scale, 1) ** 2 + size_mapy ** 2 / max(h/2 * height / scale, 1) ** 2 < 1
center_x = int(cx / scale * width)
center_y = int(cy / scale * height)
offset_x = (cx * width % scale) / width * np.cos(angle)
offset_y = (cy * height % scale) / height * np.sin(angle + np.pi / 2)
offset_x += pad_x % scale
offset_y += pad_y % scale
offset_x = offset_x / scale - (np.arange(size_xmin, size_xmax) - center_x) * np.cos(angle)
offset_y = offset_y / scale - (np.arange(size_ymin, size_ymax) - center_y) * np.sin(angle + np.pi / 2)
offset_x = offset_x[np.newaxis,...] - np.linspace(-(size_ymax-size_ymin) * np.sin(angle) / 2, (size_ymax-size_ymin) * np.sin(angle) / 2, size_ymax-size_ymin)[...,np.newaxis]
offset_y = offset_y[...,np.newaxis] - np.linspace(-(size_xmax-size_xmin) * np.cos(angle + np.pi / 2) / 2, (size_xmax-size_xmin) * np.cos(angle + np.pi / 2) / 2, size_xmax-size_xmin)[np.newaxis,...]
offsetx[size_ymin:size_ymax, size_xmin:size_xmax] = np.where(size_map, offset_x, offsetx[size_ymin:size_ymax, size_xmin:size_xmax])
offsety[size_ymin:size_ymax, size_xmin:size_xmax] = np.where(size_map, offset_y, offsety[size_ymin:size_ymax, size_xmin:size_xmax])
fixw = w * np.abs(np.cos(angle)) + h * np.abs(np.sin(angle))
fixh = h * np.abs(np.cos(angle)) + w * np.abs(np.sin(angle))
fixw = np.log10(fixw * 10)
fixh = np.log10(fixh * 10)
xsizes[size_ymin:size_ymax, size_xmin:size_xmax] = np.where(size_map, fixw, xsizes[size_ymin:size_ymax, size_xmin:size_xmax])
ysizes[size_ymin:size_ymax, size_xmin:size_xmax] = np.where(size_map, fixh, ysizes[size_ymin:size_ymax, size_xmin:size_xmax])
id_char = self.glyph_id[key[1]]
ids[size_ymin:size_ymax, size_xmin:size_xmax] = np.where(size_map, id_char, ids[size_ymin:size_ymax, size_xmin:size_xmax])
w = max(int(item['width'] / 128 * text_size), 1)
h = max(int(item['rows'] / 128 * text_size), 1)
if isnum > 0:
l = int(np.clip(base_line + horiBearingX, 0, width - w))
else:
l = int(np.clip(base_line + vertBearingX + horiBearingX, 0, width - w))
if isnum >= 0:
t = int(np.clip(line_end + vertBearingY + text_size * 0.75 - horiBearingY, 0, height - h))
else:
t = int(np.clip(line_end + vertBearingY, 0, height - h))
im = np.asarray(Image.fromarray(item['image']).resize((w,h)))
images[t:t+h,l:l+w] = np.maximum(
images[t:t+h,l:l+w],
im)
if isnum != 0:
line_end += vertAdvance
if isnum >= 0:
isnum += 1
i += 1
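# Rotate the composed image and every target map by the sampled angle (with the random
# translation pad), then stack the 7 label channels: keymap, x/y size, x/y offset,
# text lines, separators.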
im = Image.fromarray(images).rotate(angle / np.pi * 180, resample=Image.Resampling.BILINEAR, translate=(pad_x, pad_y))
lines = lines.rotate(angle / np.pi * 180, resample=Image.Resampling.BILINEAR, translate=(pad_x / scale, pad_y / scale))
keymapim1 = Image.fromarray(keymap).rotate(angle / np.pi * 180, resample=Image.Resampling.NEAREST, translate=(pad_x / scale, pad_y / scale))
keymapim2 = Image.fromarray(keymap).rotate(angle / np.pi * 180, resample=Image.Resampling.BILINEAR, translate=(pad_x / scale, pad_y / scale))
keymapim = np.maximum(keymapim1, keymapim2)
xsizeim = Image.fromarray(xsizes).rotate(angle / np.pi * 180, resample=Image.Resampling.NEAREST, translate=(pad_x / scale, pad_y / scale))
ysizeim = Image.fromarray(ysizes).rotate(angle / np.pi * 180, resample=Image.Resampling.NEAREST, translate=(pad_x / scale, pad_y / scale))
xoffsetim = Image.fromarray(offsetx).rotate(angle / np.pi * 180, resample=Image.Resampling.BILINEAR, translate=(pad_x / scale, pad_y / scale))
yoffsetim = Image.fromarray(offsety).rotate(angle / np.pi * 180, resample=Image.Resampling.BILINEAR, translate=(pad_x / scale, pad_y / scale))
sepim = Image.fromarray(seps).rotate(angle / np.pi * 180, resample=Image.Resampling.BILINEAR, translate=(pad_x / scale, pad_y / scale))
labels = np.stack([keymapim, xsizeim, ysizeim, xoffsetim, yoffsetim, np.asarray(lines) / 255., sepim], -1)
idsim = Image.fromarray(ids).rotate(angle / np.pi * 180, resample=Image.Resampling.NEAREST, translate=(pad_x / scale, pad_y / scale))
images = np.asarray(im) / 255.
ids = np.asarray(idsim)
images = apply_random_filter(images)
return self.sub_constructimage(images, labels, ids, False)
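# Horizontal-text (yokogaki) page: glyphs are laid out line by line in up to two
# side-by-side blocks split at line_break; text-line and block-separator annotations
# are drawn, and per-glyph keymap/size/offset/id targets are filled.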
def yoko_images(self, keys, fonts, font_p):
max_count = 256
angle_max = 15.0
min_pixel = 16
max_pixel = 100
text_size = random.randint(min_pixel, max_pixel)
line_space = int(text_size * random.uniform(1.05, 2.0))
block_count = 2
line_break = int(random.uniform(0.3,0.7) * width)
break_space = text_size * random.uniform(0.6, 1.5)
images = np.zeros([height, width], dtype=np.float32)
keymap = np.zeros([height // scale, width // scale], dtype=np.float32)
xsizes = np.zeros([height // scale, width // scale], dtype=np.float32)
ysizes = np.zeros([height // scale, width // scale], dtype=np.float32)
offsetx = np.zeros([height // scale, width // scale], dtype=np.float32)
offsety = np.zeros([height // scale, width // scale], dtype=np.float32)
lines = np.zeros([height // scale, width // scale], dtype=np.float32)
lines = Image.fromarray(lines)
draw = ImageDraw.Draw(lines)
seps = np.zeros([height // scale, width // scale], dtype=np.float32)
ids = np.zeros([height // scale, width // scale], dtype=np.int32)
angle = angle_max * np.random.normal() / 180 * np.pi
angle = np.clip(angle, -np.pi, np.pi)
pad_x = np.random.normal() * width / 20
pad_y = np.random.normal() * height / 20
select_font = random.choices(fonts, k=1, weights=font_p)[0]
probs = [1. if is_Font_match(key[0], select_font) else 0. for key in keys]
selection = [key for key in random.choices(keys, k=max_count, weights=probs)]
base_line = line_space
block_no = 0
line_start = int(max(0, 0 if block_count == 1 or block_no == 0 else line_break + break_space))
line_end = int(min(width, width if block_count == 1 or block_no == 1 else line_break - break_space))
temp_lineend = line_start
linebuf = []
text_count = [0, 0]
sep_end = 0
for key in selection:
item = self.img_cache[key]['horizontal']
if item['width'] * item['rows'] == 0:
continue
w = item['width'] / 128 * text_size
h = item['rows'] / 128 * text_size
horiBearingX = item['horiBearingX'] / 128 * text_size
horiBearingY = item['horiBearingY'] / 128 * text_size
horiAdvance = item['horiAdvance'] / 128 * text_size
if temp_lineend + horiAdvance < line_end:
linebuf.append((key, item))
temp_lineend += horiAdvance
else:
remain = line_end - temp_lineend
if block_no == 0:
line_start += remain
if len(linebuf) > 1:
draw.line(((line_start // scale, base_line // scale),
(line_end // scale, base_line // scale)), fill=255, width=3)
text_count[block_no] += len(linebuf)
for key, item in linebuf:
w = item['width'] / 128 * text_size
h = item['rows'] / 128 * text_size
horiBearingX = item['horiBearingX'] / 128 * text_size
horiBearingY = item['horiBearingY'] / 128 * text_size
horiAdvance = item['horiAdvance'] / 128 * text_size
l = (line_start + horiBearingX) / width
t = (base_line - horiBearingY) / height
w = w / width
h = h / height
cx = l + w / 2
cy = t + h / 2
kernel_size = max(self.min_ker, int(max(w, h) / (2 * scale) * width))
std_x = min(self.max_std, max(self.min_ker, w / (2 * scale) * width) / 3)
std_y = min(self.max_std, max(self.min_ker, h / (2 * scale) * height) / 3)
center_kernel = gaussian_kernel(kernlen=kernel_size*2+1, xstd=std_x, ystd=std_y)
center_xmin = int(cx / scale * width) - kernel_size
center_xmax = int(cx / scale * width) + kernel_size + 1
center_ymin = int(cy / scale * height) - kernel_size
center_ymax = int(cy / scale * height) + kernel_size + 1
padx1 = max(0, 0 - center_xmin)
padx2 = max(0, center_xmax - width // scale)
pady1 = max(0, 0 - center_ymin)
pady2 = max(0, center_ymax - height // scale)
center_xmin += padx1
center_xmax -= padx2
center_ymin += pady1
center_ymax -= pady2
ker = kernel_size * 2 + 1
if center_ymax - center_ymin > 1 and center_xmax - center_xmin > 1:
keymap[center_ymin:center_ymax, center_xmin:center_xmax] = np.maximum(keymap[center_ymin:center_ymax, center_xmin:center_xmax], center_kernel[pady1:ker-pady2,padx1:ker-padx2])
size_xmin = np.clip(int((cx - w/2) * width / scale), 0, width // scale)
size_xmax = np.clip(int((cx + w/2) * width / scale) + 1, 0, width // scale)
size_ymin = np.clip(int((cy - h/2) * height / scale), 0, height // scale)
size_ymax = np.clip(int((cy + h/2) * height / scale) + 1, 0, height // scale)
size_mapx, size_mapy = np.meshgrid(np.arange(size_xmin, size_xmax) - cx * width / scale, np.arange(size_ymin, size_ymax) - cy * height / scale)
size_map = size_mapx ** 2 / max(w/2 * width / scale, 1) ** 2 + size_mapy ** 2 / max(h/2 * height / scale, 1) ** 2 < 1
center_x = int(cx / scale * width)
center_y = int(cy / scale * height)
offset_x = (cx * width % scale) / width * np.cos(angle)
offset_y = (cy * height % scale) / height * np.sin(angle + np.pi / 2)
offset_x += pad_x % scale
offset_y += pad_y % scale
offset_x = offset_x / scale - (np.arange(size_xmin, size_xmax) - center_x) * np.cos(angle)
offset_y = offset_y / scale - (np.arange(size_ymin, size_ymax) - center_y) * np.sin(angle + np.pi / 2)
offset_x = offset_x[np.newaxis,...] - np.linspace(-(size_ymax-size_ymin) * np.sin(angle) / 2, (size_ymax-size_ymin) * np.sin(angle) / 2, size_ymax-size_ymin)[...,np.newaxis]
offset_y = offset_y[...,np.newaxis] - np.linspace(-(size_xmax-size_xmin) * np.cos(angle + np.pi / 2) / 2, (size_xmax-size_xmin) * np.cos(angle + np.pi / 2) / 2, size_xmax-size_xmin)[np.newaxis,...]
offsetx[size_ymin:size_ymax, size_xmin:size_xmax] = np.where(size_map, offset_x, offsetx[size_ymin:size_ymax, size_xmin:size_xmax])
offsety[size_ymin:size_ymax, size_xmin:size_xmax] = np.where(size_map, offset_y, offsety[size_ymin:size_ymax, size_xmin:size_xmax])
fixw = w * np.abs(np.cos(angle)) + h * np.abs(np.sin(angle))
fixh = h * np.abs(np.cos(angle)) + w * np.abs(np.sin(angle))
fixw = np.log10(fixw * 10)
fixh = np.log10(fixh * 10)
xsizes[size_ymin:size_ymax, size_xmin:size_xmax] = np.where(size_map, fixw, xsizes[size_ymin:size_ymax, size_xmin:size_xmax])
ysizes[size_ymin:size_ymax, size_xmin:size_xmax] = np.where(size_map, fixh, ysizes[size_ymin:size_ymax, size_xmin:size_xmax])
id_char = self.glyph_id[key[1]]
ids[size_ymin:size_ymax, size_xmin:size_xmax] = np.where(size_map, id_char, ids[size_ymin:size_ymax, size_xmin:size_xmax])
w = max(int(item['width'] / 128 * text_size), 1)
h = max(int(item['rows'] / 128 * text_size), 1)
top = int(np.clip(base_line - horiBearingY, 0, height - h))
left = int(np.clip(line_start + horiBearingX, 0, width - w))
im = np.asarray(Image.fromarray(item['image']).resize((w,h)))
images[top:top+h,left:left+w] = np.maximum(
images[top:top+h,left:left+w],
im)
line_start += int(horiAdvance)
base_line += line_space
if base_line + text_size >= height:
if block_no == 0:
sep_end = base_line - line_space
base_line = line_space
block_no += 1
if block_no >= block_count:
break
line_start = int(max(0, 0 if block_count == 1 or block_no == 0 else line_break + break_space))
line_end = int(min(width, width if block_count == 1 or block_no == 1 else line_break - break_space))
temp_lineend = line_start
linebuf = []
if all(t > 1 for t in text_count):
l = max(1,line_break // scale)
t = line_space // 2 // scale
b = sep_end // scale
seps[t:b, l-1:l+2] = 1
im = Image.fromarray(images).rotate(angle / np.pi * 180, resample=Image.Resampling.BILINEAR, translate=(pad_x, pad_y))
lines = lines.rotate(angle / np.pi * 180, resample=Image.Resampling.BILINEAR, translate=(pad_x / scale, pad_y / scale))
keymapim1 = Image.fromarray(keymap).rotate(angle / np.pi * 180, resample=Image.Resampling.NEAREST, translate=(pad_x / scale, pad_y / scale))
keymapim2 = Image.fromarray(keymap).rotate(angle / np.pi * 180, resample=Image.Resampling.BILINEAR, translate=(pad_x / scale, pad_y / scale))
keymapim = np.maximum(keymapim1, keymapim2)
xsizeim = Image.fromarray(xsizes).rotate(angle / np.pi * 180, resample=Image.Resampling.NEAREST, translate=(pad_x / scale, pad_y / scale))
ysizeim = Image.fromarray(ysizes).rotate(angle / np.pi * 180, resample=Image.Resampling.NEAREST, translate=(pad_x / scale, pad_y / scale))
xoffsetim = Image.fromarray(offsetx).rotate(angle / np.pi * 180, resample=Image.Resampling.BILINEAR, translate=(pad_x / scale, pad_y / scale))
yoffsetim = Image.fromarray(offsety).rotate(angle / np.pi * 180, resample=Image.Resampling.BILINEAR, translate=(pad_x / scale, pad_y / scale))
sepim = Image.fromarray(seps).rotate(angle / np.pi * 180, resample=Image.Resampling.BILINEAR, translate=(pad_x / scale, pad_y / scale))
labels = np.stack([keymapim, xsizeim, ysizeim, xoffsetim, yoffsetim, np.asarray(lines) / 255., sepim], -1)
idsim = Image.fromarray(ids).rotate(angle / np.pi * 180, resample=Image.Resampling.NEAREST, translate=(pad_x / scale, pad_y / scale))
images = np.asarray(im) / 255.
ids = np.asarray(idsim)
images = apply_random_filter(images)
return self.sub_constructimage(images, labels, ids, False)
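# Vertical-text (tategaki) page: columns run top to bottom and advance right to left,
# in up to two blocks stacked at line_break; same target maps as the horizontal case.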
def tate_images(self, keys, fonts, font_p):
max_count = 256
angle_max = 15.0
min_pixel = 16
max_pixel = 100
text_size = random.randint(min_pixel, max_pixel)
line_space = int(text_size * random.uniform(1.05, 2.0))
block_count = 2
line_break = int(random.uniform(0.3,0.7) * height)
break_space = text_size * random.uniform(0.6, 1.0)
images = np.zeros([height, width], dtype=np.float32)
keymap = np.zeros([height // scale, width // scale], dtype=np.float32)
xsizes = np.zeros([height // scale, width // scale], dtype=np.float32)
ysizes = np.zeros([height // scale, width // scale], dtype=np.float32)
offsetx = np.zeros([height // scale, width // scale], dtype=np.float32)
offsety = np.zeros([height // scale, width // scale], dtype=np.float32)
lines = np.zeros([height // scale, width // scale], dtype=np.float32)
lines = Image.fromarray(lines)
draw = ImageDraw.Draw(lines)
seps = np.zeros([height // scale, width // scale], dtype=np.float32)
ids = np.zeros([height // scale, width // scale], dtype=np.int32)
angle = angle_max * np.random.normal() / 180 * np.pi
angle = np.clip(angle, -np.pi, np.pi)
pad_x = np.random.normal() * width / 20
pad_y = np.random.normal() * height / 20
select_font = random.choices(fonts, k=1, weights=font_p)[0]
probs = [1. if is_Font_match(key[0], select_font) else 0. for key in keys]
selection = [key for key in random.choices(keys, k=max_count, weights=probs)]
base_line = width - line_space + text_size // 2
block_no = 0
line_start = int(max(0, 0 if block_count == 1 or block_no == 0 else line_break + break_space))
line_end = int(min(height, height if block_count == 1 or block_no == 1 else line_break - break_space))
temp_lineend = line_start
linebuf = []
text_count = [0, 0]
sep_end = 0
for key in selection:
item = self.img_cache[key]['vertical']
if item['width'] * item['rows'] == 0:
continue
w = item['width'] / 128 * text_size
h = item['rows'] / 128 * text_size
vertBearingX = item['vertBearingX'] / 128 * text_size
vertBearingY = item['vertBearingY'] / 128 * text_size
vertAdvance = item['vertAdvance'] / 128 * text_size
if temp_lineend + vertAdvance < line_end:
linebuf.append((key,item))
temp_lineend += vertAdvance
else:
remain = line_end - temp_lineend
if block_no == 0:
line_start += remain
if len(linebuf) > 1:
draw.line(((base_line // scale, line_start // scale),
(base_line // scale, line_end // scale)), fill=255, width=3)
text_count[block_no] += len(linebuf)
for key, item in linebuf:
w = item['width'] / 128 * text_size
h = item['rows'] / 128 * text_size
vertBearingX = item['vertBearingX'] / 128 * text_size
vertBearingY = item['vertBearingY'] / 128 * text_size
vertAdvance = item['vertAdvance'] / 128 * text_size
l = (base_line + vertBearingX) / width
t = (line_start + vertBearingY) / height
w = w / width
h = h / height
cx = l + w / 2
cy = t + h / 2
kernel_size = max(self.min_ker, int(max(w, h) / (2 * scale) * width))
std_x = min(self.max_std, max(self.min_ker, w / (2 * scale) * width) / 3)
std_y = min(self.max_std, max(self.min_ker, h / (2 * scale) * height) / 3)
center_kernel = gaussian_kernel(kernlen=kernel_size*2+1, xstd=std_x, ystd=std_y)
center_xmin = int(cx / scale * width) - kernel_size
center_xmax = int(cx / scale * width) + kernel_size + 1
center_ymin = int(cy / scale * height) - kernel_size
center_ymax = int(cy / scale * height) + kernel_size + 1
padx1 = max(0, 0 - center_xmin)
padx2 = max(0, center_xmax - width // scale)
pady1 = max(0, 0 - center_ymin)
pady2 = max(0, center_ymax - height // scale)
center_xmin += padx1
center_xmax -= padx2
center_ymin += pady1
center_ymax -= pady2
ker = kernel_size * 2 + 1
if center_ymax - center_ymin > 1 and center_xmax - center_xmin > 1:
keymap[center_ymin:center_ymax, center_xmin:center_xmax] = np.maximum(keymap[center_ymin:center_ymax, center_xmin:center_xmax], center_kernel[pady1:ker-pady2,padx1:ker-padx2])
size_xmin = np.clip(int((cx - w/2) * width / scale), 0, width // scale)
size_xmax = np.clip(int((cx + w/2) * width / scale) + 1, 0, width // scale)
size_ymin = np.clip(int((cy - h/2) * height / scale), 0, height // scale)
size_ymax = np.clip(int((cy + h/2) * height / scale) + 1, 0, height // scale)
size_mapx, size_mapy = np.meshgrid(np.arange(size_xmin, size_xmax) - cx * width / scale, np.arange(size_ymin, size_ymax) - cy * height / scale)
size_map = size_mapx ** 2 / max(w/2 * width / scale, 1) ** 2 + size_mapy ** 2 / max(h/2 * height / scale, 1) ** 2 < 1
center_x = int(cx / scale * width)
center_y = int(cy / scale * height)
offset_x = (cx * width % scale) / width * np.cos(angle)
offset_y = (cy * height % scale) / height * np.sin(angle + np.pi / 2)
offset_x += pad_x % scale
offset_y += pad_y % scale
offset_x = offset_x / scale - (np.arange(size_xmin, size_xmax) - center_x) * np.cos(angle)
offset_y = offset_y / scale - (np.arange(size_ymin, size_ymax) - center_y) * np.sin(angle + np.pi / 2)
offset_x = offset_x[np.newaxis,...] - np.linspace(-(size_ymax-size_ymin) * np.sin(angle) / 2, (size_ymax-size_ymin) * np.sin(angle) / 2, size_ymax-size_ymin)[...,np.newaxis]
offset_y = offset_y[...,np.newaxis] - np.linspace(-(size_xmax-size_xmin) * np.cos(angle + np.pi / 2) / 2, (size_xmax-size_xmin) * np.cos(angle + np.pi / 2) / 2, size_xmax-size_xmin)[np.newaxis,...]
offsetx[size_ymin:size_ymax, size_xmin:size_xmax] = np.where(size_map, offset_x, offsetx[size_ymin:size_ymax, size_xmin:size_xmax])
offsety[size_ymin:size_ymax, size_xmin:size_xmax] = np.where(size_map, offset_y, offsety[size_ymin:size_ymax, size_xmin:size_xmax])
fixw = w * np.abs(np.cos(angle)) + h * np.abs(np.sin(angle))
fixh = h * np.abs(np.cos(angle)) + w * np.abs(np.sin(angle))
fixw = np.log10(fixw * 10)
fixh = np.log10(fixh * 10)
xsizes[size_ymin:size_ymax, size_xmin:size_xmax] = np.where(size_map, fixw, xsizes[size_ymin:size_ymax, size_xmin:size_xmax])
ysizes[size_ymin:size_ymax, size_xmin:size_xmax] = np.where(size_map, fixh, ysizes[size_ymin:size_ymax, size_xmin:size_xmax])
id_char = self.glyph_id[key[1]]
ids[size_ymin:size_ymax, size_xmin:size_xmax] = np.where(size_map, id_char, ids[size_ymin:size_ymax, size_xmin:size_xmax])
w = max(int(item['width'] / 128 * text_size), 1)
h = max(int(item['rows'] / 128 * text_size), 1)
l = int(np.clip(base_line + vertBearingX, 0, width - w))
t = int(np.clip(line_start + vertBearingY, 0, height - h))
im = np.asarray(Image.fromarray(item['image']).resize((w,h)))
images[t:t+h,l:l+w] = np.maximum(
images[t:t+h,l:l+w],
im)
line_start += int(vertAdvance)
base_line -= line_space
if base_line - text_size / 2 < 0:
if block_no == 0:
sep_end = base_line + line_space
base_line = width - line_space + text_size // 2
block_no += 1
if block_no >= block_count:
break
line_start = int(max(0, 0 if block_count == 1 or block_no == 0 else line_break + break_space))
line_end = int(min(height, height if block_count == 1 or block_no == 1 else line_break - break_space))
temp_lineend = line_start
linebuf = []
if all(t > 1 for t in text_count):
l = max(1,line_break // scale)
right = (width - line_space + text_size // 2) // scale
left = sep_end // scale
seps[l-1:l+2, left:right] = 1
im = Image.fromarray(images).rotate(angle / np.pi * 180, resample=Image.Resampling.BILINEAR, translate=(pad_x, pad_y))
lines = lines.rotate(angle / np.pi * 180, resample=Image.Resampling.BILINEAR, translate=(pad_x / scale, pad_y / scale))
keymapim1 = Image.fromarray(keymap).rotate(angle / np.pi * 180, resample=Image.Resampling.NEAREST, translate=(pad_x / scale, pad_y / scale))
keymapim2 = Image.fromarray(keymap).rotate(angle / np.pi * 180, resample=Image.Resampling.BILINEAR, translate=(pad_x / scale, pad_y / scale))
keymapim = np.maximum(keymapim1, keymapim2)
xsizeim = Image.fromarray(xsizes).rotate(angle / np.pi * 180, resample=Image.Resampling.NEAREST, translate=(pad_x / scale, pad_y / scale))
ysizeim = Image.fromarray(ysizes).rotate(angle / np.pi * 180, resample=Image.Resampling.NEAREST, translate=(pad_x / scale, pad_y / scale))
xoffsetim = Image.fromarray(offsetx).rotate(angle / np.pi * 180, resample=Image.Resampling.BILINEAR, translate=(pad_x / scale, pad_y / scale))
yoffsetim = Image.fromarray(offsety).rotate(angle / np.pi * 180, resample=Image.Resampling.BILINEAR, translate=(pad_x / scale, pad_y / scale))
sepim = Image.fromarray(seps).rotate(angle / np.pi * 180, resample=Image.Resampling.BILINEAR, translate=(pad_x / scale, pad_y / scale))
labels = np.stack([keymapim, xsizeim, ysizeim, xoffsetim, yoffsetim, np.asarray(lines) / 255., sepim], -1)
idsim = Image.fromarray(ids).rotate(angle / np.pi * 180, resample=Image.Resampling.NEAREST, translate=(pad_x / scale, pad_y / scale))
images = np.asarray(im) / 255.
ids = np.asarray(idsim)
images = apply_random_filter(images)
return self.sub_constructimage(images, labels, ids, False)
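# Vertical text with furigana: main glyphs use text_size2 and, with random gaps,
# half-size ruby glyphs are placed in a narrow column to the right of each main column.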
def tatefurigana_images(self, keys, fonts, font_p):
max_count = 256
angle_max = 15.0
min_pixel = 12
max_pixel = 50
text_size = random.randint(min_pixel, max_pixel)
text_size2 = text_size * 2
line_space = int(text_size2 * random.uniform(1.45, 1.7))
block_count = 2
line_break = int(random.uniform(0.3,0.7) * height)
break_space = text_size2 * random.uniform(0.6, 1.0)
images = np.zeros([height, width], dtype=np.float32)
keymap = np.zeros([height // scale, width // scale], dtype=np.float32)
xsizes = np.zeros([height // scale, width // scale], dtype=np.float32)
ysizes = np.zeros([height // scale, width // scale], dtype=np.float32)
offsetx = np.zeros([height // scale, width // scale], dtype=np.float32)
offsety = np.zeros([height // scale, width // scale], dtype=np.float32)
lines = np.zeros([height // scale, width // scale], dtype=np.float32)
lines = Image.fromarray(lines)
draw = ImageDraw.Draw(lines)
seps = np.zeros([height // scale, width // scale], dtype=np.float32)
ids = np.zeros([height // scale, width // scale], dtype=np.int32)
angle = angle_max * np.random.normal() / 180 * np.pi
angle = np.clip(angle, -np.pi, np.pi)
pad_x = np.random.normal() * width / 20
pad_y = np.random.normal() * height / 20
select_font = random.choices(fonts, k=1, weights=font_p)[0]
probs = [1. if is_Font_match(key[0], select_font) else 0. for key in keys]
selection = [key for key in random.choices(keys, k=max_count, weights=probs)]
probs2 = [1. if is_Font_match(key[0], select_font) and self.glyph_type[self.glyph_id[key[1]]] in [3,4] else 0. for key in keys]
selection2 = iter([key for key in random.choices(keys, k=max_count*2, weights=probs2)])
base_line = width - line_space + text_size2 // 2
block_no = 0
line_start = int(max(0, 0 if block_count == 1 or block_no == 0 else line_break + break_space))
line_end = int(min(height, height if block_count == 1 or block_no == 1 else line_break - break_space))
temp_lineend = line_start
linebuf = []
text_count = [0, 0]
sep_end = 0
for key in selection:
item = self.img_cache[key]['vertical']
if item['width'] * item['rows'] == 0:
continue
w = item['width'] / 128 * text_size2
h = item['rows'] / 128 * text_size2
vertBearingX = item['vertBearingX'] / 128 * text_size2
vertBearingY = item['vertBearingY'] / 128 * text_size2
vertAdvance = item['vertAdvance'] / 128 * text_size2
if temp_lineend + vertAdvance < line_end:
linebuf.append((key,item))
temp_lineend += vertAdvance
else:
remain = line_end - temp_lineend
if block_no == 0:
line_start += remain
if len(linebuf) > 1:
draw.line(((base_line // scale, line_start // scale),
(base_line // scale, line_end // scale)), fill=255, width=3)
text_count[block_no] += len(linebuf)
for key, item in linebuf:
w = item['width'] / 128 * text_size2
h = item['rows'] / 128 * text_size2
vertBearingX = item['vertBearingX'] / 128 * text_size2
vertBearingY = item['vertBearingY'] / 128 * text_size2
vertAdvance = item['vertAdvance'] / 128 * text_size2
l = (base_line + vertBearingX) / width
t = (line_start + vertBearingY) / height
w = w / width
h = h / height
cx = l + w / 2
cy = t + h / 2
kernel_size = max(self.min_ker, int(max(w, h) / (2 * scale) * width))
std_x = min(self.max_std, max(self.min_ker, w / (2 * scale) * width) / 3)
std_y = min(self.max_std, max(self.min_ker, h / (2 * scale) * height) / 3)
center_kernel = gaussian_kernel(kernlen=kernel_size*2+1, xstd=std_x, ystd=std_y)
center_xmin = int(cx / scale * width) - kernel_size
center_xmax = int(cx / scale * width) + kernel_size + 1
center_ymin = int(cy / scale * height) - kernel_size
center_ymax = int(cy / scale * height) + kernel_size + 1
padx1 = max(0, 0 - center_xmin)
padx2 = max(0, center_xmax - width // scale)
pady1 = max(0, 0 - center_ymin)
pady2 = max(0, center_ymax - height // scale)
center_xmin += padx1
center_xmax -= padx2
center_ymin += pady1
center_ymax -= pady2
ker = kernel_size * 2 + 1
if center_ymax - center_ymin > 1 and center_xmax - center_xmin > 1:
keymap[center_ymin:center_ymax, center_xmin:center_xmax] = np.maximum(keymap[center_ymin:center_ymax, center_xmin:center_xmax], center_kernel[pady1:ker-pady2,padx1:ker-padx2])
size_xmin = np.clip(int((cx - w/2) * width / scale), 0, width // scale)
size_xmax = np.clip(int((cx + w/2) * width / scale) + 1, 0, width // scale)
size_ymin = np.clip(int((cy - h/2) * height / scale), 0, height // scale)
size_ymax = np.clip(int((cy + h/2) * height / scale) + 1, 0, height // scale)
size_mapx, size_mapy = np.meshgrid(np.arange(size_xmin, size_xmax) - cx * width / scale, np.arange(size_ymin, size_ymax) - cy * height / scale)
size_map = size_mapx ** 2 / max(w/2 * width / scale, 1) ** 2 + size_mapy ** 2 / max(h/2 * height / scale, 1) ** 2 < 1
center_x = int(cx / scale * width)
center_y = int(cy / scale * height)
offset_x = (cx * width % scale) / width * np.cos(angle)
offset_y = (cy * height % scale) / height * np.sin(angle + np.pi / 2)
offset_x += pad_x % scale
offset_y += pad_y % scale
offset_x = offset_x / scale - (np.arange(size_xmin, size_xmax) - center_x) * np.cos(angle)
offset_y = offset_y / scale - (np.arange(size_ymin, size_ymax) - center_y) * np.sin(angle + np.pi / 2)
offset_x = offset_x[np.newaxis,...] - np.linspace(-(size_ymax-size_ymin) * np.sin(angle) / 2, (size_ymax-size_ymin) * np.sin(angle) / 2, size_ymax-size_ymin)[...,np.newaxis]
offset_y = offset_y[...,np.newaxis] - np.linspace(-(size_xmax-size_xmin) * np.cos(angle + np.pi / 2) / 2, (size_xmax-size_xmin) * np.cos(angle + np.pi / 2) / 2, size_xmax-size_xmin)[np.newaxis,...]
offsetx[size_ymin:size_ymax, size_xmin:size_xmax] = np.where(size_map, offset_x, offsetx[size_ymin:size_ymax, size_xmin:size_xmax])
offsety[size_ymin:size_ymax, size_xmin:size_xmax] = np.where(size_map, offset_y, offsety[size_ymin:size_ymax, size_xmin:size_xmax])
fixw = w * np.abs(np.cos(angle)) + h * np.abs(np.sin(angle))
fixh = h * np.abs(np.cos(angle)) + w * np.abs(np.sin(angle))
fixw = np.log10(fixw * 10)
fixh = np.log10(fixh * 10)
xsizes[size_ymin:size_ymax, size_xmin:size_xmax] = np.where(size_map, fixw, xsizes[size_ymin:size_ymax, size_xmin:size_xmax])
ysizes[size_ymin:size_ymax, size_xmin:size_xmax] = np.where(size_map, fixh, ysizes[size_ymin:size_ymax, size_xmin:size_xmax])
id_char = self.glyph_id[key[1]]
ids[size_ymin:size_ymax, size_xmin:size_xmax] = np.where(size_map, id_char, ids[size_ymin:size_ymax, size_xmin:size_xmax])
w = max(int(item['width'] / 128 * text_size2), 1)
h = max(int(item['rows'] / 128 * text_size2), 1)
l = int(np.clip(base_line + vertBearingX, 0, width - w))
t = int(np.clip(line_start + vertBearingY, 0, height - h))
im = np.asarray(Image.fromarray(item['image']).resize((w,h)))
images[t:t+h,l:l+w] = np.maximum(
images[t:t+h,l:l+w],
im)
line_start += int(vertAdvance)
# furigana (ruby text) handling
base_line2 = base_line + text_size2 // 2 + text_size // 2
line_start2 = int(max(0, 0 if block_count == 1 or block_no == 0 else line_break + break_space))
if block_no == 0:
line_start2 += remain
line_end2 = line_start
line_start2p = line_start2
while line_start2 < line_end2:
key2 = next(selection2, None)
if key2 is None:
break
item = self.img_cache[key2]['vertical']
if item['width'] * item['rows'] == 0:
continue
w = item['width'] / 128 * text_size
h = item['rows'] / 128 * text_size
vertBearingX = item['vertBearingX'] / 128 * text_size
vertBearingY = item['vertBearingY'] / 128 * text_size
vertAdvance = item['vertAdvance'] / 128 * text_size
if np.random.uniform() < 0.2:
# leave this slot empty (no furigana here)
if line_start2 != line_start2p:
draw.line(((base_line2 // scale, line_start2p // scale),
(base_line2 // scale, line_start2 // scale)), fill=255, width=3)
line_start2 += int(vertAdvance)
line_start2p = line_start2
continue
# render a furigana glyph here
l = (base_line2 + vertBearingX) / width
t = (line_start2 + vertBearingY) / height
w = w / width
h = h / height
cx = l + w / 2
cy = t + h / 2
kernel_size = max(self.min_ker, int(max(w, h) / (2 * scale) * width))
std_x = min(self.max_std, max(self.min_ker, w / (2 * scale) * width) / 3)
std_y = min(self.max_std, max(self.min_ker, h / (2 * scale) * height) / 3)
center_kernel = gaussian_kernel(kernlen=kernel_size*2+1, xstd=std_x, ystd=std_y)
center_xmin = int(cx / scale * width) - kernel_size
center_xmax = int(cx / scale * width) + kernel_size + 1
center_ymin = int(cy / scale * height) - kernel_size
center_ymax = int(cy / scale * height) + kernel_size + 1
padx1 = max(0, 0 - center_xmin)
padx2 = max(0, center_xmax - width // scale)
pady1 = max(0, 0 - center_ymin)
pady2 = max(0, center_ymax - height // scale)
center_xmin += padx1
center_xmax -= padx2
center_ymin += pady1
center_ymax -= pady2
ker = kernel_size * 2 + 1
if center_ymax - center_ymin > 1 and center_xmax - center_xmin > 1:
keymap[center_ymin:center_ymax, center_xmin:center_xmax] = np.maximum(keymap[center_ymin:center_ymax, center_xmin:center_xmax], center_kernel[pady1:ker-pady2,padx1:ker-padx2])
size_xmin = np.clip(int((cx - w/2) * width / scale), 0, width // scale)
size_xmax = np.clip(int((cx + w/2) * width / scale) + 1, 0, width // scale)
size_ymin = np.clip(int((cy - h/2) * height / scale), 0, height // scale)
size_ymax = np.clip(int((cy + h/2) * height / scale) + 1, 0, height // scale)
size_mapx, size_mapy = np.meshgrid(np.arange(size_xmin, size_xmax) - cx * width / scale, np.arange(size_ymin, size_ymax) - cy * height / scale)
size_map = size_mapx ** 2 / max(w/2 * width / scale, 1) ** 2 + size_mapy ** 2 / max(h/2 * height / scale, 1) ** 2 < 1
center_x = int(cx / scale * width)
center_y = int(cy / scale * height)
offset_x = (cx * width % scale) / width * np.cos(angle)
offset_y = (cy * height % scale) / height * np.sin(angle + np.pi / 2)
offset_x += pad_x % scale
offset_y += pad_y % scale
offset_x = offset_x / scale - (np.arange(size_xmin, size_xmax) - center_x) * np.cos(angle)
offset_y = offset_y / scale - (np.arange(size_ymin, size_ymax) - center_y) * np.sin(angle + np.pi / 2)
offset_x = offset_x[np.newaxis,...] - np.linspace(-(size_ymax-size_ymin) * np.sin(angle) / 2, (size_ymax-size_ymin) * np.sin(angle) / 2, size_ymax-size_ymin)[...,np.newaxis]
offset_y = offset_y[...,np.newaxis] - np.linspace(-(size_xmax-size_xmin) * np.cos(angle + np.pi / 2) / 2, (size_xmax-size_xmin) * np.cos(angle + np.pi / 2) / 2, size_xmax-size_xmin)[np.newaxis,...]
offsetx[size_ymin:size_ymax, size_xmin:size_xmax] = np.where(size_map, offset_x, offsetx[size_ymin:size_ymax, size_xmin:size_xmax])
offsety[size_ymin:size_ymax, size_xmin:size_xmax] = np.where(size_map, offset_y, offsety[size_ymin:size_ymax, size_xmin:size_xmax])
fixw = w * np.abs(np.cos(angle)) + h * np.abs(np.sin(angle))
fixh = h * np.abs(np.cos(angle)) + w * np.abs(np.sin(angle))
fixw = np.log10(fixw * 10)
fixh = np.log10(fixh * 10)
xsizes[size_ymin:size_ymax, size_xmin:size_xmax] = np.where(size_map, fixw, xsizes[size_ymin:size_ymax, size_xmin:size_xmax])
ysizes[size_ymin:size_ymax, size_xmin:size_xmax] = np.where(size_map, fixh, ysizes[size_ymin:size_ymax, size_xmin:size_xmax])
id_char = self.glyph_id[key[1]]
ids[size_ymin:size_ymax, size_xmin:size_xmax] = np.where(size_map, id_char, ids[size_ymin:size_ymax, size_xmin:size_xmax])
w = max(int(item['width'] / 128 * text_size), 1)
h = max(int(item['rows'] / 128 * text_size), 1)
l = int(np.clip(base_line2 + vertBearingX, 0, width - w))
t = int(np.clip(line_start2 + vertBearingY, 0, height - h))
im = np.asarray(Image.fromarray(item['image']).resize((w,h)))
images[t:t+h,l:l+w] = np.maximum(
images[t:t+h,l:l+w],
im)
line_start2 += int(vertAdvance)
if line_start2 != line_start2p:
draw.line(((base_line2 // scale, line_start2p // scale),
(base_line2 // scale, line_start2 // scale)), fill=255, width=3)
base_line -= line_space
if base_line - text_size2 / 2 < 0:
if block_no == 0:
sep_end = base_line + line_space
base_line = width - line_space + text_size2 // 2
block_no += 1
if block_no >= block_count:
break
line_start = int(max(0, 0 if block_count == 1 or block_no == 0 else line_break + break_space))
line_end = int(min(height, height if block_count == 1 or block_no == 1 else line_break - break_space))
temp_lineend = line_start
linebuf = []
if all(t > 1 for t in text_count):
l = max(1,line_break // scale)
right = (width - line_space + text_size2 // 2) // scale
left = sep_end // scale
seps[l-1:l+2, left:right] = 1
im = Image.fromarray(images).rotate(angle / np.pi * 180, resample=Image.Resampling.BILINEAR, translate=(pad_x, pad_y))
lines = lines.rotate(angle / np.pi * 180, resample=Image.Resampling.BILINEAR, translate=(pad_x / scale, pad_y / scale))
keymapim1 = Image.fromarray(keymap).rotate(angle / np.pi * 180, resample=Image.Resampling.NEAREST, translate=(pad_x / scale, pad_y / scale))
keymapim2 = Image.fromarray(keymap).rotate(angle / np.pi * 180, resample=Image.Resampling.BILINEAR, translate=(pad_x / scale, pad_y / scale))
keymapim = np.maximum(keymapim1, keymapim2)
xsizeim = Image.fromarray(xsizes).rotate(angle / np.pi * 180, resample=Image.Resampling.NEAREST, translate=(pad_x / scale, pad_y / scale))
ysizeim = Image.fromarray(ysizes).rotate(angle / np.pi * 180, resample=Image.Resampling.NEAREST, translate=(pad_x / scale, pad_y / scale))
xoffsetim = Image.fromarray(offsetx).rotate(angle / np.pi * 180, resample=Image.Resampling.BILINEAR, translate=(pad_x / scale, pad_y / scale))
yoffsetim = Image.fromarray(offsety).rotate(angle / np.pi * 180, resample=Image.Resampling.BILINEAR, translate=(pad_x / scale, pad_y / scale))
sepim = Image.fromarray(seps).rotate(angle / np.pi * 180, resample=Image.Resampling.BILINEAR, translate=(pad_x / scale, pad_y / scale))
labels = np.stack([keymapim, xsizeim, ysizeim, xoffsetim, yoffsetim, np.asarray(lines) / 255., sepim], -1)
idsim = Image.fromarray(ids).rotate(angle / np.pi * 180, resample=Image.Resampling.NEAREST, translate=(pad_x / scale, pad_y / scale))
images = np.asarray(im) / 255.
ids = np.asarray(idsim)
images = apply_random_filter(images)
return self.sub_constructimage(images, labels, ids, False)
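# Horizontal text with furigana: main glyphs use text_size2 and half-size ruby glyphs
# are placed, with random gaps, on a line above each main text line.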
def yokofurigana_images(self, keys, fonts, font_p):
max_count = 256
angle_max = 15.0
min_pixel = 12
max_pixel = 50
text_size = random.randint(min_pixel, max_pixel)
text_size2 = text_size * 2
line_space = int(text_size2 * random.uniform(1.45, 1.7))
block_count = 2
line_break = int(random.uniform(0.3,0.7) * width)
break_space = text_size2 * random.uniform(0.6, 1.5)
images = np.zeros([height, width], dtype=np.float32)
keymap = np.zeros([height // scale, width // scale], dtype=np.float32)
xsizes = np.zeros([height // scale, width // scale], dtype=np.float32)
ysizes = np.zeros([height // scale, width // scale], dtype=np.float32)
offsetx = np.zeros([height // scale, width // scale], dtype=np.float32)
offsety = np.zeros([height // scale, width // scale], dtype=np.float32)
lines = np.zeros([height // scale, width // scale], dtype=np.float32)
lines = Image.fromarray(lines)
draw = ImageDraw.Draw(lines)
seps = np.zeros([height // scale, width // scale], dtype=np.float32)
ids = np.zeros([height // scale, width // scale], dtype=np.int32)
angle = angle_max * np.random.normal() / 180 * np.pi
angle = np.clip(angle, -np.pi, np.pi)
pad_x = np.random.normal() * width / 20
pad_y = np.random.normal() * height / 20
select_font = random.choices(fonts, k=1, weights=font_p)[0]
probs = [1. if is_Font_match(key[0], select_font) else 0. for key in keys]
selection = [key for key in random.choices(keys, k=max_count, weights=probs)]
probs2 = [1. if is_Font_match(key[0], select_font) and self.glyph_type[self.glyph_id[key[1]]] in [3,4] else 0. for key in keys]
selection2 = iter([key for key in random.choices(keys, k=max_count*2, weights=probs2)])
base_line = line_space
block_no = 0
line_start = int(max(0, 0 if block_count == 1 or block_no == 0 else line_break + break_space))
line_end = int(min(width, width if block_count == 1 or block_no == 1 else line_break - break_space))
temp_lineend = line_start
linebuf = []
text_count = [0, 0]
sep_end = 0
for key in selection:
item = self.img_cache[key]['horizontal']
if item['width'] * item['rows'] == 0:
continue
w = item['width'] / 128 * text_size2
h = item['rows'] / 128 * text_size2
horiBearingX = item['horiBearingX'] / 128 * text_size2
horiBearingY = item['horiBearingY'] / 128 * text_size2
horiAdvance = item['horiAdvance'] / 128 * text_size2
if temp_lineend + horiAdvance < line_end:
linebuf.append((key, item))
temp_lineend += horiAdvance
else:
remain = line_end - temp_lineend
if block_no == 0:
line_start += remain
if len(linebuf) > 1:
draw.line(((line_start // scale, base_line // scale),
(line_end // scale, base_line // scale)), fill=255, width=3)
text_count[block_no] += len(linebuf)
for key, item in linebuf:
w = item['width'] / 128 * text_size2
h = item['rows'] / 128 * text_size2
horiBearingX = item['horiBearingX'] / 128 * text_size2
horiBearingY = item['horiBearingY'] / 128 * text_size2
horiAdvance = item['horiAdvance'] / 128 * text_size2
l = (line_start + horiBearingX) / width
t = (base_line - horiBearingY) / height
w = w / width
h = h / height
cx = l + w / 2
cy = t + h / 2
kernel_size = max(self.min_ker, int(max(w, h) / (2 * scale) * width))
std_x = min(self.max_std, max(self.min_ker, w / (2 * scale) * width) / 3)
std_y = min(self.max_std, max(self.min_ker, h / (2 * scale) * height) / 3)
center_kernel = gaussian_kernel(kernlen=kernel_size*2+1, xstd=std_x, ystd=std_y)
center_xmin = int(cx / scale * width) - kernel_size
center_xmax = int(cx / scale * width) + kernel_size + 1
center_ymin = int(cy / scale * height) - kernel_size
center_ymax = int(cy / scale * height) + kernel_size + 1
padx1 = max(0, 0 - center_xmin)
padx2 = max(0, center_xmax - width // scale)
pady1 = max(0, 0 - center_ymin)
pady2 = max(0, center_ymax - height // scale)
center_xmin += padx1
center_xmax -= padx2
center_ymin += pady1
center_ymax -= pady2
ker = kernel_size * 2 + 1
if center_ymax - center_ymin > 1 and center_xmax - center_xmin > 1:
keymap[center_ymin:center_ymax, center_xmin:center_xmax] = np.maximum(keymap[center_ymin:center_ymax, center_xmin:center_xmax], center_kernel[pady1:ker-pady2,padx1:ker-padx2])
size_xmin = np.clip(int((cx - w/2) * width / scale), 0, width // scale)
size_xmax = np.clip(int((cx + w/2) * width / scale) + 1, 0, width // scale)
size_ymin = np.clip(int((cy - h/2) * height / scale), 0, height // scale)
size_ymax = np.clip(int((cy + h/2) * height / scale) + 1, 0, height // scale)
size_mapx, size_mapy = np.meshgrid(np.arange(size_xmin, size_xmax) - cx * width / scale, np.arange(size_ymin, size_ymax) - cy * height / scale)
size_map = size_mapx ** 2 / max(w/2 * width / scale, 1) ** 2 + size_mapy ** 2 / max(h/2 * height / scale, 1) ** 2 < 1
center_x = int(cx / scale * width)
center_y = int(cy / scale * height)
offset_x = (cx * width % scale) / width * np.cos(angle)
offset_y = (cy * height % scale) / height * np.sin(angle + np.pi / 2)
offset_x += pad_x % scale
offset_y += pad_y % scale
offset_x = offset_x / scale - (np.arange(size_xmin, size_xmax) - center_x) * np.cos(angle)
offset_y = offset_y / scale - (np.arange(size_ymin, size_ymax) - center_y) * np.sin(angle + np.pi / 2)
offset_x = offset_x[np.newaxis,...] - np.linspace(-(size_ymax-size_ymin) * np.sin(angle) / 2, (size_ymax-size_ymin) * np.sin(angle) / 2, size_ymax-size_ymin)[...,np.newaxis]
offset_y = offset_y[...,np.newaxis] - np.linspace(-(size_xmax-size_xmin) * np.cos(angle + np.pi / 2) / 2, (size_xmax-size_xmin) * np.cos(angle + np.pi / 2) / 2, size_xmax-size_xmin)[np.newaxis,...]
offsetx[size_ymin:size_ymax, size_xmin:size_xmax] = np.where(size_map, offset_x, offsetx[size_ymin:size_ymax, size_xmin:size_xmax])
offsety[size_ymin:size_ymax, size_xmin:size_xmax] = np.where(size_map, offset_y, offsety[size_ymin:size_ymax, size_xmin:size_xmax])
fixw = w * np.abs(np.cos(angle)) + h * np.abs(np.sin(angle))
fixh = h * np.abs(np.cos(angle)) + w * np.abs(np.sin(angle))
fixw = np.log10(fixw * 10)
fixh = np.log10(fixh * 10)
xsizes[size_ymin:size_ymax, size_xmin:size_xmax] = np.where(size_map, fixw, xsizes[size_ymin:size_ymax, size_xmin:size_xmax])
ysizes[size_ymin:size_ymax, size_xmin:size_xmax] = np.where(size_map, fixh, ysizes[size_ymin:size_ymax, size_xmin:size_xmax])
id_char = self.glyph_id[key[1]]
ids[size_ymin:size_ymax, size_xmin:size_xmax] = np.where(size_map, id_char, ids[size_ymin:size_ymax, size_xmin:size_xmax])
w = max(int(item['width'] / 128 * text_size2), 1)
h = max(int(item['rows'] / 128 * text_size2), 1)
top = int(np.clip(base_line - horiBearingY, 0, height - h))
left = int(np.clip(line_start + horiBearingX, 0, width - w))
im = np.asarray(Image.fromarray(item['image']).resize((w,h)))
images[top:top+h,left:left+w] = np.maximum(
images[top:top+h,left:left+w],
im)
line_start += int(horiAdvance)
# furigana (ruby text) handling
base_line2 = base_line - text_size2
line_start2 = int(max(0, 0 if block_count == 1 or block_no == 0 else line_break + break_space))
if block_no == 0:
line_start2 += remain
line_end2 = line_start
line_start2p = line_start2
while line_start2 < line_end2:
key2 = next(selection2, None)
if key2 is None:
break
item = self.img_cache[key2]['horizontal']
if item['width'] * item['rows'] == 0:
continue
w = item['width'] / 128 * text_size
h = item['rows'] / 128 * text_size
horiBearingX = item['horiBearingX'] / 128 * text_size
horiBearingY = item['horiBearingY'] / 128 * text_size
horiAdvance = item['horiAdvance'] / 128 * text_size
if np.random.uniform() < 0.2:
# leave this slot empty (no furigana here)
if line_start2 != line_start2p:
draw.line(((line_start2p // scale, base_line2 // scale),
(line_start2 // scale, base_line2 // scale)), fill=255, width=3)
line_start2 += int(horiAdvance)
line_start2p = line_start2
continue
# render a furigana glyph here
l = (line_start2 + horiBearingX) / width
t = (base_line2 - horiBearingY) / height
w = w / width
h = h / height
cx = l + w / 2
cy = t + h / 2
kernel_size = max(self.min_ker, int(max(w, h) / (2 * scale) * width))
std_x = min(self.max_std, max(self.min_ker, w / (2 * scale) * width) / 3)
std_y = min(self.max_std, max(self.min_ker, h / (2 * scale) * height) / 3)
center_kernel = gaussian_kernel(kernlen=kernel_size*2+1, xstd=std_x, ystd=std_y)
center_xmin = int(cx / scale * width) - kernel_size
center_xmax = int(cx / scale * width) + kernel_size + 1
center_ymin = int(cy / scale * height) - kernel_size
center_ymax = int(cy / scale * height) + kernel_size + 1
padx1 = max(0, 0 - center_xmin)
padx2 = max(0, center_xmax - width // scale)
pady1 = max(0, 0 - center_ymin)
pady2 = max(0, center_ymax - height // scale)
center_xmin += padx1
center_xmax -= padx2
center_ymin += pady1
center_ymax -= pady2
ker = kernel_size * 2 + 1
if center_ymax - center_ymin > 1 and center_xmax - center_xmin > 1:
keymap[center_ymin:center_ymax, center_xmin:center_xmax] = np.maximum(keymap[center_ymin:center_ymax, center_xmin:center_xmax], center_kernel[pady1:ker-pady2,padx1:ker-padx2])
size_xmin = np.clip(int((cx - w/2) * width / scale), 0, width // scale)
size_xmax = np.clip(int((cx + w/2) * width / scale) + 1, 0, width // scale)
size_ymin = np.clip(int((cy - h/2) * height / scale), 0, height // scale)
size_ymax = np.clip(int((cy + h/2) * height / scale) + 1, 0, height // scale)
size_mapx, size_mapy = np.meshgrid(np.arange(size_xmin, size_xmax) - cx * width / scale, np.arange(size_ymin, size_ymax) - cy * height / scale)
size_map = size_mapx ** 2 / max(w/2 * width / scale, 1) ** 2 + size_mapy ** 2 / max(h/2 * height / scale, 1) ** 2 < 1
center_x = int(cx / scale * width)
center_y = int(cy / scale * height)
offset_x = (cx * width % scale) / width * np.cos(angle)
offset_y = (cy * height % scale) / height * np.sin(angle + np.pi / 2)
offset_x += pad_x % scale
offset_y += pad_y % scale
offset_x = offset_x / scale - (np.arange(size_xmin, size_xmax) - center_x) * np.cos(angle)
offset_y = offset_y / scale - (np.arange(size_ymin, size_ymax) - center_y) * np.sin(angle + np.pi / 2)
offset_x = offset_x[np.newaxis,...] - np.linspace(-(size_ymax-size_ymin) * np.sin(angle) / 2, (size_ymax-size_ymin) * np.sin(angle) / 2, size_ymax-size_ymin)[...,np.newaxis]
offset_y = offset_y[...,np.newaxis] - np.linspace(-(size_xmax-size_xmin) * np.cos(angle + np.pi / 2) / 2, (size_xmax-size_xmin) * np.cos(angle + np.pi / 2) / 2, size_xmax-size_xmin)[np.newaxis,...]
offsetx[size_ymin:size_ymax, size_xmin:size_xmax] = np.where(size_map, offset_x, offsetx[size_ymin:size_ymax, size_xmin:size_xmax])
offsety[size_ymin:size_ymax, size_xmin:size_xmax] = np.where(size_map, offset_y, offsety[size_ymin:size_ymax, size_xmin:size_xmax])
fixw = w * np.abs(np.cos(angle)) + h * np.abs(np.sin(angle))
fixh = h * np.abs(np.cos(angle)) + w * np.abs(np.sin(angle))
fixw = np.log10(fixw * 10)
fixh = np.log10(fixh * 10)
xsizes[size_ymin:size_ymax, size_xmin:size_xmax] = np.where(size_map, fixw, xsizes[size_ymin:size_ymax, size_xmin:size_xmax])
ysizes[size_ymin:size_ymax, size_xmin:size_xmax] = np.where(size_map, fixh, ysizes[size_ymin:size_ymax, size_xmin:size_xmax])
id_char = self.glyph_id[key[1]]
ids[size_ymin:size_ymax, size_xmin:size_xmax] = np.where(size_map, id_char, ids[size_ymin:size_ymax, size_xmin:size_xmax])
w = max(int(item['width'] / 128 * text_size), 1)
h = max(int(item['rows'] / 128 * text_size), 1)
top = int(np.clip(base_line2 - horiBearingY, 0, height - h))
left = int(np.clip(line_start2 + horiBearingX, 0, width - w))
im = np.asarray(Image.fromarray(item['image']).resize((w,h)))
images[top:top+h,left:left+w] = np.maximum(
images[top:top+h,left:left+w],
im)
line_start2 += int(horiAdvance)
if line_start2 != line_start2p:
draw.line(((line_start2p // scale, base_line2 // scale),
(line_start2 // scale, base_line2 // scale)), fill=255, width=3)
base_line += line_space
if base_line + text_size2 >= height:
if block_no == 0:
sep_end = base_line - line_space
base_line = line_space
block_no += 1
if block_no >= block_count:
break
line_start = int(max(0, 0 if block_count == 1 or block_no == 0 else line_break + break_space))
line_end = int(min(width, width if block_count == 1 or block_no == 1 else line_break - break_space))
temp_lineend = line_start
linebuf = []
if all(t > 1 for t in text_count):
l = max(1,line_break // scale)
t = line_space // 2 // scale
b = sep_end // scale
seps[t:b, l-1:l+2] = 1
im = Image.fromarray(images).rotate(angle / np.pi * 180, resample=Image.Resampling.BILINEAR, translate=(pad_x, pad_y))
lines = lines.rotate(angle / np.pi * 180, resample=Image.Resampling.BILINEAR, translate=(pad_x / scale, pad_y / scale))
keymapim1 = Image.fromarray(keymap).rotate(angle / np.pi * 180, resample=Image.Resampling.NEAREST, translate=(pad_x / scale, pad_y / scale))
keymapim2 = Image.fromarray(keymap).rotate(angle / np.pi * 180, resample=Image.Resampling.BILINEAR, translate=(pad_x / scale, pad_y / scale))
keymapim = np.maximum(keymapim1, keymapim2)
xsizeim = Image.fromarray(xsizes).rotate(angle / np.pi * 180, resample=Image.Resampling.NEAREST, translate=(pad_x / scale, pad_y / scale))
ysizeim = Image.fromarray(ysizes).rotate(angle / np.pi * 180, resample=Image.Resampling.NEAREST, translate=(pad_x / scale, pad_y / scale))
xoffsetim = Image.fromarray(offsetx).rotate(angle / np.pi * 180, resample=Image.Resampling.BILINEAR, translate=(pad_x / scale, pad_y / scale))
yoffsetim = Image.fromarray(offsety).rotate(angle / np.pi * 180, resample=Image.Resampling.BILINEAR, translate=(pad_x / scale, pad_y / scale))
sepim = Image.fromarray(seps).rotate(angle / np.pi * 180, resample=Image.Resampling.BILINEAR, translate=(pad_x / scale, pad_y / scale))
labels = np.stack([keymapim, xsizeim, ysizeim, xoffsetim, yoffsetim, np.asarray(lines) / 255., sepim], -1)
idsim = Image.fromarray(ids).rotate(angle / np.pi * 180, resample=Image.Resampling.NEAREST, translate=(pad_x / scale, pad_y / scale))
images = np.asarray(im) / 255.
ids = np.asarray(idsim)
images = apply_random_filter(images)
return self.sub_constructimage(images, labels, ids, False)
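# Background-only sample: black image with all-zero label and id maps.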
def null_images(self):
images = np.zeros([height, width], dtype=np.float32)
keymap = np.zeros([height // scale, width // scale], dtype=np.float32)
xsizes = np.zeros([height // scale, width // scale], dtype=np.float32)
ysizes = np.zeros([height // scale, width // scale], dtype=np.float32)
offsetx = np.zeros([height // scale, width // scale], dtype=np.float32)
offsety = np.zeros([height // scale, width // scale], dtype=np.float32)
lines = np.zeros([height // scale, width // scale], dtype=np.float32)
seps = np.zeros([height // scale, width // scale], dtype=np.float32)
labels = np.stack([keymap, xsizes, ysizes, offsetx, offsety, lines, seps], -1)
ids = np.zeros([height // scale, width // scale], dtype=np.int32)
images = apply_random_filter(images)
return self.sub_constructimage(images, labels, ids, len(self.random_background) > 0)
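# Sample containing only one random straight line: the line is drawn into the image
# and marked in the separator channel; all other targets stay zero.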
def load_random_line(self):
images = np.zeros([height, width], dtype=np.float32)
keymap = np.zeros([height // scale, width // scale], dtype=np.float32)
xsizes = np.zeros([height // scale, width // scale], dtype=np.float32)
ysizes = np.zeros([height // scale, width // scale], dtype=np.float32)
offsetx = np.zeros([height // scale, width // scale], dtype=np.float32)
offsety = np.zeros([height // scale, width // scale], dtype=np.float32)
lines = np.zeros([height // scale, width // scale], dtype=np.float32)
seps = np.zeros([height // scale, width // scale], dtype=np.float32)
seps = Image.fromarray(seps)
draw1 = ImageDraw.Draw(seps)
images = Image.fromarray(images)
draw2 = ImageDraw.Draw(images)
linew = int(np.clip(np.random.uniform() * 20, scale, 20))
x1 = np.random.normal() * width / 2 + width / 2
y1 = np.random.normal() * height / 2 + height / 2
x2 = np.random.normal() * width / 2 + width / 2
y2 = np.random.normal() * height / 2 + height / 2
draw1.line(((x1 // scale, y1 // scale), (x2 // scale, y2 // scale)), fill=255, width=linew//scale+1)
draw2.line(((x1, y1), (x2, y2)), fill=255, width=linew)
labels = np.stack([keymap, xsizes, ysizes, offsetx, offsety, lines, np.asarray(seps) / 255.], -1)
ids = np.zeros([height // scale, width // scale], dtype=np.int32)
images = np.asarray(images) / 255.
images = apply_random_filter(images)
return self.sub_constructimage(images, labels, ids, len(self.random_background) > 0)
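# Scatter glyphs of random sizes at random, non-overlapping positions (overlaps are
# rejected against boxprev); occasionally add a ruled bar and inverted glyph patches.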
def load_images_random(self, keys, probs):
max_count = 64
angle_max = 15.0
images = np.zeros([height, width], dtype=np.float32)
keymap = np.zeros([height // scale, width // scale], dtype=np.float32)
xsizes = np.zeros([height // scale, width // scale], dtype=np.float32)
ysizes = np.zeros([height // scale, width // scale], dtype=np.float32)
offsetx = np.zeros([height // scale, width // scale], dtype=np.float32)
offsety = np.zeros([height // scale, width // scale], dtype=np.float32)
lines = np.zeros([height // scale, width // scale], dtype=np.float32)
seps = np.zeros([height // scale, width // scale], dtype=np.float32)
ids = np.zeros([height // scale, width // scale], dtype=np.int32)
angle = angle_max * np.random.normal() / 180 * np.pi
angle = np.clip(angle, -np.pi, np.pi)
pad_x = np.random.normal() * width / 20
pad_y = np.random.normal() * height / 20
selection = [key for key in random.choices(keys, k=np.random.randint(2,max_count), weights=probs)]
i = 0
boxprev = np.zeros([0, 4])
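# With 10% probability draw a thick horizontal or vertical bar anchored at one image
# edge; it is marked in the separator channel and its padded box is added to boxprev
# so later glyphs avoid it.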
if random.random() < 0.1:
margin = 20
line_c = random.randint(0,3)
lw = random.randint(2, 10)
if line_c == 0:
x = random.randrange(width // 2, width)
y = random.randrange(0, height - lw)
px = x // scale
py = y // scale
seps[py:py+lw//scale+1, :px] = 1
images[y:y+lw, :x] = 255
boxprev = np.concatenate([boxprev, [[0, (x + margin)/width, (y - margin)/height, (y+lw + margin)/height]]])
elif line_c == 1:
x = random.randrange(0, width // 2)
y = random.randrange(0, height - lw)
px = x // scale
py = y // scale
seps[py:py+lw//scale+1, px:] = 1
images[y:y+lw, x:] = 255
boxprev = np.concatenate([boxprev, [[(x - margin)/width, 1, (y - margin)/height, (y+lw + margin)/height]]])
elif line_c == 2:
y = random.randrange(height // 2, height)
x = random.randrange(0, width - lw)
px = x // scale
py = y // scale
seps[:py, px:px+lw//scale+1] = 1
images[:y, x:x+lw] = 255
boxprev = np.concatenate([boxprev, [[(x - margin)/width, (x+lw + margin)/width, 0, (y + margin)/height]]])
elif line_c == 3:
y = random.randrange(0, height // 2)
x = random.randrange(0, width - lw)
px = x // scale
py = y // scale
seps[py:, px:px+lw//scale+1] = 1
images[y:, x:x+lw] = 255
boxprev = np.concatenate([boxprev, [[(x - margin)/width, (x+lw + margin)/width, (y - margin)/height, 1]]])
if random.random() < 0.5:
min_pixel = 20
max_pixel = width
else:
min_pixel = 20
max_pixel = width // 3
for key in selection:
item = self.img_cache[key]['horizontal']
if item['width'] * item['rows'] == 0:
continue
if random.random() < 0.5:
tile_size = random.randint(min_pixel, max_pixel)
else:
tile_size = int(np.exp(random.uniform(np.log(min_pixel), np.log(max_pixel))))
w = item['width'] / 128 * tile_size
h = item['rows'] / 128 * tile_size
aspects = np.clip(np.random.normal() * 0.1 + 1.0, 0.75, 1.25)
if random.random() < 0.5:
aspects = 1.0 / aspects
w *= aspects
h /= aspects
tile_left = random.randint(0, int(width - tile_size))
tile_top = random.randint(0, int(height - tile_size))
if tile_top + h >= height or tile_left + w >= width:
continue
left = tile_left / width
top = tile_top / height
w = w / width
h = h / height
cx = left + w/2
cy = top + h/2
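# ~10% of glyphs are drawn inverted: reserve a 1.25x padded box for the overlap check
# and later flip the pixel values inside that region (white-on-black).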
if np.random.uniform() < 0.1:
invert = True
x1 = cx - w/2 * 1.25
x2 = cx + w/2 * 1.25
y1 = cy - h/2 * 1.25
y2 = cy + h/2 * 1.25
inter_xmin = np.maximum(boxprev[:,0], x1)
inter_ymin = np.maximum(boxprev[:,2], y1)
inter_xmax = np.minimum(boxprev[:,1], x2)
inter_ymax = np.minimum(boxprev[:,3], y2)
else:
invert = False
inter_xmin = np.maximum(boxprev[:,0], cx - w/2 * 1.1)
inter_ymin = np.maximum(boxprev[:,2], cy - h/2 * 1.1)
inter_xmax = np.minimum(boxprev[:,1], cx + w/2 * 1.1)
inter_ymax = np.minimum(boxprev[:,3], cy + h/2 * 1.1)
inter_w = np.maximum(inter_xmax - inter_xmin, 0.)
inter_h = np.maximum(inter_ymax - inter_ymin, 0.)
inter_vol = inter_w * inter_h
if np.any(inter_vol > 0):
continue
kernel_size = max(self.min_ker, int(max(w, h) / (2 * scale) * width))
std_x = min(self.max_std, max(self.min_ker, w / (2 * scale) * width) / 3)
std_y = min(self.max_std, max(self.min_ker, h / (2 * scale) * height) / 3)
center_kernel = gaussian_kernel(kernlen=kernel_size*2+1, xstd=std_x, ystd=std_y)
center_xmin = int(cx / scale * width) - kernel_size
center_xmax = int(cx / scale * width) + kernel_size + 1
center_ymin = int(cy / scale * height) - kernel_size
center_ymax = int(cy / scale * height) + kernel_size + 1
padx1 = max(0, 0 - center_xmin)
padx2 = max(0, center_xmax - width // scale)
pady1 = max(0, 0 - center_ymin)
pady2 = max(0, center_ymax - height // scale)
center_xmin += padx1
center_xmax -= padx2
center_ymin += pady1
center_ymax -= pady2
ker = kernel_size * 2 + 1
keymap[center_ymin:center_ymax, center_xmin:center_xmax] = np.maximum(keymap[center_ymin:center_ymax, center_xmin:center_xmax], center_kernel[pady1:ker-pady2,padx1:ker-padx2])
size_xmin = np.clip(int((cx - w/2) * width / scale), 0, width // scale)
size_xmax = np.clip(int((cx + w/2) * width / scale) + 1, 0, width // scale)
size_ymin = np.clip(int((cy - h/2) * height / scale), 0, height // scale)
size_ymax = np.clip(int((cy + h/2) * height / scale) + 1, 0, height // scale)
size_mapx, size_mapy = np.meshgrid(np.arange(size_xmin, size_xmax) - cx * width / scale, np.arange(size_ymin, size_ymax) - cy * height / scale)
size_map = size_mapx ** 2 / max(w/2 * width / scale, 1) ** 2 + size_mapy ** 2 / max(h/2 * height / scale, 1) ** 2 < 1
center_x = int(cx / scale * width)
center_y = int(cy / scale * height)
offset_x = (cx * width % scale) / width * np.cos(angle)
offset_y = (cy * height % scale) / height * np.sin(angle + np.pi / 2)
offset_x += pad_x % scale
offset_y += pad_y % scale
offset_x = offset_x / scale - (np.arange(size_xmin, size_xmax) - center_x) * np.cos(angle)
offset_y = offset_y / scale - (np.arange(size_ymin, size_ymax) - center_y) * np.sin(angle + np.pi / 2)
offset_x = offset_x[np.newaxis,...] - np.linspace(-(size_ymax-size_ymin) * np.sin(angle) / 2, (size_ymax-size_ymin) * np.sin(angle) / 2, size_ymax-size_ymin)[...,np.newaxis]
offset_y = offset_y[...,np.newaxis] - np.linspace(-(size_xmax-size_xmin) * np.cos(angle + np.pi / 2) / 2, (size_xmax-size_xmin) * np.cos(angle + np.pi / 2) / 2, size_xmax-size_xmin)[np.newaxis,...]
offsetx[size_ymin:size_ymax, size_xmin:size_xmax] = np.where(size_map, offset_x, offsetx[size_ymin:size_ymax, size_xmin:size_xmax])
offsety[size_ymin:size_ymax, size_xmin:size_xmax] = np.where(size_map, offset_y, offsety[size_ymin:size_ymax, size_xmin:size_xmax])
fixw = w * np.abs(np.cos(angle)) + h * np.abs(np.sin(angle))
fixh = h * np.abs(np.cos(angle)) + w * np.abs(np.sin(angle))
fixw = np.log10(fixw * 10)
fixh = np.log10(fixh * 10)
xsizes[size_ymin:size_ymax, size_xmin:size_xmax] = np.where(size_map, fixw, xsizes[size_ymin:size_ymax, size_xmin:size_xmax])
ysizes[size_ymin:size_ymax, size_xmin:size_xmax] = np.where(size_map, fixh, ysizes[size_ymin:size_ymax, size_xmin:size_xmax])
id_char = self.glyph_id[key[1]]
ids[size_ymin:size_ymax, size_xmin:size_xmax] = np.where(size_map, id_char, ids[size_ymin:size_ymax, size_xmin:size_xmax])
if invert:
boxprev = np.concatenate([boxprev, [[x1, x2, y1, y2]]])
else:
boxprev = np.concatenate([boxprev, [[cx - w/2, cx + w/2, cy - h/2, cy + h/2]]])
w = max(int(item['width'] / 128 * tile_size * aspects), 1)
h = max(int(item['rows'] / 128 * tile_size / aspects), 1)
im = np.asarray(Image.fromarray(item['image']).resize((w,h)))
images[tile_top:tile_top+h,tile_left:tile_left+w] = np.maximum(
images[tile_top:tile_top+h,tile_left:tile_left+w],
im)
if invert:
x1 = int(x1 * width)
x2 = int(x2 * width)
y1 = int(y1 * height)
y2 = int(y2 * height)
crop = images[y1:y2,x1:x2]
images[y1:y2,x1:x2] = 255 - crop
i += 1
im = Image.fromarray(images).rotate(angle / np.pi * 180, resample=Image.Resampling.BILINEAR, translate=(pad_x, pad_y))
keymapim1 = Image.fromarray(keymap).rotate(angle / np.pi * 180, resample=Image.Resampling.NEAREST, translate=(pad_x / scale, pad_y / scale))
keymapim2 = Image.fromarray(keymap).rotate(angle / np.pi * 180, resample=Image.Resampling.BILINEAR, translate=(pad_x / scale, pad_y / scale))
keymapim = np.maximum(keymapim1, keymapim2)
xsizeim = Image.fromarray(xsizes).rotate(angle / np.pi * 180, resample=Image.Resampling.NEAREST, translate=(pad_x / scale, pad_y / scale))
ysizeim = Image.fromarray(ysizes).rotate(angle / np.pi * 180, resample=Image.Resampling.NEAREST, translate=(pad_x / scale, pad_y / scale))
xoffsetim = Image.fromarray(offsetx).rotate(angle / np.pi * 180, resample=Image.Resampling.BILINEAR, translate=(pad_x / scale, pad_y / scale))
yoffsetim = Image.fromarray(offsety).rotate(angle / np.pi * 180, resample=Image.Resampling.BILINEAR, translate=(pad_x / scale, pad_y / scale))
sepim = Image.fromarray(seps).rotate(angle / np.pi * 180, resample=Image.Resampling.BILINEAR, translate=(pad_x / scale, pad_y / scale))
labels = np.stack([keymapim, xsizeim, ysizeim, xoffsetim, yoffsetim, lines, sepim], -1)
idsim = Image.fromarray(ids).rotate(angle / np.pi * 180, resample=Image.Resampling.NEAREST, translate=(pad_x / scale, pad_y / scale))
images = np.asarray(im) / 255.
ids = np.asarray(idsim)
images = apply_random_filter(images)
return self.sub_constructimage(images, labels, ids, len(self.random_background) > 0)
def load_images_fill(self, keys, fonts):
max_count = 64
angle_max = 15.0
min_pixel = 24
max_pixel = 200
if random.random() < 0.5:
tile_size = random.randint(min_pixel, max_pixel)
else:
tile_size = int(np.exp(random.uniform(np.log(min_pixel), np.log(max_pixel))))
images = np.zeros([height, width], dtype=np.float32)
keymap = np.zeros([height // scale, width // scale], dtype=np.float32)
xsizes = np.zeros([height // scale, width // scale], dtype=np.float32)
ysizes = np.zeros([height // scale, width // scale], dtype=np.float32)
offsetx = np.zeros([height // scale, width // scale], dtype=np.float32)
offsety = np.zeros([height // scale, width // scale], dtype=np.float32)
lines = np.zeros([height // scale, width // scale], dtype=np.float32)
lines = Image.fromarray(lines)
draw = ImageDraw.Draw(lines)
seps = np.zeros([height // scale, width // scale], dtype=np.float32)
ids = np.zeros([height // scale, width // scale], dtype=np.int32)
tile_left = 0
tile_base = 0
angle = angle_max * np.random.normal() / 180 * np.pi
if np.random.rand() < 0.5:
angle -= np.pi / 2
angle = np.clip(angle, -np.pi, np.pi)
pad_x = np.random.normal() * width / 20
pad_y = np.random.normal() * height / 20
aspects = np.clip(
|
np.random.normal()
|
numpy.random.normal
|
""" Models. """
import numpy as np
import inspect
import tensorflow as tf
from scipy.stats import ttest_ind
from sklearn.neighbors import KernelDensity
from epi.error_formatters import format_type_err_msg
from epi.normalizing_flows import NormalizingFlow
from epi.util import (
gaussian_backward_mapping,
aug_lag_vars,
unbiased_aug_grad,
AugLagHPs,
array_str,
np_column_vec,
plot_square_mat,
get_hash,
set_dir_index,
get_dir_index,
dbg_check,
)
import matplotlib.pyplot as plt
from matplotlib import animation
import pandas as pd
import seaborn as sns
import pickle
import time
import os
REAL_NUMERIC_TYPES = (int, float)
class Parameter(object):
"""Univariate parameter of a model.
:param name: Parameter name.
:type name: str
:param D: Number of dimensions of parameter.
:type D: int
:param lb: Lower bound of variable, defaults to `np.NINF*np.ones(D)`.
:type lb: np.ndarray, optional
:param ub: Upper bound of variable, defaults to `np.PINF*np.ones(D)`.
:type ub: np.ndarray, optional
"""
def __init__(self, name, D, lb=None, ub=None):
"""Constructor method."""
self._set_name(name)
self._set_D(D)
self._set_bounds(lb, ub)
def _set_name(self, name):
if type(name) is not str:
raise TypeError(format_type_err_msg(self, "name", name, str))
self.name = name
def _set_D(self, D):
if type(D) is not int:
raise TypeError(format_type_err_msg(self, "D", D, int))
if D < 1:
raise ValueError("Dimension of parameter must be positive.")
self.D = D
def _set_bounds(self, lb, ub):
if lb is None:
lb = np.NINF * np.ones(self.D)
elif isinstance(lb, REAL_NUMERIC_TYPES):
lb = np.array([lb])
if ub is None:
ub = np.PINF * np.ones(self.D)
elif isinstance(ub, REAL_NUMERIC_TYPES):
ub = np.array([ub])
if type(lb) is not np.ndarray:
raise TypeError(format_type_err_msg(self, "lb", lb, np.ndarray))
if type(ub) is not np.ndarray:
raise TypeError(format_type_err_msg(self, "ub", ub, np.ndarray))
lb_shape = lb.shape
if len(lb_shape) != 1:
raise ValueError("Lower bound lb must be vector.")
if lb_shape[0] != self.D:
raise ValueError("Lower bound lb does not have dimension D = %d." % self.D)
ub_shape = ub.shape
if len(ub_shape) != 1:
raise ValueError("Upper bound ub must be vector.")
if ub_shape[0] != self.D:
raise ValueError("Upper bound ub does not have dimension D = %d." % self.D)
for i in range(self.D):
if lb[i] > ub[i]:
raise ValueError(
"Parameter %s lower bound is greater than upper bound." % self.name
)
elif lb[i] == ub[i]:
raise ValueError(
"Parameter %s lower bound is equal to upper bound." % self.name
)
self.lb = lb
self.ub = ub
class Model(object):
"""Model to run emergent property inference on. To run EPI on a model:
#. Initialize an :obj:`epi.models.Model` with a list of :obj:`epi.models.Parameter`.
#. Use :obj:`epi.models.Model.set_eps` to set the emergent property statistics of the model.
#. Run emergent property inference for mean parameter :math:`\\mu` using :obj:`epi.models.Model.epi`.
:param name: Name of model.
:type name: str
:param parameters: List of :obj:`epi.models.Parameter`.
:type parameters: list
"""
def __init__(self, name, parameters):
self._set_name(name)
self._set_parameters(parameters)
self.eps = None
self.M_test = 200
self.M_norm = 200
def _set_name(self, name):
if type(name) is not str:
raise TypeError(format_type_err_msg(self, "name", name, str))
self.name = name
def _set_parameters(self, parameters):
if type(parameters) is not list:
raise TypeError(format_type_err_msg(self, "parameters", parameters, list))
for parameter in parameters:
if not parameter.__class__.__name__ == "Parameter":
raise TypeError(
format_type_err_msg(self, "parameter", parameter, Parameter)
)
if not self.parameter_check(parameters, verbose=True):
raise ValueError("Invalid parameter list.")
self.parameters = parameters
self.D = sum([param.D for param in parameters])
def set_eps(self, eps):
"""Set the emergent property statistic calculation for this model.
The arguments of eps should be batch vectors of univariate parameter
tensors following the naming convention in :obj:`self.Parameters`.
:param eps: Emergent property statistics function.
:type eps: function
"""
fullargspec = inspect.getfullargspec(eps)
args = fullargspec.args
_parameters = []
Ds = []
for arg in args:
found = False
for param in self.parameters:
if param.name == arg:
found = True
_parameters.append(param)
Ds.append(param.D)
self.parameters.remove(param)
break
if not found:
raise ValueError(
"Function eps has argument %s not in model parameter list." % arg
)
self.parameters = _parameters
def _eps(z):
ind = 0
zs = []
for D in Ds:
zs.append(z[:, ind : (ind + D)])
ind += D
return eps(*zs)
self.eps = _eps
self.eps.__name__ = eps.__name__
# Measure the eps dimensionality to populate self.m.
z = tf.ones((1, self.D))
T_z = self.eps(z)
T_z_shape = T_z.shape
if len(T_z_shape) != 2:
raise ValueError("Method eps must return tf.Tensor of dimension (N, D).")
self.m = T_z_shape[1]
return None
def _get_bounds(self,):
lb = np.zeros((self.D,))
ub = np.zeros((self.D,))
ind = 0
for param in self.parameters:
lb[ind : (ind + param.D)] = param.lb
ub[ind : (ind + param.D)] = param.ub
ind += param.D
return (lb, ub)
def epi(
self,
mu,
arch_type="coupling",
num_stages=3,
num_layers=2,
num_units=50,
elemwise_fn="affine",
batch_norm=False,
bn_momentum=0.0,
post_affine=True,
random_seed=1,
init_type=None, # "iso_gauss",
init_params=None, # {"loc": 0.0, "scale": 1.0},
K=10,
num_iters=1000,
N=500,
lr=1e-3,
c0=1.0,
gamma=0.25,
beta=4.0,
alpha=0.05,
nu=1.0,
stop_early=False,
log_rate=50,
verbose=False,
save_movie_data=False,
):
"""Runs emergent property inference for this model with mean parameter :math:`\\mu`.
:param mu: Mean parameter of the emergent property.
:type mu: np.ndarray
:param arch_type: :math:`\\in` :obj:`['autoregressive', 'coupling']`, defaults to :obj:`'coupling'`.
:type arch_type: str, optional
:param num_stages: Number of coupling or autoregressive stages, defaults to 3.
:type num_stages: int, optional
:param num_layers: Number of neural network layer per conditional, defaults to 2.
:type num_layers: int, optional
:param num_units: Number of units per layer, defaults to 50 (if None, min(max(2D, 15), 100) is used).
:type num_units: int, optional
:param elemwise_fn: Inter-stage bijector :math:`\\in` :obj:`['affine', 'spline']`, defaults to 'affine'.
:type elemwise_fn: str, optional
:param batch_norm: Use batch normalization between stages, defaults to False.
:type batch_norm: bool, optional
:param bn_momentum: Batch normalization momentum parameter, defaults to 0.0.
:type bn_momentum: float, optional
:param post_affine: Shift and scale following main transform, defaults to True.
:type post_affine: bool, optional
:param random_seed: Random seed of architecture parameters, defaults to 1.
:type random_seed: int, optional
:param init_type: :math:`\\in` :obj:`['gaussian', 'abc']`.
:type init_type: str, optional
:param init_params: Parameters according to :obj:`init_type`.
:type init_params: dict, optional
:param K: Number of augmented Lagrangian iterations, defaults to 10.
:type K: int, float, optional
:param num_iters: Number of optimization iterations, defaults to 1000.
:type num_iters: int, optional
:param N: Number of batch samples per iteration, defaults to 500.
:type N: int, optional
:param lr: Adam optimizer learning rate, defaults to 1e-3.
:type lr: float, optional
:param c0: Initial augmented Lagrangian coefficient, defaults to 1.0.
:type c0: float, optional
:param gamma: Augmented lagrangian hyperparameter, defaults to 0.25.
:type gamma: float, optional
:param beta: Augmented lagrangian hyperparameter, defaults to 4.0.
:type beta: float, optional
:param alpha: P-value threshold for convergence testing, defaults to 0.05.
:type alpha: float, optional
:param nu: Fraction of N used for convergence testing, defaults to 1.0.
:type nu: float, optional
:param stop_early: Exit if converged, defaults to False.
:type stop_early: bool, optional
:param log_rate: Record optimization data every log_rate iterations, defaults to 50.
:type log_rate: int, optional
:param verbose: Print optimization information, defaults to False.
:type verbose: bool, optional
:param save_movie_data: Save data for making optimization movie, defaults to False.
:type save_movie_data: bool, optional
:returns: q_theta, opt_df, save_path, failed
:rtype: epi.models.Distribution, pandas.DataFrame, str, bool
"""
if num_units is None:
num_units = min(max(2 * self.D, 15), 100)
nf = NormalizingFlow(
arch_type=arch_type,
D=self.D,
num_stages=num_stages,
num_layers=num_layers,
num_units=num_units,
elemwise_fn=elemwise_fn,
batch_norm=batch_norm,
bn_momentum=bn_momentum,
post_affine=post_affine,
bounds=self._get_bounds(),
random_seed=random_seed,
)
# Hyperparameter object
aug_lag_hps = AugLagHPs(N, lr, c0, gamma, beta)
# Initialize architecture to gaussian.
print("Initializing %s architecture." % nf.to_string(), flush=True)
if init_type is None or init_type == "gaussian":
if init_params is None:
mu_init = np.zeros((self.D))
Sigma = np.zeros((self.D, self.D))
for i in range(self.D):
if np.isneginf(nf.lb[i]) and np.isposinf(nf.ub[i]):
mu_init[i] = 0.0
Sigma[i, i] = 1.0
elif np.isneginf(nf.lb[i]):
mu_init[i] = nf.ub[i] - 2.0
Sigma[i, i] = 1.0
elif np.isposinf(nf.ub[i]):
mu_init[i] = nf.lb[i] + 2.0
Sigma[i, i] = 1.0
else:
mu_init[i] = (nf.lb[i] + nf.ub[i]) / 2.0
Sigma[i, i] = np.square((nf.ub[i] - nf.lb[i]) / 4)
init_params = {"mu": mu_init, "Sigma": Sigma}
elif init_type == "abc":
if "num_keep" in init_params.keys():
num_keep = init_params["num_keep"]
else:
num_keep = 200
if "means" in init_params.keys():
means = init_params["means"]
else:
means = mu[: len(mu) // 2]
if "stds" in init_params.keys():
stds = init_params["stds"]
else:
stds = np.sqrt(mu[len(mu) // 2 :])
hash_str = get_hash([nf.lb, nf.ub])
abc_dir = os.path.join("data", "abc")
abc_fname = os.path.join(
abc_dir,
"M=%d_p=%.2f_std=%.3f_%s_abc.npz"
% (num_keep, means[0], stds[0], hash_str),
)
if os.path.exists(abc_fname):
print("Loading prev ABC.")
npzfile = np.load(abc_fname)
init_params = {"mu": npzfile["mu"], "Sigma": npzfile["Sigma"]}
else:
print("Running ABC!")
def accept_inds(T_x, means, stds):
acc = np.array(
[
np.logical_and(
means[i] - 2 * stds[i] < T_x[:, i],
T_x[:, i] < means[i] + 2 * stds[i],
)
for i in range(len(means))
]
)
return np.logical_and.reduce(acc, axis=0)
num_found = 0
z_abc = None
T_x_abc = None
while num_found < num_keep:
_z = np.zeros((N, self.D), dtype=np.float32)
for j in range(self.D):
_z[:, j] = np.random.uniform(
self.parameters[j].lb, self.parameters[j].ub, (N,)
)
_T_x = self.eps(_z).numpy()
inds = accept_inds(_T_x, means, stds)
_z = _z[inds, :]
_T_x = _T_x[inds, :]
num_found += _z.shape[0]
if z_abc is None:
z_abc = _z
T_x_abc = _T_x
else:
z_abc = np.concatenate((z_abc, _z), axis=0)
T_x_abc = np.concatenate((T_x_abc, _T_x), axis=0)
print("ABC for init: %d/%d\r" % (num_found, num_keep), end="")
mu_init = np.mean(z_abc, axis=0)
Sigma = np.eye(self.D)
if not os.path.exists(abc_dir):
os.mkdir(abc_dir)
np.savez(abc_fname, mu=mu_init, Sigma=Sigma)
init_params = {"mu": mu_init, "Sigma": Sigma}
nf.initialize(init_params["mu"], init_params["Sigma"], N=N, verbose=True)
# Checkpoint the initialization.
optimizer = tf.keras.optimizers.Adam(lr)
ckpt = tf.train.Checkpoint(optimizer=optimizer, model=nf)
ckpt_dir, exists = self.get_epi_path(init_params, nf, mu, aug_lag_hps)
if exists:
print("Loading cached epi at %s." % ckpt_dir)
q_theta = self._get_epi_dist(-1, init_params, nf, mu, aug_lag_hps)
opt_df = pd.read_csv(os.path.join(ckpt_dir, "opt_data.csv"), index_col=0)
failed = (opt_df["cost"].isna()).sum() > 0
return q_theta, opt_df, ckpt_dir, failed
manager = tf.train.CheckpointManager(ckpt, directory=ckpt_dir, max_to_keep=None)
manager.save(checkpoint_number=0)
print("Saving EPI models to %s." % ckpt_dir, flush=True)
@tf.function
def train_step(eta, c):
with tf.GradientTape(persistent=True) as tape:
z, log_q_z = nf(N)
params = nf.trainable_variables
tape.watch(params)
H, R, R1s, R2 = aug_lag_vars(z, log_q_z, self.eps, mu, N)
neg_H = -H
lagrange_dot = tf.reduce_sum(tf.multiply(eta, R))
aug_l2 = c / 2.0 * tf.reduce_sum(tf.square(R))
cost = neg_H + lagrange_dot + aug_l2
H_grad = tape.gradient(neg_H, params)
lagrange_grad = tape.gradient(lagrange_dot, params)
aug_grad = unbiased_aug_grad(R1s, R2, params, tape)
gradients = [
g1 + g2 + c * g3 for g1, g2, g3 in zip(H_grad, lagrange_grad, aug_grad)
]
MAX_NORM = 1e10
gradients = [tf.clip_by_norm(g, MAX_NORM) for g in gradients]
optimizer.apply_gradients(zip(gradients, params))
return cost, H, R, z, log_q_z
N_test = int(nu * N)
# Initialize augmented Lagrangian parameters eta and c.
eta, c = np.zeros((self.m,), np.float32), c0
etas, cs = np.zeros((K, self.m)), np.zeros((K,))
# Initialize optimization data frame.
z, log_q_z = nf(N)
H_0, R_0, _, _ = aug_lag_vars(z, log_q_z, self.eps, mu, N)
cost_0 = -H_0 + np.dot(eta, R_0) + c / 2.0 * np.sum(np.square(R_0))
R_keys = ["R%d" % (i + 1) for i in range(self.m)]
opt_it_dfs = [
self._opt_it_df(
0, 0, H_0.numpy(), cost_0.numpy(), R_0.numpy(), log_rate, R_keys
)
]
# Record samples for movie.
if save_movie_data:
N_save = 200
zs = [z.numpy()[:N_save, :]]
log_q_zs = [log_q_z.numpy()[:N_save]]
# Measure initial R norm distribution.
mu_colvec = np_column_vec(mu).astype(np.float32).T
norms = get_R_norm_dist(nf, self.eps, mu_colvec, self.M_norm, N)
# EPI optimization
print(format_opt_msg(0, 0, cost_0, H_0, R_0, 0.0), flush=True)
failed = False
time_per_it = np.nan
epoch_times = []
for k in range(1, K + 1):
epoch_start = time.time()
etas[k - 1], cs[k - 1] = eta, c
for i in range(1, num_iters + 1):
time1 = time.time()
cost, H, R, z, log_q_z = train_step(eta, c)
time2 = time.time()
if i % log_rate == 0:
time_per_it = time2 - time1
if verbose:
print(format_opt_msg(k, i, cost, H, R, time_per_it), flush=True)
it = (k - 1) * num_iters + i
opt_it_dfs.append(
self._opt_it_df(
k, it, H.numpy(), cost.numpy(), R.numpy(), log_rate, R_keys
)
)
if save_movie_data:
zs.append(z.numpy()[:N_save, :])
log_q_zs.append(log_q_z.numpy()[:N_save])
if np.isnan(cost):
failed = True
if verbose:
print(format_opt_msg(k, i, cost, H, R, time_per_it), flush=True)
it = (k - 1) * num_iters + i
opt_it_dfs.append(
self._opt_it_df(
k, it, H.numpy(), cost.numpy(), R.numpy(), log_rate, R_keys
)
)
print("NaN in EPI optimization. Exiting.")
break
if not verbose:
print(format_opt_msg(k, i, cost, H, R, time_per_it), flush=True)
# Save epi optimization data following aug lag iteration k.
opt_it_df = pd.concat(opt_it_dfs)
manager.save(checkpoint_number=k)
if failed:
converged = False
else:
R_means = get_R_mean_dist(nf, self.eps, mu_colvec, self.M_test, N_test)
converged = self.test_convergence(R_means.numpy(), alpha)
last_ind = opt_it_df["iteration"] == k * num_iters
opt_it_df.loc[last_ind, "converged"] = converged
self._save_epi_opt(ckpt_dir, opt_it_df, cs, etas)
opt_it_dfs = [opt_it_df]
end_opt = False
if k < K:
if np.isnan(cost):
end_opt = True
# Check for convergence if early stopping.
elif stop_early and converged:
print("Stopping early because converged!", flush=True)
end_opt = True
else:
# Update eta and c
eta = eta + c * R
norms_k = get_R_norm_dist(nf, self.eps, mu_colvec, self.M_norm, N)
t, p = ttest_ind(
norms_k.numpy(), gamma * norms.numpy(), equal_var=False
)
u = np.random.rand(1)
if u < 1 - p / 2.0 and t > 0.0:
c = beta * c
norms = norms_k
time_per_it = time2 - time1
epoch_end = time.time()
epoch_times.append(epoch_end - epoch_start)
if save_movie_data:
np.savez(
os.path.join(ckpt_dir, "movie_data.npz"),
zs=np.array(zs),
log_q_zs=np.array(log_q_zs),
time_per_it=time_per_it,
epoch_times=np.array(epoch_times),
iterations=np.arange(0, k * num_iters + 1, log_rate),
)
else:
np.savez(
os.path.join(ckpt_dir, "timing.npz"),
epoch_times=epoch_times,
time_per_it=time_per_it,
)
if end_opt:
break
# Save hyperparameters.
self.aug_lag_hps = aug_lag_hps
# Return optimized distribution.
q_theta = Distribution(nf, self.parameters)
# q_theta.set_batch_norm_trainable(False)
return q_theta, opt_it_dfs[0], ckpt_dir, failed
def get_epi_df(self):
base_path = os.path.join("data", "epi", self.name)
next_listdir = [os.path.join(base_path, f) for f in os.listdir(base_path)]
init_paths = [f for f in next_listdir if os.path.isdir(f)]
dfs = []
for init_path in init_paths:
init = get_dir_index(os.path.join(init_path, "init.pkl"))
if init is None:
continue
next_listdir = [os.path.join(init_path, f) for f in os.listdir(init_path)]
arch_paths = [f for f in next_listdir if os.path.isdir(f)]
for arch_path in arch_paths:
arch = get_dir_index(os.path.join(arch_path, "arch.pkl"))
if arch is None:
continue
next_listdir = [
os.path.join(arch_path, f) for f in os.listdir(arch_path)
]
ep_paths = [f for f in next_listdir if os.path.isdir(f)]
for ep_path in ep_paths:
ep = get_dir_index(os.path.join(ep_path, "ep.pkl"))
if ep is None:
continue
next_listdir = [
os.path.join(ep_path, f) for f in os.listdir(ep_path)
]
AL_hp_paths = [f for f in next_listdir if os.path.isdir(f)]
for AL_hp_path in AL_hp_paths:
AL_hps = get_dir_index(os.path.join(AL_hp_path, "AL_hps.pkl"))
if AL_hps is None:
continue
opt_data_file = os.path.join(AL_hp_path, "opt_data.csv")
if os.path.exists(opt_data_file):
df = pd.read_csv(opt_data_file)
df["path"] = AL_hp_path
df["init"] = df.shape[0] * [init]
df["arch"] = df.shape[0] * [arch]
df["EP"] = df.shape[0] * [ep]
df["AL_hps"] = df.shape[0] * [AL_hps]
dfs.append(df)
return pd.concat(dfs)
def epi_opt_movie(self, path):
"""Generate video of EPI optimization.
:param path: Path to folder with optimization data.
:type path: str
"""
D = self.D
palette = sns.color_palette()
fontsize = 22
z_filename = os.path.join(path, "movie_data.npz")
opt_data_filename = os.path.join(path, "opt_data.csv")
# Load zs for optimization.
if os.path.exists(z_filename):
z_file = np.load(z_filename)
else:
raise IOError("File %s does not exist." % z_filename)
if os.path.exists(opt_data_filename):
opt_data_df = pd.read_csv(opt_data_filename)
else:
raise IOError("File %s does not exist." % opt_data_filename)
zs = z_file["zs"]
log_q_zs = z_file["log_q_zs"]
iters = z_file["iterations"]
N_frames = len(iters)
Hs = opt_data_df["H"]
if (len(Hs) < N_frames) or (
not np.isclose(iters, opt_data_df["iteration"][:N_frames]).all()
):
raise IOError("opt_data.csv incompatible with movie_data.npz.")
R_keys = []
for key in opt_data_df.columns:
if "R" in key:
R_keys.append(key)
m = len(R_keys)
R = opt_data_df[R_keys].to_numpy()
_iters = [iters[0]]
_Hs = [Hs[0]]
z = zs[0]
log_q_z = log_q_zs[0]
ylab_x = -0.075
ylab_y = 0.6
if not (self.name == "lds_2D"):
iter_rows = 3
# z_labels = [param.name for param in self.parameters]
z_labels = ["z%d" % d for d in range(1, self.D + 1)]
fig, axs = plt.subplots(D + iter_rows, D, figsize=(9, 12))
H_ax = plt.subplot(D + iter_rows, 1, 1)
else:
z_labels = [r"$a_{11}$", r"$a_{12}$", r"$a_{21}$", r"$a_{22}$"]
fig, axs = plt.subplots(4, 8, figsize=(16, 8))
H_ax = plt.subplot(4, 2, 1)
mode1s = []
mode2s = []
wsize = 100
# Entropy lines
x_end = 1.25 * iters[-1]
opt_y_shiftx = -0.05
num_iters = opt_data_df[opt_data_df["k"] == 1]["iteration"].max()
K = opt_data_df["k"].max()
log_rate = opt_data_df["iteration"][1]
xticks = num_iters * np.arange(K + 1)
H_ax.set_xlim(0, x_end)
min_H, max_H = np.min(Hs), np.max(Hs)
H_ax.set_ylim(min_H, max_H)
(H_line,) = H_ax.plot(_iters, _Hs, c=palette[0])
H_ax.set_ylabel(r"$H(q_\theta)$", rotation="horizontal", fontsize=fontsize)
H_ax.yaxis.set_label_coords(ylab_x + opt_y_shiftx, ylab_y)
H_ax.set_xticks(xticks)
H_ax.set_xticklabels(len(xticks) * [""])
H_ax.spines["bottom"].set_bounds(0, iters[-1])
H_ax.spines["right"].set_visible(False)
H_ax.spines["top"].set_visible(False)
# Constraint lines
if not (self.name == "lds_2D"):
R_ax = plt.subplot(D + iter_rows, 1, 2)
else:
R_ax = plt.subplot(4, 2, 3)
R_ax.set_xlim(0, iters[-1])
R_ind1 = len(iters) // 10
print(R_ind1, R.shape)
min_R, max_R = np.min(R[R_ind1:, :]), np.max(R[R_ind1:, :])
R_ax.set_xlim(0, x_end)
R_ax.set_ylim(min_R, max_R)
R_ax.set_ylabel(r"$R(q_\theta)$", rotation="horizontal", fontsize=fontsize)
R_ax.yaxis.set_label_coords(ylab_x + opt_y_shiftx, ylab_y)
R_ax.set_xlabel("iterations", fontsize=(fontsize - 2))
R_ax.set_xticks(xticks)
xticklabels = ["0"] + ["%dk" % int(xtick / 1000) for xtick in xticks[1:]]
R_ax.set_xticklabels(xticklabels, fontsize=(fontsize - 4))
R_ax.spines["bottom"].set_bounds(0, iters[-1])
R_ax.spines["right"].set_visible(False)
R_ax.spines["top"].set_visible(False)
if not (self.name == "lds_2D"):
for j in range(D):
axs[2, j].axis("off")
else:
# Plot the matrices
def get_lds_2D_modes(z, log_q_z):
M = log_q_z.shape[0]
mode1 = np.logical_and(z[:, 1] > 0.0, z[:, 2] < 0)
if sum(mode1) == 0:
mode1 = np.zeros((2, 2))
else:
mode1_inds = np.arange(M)[mode1]
mode1_ind = mode1_inds[np.argmax(log_q_z[mode1])]
mode1 = np.reshape(z[mode1_ind], (2, 2))
mode2 = np.logical_and(z[:, 1] < 0.0, z[:, 2] > 0)
if sum(mode2) == 0:
mode2 = np.zeros((2, 2))
else:
mode2_inds = np.arange(M)[mode2]
mode2_ind = mode2_inds[np.argmax(log_q_z[mode2])]
mode2 = np.reshape(z[mode2_ind], (2, 2))
return mode1, mode2
sqmat_xlims1 = [-0.2, 1.25]
sqmat_xlims2 = [-0.05, 1.4]
sqmat_ylims = [-0.05, 1.4]
mode1, mode2 = get_lds_2D_modes(z, log_q_z)
mode1s.append(mode1)
mode2s.append(mode2)
lw = 5
gray = 0.4 * np.ones(3)
bfrac = 0.05
mat_ax = plt.subplot(2, 4, 5)
texts = plot_square_mat(
mat_ax,
mode1,
c=gray,
lw=lw,
fontsize=24,
bfrac=bfrac,
title="mode 1",
xlims=sqmat_xlims1,
ylims=sqmat_ylims,
text_c=palette[1],
)
mat_ax = plt.subplot(2, 4, 6)
texts += plot_square_mat(
mat_ax,
mode2,
c=gray,
lw=lw,
fontsize=24,
bfrac=bfrac,
title="mode 2",
xlims=sqmat_xlims2,
ylims=sqmat_ylims,
text_c=palette[3],
)
mode1_vec = np.reshape(mode1, (4,))
mode2_vec = np.reshape(mode2, (4,))
R_lines = []
_Rs = []
for i in range(m):
_Rs.append([R[0, i]])
(R_line,) = R_ax.plot(
_iters, _Rs[i], label=R_keys[i], c="k"
) # palette[i + 1])
R_lines.append(R_line)
# R_ax.legend(loc=9)
lines = [H_line] + R_lines
# Get axis limits
ax_mins = []
ax_maxs = []
lb, ub = self._get_bounds()
for i in range(D):
if np.isneginf(lb[i]):
ax_mins.append(np.min(zs[:, :, i]))
else:
ax_mins.append(lb[i])
if np.isposinf(ub[i]):
ax_maxs.append(np.max(zs[:, :, i]))
else:
ax_maxs.append(ub[i])
# Collect scatters
cmap = plt.get_cmap("viridis")
scats = []
if not (self.name == "lds_2D"):
scat_i = iter_rows
scat_j = 0
else:
scat_i = 0
scat_j = 4
for i in range(D - 1):
for j in range(i + 1, D):
ax = axs[i + scat_i][j + scat_j]
scats.append(ax.scatter(z[:, j], z[:, i], c=log_q_zs[0], cmap=cmap))
ax.set_xlim(ax_mins[j], ax_maxs[j])
ax.set_ylim(ax_mins[i], ax_maxs[i])
ax.spines["right"].set_visible(False)
ax.spines["top"].set_visible(False)
kdes = []
conts = []
kde_scale_fac = 0.1
nlevels = 10
num_grid = 20
for i in range(1, D):
ax_len_i = ax_maxs[i] - ax_mins[i]
grid_ys = np.linspace(ax_mins[i], ax_maxs[i], num_grid)
for j in range(i):
ax_len_j = ax_maxs[j] - ax_mins[j]
grid_xs = np.linspace(ax_mins[j], ax_maxs[j], num_grid)
ax = axs[i + scat_i][j + scat_j]
kde = KernelDensity(
kernel="gaussian",
bandwidth=kde_scale_fac * (ax_len_i + ax_len_j) / 2.0,
)
_z = z[:, [j, i]]
kde.fit(_z, log_q_z)
z_grid = np.meshgrid(grid_xs, grid_ys)
z_grid_mat = np.stack(
[
np.reshape(z_grid[0], (num_grid ** 2)),
np.reshape(z_grid[1], (num_grid ** 2)),
],
axis=1,
)
scores_ij = kde.score_samples(z_grid_mat)
scores_ij = np.reshape(scores_ij, (num_grid, num_grid))
levels = np.linspace(np.min(scores_ij), np.max(scores_ij), 20)
cont = ax.contourf(z_grid[0], z_grid[1], scores_ij, levels=levels)
conts.append(cont)
ax.set_xlim(ax_mins[j], ax_maxs[j])
ax.set_ylim(ax_mins[i], ax_maxs[i])
kdes.append(kde)
for i in range(D):
axs[i + scat_i][scat_j].set_ylabel(
z_labels[i], rotation="horizontal", fontsize=fontsize
)
axs[i + scat_i][scat_j].yaxis.set_label_coords(D * ylab_x, ylab_y)
axs[-1][i + scat_j].set_xlabel(z_labels[i], fontsize=fontsize)
axs[i + scat_i][i + scat_j].set_xlim(ax_mins[i], ax_maxs[i])
axs[i + scat_i][i + scat_j].set_ylim(ax_mins[i], ax_maxs[i])
axs[i + scat_i][i + scat_j].spines["right"].set_visible(False)
axs[i + scat_i][i + scat_j].spines["top"].set_visible(False)
# Plot modes
if self.name == "lds_2D":
for i in range(D - 1):
for j in range(i + 1, D):
(line,) = axs[i + scat_i, j + scat_j].plot(
mode1_vec[j], mode1_vec[i], "o", c=palette[1]
)
lines.append(line)
(line,) = axs[i + scat_i, j + scat_j].plot(
mode2_vec[j], mode2_vec[i], "o", c=palette[3]
)
lines.append(line)
# Tick labels
for i in range(D):
for j in range(1, D):
axs[i + scat_i, j + scat_j].set_yticklabels([])
for i in range(D - 1):
for j in range(D):
axs[i + scat_i, j + scat_j].set_xticklabels([])
def update(frame):
_iters.append(iters[frame])
_Hs.append(Hs[frame])
for i in range(m):
_Rs[i].append(R[frame, i])
z = zs[frame]
log_q_z = log_q_zs[frame]
cvals = log_q_z - np.min(log_q_z)
cvals = cvals / np.max(cvals)
# Update entropy plot
lines[0].set_data(_iters, _Hs)
for i in range(m):
lines[i + 1].set_data(_iters, _Rs[i])
# Update modes.
if self.name == "lds_2D":
mode1, mode2 = get_lds_2D_modes(z, log_q_z)
mode1s.append(mode1)
mode2s.append(mode2)
mode1_avg = np.mean(np.array(mode1s)[-wsize:, :], axis=0)
mode2_avg = np.mean(np.array(mode2s)[-wsize:, :], axis=0)
mode1_vec = np.reshape(mode1_avg, (4,))
mode2_vec = np.reshape(mode2_avg, (4,))
ind = 0
for i in range(2):
for j in range(2):
texts[ind].set_text("%.1f" % mode1_avg[i, j])
texts[ind + 4].set_text("%.1f" % mode2_avg[i, j])
ind += 1
ind = 0
for i in range(D - 1):
for j in range(i + 1, D):
lines[1 + m + ind].set_data(mode1_vec[j], mode1_vec[i])
ind += 1
lines[1 + m + ind].set_data(mode2_vec[j], mode2_vec[i])
ind += 1
# Update scatters
_ind = 0
for i in range(D - 1):
for j in range(i + 1, D):
scats[_ind].set_offsets(
|
np.stack((z[:, j], z[:, i]), axis=1)
|
numpy.stack
|
#!/usr/bin/python
# Imports
import sys, os, re, time
import argparse
import pdb
import pickle
from itertools import *
# Science
import numpy as np
import scipy.stats as stats
import pandas as pd
# Plotting
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from matplotlib import colors
from mpl_toolkits.mplot3d import Axes3D
################################## FUNCTIONS ############################
# Population time-series
def population_time_series_embedding_lengths(cycle_stats_df, attribute, cutoff_criteria, cutoff, sample_style, save_dir):
'''
Function that plots a population level time series embedding of cycle and period lengths
In plot:
x axis is length attribute for cycle 1,
y axis is length attribute for cycle 2,
z axis is length attribute for cycle 3
Input:
cycle_stats_df: pandas dataframe, with information about user's cycle statistics
attribute: whether to consider 'cycle_lengths' or 'period_lengths'
cutoff_criteria: what statistic to use for separating users into groups ('cycle_lengths' for paper)
cutoff: what statistic cutoff value to use for separating users into groups (9 for paper)
sample_style: whether to pick 3 consecutive 'random' or 'first' cycles per-user
save_dir: path where to save plot
Output:
None
'''
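# Example call (illustrative only; the cutoff column name is a hypothetical placeholder):
#
#   population_time_series_embedding_lengths(
#       cycle_stats_df, attribute='cycle_lengths',
#       cutoff_criteria='median_inter_cycle_length',  # hypothetical column name
#       cutoff=9, sample_style='first', save_dir='plots')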
#get users with color by attribute > cutoff, and <= cutoff
cycle_stats_df_greater_than = cycle_stats_df[cycle_stats_df[cutoff_criteria] > cutoff]
cycle_stats_df_less_than = cycle_stats_df[cycle_stats_df[cutoff_criteria] <= cutoff]
cycle_lengths_greater_than = cycle_stats_df_greater_than[attribute]
cycle_lengths_less_than = cycle_stats_df_less_than[attribute]
# Filename
if sample_style == 'first':
filename = '{}/population_time_series_embedding_for_{}_split_by_{}_{}_first_3.pdf'.format(save_dir, attribute, cutoff_criteria, cutoff)
if sample_style == 'random':
filename = '{}/population_time_series_embedding_for_{}_split_by_{}_{}_sample_3.pdf'.format(save_dir, attribute, cutoff_criteria, cutoff)
# Plot
colors = ['orange', 'c']
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
for index, cycle_lengths in enumerate([cycle_lengths_greater_than, cycle_lengths_less_than]):
print('Start selecting cycles for one group')
if sample_style=='first':
sample_cycle_lengths = [cycle_length[:3] for cycle_length in cycle_lengths if len(cycle_length) >= 3]
if sample_style=='random':
sample_cycle_lengths = []
for cycle_length in cycle_lengths:
if len(cycle_length) >= 3:
num_cycles_array = np.linspace(0, len(cycle_length)-3, len(cycle_length)-2)
start_index = np.random.choice(num_cycles_array, size=1).astype(int)[0]
sample_cycle_lengths.append(cycle_length[start_index:start_index+3])
print('Finished selecting cycles for one group')
print('Start plotting one group')
for i in range(len(sample_cycle_lengths)):
xs = sample_cycle_lengths[i][0]
ys = sample_cycle_lengths[i][1]
zs = sample_cycle_lengths[i][2]
# Plot this point
ax.scatter(xs, ys, zs, color = colors[index], s=1, alpha=0.3)
print('Finished plotting one group')
ax.set_xlabel(attribute+ '[i]')
ax.set_ylabel(attribute+ '[i+1]')
ax.set_zlabel(attribute+ '[i+2]')
if attribute == 'cycle_lengths':
#ref_line_points = np.linspace(10, 90, 10)
#ax.plot(ref_line_points, ref_line_points, ref_line_points, color='red', linestyle='dashed', linewidth=4, markersize=4)#, alpha=0.8)
ax.set_xlim3d(10,90)
ax.set_ylim3d(10,90)
ax.set_zlim3d(10,90)
elif attribute == 'period_lengths':
max_period_days=28
#ref_line_points = np.linspace(1, max_period_days, 4)
#ax.plot(ref_line_points, ref_line_points, ref_line_points, color='red', linestyle='dashed', linewidth=4, markersize=4)#, alpha=0.8)
ax.set_xlim3d(1,max_period_days)
ax.set_ylim3d(1,max_period_days)
ax.set_zlim3d(1,max_period_days)
ax.set_xticks(np.append([1],np.arange(4,max_period_days+1, 4)))
ax.set_yticks(np.append([1],np.arange(4,max_period_days+1, 4)))
ax.set_zticks(np.append([1],np.arange(4,max_period_days+1, 4)))
plt.savefig(filename.format(save_dir), format='pdf', bbox_inches='tight')
print('Finished one view')
# With angles
for angle in [30, 60, 90, 180]:
print('Start one view')
filename_angle = filename[:-4]+'_'+str(angle)+'.pdf'
ax.view_init(elev=None, azim=angle)
# Add (a)/(b) labels for paper
ax.text2D(12, 7,'(a)', fontsize=14, fontweight='bold', horizontalalignment='center', verticalalignment='center', transform=None)
plt.savefig(filename_angle.format(save_dir), format='pdf', bbox_inches='tight')
print('Finished one view')
plt.close()
# Time series embedding for a randomly chosen user
def random_time_series_embedding_lengths(cycle_stats_df, attribute, cutoff_criteria, cutoff, save_dir):
'''
Function that plots a time series embedding of cycle and period lengths for a randomly chosen user per group
In plot:
x axis is length attribute for cycle i,
y axis is length attribute for cycle i+1,
z axis is length attribute for cycle i+2
Input:
cycle_stats_df: pandas dataframe, with information about user's cycle statistics
attribute: whether to consider 'cycle_lengths' or 'period_lengths'
cutoff_criteria: what statistic to use for separating users into groups ('cycle_lengths' for paper)
cutoff: what statistic cutoff value to use for separating users into groups (9 for paper)
save_dir: path where to save plot
Output:
None
'''
# Select users with median number of cycles tracked
cycle_stats_df_median = cycle_stats_df[cycle_stats_df['num_cycles_tracked'] == 11]
filename = '{}/random_time_series_embedding_for_{}_split_by_{}_{}.pdf'.format(save_dir, attribute, cutoff_criteria, cutoff)
#get users with color by attribute > cutoff, and <= cutoff
cycle_stats_df_greater_than = cycle_stats_df_median[cycle_stats_df_median[cutoff_criteria] > cutoff]
cycle_stats_df_less_than = cycle_stats_df_median[cycle_stats_df_median[cutoff_criteria] <= cutoff]
cycle_lengths_greater_than = cycle_stats_df_greater_than[attribute]
cycle_lengths_less_than = cycle_stats_df_less_than[attribute]
# Randomly pick a user from each group
cycle_lengths_greater_than_user = np.random.choice(cycle_lengths_greater_than, size=1, replace=False)
cycle_lengths_less_than_user = np.random.choice(cycle_lengths_less_than, size=1, replace=False)
# Plot
colors = ['orange', 'c']
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
#plot each user, color by median intercycle length
xs = list(cycle_lengths_greater_than_user[0][0:-2])
ys = list(cycle_lengths_greater_than_user[0][1:-1])
zs = list(cycle_lengths_greater_than_user[0][2:])
ax.scatter(xs, ys, zs, color = 'orange')
ax.plot(xs, ys, zs, color='orange', linestyle='dashed', alpha=0.8)
xs = list(cycle_lengths_less_than_user[0][0:-2])
ys = list(cycle_lengths_less_than_user[0][1:-1])
zs = list(cycle_lengths_less_than_user[0][2:])
ax.scatter(xs, ys, zs, color = 'c')
ax.plot(xs, ys, zs, color='c', linestyle='dashed', alpha=0.8)
ax.set_xlabel(attribute+ '[i]')
ax.set_ylabel(attribute+ '[i+1]')
ax.set_zlabel(attribute+ '[i+2]')
if attribute == 'cycle_lengths':
#ref_line_points = np.linspace(10, 90, 10)
#ax.plot(ref_line_points, ref_line_points, ref_line_points, color='red', linestyle='dashed', linewidth=4, markersize=4)#, alpha=0.8)
ax.set_xlim3d(10,90)
ax.set_ylim3d(10,90)
ax.set_zlim3d(10,90)
elif attribute == 'period_lengths':
max_period_days=28
#ref_line_points = np.linspace(1, max_period_days, 4)
#ax.plot(ref_line_points, ref_line_points, ref_line_points, color='red', linestyle='dashed', linewidth=4, markersize=4)#, alpha=0.8)
ax.set_xlim3d(1,max_period_days)
ax.set_ylim3d(1,max_period_days)
ax.set_zlim3d(1,max_period_days)
ax.set_xticks(np.append([1],np.arange(4,max_period_days+1, 4)))
ax.set_yticks(np.append([1],np.arange(4,max_period_days+1, 4)))
ax.set_zticks(np.append([1],np.arange(4,max_period_days+1, 4)))
plt.savefig(filename.format(save_dir), format='pdf', bbox_inches='tight')
print('Finished one view')
# With angles
for angle in [30, 60, 90, 180]:
print('Start one view')
filename_angle = filename[:-4]+'_'+str(angle)+'.pdf'
ax.view_init(elev=None, azim=angle)
plt.savefig(filename_angle.format(save_dir), format='pdf', bbox_inches='tight')
print('Finished one view')
plt.close()
# Plot period and cycle length distributions per group
def plot_lengths_hist_by_attribute_cutoff(cycle_stats_df, cycle_df, attribute, cutoff_criteria, cutoff, pdf_or_cdf, save_dir):
'''
Function that plots cycle and period length distributions across groups
Input:
cycle_stats_df: pandas dataframe, with information about user's cycle statistics
cycle_df: pandas dataframe, with information about each user's cycle
attribute: whether to consider 'cycle_lengths' or 'period_lengths'
cutoff_criteria: what statistic to use for separating users into groups ('cycle_lengths' for paper)
cutoff: what statistic cutoff value to use for separating users into groups (9 for paper)
pdf_or_cdf: whether to plot 'pdf's or 'cdf's
save_dir: path where to save plot
Output:
None
'''
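# Example call (illustrative only; the cutoff column name is a hypothetical placeholder):
#
#   plot_lengths_hist_by_attribute_cutoff(
#       cycle_stats_df, cycle_df, attribute='cycle_length',
#       cutoff_criteria='median_inter_cycle_length',  # hypothetical column name
#       cutoff=9, pdf_or_cdf='cdf', save_dir='plots')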
# Identify groups per cutoff criteria
users_greater_than_cutoff = np.unique(cycle_stats_df[cycle_stats_df[cutoff_criteria] > cutoff]['user_id'])
users_less_than_cutoff = np.unique(cycle_stats_df[cycle_stats_df[cutoff_criteria] <= cutoff]['user_id'])
cycles_users_greater_than_cutoff = cycle_df[cycle_df['user_id'].isin(users_greater_than_cutoff)]
cycles_users_less_than_cutoff = cycle_df[cycle_df['user_id'].isin(users_less_than_cutoff)]
colors = ['orange', 'c']
labels=['Highly variable', 'NOT highly variable']
if attribute == 'cycle_length':
# Compute histogram
# Bins based on integer range of values
my_bins=np.arange(
np.min([cycles_users_greater_than_cutoff[attribute].dropna().min(), cycles_users_less_than_cutoff[attribute].dropna().min()]),
np.max([cycles_users_greater_than_cutoff[attribute].dropna().max(), cycles_users_less_than_cutoff[attribute].dropna().max()])+1)
all_counts, all_bins = np.histogram(cycle_df[attribute].dropna(), bins=my_bins, density=True)
counts_greater_than_cutoff, bins_greater_than_cutoff = np.histogram(cycles_users_greater_than_cutoff[attribute].dropna(), bins=my_bins, density=True)
counts_less_than_cutoff, bins_less_than_cutoff = np.histogram(cycles_users_less_than_cutoff[attribute].dropna(), bins=my_bins, density=True)
# Separate PDF/CDF plots
if pdf_or_cdf=='pdf':
# PDF
hist_type='stepfilled'
cumulative=False
y_label='P(Cycle length = n)'
cohort_filename = '{}/{}_pdf_cohort.pdf'.format(save_dir, attribute)
per_group_filename = '{}/{}_pdf_per_group.pdf'.format(save_dir, attribute)
elif pdf_or_cdf=='cdf':
# CDF
hist_type='step'
cumulative=True
y_label='P(Cycle length $\leq$ n)'
cohort_filename = '{}/{}_cdf_cohort.pdf'.format(save_dir, attribute)
per_group_filename = '{}/{}_cdf_per_group.pdf'.format(save_dir, attribute)
else:
raise ValueError('Can only plot pdf or cdf, not {}'.format(pdf_or_cdf))
# Population
plt.hist(all_bins[:-1], all_bins, weights=all_counts, density=True, cumulative=cumulative, color='slateblue', alpha=0.5, histtype=hist_type)
plt.autoscale(enable=True, tight=True)
plt.xticks(np.arange(my_bins.min(), my_bins.max()+1, 10))
plt.xlabel('Cycle length in days')
plt.ylabel(y_label)
plt.savefig(cohort_filename, format='pdf', bbox_inches='tight')
plt.close()
# Per-group
plt.hist(bins_greater_than_cutoff[:-1], bins_greater_than_cutoff, weights=counts_greater_than_cutoff, density=True, cumulative=cumulative, color=colors[0], alpha=0.5, label=labels[0], histtype=hist_type)
plt.hist(bins_less_than_cutoff[:-1], bins_less_than_cutoff, weights=counts_less_than_cutoff, density=True, cumulative=cumulative, color=colors[1], alpha=0.5, label=labels[1], histtype=hist_type)
plt.autoscale(enable=True, tight=True)
plt.xticks(np.arange(my_bins.min(), my_bins.max()+1, 10))
plt.xlabel('Cycle length in days')
plt.ylabel(y_label)
# Add (a)/(b) labels for paper
plt.text(12, 7, '(b)', fontsize=14, fontweight='bold', horizontalalignment='center', verticalalignment='center', transform=None)
plt.savefig(per_group_filename, format='pdf', bbox_inches='tight')
plt.close()
elif attribute == 'period_length':
# Compute histogram
# Bins based on integer range of values
my_bins=np.arange(
np.min([cycles_users_greater_than_cutoff[attribute].dropna().min(), cycles_users_less_than_cutoff[attribute].dropna().min()]),
np.max([cycles_users_greater_than_cutoff[attribute].dropna().max(), cycles_users_less_than_cutoff[attribute].dropna().max()])+1)
all_counts, all_bins = np.histogram(cycle_df[attribute].dropna(), bins=my_bins, density=True)
counts_greater_than_cutoff, bins_greater_than_cutoff = np.histogram(cycles_users_greater_than_cutoff[attribute].dropna(), bins=my_bins, density=True)
counts_less_than_cutoff, bins_less_than_cutoff = np.histogram(cycles_users_less_than_cutoff[attribute].dropna(), bins=my_bins, density=True)
# Separate PDF/CDF plots
max_period_days=28
if pdf_or_cdf=='pdf':
# PDF
hist_type='stepfilled'
cumulative=False
y_label='P(Period length = n)'
cohort_filename = '{}/{}_pdf_cohort.pdf'.format(save_dir, attribute)
per_group_filename = '{}/{}_pdf_per_group.pdf'.format(save_dir, attribute)
elif pdf_or_cdf=='cdf':
# CDF
hist_type='step'
cumulative=True
y_label='P(Period length $\leq$ n)'
cohort_filename = '{}/{}_cdf_cohort.pdf'.format(save_dir, attribute)
per_group_filename = '{}/{}_cdf_per_group.pdf'.format(save_dir, attribute)
else:
raise ValueError('Can only plot pdf or cdf, not {}'.format(pdf_or_cdf))
# Population
plt.hist(all_bins[:-1], all_bins, weights=all_counts, density=True, cumulative=cumulative, color='slateblue', alpha=0.5, histtype=hist_type)
plt.autoscale(enable=True, tight=True)
plt.xticks(np.append([1],np.arange(4,max_period_days+1, 4)))
plt.xlim(1,max_period_days)
plt.xlabel('Period length in days')
plt.ylabel(y_label)
plt.savefig(cohort_filename, format='pdf', bbox_inches='tight')
plt.close()
# Per-group
plt.hist(bins_greater_than_cutoff[:-1], bins_greater_than_cutoff, weights=counts_greater_than_cutoff, density=True, cumulative=cumulative, color=colors[0], alpha=0.5, label=labels[0], histtype=hist_type)
plt.hist(bins_less_than_cutoff[:-1], bins_less_than_cutoff, weights=counts_less_than_cutoff, density=True, cumulative=cumulative, color=colors[1], alpha=0.5, label=labels[1], histtype=hist_type)
plt.autoscale(enable=True, tight=True)
plt.xticks(np.append([1],np.arange(4,max_period_days+1, 4)))
plt.xlim(1,max_period_days)
plt.xlabel('Period length in days')
plt.ylabel(y_label)
# Add (a)/(b) labels for paper
plt.text(12, 7, '(b)', fontsize=14, fontweight='bold', horizontalalignment='center', verticalalignment='center', transform=None)
plt.savefig(per_group_filename, format='pdf', bbox_inches='tight')
plt.close()
else:
raise ValueError('Unknown attribute {}'.format(attribute))
# Bootstrapped-KS for cycle and period length
def bootstrapped_cycle_period_lengths_KS(cycle_stats_df, cycle_df, cutoff_criteria, cutoff, n_bootstrapping, results_dir):
'''
Function that computes cycle and period length Kolmogorov-Smirnov tests between group distributions, based on bootstrapping
Input:
cycle_stats_df: pandas dataframe, with information about user's cycle statistics
cycle_df: pandas dataframe, with information about user's cycle
cutoff_criteria: what statistic to use for separating users into groups ('cycle_lengths' for paper)
cutoff: what statistic cutoff value to use for separating users into groups (9 for paper)
n_bootstrapping: Number of bootstrapped samples to use for the analysis
results_dir: path where to save results
Output:
None
'''
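# Example call (illustrative only; the column name and sample count are placeholders):
#
#   bootstrapped_cycle_period_lengths_KS(
#       cycle_stats_df, cycle_df,
#       cutoff_criteria='median_inter_cycle_length',  # hypothetical column name
#       cutoff=9, n_bootstrapping=1000, results_dir='results')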
# True separation of users into groups
true_users_greater_than_cutoff = np.unique(cycle_stats_df[cycle_stats_df[cutoff_criteria] > cutoff]['user_id'])
true_users_less_than_cutoff = np.unique(cycle_stats_df[cycle_stats_df[cutoff_criteria] <= cutoff]['user_id'])
n_users_greater_than_cutoff=true_users_greater_than_cutoff.size
n_users_less_than_cutoff=true_users_less_than_cutoff.size
########### TRUE OBSERVERD STATISTICS ##########
# Cycles per-group
true_cycles_users_greater_than_cutoff = cycle_df[cycle_df['user_id'].isin(true_users_greater_than_cutoff)]
true_cycles_users_less_than_cutoff = cycle_df[cycle_df['user_id'].isin(true_users_less_than_cutoff)]
# KS cycle_length
true_KS_cycle_length, true_p_val_cycle_length = stats.ks_2samp(true_cycles_users_greater_than_cutoff['cycle_length'].dropna(), true_cycles_users_less_than_cutoff['cycle_length'].dropna())
# KS period_length
true_KS_period_length, true_p_val_period_length = stats.ks_2samp(true_cycles_users_greater_than_cutoff['period_length'].dropna(), true_cycles_users_less_than_cutoff['period_length'].dropna())
########### BOOTSTRAP BASED STATISTICS ##########
# Computed suff statistics
bootstrapped_KS_cycle_length=np.zeros(n_bootstrapping)
bootstrapped_p_val_cycle_length=np.zeros(n_bootstrapping)
bootstrapped_KS_period_length=np.zeros(n_bootstrapping)
bootstrapped_p_val_period_length=np.zeros(n_bootstrapping)
for n_bootstrap in np.arange(n_bootstrapping):
#print('Sample={}/{}'.format(n_bootstrap,n_bootstrapping))
# Bootstrapped sample indicators
bootstrapped_users_greater_than_cutoff=np.random.choice(true_users_greater_than_cutoff,n_users_greater_than_cutoff)
bootstrapped_users_less_than_cutoff=np.random.choice(true_users_less_than_cutoff,n_users_less_than_cutoff)
# Cycles per-group
bootstrapped_cycles_users_greater_than_cutoff = cycle_df[cycle_df['user_id'].isin(bootstrapped_users_greater_than_cutoff)]
bootstrapped_cycles_users_less_than_cutoff = cycle_df[cycle_df['user_id'].isin(bootstrapped_users_less_than_cutoff)]
# KS cycle_length
bootstrapped_KS_cycle_length[n_bootstrap], bootstrapped_p_val_cycle_length[n_bootstrap] = stats.ks_2samp(bootstrapped_cycles_users_greater_than_cutoff['cycle_length'].dropna(), bootstrapped_cycles_users_less_than_cutoff['cycle_length'].dropna())
# KS period_length
bootstrapped_KS_period_length[n_bootstrap], bootstrapped_p_val_period_length[n_bootstrap] = stats.ks_2samp(bootstrapped_cycles_users_greater_than_cutoff['period_length'].dropna(), bootstrapped_cycles_users_less_than_cutoff['period_length'].dropna())
# Print bootstrap results
print('*************************************************************************')
print('******** Cycle-length KS={} (p={}) ***********'.format(true_KS_cycle_length, true_p_val_cycle_length))
print('******** Cycle-length Bootstrapped KS={}+/-{} (p={} (+/-{}))***********'.format(
bootstrapped_KS_cycle_length.mean(), bootstrapped_KS_cycle_length.std(), bootstrapped_p_val_cycle_length.mean(), bootstrapped_p_val_cycle_length.std()
))
print('******** Cycle-length Bootstrapped KS={}({},{}) p={} ({},{}))***********'.format(
bootstrapped_KS_cycle_length.mean(), np.percentile(bootstrapped_KS_cycle_length, 2.5, axis=0), np.percentile(bootstrapped_KS_cycle_length, 97.5, axis=0),
bootstrapped_p_val_cycle_length.mean(), np.percentile(bootstrapped_p_val_cycle_length, 2.5, axis=0), np.percentile(bootstrapped_p_val_cycle_length, 97.5, axis=0)
))
print('*************************************************************************')
print('******** Period-length KS={} (p={}) ***********'.format(true_KS_period_length, true_p_val_period_length))
print('******** Period-length Bootstrapped KS={}+/-{} (p={} (+/-{}))***********'.format(
bootstrapped_KS_period_length.mean(), bootstrapped_KS_period_length.std(), bootstrapped_p_val_period_length.mean(), bootstrapped_p_val_period_length.std()
))
print('******** Period-length Bootstrapped KS={}({},{}) p={} ({},{}))***********'.format(
bootstrapped_KS_period_length.mean(), np.percentile(bootstrapped_KS_period_length, 2.5, axis=0), np.percentile(bootstrapped_KS_period_length, 97.5, axis=0),
bootstrapped_p_val_period_length.mean(), np.percentile(bootstrapped_p_val_period_length, 2.5, axis=0), np.percentile(bootstrapped_p_val_period_length, 97.5, axis=0)
))
print('*************************************************************************')
# Average statistics over cycle-id
def plot_avg_lengths_by_attribute_cutoff(cycle_stats_df, cycle_df, attribute, cutoff_criteria, cutoff, save_dir):
'''
Function that plots cycle and period length average and standard deviation across user's timeline (i.e., by cycle-id) across groups
Input:
cycle_stats_df: pandas dataframe, with information about user's cycle statistics
cycle_df: pandas dataframe, with information about each user's cycle
attribute: whether to consider 'cycle_lengths' or 'period_lengths'
cutoff_criteria: what statistic to use for separating users into groups ('cycle_lengths' for paper)
cutoff: what statistic cutoff value to use for separating users into groups (9 for paper)
save_dir: path where to save plot
Output:
None
'''
# Identify groups per cutoff criteria
users_greater_than_cutoff = np.unique(cycle_stats_df[cycle_stats_df[cutoff_criteria] > cutoff]['user_id'])
users_less_than_cutoff = np.unique(cycle_stats_df[cycle_stats_df[cutoff_criteria] <= cutoff]['user_id'])
cycles_users_greater_than_cutoff = cycle_df[cycle_df['user_id'].isin(users_greater_than_cutoff)]
cycles_users_less_than_cutoff = cycle_df[cycle_df['user_id'].isin(users_less_than_cutoff)]
# Plotting
colors = ['slateblue', 'c', 'orange']
max_cycle_id=20
if attribute == 'cycle_length':
fig, axes = plt.subplots(3, 1, sharex='all', sharey='all', figsize = (15,15))
for index, dataset in enumerate([cycle_df, cycles_users_less_than_cutoff, cycles_users_greater_than_cutoff]):
means = dataset.groupby(['cycle_id'])[attribute].mean()[:max_cycle_id]
std = dataset.groupby(['cycle_id'])[attribute].std()[:max_cycle_id]
# Plot
axes[index].plot(
|
np.unique(dataset['cycle_id'])
|
numpy.unique
|
import tensorflow as tf
import tensorflow_probability as tfp
# from tensorflow.core.protobuf import config_pb2
import numpy as np
# import os
# from fit_model import load_data
import matplotlib.pyplot as plt
import time
import numbers
import pandas as pd
import tf_keras_tfp_lbfgs as funfac
from dotenv import load_dotenv
import os
import requests
from datetime import datetime, timedelta
# for the file selection dialogue (see https://codereview.stackexchange.com/questions/162920/file-selection-button-for-jupyter-notebook)
import traitlets
from ipywidgets import widgets
from IPython.display import display
from tkinter import Tk, filedialog
class SelectFilesButton(widgets.Button):
"""A file widget that leverages tkinter.filedialog."""
# see https://codereview.stackexchange.com/questions/162920/file-selection-button-for-jupyter-notebook
def __init__(self, out, CallBack=None,Load=True):
super(SelectFilesButton, self).__init__()
# Add the selected_files trait
self.add_traits(files=traitlets.traitlets.List())
# Create the button.
if Load:
self.description = "Load"
else:
self.description = "Save"
self.isLoad=Load
self.icon = "square-o"
self.style.button_color = "orange"
# Set on click behavior.
self.on_click(self.select_files)
self.CallBack = CallBack
self.out = widgets.Output()
@staticmethod
def select_files(b):
"""Generate instance of tkinter.filedialog.
Parameters
----------
b : obj:
An instance of ipywidgets.widgets.Button
"""
with b.out:
try:
# Create Tk root
root = Tk()
# Hide the main window
root.withdraw()
# Raise the root to the top of all windows.
root.call('wm', 'attributes', '.', '-topmost', True)
# List of selected files will be set to b.value
if b.isLoad:
filename = filedialog.askopenfilename() # multiple=False
else:
filename = filedialog.asksaveasfilename()
# print('Load/Save Dialog finished')
#b.description = "Files Selected"
#b.icon = "check-square-o"
#b.style.button_color = "lightgreen"
if b.CallBack is not None:
#print('Invoking CallBack')
b.CallBack(filename)
#else:
#print('no CallBack')
except:
#print('Problem in Load/Save')
#print('File is'+b.files)
pass
cumulPrefix = '_cumul_' # this is used as a keyword to identify whether this plot was already plotted
def getNumArgs(myFkt):
from inspect import signature
sig = signature(myFkt)
return len(sig.parameters)
class DataLoader(object):
def __init__(self):
load_dotenv()
def pull_data(self, uri='http://ec2-3-122-224-7.eu-central-1.compute.amazonaws.com:8080/daily_data'):
return requests.get(uri).json()
# return requests.get('http://ec2-3-122-224-7.eu-central-1.compute.amazonaws.com:8080/daily_data').json()
def get_new_data(self):
uri = "http://ec2-3-122-224-7.eu-central-1.compute.amazonaws.com:8080/data"
json_data = self.pull_data(uri)
table = np.array(json_data["rows"])
column_names = []
for x in json_data["fields"]:
column_names.append(x["name"])
df = pd.DataFrame(table, columns=column_names)
df["day"] = [datetime.fromtimestamp(x["$date"] / 1000) for x in df["day"].values]
df["id"] = df["latitude"].apply(lambda x: str(x)) + "_" + df["longitude"].apply(lambda x: str(x))
unique_ids = df["id"].unique()
regions = {}
for x in unique_ids:
regions[x] = {}
regions[x]["data_fit"] = df[df["id"] == x]
return regions, df
NumberTypes = (int, float, complex, np.ndarray, np.generic)
# The aim is to build a SEIR (Susceptible → Exposed → Infected → Removed)
# Model with a number of (fittable) parameters which may even vary from
# district to district
# The basic model is taken from the webpage
# https://gabgoh.github.io/COVID/index.html
# and the implementation is done in Tensorflow 1.3
# The temporal dimension is treated by unrolling the loop
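# For reference, the textbook SEIR dynamics such a model discretizes are (a sketch of
# the standard equations, not necessarily the exact parameterization used below):
#   dS/dt = -beta * S * I / N
#   dE/dt =  beta * S * I / N - sigma * E
#   dI/dt =  sigma * E - gamma * I
#   dR/dt =  gamma * I
# with beta the infection rate, sigma the inverse incubation time and gamma the
# inverse infectious period.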
CalcFloatStr = 'float32'
if False:
defaultLossDataType = "float64"
else:
defaultLossDataType = "float32"
defaultTFDataType = "float32"
defaultTFCpxDataType = "complex64"
def addDicts(dict1, dict2):
"""Merge dictionaries and keep values of common keys in list"""
dict3 = {**dict1, **dict2}
for key, value in dict3.items():
if key in dict1 and key in dict2:
val2 = dict1[key]
if equalShape(value.shape, val2.shape):
dict3[key] = value + val2
else:
print('Shape 1: ' + str(value.shape) + ", shape 2:" + str(val2.shape))
raise ValueError('Shapes of transfer values to add are not the same')
return dict3
def Init(noCuda=False):
"""
initializes the tensorflow system
"""
if noCuda is True:
os.environ["CUDA_VISIBLE_DEVICES"] = '-1'
tf.compat.v1.reset_default_graph() # currently just to shield tensorflow from the main program
# Init()
### tf.compat.v1.disable_eager_execution()
# sess = tf.compat.v1.Session()
# tf.device("/gpu:0")
# Here some code from the inverse Modeling Toolbox (<NAME>)
def iterativeOptimizer(myTFOptimization, NIter, loss, verbose=False):
if NIter <= 0:
raise ValueError("NIter has to be positive")
for n in range(NIter):
myTFOptimization() # summary?
myloss = loss().numpy()
if np.isnan(myloss):
raise ValueError("Loss is NaN. Aborting iteration.")
if verbose:
print(str(n) + "/" + str(NIter) + ": " + str(myloss))
return myloss # , summary
def optimizer(loss, otype='L-BFGS', NIter=300, oparam={'gtol': 0, 'learning_rate': None}, var_list=None, verbose=False):
"""
defines an optimizer to be used with "Optimize"
This function combines various optimizers from tensorflow and SciPy (with tensorflow compatibility)
Parameters
----------
loss : the loss function, which is a tensor that has been initialized but contains variables
otype (default: 'L-BFGS') : The method of optimization to be used. The following options exist:
from Tensorflow:
SGD
nesterov
adadelta
adam
adagrad
and 'L-BFGS' via the tensorflow_probability L-BFGS solver
NIter (default: 300) : Number of iterations to be used
oparam : a dictionary to be passed to the detailed optimizers containing optimization parameters (e.g. "learning-rate"). See the individual documentation
var_list (default: None meaning all) : list of tensorflow variables to be used during minimization
verbose (default: False) : prints the loss during iterations if True
Returns
-------
an optimizer function (or lambda function)
See also
-------
Example
-------
"""
if NIter < 0:
raise ValueError("NIter has to be positive or zero")
optimStep = 0
if (var_list is not None) and not np.iterable(var_list):
var_list = [var_list]
# these optimizer types work strictly stepwise
if otype == 'SGD':
learning_rate = oparam["learning_rate"]
if learning_rate == None:
learning_rate = 0.00003
print("setting up sgrad optimization with ", NIter, " iterations.")
optimStep = lambda loss: tf.keras.optimizers.SGD(learning_rate).minimize(loss, var_list=var_list) # 1.0
elif otype == 'nesterov':
learning_rate = oparam["learning_rate"]
if learning_rate == None:
learning_rate = 0.00002
print("setting up nesterov optimization with ", NIter, " iterations.")
optimStep = lambda loss: tf.keras.optimizers.SGD(learning_rate, nesterov=True, momentum=1e-4).minimize(loss, var_list=var_list) # 1.0
elif otype == 'adam':
learning_rate = oparam["learning_rate"]
if learning_rate == None:
learning_rate = 0.0013
print("setting up adam optimization with ", NIter, " iterations, learning_rate: ", learning_rate, ".")
optimStep = lambda loss: tf.keras.optimizers.Adam(learning_rate, 0.9, 0.999).minimize(loss, var_list=var_list) # 1.0
elif otype == 'adadelta':
learning_rate = oparam["learning_rate"]
if learning_rate == None:
learning_rate = 0.0005
print("setting up adadelta optimization with ", NIter, " iterations.")
optimStep = lambda loss: tf.keras.optimizers.Adadelta(learning_rate, 0.9, 0.999).minimize(loss, var_list=var_list) # 1.0
elif otype == 'adagrad':
learning_rate = oparam["learning_rate"]
if learning_rate == None:
learning_rate = 0.0012
print("setting up adagrad optimization with ", NIter, " iterations.")
optimStep = lambda loss: tf.keras.optimizers.Adagrad(learning_rate).minimize(loss, var_list=var_list) # 1.0
if optimStep != 0:
myoptim = lambda: optimStep(loss)
myOptimizer = lambda: iterativeOptimizer(myoptim, NIter, loss, verbose=verbose)
# these optimizers perform the whole iteration
elif otype == 'L-BFGS':
# normFac = None
# if "normFac" in oparam: # "max", "mean" or None
# normFac = oparam["normFac"]
func = funfac.function_factory(loss, var_list) # normFactors=normFac
# convert initial model parameters to a 1D tf.Tensor
init_params = func.initParams() # retrieve the (normalized) initialization parameters
# use the L-BFGS solver
myOptimizer = lambda: LBFGSWrapper(func, init_params, NIter)
# myOptimizer = lambda: tfp.optimizer.lbfgs_minimize(value_and_gradients_function=func,
# initial_position=init_params,
# tolerance=1e-8,
# max_iterations=NIter)
# # f_relative_tolerance = 1e-6,
else:
raise ValueError('Unknown optimizer: ' + otype)
return myOptimizer # either an iterative one or 'L-BFGS'
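# A minimal usage sketch for optimizer() above (the variable and loss are hypothetical;
# assumes TF eager execution so that loss() can be evaluated directly):
#   v = tf.Variable(3.0, dtype=CalcFloatStr)
#   loss = lambda: tf.square(v - 1.0)          # the loss must be a zero-argument callable
#   myopt = optimizer(loss, otype='adam', NIter=200, oparam={'learning_rate': 0.1}, var_list=[v])
#   final_loss = Optimize(myopt, loss=loss)    # Optimize() is defined further below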
def LBFGSWrapper(func, init_params, NIter):
optim_results = tfp.optimizer.lbfgs_minimize(value_and_gradients_function=func,
initial_position=init_params,
tolerance=1e-7,
num_correction_pairs=5,
max_iterations=NIter)
# f_relative_tolerance = 1e-6
# converged, failed, num_objective_evaluations, final_loss, final_gradient, position_deltas, gradient_deltas
if not optim_results.converged:
tf.print("WARNING: optimization did not converge")
if optim_results.failed:
tf.print("WARNING: lines search failed during iterations")
res = optim_results.position
func.assign_new_model_parameters(res)
return optim_results.objective_value
def doNormalize(val, normalize, reference):
if normalize == "max":
val = val * tf.reduce_max(reference)
elif normalize == "mean":
val = val * tf.reduce_mean(reference)
return val
def invNormalize(val, normalize, reference):
if normalize == "max":
val = val / tf.reduce_max(reference)
elif normalize == "mean":
val = val / tf.reduce_mean(reference)
return val
@tf.custom_gradient
def monotonicPos(val, b2=1.0): # can also be called forcePositive
"""
applies a monotonic transform mapping the full real axis to the positive half space
    This can be used to implicitly force the reconstruction results to be all-positive. The monotonic function is derived from a hyperboloid:
    The function is continuous and differentiable.
This function can also be used as an activation function for neural networks.
Parameters
----------
val : tensorflow array
The array to be transformed
Returns
-------
tensorflow array
The transformed array
Example
-------
"""
mysqrt = tf.sqrt(b2 + tf.square(val) / 4.0)
def grad(dy):
return dy * (0.5 + val / mysqrt / 4.0), None # no abs here!
# return mysqrt + val / 2.0, grad # This is the original simple equation, but it is numerically very unstable for small numbers!
# slightly better but not good:
# return val * (0.5 + tf.sign(val) * tf.sqrt(b2/tf.square(val)+0.25)), grad
taylor1 = b2 / (2.0 * mysqrt)
diff = val / 2.0 + mysqrt # for negative values this is a difference
# print('diff: ' + str(diff)+", val"+str(val)+" taylor:"+str(taylor1))
# if tf.abs(diff/val) < 2e-4: # this seems a good compromise between finite subtraction and taylor series
Order2N = val * tf.where(tf.abs(diff / val) < 2e-4, taylor1, diff)
p = taylor1 + (b2 + Order2N) / (2.0 * mysqrt), grad # this should be numerically more stable
return p
# This monotonic positive function is based on a hyperbola, modified such that one of the branches approaches zero and the other one reaches a slope of one
def invMonotonicPos(invinput, b2=1.0, Eps=0.0):
    # b2: a constant value > 0.0 which regulates the shape of the hyperbola. The bigger it is, the smoother the function becomes.
tfinit = tf.clip_by_value(invinput, clip_value_min=tf.constant(Eps, dtype=CalcFloatStr),
clip_value_max=tf.constant(np.Inf, dtype=CalcFloatStr)) # assertion to obtain only positive input for the initialization
# return tf.cast(tfinit - (tf.constant(b2) / tfinit), dtype=CalcFloatStr) # the inverse of monotonicPos
return (tf.square(tfinit) - b2) / tfinit # the inverse of monotonicPos
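# Quick consistency sketch for the pair above (illustrative, assumes eager execution):
# invMonotonicPos maps a positive value to the unconstrained domain and monotonicPos
# maps it back, so the round trip reproduces the input.
#   y = tf.constant([0.1, 1.0, 10.0], dtype=CalcFloatStr)
#   x = invMonotonicPos(y)       # unconstrained representation
#   y_back = monotonicPos(x)     # numerically close to y again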
# def piecewisePos(res):
# mask = res>=0
# mask2 = ~mask
# res2 = 1.0 / (1.0-res(mask2))
# res(mask2) = res2; # this hyperbola has a value of 1, a slope of 1 and a curvature of 2 at zero X
# res(mask) = abssqr(res(mask)+0.5)+0.75 # this parabola has a value of 1, a slope of 1 and a curvature of 2 at zero X
# def invPiecewisePos(invinput):
# mask=model >= 1.0
# mask2 = ~mask
# res2=model * 0.0
# res2(mask) = sqrt(model(mask) - 0.75)-0.5
# res2(mask2) = (model(mask2)-1.0) / model(mask2)
# res = afkt(res2) # the inverse of monotonicPos
# def forcePositive(self, State):
# for varN, var in State.items():
# State[varN] = self.monotonicPos(State[varN])
# return State
# def Reset():
# tf.compat.v1.reset_default_graph() # clear everything on the GPU
# def Optimize(Fwd,Loss,tfinit,myoptimizer=None,NumIter=40,PreFwd=None):
def Optimize(myoptimizer=None, loss=None, NumIter=40, TBSummary=False, TBSummaryDir="C:\\NoBackup\\TensorboardLogs\\", resVars=None, lossScale=1.0):
"""
performs the tensorflow optimization given a loss function and an optimizer
The optimizer currently also needs to know about the loss, which is a (not-yet evaluated) tensor
Parameters
----------
myoptimizer : an optimizer. See for example "optimizer" and its arguments
loss : the loss() function with no arguments
NumIter (default: 40) : Number of iterations to be used, in case that no optimizer is provided. Otherwise this argument is NOT used but the optimizer knows about the number of iterations.
TBSummary (default: False) : If True, the summary information for tensorboard is stored
    TBSummaryDir (default: "C:\\NoBackup\\TensorboardLogs\\") : The directory where the tensorboard information is stored.
Eager (default: False) : Use eager execution
resVars (default: None) : Which tensors to evaluate and return at the end.
Returns
-------
a tuple of tensors
See also
-------
Example
-------
"""
if myoptimizer is None:
myoptimizer = lambda loss: optimizer(loss, NIter=NumIter) # if none was provided, use the default optimizer
if loss != None:
mystartloss = loss().numpy() * lossScale # eval()
start_time = time.time()
if TBSummary:
summary = myoptimizer()
else:
myoptimizer()
duration = time.time() - start_time
# if TBSummary:
# tb_writer = tf.summary.FileWriter(TBSummaryDir + 'Optimize', session.graph)
# merged = tf.summary.merge_all()
# summary = session.run(merged)
# tb_writer.add_summary(summary, 0)
try:
optName = myoptimizer.optName
except:
optName = "unkown optimizer"
if loss != None:
myloss = loss().numpy() * lossScale
print(optName + ': Exec. time:{:.4}'.format(duration), '. Start L.:{:.4}'.format(mystartloss), ', Final L.:{:.4}'.format(myloss),
'. Relative L.:{:.4}'.format(myloss / mystartloss))
else:
print(optName + ': Exec. time:{:.4}'.format(duration))
if resVars == None and loss != None:
return myloss
else:
res = []
if isinstance(resVars, list) or isinstance(resVars, tuple):
for avar in resVars:
if not isinstance(avar, tf.Tensor) and not isinstance(avar, tf.Variable):
print("WARNING: Variable " + str(avar) + " is NOT a tensor.")
res.append(avar)
else:
try:
res.append(avar.eval())
except ValueError:
print("Warning. Could not evaluate result variable" + avar.name + ". Returning [] for this result.")
res.append([])
else:
res = resVars.eval()
return res
# nip.view(toshow)
def datatype(tfin):
if istensor(tfin):
return tfin.dtype
else:
if isinstance(tfin, np.ndarray):
return tfin.dtype.name
return tfin # assuming this is already the type
def istensor(tfin):
return isinstance(tfin, tf.Tensor) or isinstance(tfin, tf.Variable)
def iscomplex(mytype):
mytype = str(datatype(mytype))
return (mytype == "complex64") or (mytype == "complex128") or (mytype == "complex64_ref") or (mytype == "complex128_ref") or (mytype == "<dtype: 'complex64'>") or (
mytype == "<dtype: 'complex128'>")
def isNumber(val):
return isinstance(val, numbers.Number)
def isList(val):
return isinstance(val, list)
def isTuple(val):
return isinstance(val, tuple)
def removeCallable(ten):
if callable(ten):
return ten()
else:
return ten
def totensor(img):
if istensor(img) or callable(img):
return img
if isList(img):
img = np.array(img, CalcFloatStr)
if not isNumber(img) and ((img.dtype == defaultTFDataType) or (img.dtype == defaultTFCpxDataType)):
img = tf.constant(img)
else:
if iscomplex(img):
img = tf.constant(img, defaultTFCpxDataType)
else:
img = tf.constant(img, defaultTFDataType)
return img
def doCheckScaling(fwd, meas):
sF = tf.reduce_mean(input_tensor=totensor(fwd)).numpy()
sM = tf.reduce_mean(input_tensor=totensor(meas)).numpy()
R = sM / sF
if abs(R) < 0.7 or abs(R) > 1.3:
print("Mean of measured data: " + str(sM) + ", Mean of forward model with initialization: " + str(sF) + " Ratio: " + str(R))
print(
"WARNING!! The forward projected sum is significantly different from the provided measured data. This may cause problems during optimization. To prevent this warning: set checkScaling=False for your loss function.")
return tf.debugging.check_numerics(fwd, "Detected NaN or Inf in loss function") # also checks for NaN values during runtime
def Loss_SimpleGaussian(fwd, meas, lossDataType=None, checkScaling=False):
if lossDataType is None:
lossDataType = defaultLossDataType
with tf.compat.v1.name_scope('Loss_SimpleGaussian'):
# return tf.reduce_sum(tf.square(fwd-meas)) # version without normalization
return tf.reduce_mean(
input_tensor=tf.cast(tf.square(fwd - meas), lossDataType)) # to make everything scale-invariant. The TF framework hopefully takes care of precomputing this
# %% this section defines a number of loss functions. Note that they often need fixed input arguments for measured data and sometimes more parameters
def Loss_FixedGaussian(fwd, meas, lossDataType=None, checkScaling=False):
if lossDataType is None:
lossDataType = defaultLossDataType
if checkScaling:
fwd = doCheckScaling(fwd, meas)
with tf.compat.v1.name_scope('Loss_FixedGaussian'):
# return tf.reduce_sum(tf.square(fwd-meas)) # version without normalization
if iscomplex(fwd.dtype.as_numpy_dtype):
mydiff = (fwd - meas)
return tf.reduce_mean(input_tensor=tf.cast(mydiff * tf.math.conj(mydiff), lossDataType)) / \
tf.reduce_mean(input_tensor=tf.cast(meas, lossDataType)) # to make everything scale-invariant. The TF framework hopefully takes care of precomputing this
else:
return tf.reduce_mean(input_tensor=tf.cast(tf.square(fwd - meas), lossDataType)) / tf.reduce_mean(
input_tensor=tf.cast(meas, lossDataType)) # to make everything scale-invariant. The TF framework hopefully takes care of precomputing this
def Loss_ScaledGaussianReadNoise(fwd, meas, RNV=1.0, lossDataType=None, checkScaling=False):
if lossDataType is None:
lossDataType = defaultLossDataType
if checkScaling:
fwd = doCheckScaling(fwd, meas)
offsetcorr = tf.cast(tf.reduce_mean(tf.math.log(tf.math.maximum(meas, tf.constant(0.0, dtype=CalcFloatStr)) + RNV)),
lossDataType) # this was added to have the ideal fit yield a loss equal to zero
# with tf.compat.v1.name_scope('Loss_ScaledGaussianReadNoise'):
XMinusMu = tf.cast(meas - fwd, lossDataType)
muPlusC = tf.cast(tf.math.maximum(fwd, 0.0) + RNV, lossDataType) # the clipping at zero was introduced to avoid division by zero
# if tf.reduce_any(RNV == tf.constant(0.0, CalcFloatStr)):
# print("RNV is: "+str(RNV))
# raise ValueError("RNV is zero!.")
# if tf.reduce_any(muPlusC == tf.constant(0.0, CalcFloatStr)):
# print("Problem: Division by zero encountered here")
# raise ValueError("Division by zero HERE!.")
Fwd = tf.math.log(muPlusC) + tf.square(XMinusMu) / muPlusC
# Grad=Grad.*(1.0-2.0*XMinusMu-XMinusMu.^2./muPlusC)./muPlusC;
Fwd = tf.reduce_mean(input_tensor=Fwd)
# if tf.math.is_nan(Fwd):
# if tf.reduce_any(muPlusC == tf.constant(0.0, CalcFloatStr)):
# print("Problem: Division by zero encountered")
# raise ValueError("Division by zero.")
# else:
# raise ValueError("Nan encountered.")
return Fwd # - offsetcorr # to make everything scale-invariant. The TF framework hopefully takes care of precomputing this
# @tf.custom_gradient
def Loss_Poisson(fwd, meas, Bg=0.05, checkPos=False, lossDataType=None, checkScaling=False):
if lossDataType is None:
lossDataType = defaultLossDataType
if checkScaling:
fwd = doCheckScaling(fwd, meas)
with tf.compat.v1.name_scope('Loss_Poisson'):
# meas[meas<0]=0
meanmeas = tf.reduce_mean(meas)
# NumEl=tf.size(meas)
if checkPos:
fwd = ((tf.sign(fwd) + 1) / 2) * fwd
FwdBg = tf.cast(fwd + Bg, lossDataType)
totalError = tf.reduce_mean(input_tensor=(FwdBg - meas) - meas * tf.math.log(
(FwdBg) / (meas + Bg))) / meanmeas # the modification in the log normalizes the error. For full normalization see PoissonErrorAndDerivNormed
# totalError = tf.reduce_mean((fwd-meas) - meas * tf.log(fwd)) / meanmeas # the modification in the log normalizes the error. For full normalization see PoissonErrorAndDerivNormed
# def grad(dy):
# return dy*(1.0 - meas/(fwd+Bg))/meanmeas
# return totalError,grad
return totalError
def Loss_Poisson2(fwd, meas, Bg=0.05, checkPos=False, lossDataType=None, checkScaling=False):
if lossDataType is None:
lossDataType = defaultLossDataType
if checkScaling:
fwd = doCheckScaling(fwd, meas)
# with tf.compat.v1.name_scope('Loss_Poisson2'):
# meas[meas<0]=0
meanmeas = tf.reduce_mean(meas)
meassize = np.prod(meas.shape)
# NumEl=tf.size(meas)
if checkPos:
fwd = ((tf.sign(fwd) + 1) / 2) * fwd # force positive
# totalError = tf.reduce_mean((fwd-meas) - meas * tf.log(fwd)) / meanmeas # the modification in the log normalizes the error. For full normalization see PoissonErrorAndDerivNormed
@tf.custom_gradient
def BarePoisson(myfwd):
def grad(dy):
mygrad = dy * (1.0 - meas / (myfwd + Bg)) / meassize # the size accounts for the mean operation (rather than sum)
# image_shaped_input = tf.reshape(mygrad, [-1, mygrad.shape[0], mygrad.shape[1], 1])
# tf.summary.image('mygrad', image_shaped_input, 10)
return mygrad
toavg = (myfwd + Bg - meas) - meas * tf.math.log((myfwd + Bg) / (meas + Bg))
toavg = tf.cast(toavg, lossDataType)
totalError = tf.reduce_mean(input_tensor=toavg) # the modification in the log normalizes the error. For full normalization see PoissonErrorAndDerivNormed
return totalError, grad
return BarePoisson(fwd) / meanmeas
# ---- End of code from the inverse Modelling Toolbox
def retrieveData():
import json_to_pandas
dl = json_to_pandas.DataLoader() # instantiate DataLoader #from_back_end=True
data_dict = dl.process_data() # loads and forms the data dictionary
rki_data = data_dict["RKI_Data"] # only RKI dataframe
print('Last Day loaded: ' + str(pd.to_datetime(np.max(rki_data.Meldedatum), unit='ms')))
return rki_data
def deltas(WhenHowMuch, SimTimes):
res = np.zeros(SimTimes)
for w, h in WhenHowMuch:
        res[w] = h
return res
def showResiduum(meas, fit):
res1 = np.mean(meas - fit, (1, 2))
print('Loss: ' + str(np.mean(abs(res1) ** 2)))
plt.plot(res1)
plt.xlabel('days')
plt.ylabel('mean difference / cases')
plt.title('residuum')
def plotAgeGroups(res1, res2):
plt.figure()
plt.title('Age Groups')
plt.plot(res1)
plt.gca().set_prop_cycle(None)
plt.plot(res2, '--')
plt.xlabel('days')
plt.ylabel('population')
class axisType:
const = 'const'
gaussian = 'gaussian'
sigmoid = 'sigmoid'
individual = 'individual'
uniform = 'uniform'
def prependOnes(s1, s2):
    l1 = len(s1)
    l2 = len(s2)
maxDim = max(l1, l2)
return np.array((maxDim - l1) * [1] + list(s1)), np.array((maxDim - l2) * [1] + list(s2))
def equalShape(s1, s2):
if isinstance(s1, tf.TensorShape):
s1 = s1.as_list()
if isinstance(s2, tf.TensorShape):
s2 = s2.as_list()
s1, s2 = prependOnes(s1, s2)
return np.linalg.norm(s1 - s2) == 0
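# For example (illustrative): the shorter shape is padded with leading ones, so
#   prependOnes((3, 4), (2, 3, 4))  # -> (array([1, 3, 4]), array([2, 3, 4]))
#   equalShape((3, 4), (1, 3, 4))   # -> True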
class Axis:
def ramp(self):
x = self.shape
if isinstance(x, np.ndarray) or isNumber(x) or isTuple(x) or isList(x):
aramp = tf.constant(np.arange(np.max(x)), dtype=CalcFloatStr)
if isNumber(x):
x = [x]
x = tf.reshape(aramp, x) # if you get an error here, the size is not 1D!
else:
x = totensor(x)
return x
def __init__(self, name, numAxis, maxAxes, entries=1, queue=False, labels=None):
self.name = name
self.queue = queue
self.shape = np.ones(maxAxes, dtype=int)
self.shape[-numAxis] = entries
self.curAxis = numAxis
self.Labels = labels
# self.initFkt = self.initZeros()
def __str__(self):
return self.name + ", number:" + str(self.curAxis) + ", is queue:" + str(self.queue)
def __repr__(self):
return self.__str__()
# def initZeros(self):
# return tf.constant(0.0, dtype=CalcFloatStr, shape=self.shape)
#
# def initOnes(self):
# return tf.constant(1.0, dtype=CalcFloatStr, shape=self.shape)
def init(self, vals):
if isNumber(vals):
return tf.constant(vals, dtype=CalcFloatStr, shape=self.shape)
else:
if isinstance(vals, list) or isinstance(vals, np.ndarray):
if len(vals) != np.prod(self.shape):
raise ValueError('Number of initialization values ' + str(len(vals)) + ' of variable ' + self.name + ' does not match its shape ' + str(self.shape))
vals = np.reshape(np.array(vals, dtype=CalcFloatStr), self.shape)
# if callable(vals):
# vshape = vals().shape
# else:
# vshape = vals.shape
# if not equalShape(vshape, self.shape):
# raise ValueError('Initialization shape ' + str(vshape) + ' of variable ' + self.name + ' does not match its shape ' + str(self.shape))
return totensor(vals)
# def initIndividual(self, vals):
# return tf.variable(vals, dtype=CalcFloatStr)
def initGaussian(self, mu=0.0, sig=1.0):
x = self.ramp()
mu = totensor(mu)
sig = totensor(sig)
initVals = tf.exp(-(x - mu) ** 2. / (2 * (sig ** 2.)))
initVals = initVals / tf.reduce_sum(input_tensor=initVals) # normalize (numerical !, since the domain is not infinite)
return initVals
def initDelta(self, pos=0):
x = self.ramp()
initVals = tf.cast(x == pos, CalcFloatStr) # 1.0 *
return initVals
def initSigmoid(self, mu=0.0, sig=1.0, offset=0.0):
"""
models a sigmoidal function starting near 0,
reaching 0.5 at mu and extending to one at inf, the width being controlled by sigma
"""
x = self.ramp()
        mu = totensor(mu)
        sig = totensor(sig)
initVals = 1. / (1. + tf.exp(-(x - mu) / sig)) + offset
initVals = initVals / tf.reduce_sum(input_tensor=initVals) # normalize (numerical !, since the domain is not infinite)
return initVals
def NDim(var):
if istensor(var):
return var.shape.ndims
else:
return var.ndim
def subSlice(var, dim, sliceStart, sliceEnd): # extracts a subslice along a particular dimension
numdims = NDim(var)
idx = [slice(sliceStart, sliceEnd) if (d == dim or numdims + dim == d) else slice(0, None) for d in range(numdims)]
return var[idx]
def firstSlice(var, dim): # extracts the first subslice along a particular dimension
return subSlice(var, dim, 0, 1)
def lastSlice(var, dim): # extracts the last subslice along a particular dimension
return subSlice(var, dim, -1, None)
def reduceSumTo(State, dst):
# redsz = min(sz1, sz2)
if isinstance(dst, np.ndarray):
dstSize = np.array(dst.shape)
else:
dstSize = np.array(dst.shape.as_list(), dtype=int)
if len(dst.shape) == 0: # i.e. a scalar
dstSize = np.ones(State.ndim, dtype=int)
rs = np.array(State.shape.as_list(), dtype=int)
toReduce =
|
np.nonzero((rs > dstSize) & (dstSize == 1))
|
numpy.nonzero
|
# -*- coding:utf-8 -*-
import numpy as np
def load_sim_data():
data = np.matrix([[1. ,2.1],[2. , 1.1],[1.3 ,1.],[1. ,1.],[2. ,1.]])
class_labels = [1.0, 1.0, -1.0, -1.0, 1.0]
return data, class_labels
def stump_classify(data_matrix, dimension, threshold_value, threshold_ineq):
    '''
    Input: data matrix, feature dimension, classification threshold for a given feature, inequality direction
    Purpose: output the decision-stump labels
    Output: labels
    '''
return_array = np.ones((np.shape(data_matrix)[0], 1))
if threshold_ineq == 'lt':
return_array[data_matrix[:, dimension] <= threshold_value] = -1
else:
return_array[data_matrix[:, dimension] >= threshold_value] = -1
return return_array
def build_stump(data_array, class_labels, distibution):
    '''
    Input: data matrix, corresponding true class labels, weight distribution over the samples
    Purpose: find the single-layer decision tree (stump) with the smallest weighted (classification) error rate on the data set; clearly, this criterion depends closely on the weight vector
    Output: best stump (feature, classification threshold, inequality direction), minimum weighted error, estimated class labels under the weight vector D
    '''
data_matrix = np.mat(data_array)
label_mat = np.mat(class_labels).T
m, n = np.shape(data_matrix)
step_num = 10.0
best_stump = {}
best_class_est = np.mat(np.zeros((m, 1)))
min_error = np.inf
for i in range(n):
range_min = data_matrix[:, i].min()
range_max = data_matrix[:, i].max()
step_size = (range_max - range_min) / step_num
for j in range(-1, int(step_num) + 1):
for thresholdIneq in ['lt', 'gt']:
threshold_value = range_min + float(j) * step_size
predict_class = stump_classify(data_matrix, i, threshold_value, thresholdIneq)
error_array = np.mat(np.ones((m, 1)))
error_array[predict_class == label_mat] = 0
weight_err = distibution.T * error_array
if weight_err < min_error:
min_error = weight_err
best_class_est = predict_class.copy()
best_stump['dimen'] = i
best_stump['threshlod_value'] = threshold_value
best_stump['threshlod_ineq'] = thresholdIneq
return best_class_est, min_error, best_stump
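# A minimal smoke test for the stump search above (illustrative only), using the toy
# data from load_sim_data() and a uniform weight vector:
#   data, labels = load_sim_data()
#   D0 = np.mat(np.ones((5, 1)) / 5.0)
#   class_est, err, stump = build_stump(data, labels, D0)
#   # `stump` records the chosen feature index, threshold and inequality direction,
#   # and `err` is the weighted classification error under D0.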
def add_boost_train_ds(data_arr, class_labels, num_iter=40):
    '''
    Input: data set, label vector, maximum number of iterations
    Purpose: build the AdaBoost additive model
    Output: array of weak classifiers
    '''
weak_class = []
m, n = np.shape(data_arr)
D = np.mat(np.ones((m, 1))/m)
agg_class_est = np.mat(np.zeros((m, 1)))
for i in range(num_iter):
best_class_est, min_err, best_stump = build_stump(data_arr, class_labels,D)
print("D.T: ", end=" ")
print(D.T)
alpha = float(0.5 * np.log((1-min_err)/max(min_err, 1e-16)))
print("alpha:", end=" ")
print(alpha)
best_stump['alpha'] = alpha
        weak_class.append(best_stump)  # step 3: append the base classifier to the array of weak classifiers
print("class_est: ", end=" ")
print(best_class_est)
expon = np.multiply(-1*alpha*np.mat(class_labels).T, best_class_est)
D = np.multiply(D, np.exp(expon))
        D = D/D.sum()  # step 4: update the weights so that D remains a probability distribution
        agg_class_est += alpha * best_class_est  # step 5: update the accumulated class estimate
print("aggClassEst: ", end=" ")
print(agg_class_est.T)
print(np.sign(agg_class_est) != np.mat(class_labels).T)
aggError = np.multiply(np.sign(agg_class_est) !=
|
np.mat(class_labels)
|
numpy.mat
|
import unittest
from random import random, seed
import numpy as np
from skued import biexponential, exponential
seed(23)
class TestExponentialDecay(unittest.TestCase):
def setUp(self):
self.tzero = 10 * (random() - 0.5) # between -5 and 5
self.amp = 5 * random() + 5 # between 5 and 10
self.tconst = random() + 0.3 # between 0.3 and 1.3
def test_tzero_limits(self):
""" Test that the output of ``exponential`` has the correct time-zero """
t = np.arange(-10, 50, step=0.3)
I = exponential(t, tzero=self.tzero, amp=self.amp, tconst=self.tconst)
# Check that all values before time-zero are the amplitude
self.assertTrue(np.all(np.equal(I[t < self.tzero], self.amp)))
self.assertTrue(np.all(np.less(I[t > self.tzero], self.amp)))
def test_positivity(self):
""" Test that the output of ``exponential`` is always positive. """
t = np.arange(-10, 50, step=0.3)
I = exponential(t, tzero=self.tzero, amp=self.amp, tconst=self.tconst)
self.assertTrue(np.all(I > 0))
def test_amplitude(self):
""" Test that the output of ``exponential`` is at most ``amp``. """
t = np.arange(-10, 50, step=0.3)
I = exponential(t, tzero=self.tzero, amp=self.amp, tconst=self.tconst)
self.assertTrue(np.all(np.less_equal(I, self.amp)))
def test_offset(self):
""" Test that the output of ``exponential`` is at lest ``offset``. """
offset = 15
t = np.arange(-10, 50, step=0.3)
I = exponential(
t, tzero=self.tzero, amp=self.amp, tconst=self.tconst, offset=offset
)
self.assertTrue(np.all(np.greater_equal(I, offset)))
class TestBiExponentialDecay(unittest.TestCase):
def setUp(self):
self.tzero = 10 * (random() - 0.5) # between -5 and 5
self.amp1 = 5 * random() + 5 # between 5 and 10
self.tconst1 = random() + 0.3 # between 0.3 and 1.3
self.amp2 = 5 * random() + 5 # between 5 and 10
self.tconst2 = random() + 0.3 # between 0.3 and 1.3
def test_tzero_limits(self):
""" Test that the output of ``biexponential`` has the correct time-zero """
t = np.arange(-10, 50, step=0.3)
I = biexponential(
t,
tzero=self.tzero,
amp1=self.amp1,
amp2=self.amp2,
tconst1=self.tconst1,
tconst2=self.tconst2,
)
# Check that all values before time-zero are the amplitude
self.assertTrue(np.all(
|
np.equal(I[t < self.tzero], self.amp1 + self.amp2)
|
numpy.equal
|
# encoding: utf-8
from brian2 import *
from PIL import Image
import numpy as np
from scipy import misc
import matplotlib.pyplot as pyplot
import time
import math
import matlab.engine
import os
import scipy.io as sio
# three layers: input_layer, input_s1 layer and excitatory_layer,
# one to one connection between input_s1 layer and s1_layer,
# and one to N spatial connection between s1_layer and excitatory_layer
# add dynamic refining
class UnsupervisedSNM:
"""This is the Spiking Neuron Model that was presented in the paper:
"Retina-like Visual Image Reconstruction via Spiking Neural Model", CVPR'20 """
def __init__(self, config, interval, indices, times):
#super(UnsupervisedSNM, self).__init__()
self.recon_mode = config.recon_mode
self.camera_moving = False
self.camera_fix_1 = False
self.camera_fix_2 = False
if self.recon_mode == 'camera_moving':
self.camera_moving = True
elif self.recon_mode == 'camera_fix_1':
self.camera_fix_1 = True
elif self.recon_mode == 'camera_fix_2':
self.camera_fix_2 = True
else:
self.camera_moving = True
self.height = config.height
self.width = config.width
self.scale_ts = config.scale_ts
self.stable_ts = config.stable_ts
self.input_time = config.input_time
self.run_time = config.run_time
self.save_data = config.save_data
self.load_data = config.load_data
'''==========Neuron parameters========='''
tau = config.tau
vth = config.vth
tauth = config.tauth
tauth_recon = config.tauth_recon
t_refractory = config.t_refractory
taum = config.taum
Ee = config.Ee
vr = config.vr
El = config.El
taue = config.taue
gmax = config.gmax
dApre = config.dApre
taupre = config.taupre
taupost = config.taupost
dApost = -dApre * taupre / taupost * 1.05
dApost *= gmax
dApre *= gmax
self.last_change = [0 for i in range(self.width*self.height)]
self.n_input = self.height*self.width
self.interval = interval
file_name = 'temp'
if self.camera_moving != True:
self.motioncut = self.build_motion_excitation_layer(interval, file_name)
ng_input = SpikeGeneratorGroup(self.n_input, indices, times * self.scale_ts * us)
ng_e1, sm_e1 = self.build_spike_refining_layer(self.n_input, False, tau, vth, tauth)
conn1 = self.build_ordinary_connection(ng_input, ng_e1)
conn1.connect(j='i')
ng_e2, vm_e2, vth_e2 = self.build_visual_reconstruction_layer(self.n_input, False, Ee, vr, El, taum, vth, tauth_recon, taue)
conn2 = self.build_stdp_connection(ng_e1, ng_e2, True, taupre, taupost)
conn2 = self.build_spatial_connection(conn2, ng_e1, ng_e2, 15)
run(self.run_time, report='text')
self.vth_e2 = vth_e2
def build_motion_excitation_layer(self, interval, file_name):
if self.load_data:
output = np.load('./output-%s-%sus.npy'%(file_name,int(self.input_time/us)))
else:
sio.savemat('interval-%s-%sus.mat'%(file_name,int(self.input_time/us)), {'itv': interval})
eng = matlab.engine.start_matlab()
output = eng.motioncut(file_name,'%d'%(self.input_time/us),float(self.input_time/25/us), self.height, self.width, 0.5) #0.5for rotation
output = np.array(output)
eng.quit()
if self.save_data:
np.save('./output-%s-%sus.npy'%(file_name,int(self.input_time/us)),output)
return output
def build_motion_excitation_layer_simple(self, interval, time):
output = np.zeros((self.height,self.width,time))
for i in range(self.height):
for j in range(self.width):
s= 0
for t in range(len(interval[i*self.width + j])-1):
s += interval[i*self.width + j][t]
if interval[i*self.width + j][t] == 0: continue
if (abs(256/interval[i*self.width + j][t+1] - 256/interval[i*self.width + j][t])) > 5 and (abs(interval[i*self.width + j][t+1]-interval[i*self.width + j][t])>1):
output[i,j,s: s+ interval[i*self.width + j][t+1]] = 1
return output
def build_motion_excitation_layer_confid(self, interval, file_name):
sio.savemat('interval_confid-%s-%sus.mat'%(file_name,int(self.input_time/us)), {'itv': interval})
eng = matlab.engine.start_matlab()
output = eng.motioncondifence(file_name,'%d'%(self.input_time/us),float(self.input_time/25/us), 0.5) #0.5for rotation
output =
|
np.array(output)
|
numpy.array
|
import pytest
import numpy as np
import pandas as pd
from pathlib import Path
from rnalysis import general
from rnalysis.filtering import *
import os
from tests import __attr_ref__, __biotype_ref__
def test_deseqfilter_api():
d = DESeqFilter('tests/test_files/test_deseq_biotype.csv')
def test_filter_inplace():
d = DESeqFilter('tests/test_files/test_deseq_no_nans.csv')
d_copy = DESeqFilter('tests/test_files/test_deseq_no_nans.csv')
truth = general.load_csv('tests/test_files/counted.csv')
d_inplace_false = d._inplace(truth, opposite=False, inplace=False, suffix='suffix')
assert np.all(d_inplace_false.df == truth)
assert np.all(d.df == d_copy.df)
d._inplace(truth, opposite=False, inplace=True, suffix='other_suffix')
assert np.all(d.df == truth)
def test_countfilter_api():
h = CountFilter('tests/test_files/counted.csv')
def test_countfilter_normalize_to_rpm():
truth = general.load_csv(r"tests/test_files/test_norm_reads_rpm.csv", 0)
h = CountFilter(r"tests/test_files/counted.csv")
h.normalize_to_rpm(r"tests/test_files/uncounted.csv")
assert np.isclose(truth, h.df).all()
def test_countfilter_norm_reads_with_scaling_factors():
truth = general.load_csv(r"tests/test_files/test_norm_scaling_factors.csv", 0)
h = CountFilter(r"tests/test_files/counted.csv")
h.normalize_with_scaling_factors(r"tests/test_files/scaling_factors.csv")
assert np.isclose(truth, h.df).all()
def test_filter_low_reads():
truth = general.load_csv("tests/test_files/counted_low_rpm_truth.csv", 0)
h = CountFilter("tests/test_files/counted_low_rpm.csv")
h.filter_low_reads(threshold=5)
assert np.isclose(truth, h.df).all()
def test_filter_low_reads_reverse():
h = CountFilter(r"tests/test_files/counted.csv")
low_truth = general.load_csv(r"tests/test_files/counted_below60_rpm.csv", 0)
h.filter_low_reads(threshold=60, opposite=True)
h.df.sort_index(inplace=True)
low_truth.sort_index(inplace=True)
print(h.shape)
print(low_truth.shape)
print(h.df)
print(low_truth)
assert np.all(h.df == low_truth)
def test_htcount_filter_biotype():
truth_protein_coding = general.load_csv('tests/test_files/counted_biotype_protein_coding.csv', 0)
truth_pirna = general.load_csv('tests/test_files/counted_biotype_piRNA.csv', 0)
h = CountFilter("tests/test_files/counted_biotype.csv")
protein_coding = h.filter_biotype(ref=__biotype_ref__, inplace=False)
pirna = h.filter_biotype('piRNA', ref=__biotype_ref__, inplace=False)
pirna.df.sort_index(inplace=True)
protein_coding.df.sort_index(inplace=True)
truth_protein_coding.sort_index(inplace=True)
truth_pirna.sort_index(inplace=True)
assert np.all(truth_protein_coding == protein_coding.df)
assert np.all(truth_pirna == pirna.df)
def test_htcount_filter_biotype_opposite():
truth_no_pirna = general.load_csv(r'tests/test_files/counted_biotype_no_piRNA.csv', 0)
h = CountFilter("tests/test_files/counted_biotype.csv")
h.filter_biotype('piRNA', ref=__biotype_ref__, opposite=True, inplace=True)
h.df.sort_index(inplace=True)
truth_no_pirna.sort_index(inplace=True)
assert np.all(h.df == truth_no_pirna)
def test_filter_by_attribute_union():
union_truth = general.load_csv(r'tests/test_files/counted_filter_by_bigtable_union_truth.csv', 0)
h = CountFilter('tests/test_files/counted_filter_by_bigtable.csv')
union = h.filter_by_attribute(['attribute1', 'attribute2'], mode='union',
ref=__attr_ref__, inplace=False)
union.df.sort_index(inplace=True)
union_truth.sort_index(inplace=True)
assert np.all(union.df == union_truth)
def test_filter_by_attribute_intersection():
intersection_truth = general.load_csv(r'tests/test_files/counted_filter_by_bigtable_intersect_truth.csv', 0)
h = CountFilter('tests/test_files/counted_filter_by_bigtable.csv')
intersection = h.filter_by_attribute(['attribute1', 'attribute2'], mode='intersection',
ref=__attr_ref__,
inplace=False)
intersection.df.sort_index(inplace=True)
intersection_truth.sort_index(inplace=True)
assert np.all(intersection.df == intersection_truth)
def test_deseq_filter_significant():
truth = general.load_csv("tests/test_files/test_deseq_sig_truth.csv", 0)
d = DESeqFilter("tests/test_files/test_deseq_sig.csv")
d.filter_significant(alpha=0.05)
assert np.all(d.df == truth)
def test_deseq_filter_significant_opposite():
truth = general.load_csv(r'tests/test_files/test_deseq_not_sig_truth.csv', 0)
d = DESeqFilter("tests/test_files/test_deseq_sig.csv")
d.filter_significant(alpha=0.05, opposite=True)
d.df.sort_index(inplace=True)
truth.sort_index(inplace=True)
truth.fillna(1234567890, inplace=True)
d.df.fillna(1234567890, inplace=True)
assert np.all(d.df == truth)
def test_filter_top_n_ascending_number():
truth = general.load_csv("tests/test_files/test_deseq_top10.csv", 0)
d = DESeqFilter("tests/test_files/test_deseq.csv")
d.filter_top_n('padj', 10)
d.df.sort_index(inplace=True)
truth.sort_index(inplace=True)
assert np.isclose(truth, d.df).all()
def test_filter_top_n_ascending_text():
truth = general.load_csv("tests/test_files/test_deseq_top10_text_ascend.csv", 0)
d = DESeqFilter("tests/test_files/test_deseq_textcol.csv")
d.filter_top_n('textcol', 10, True)
d.df.sort_index(inplace=True)
truth.sort_index(inplace=True)
assert np.all(truth == d.df)
def test_filter_top_n_descending_number():
truth = general.load_csv("tests/test_files/test_deseq_bottom7.csv", 0)
d = DESeqFilter("tests/test_files/test_deseq.csv")
d.filter_top_n('log2FoldChange', 7, False)
d.df.sort_index(inplace=True)
truth.sort_index(inplace=True)
assert np.isclose(truth, d.df).all()
def test_filter_top_n_descending_text():
truth = general.load_csv("tests/test_files/test_deseq_bottom10_text_descend.csv", 0)
d = DESeqFilter("tests/test_files/test_deseq_textcol.csv")
d.filter_top_n('textcol', 10, False)
d.df.sort_index(inplace=True)
truth.sort_index(inplace=True)
assert np.all(truth == d.df)
def test_filter_top_n_nonexisting_column():
d = DESeqFilter("tests/test_files/test_deseq.csv")
colname = 'somecol'
with pytest.raises(AssertionError):
d.filter_top_n(colname, 5)
d.filter_top_n([d.df.columns[0], colname])
assert colname not in d.df.columns
def test_deseq_filter_abs_log2_fold_change():
truth = general.load_csv("tests/test_files/test_deseq_fc_4_truth.csv", 0)
d = DESeqFilter("tests/test_files/test_deseq_fc.csv")
fc4 = d.filter_abs_log2_fold_change(4, inplace=False)
fc4.df.sort_index(inplace=True)
truth.sort_index(inplace=True)
assert np.all(fc4.df == truth)
def test_deseq_filter_fold_change_direction():
pos_truth = general.load_csv("tests/test_files/test_deseq_fc_pos_truth.csv", 0)
neg_truth = general.load_csv("tests/test_files/test_deseq_fc_neg_truth.csv", 0)
d = DESeqFilter("tests/test_files/test_deseq_fc.csv")
pos = d.filter_fold_change_direction('pos', inplace=False)
neg = d.filter_fold_change_direction('neg', inplace=False)
assert np.all(pos.df == pos_truth)
assert np.all(neg.df == neg_truth)
def test_deseq_split_fold_change():
d = DESeqFilter("tests/test_files/test_deseq_fc.csv")
pos_truth = general.load_csv("tests/test_files/test_deseq_fc_pos_truth.csv", 0)
neg_truth = general.load_csv("tests/test_files/test_deseq_fc_neg_truth.csv", 0)
d = DESeqFilter("tests/test_files/test_deseq_fc.csv")
pos, neg = d.split_fold_change_direction()
assert np.all(pos.df == pos_truth)
assert np.all(neg.df == neg_truth)
def test_intersection():
intersection_truth = {'WBGene00021375', 'WBGene00044258', 'WBGene00219304', 'WBGene00194708', 'WBGene00018199',
'WBGene00019174', 'WBGene00021019', 'WBGene00013816', 'WBGene00045366', 'WBGene00219307',
'WBGene00045410', 'WBGene00010100', 'WBGene00077437', 'WBGene00007674', 'WBGene00023036',
'WBGene00012648', 'WBGene00022486'}
set1 = DESeqFilter('tests/test_files/test_deseq_set_ops_1.csv')
set2 = DESeqFilter('tests/test_files/test_deseq_set_ops_2.csv')
assert set1.intersection(set2, inplace=False) == intersection_truth
def test_union():
intersection_truth = {'WBGene00021375', 'WBGene00044258', 'WBGene00219304', 'WBGene00194708', 'WBGene00018199',
'WBGene00019174', 'WBGene00021019', 'WBGene00013816', 'WBGene00045366', 'WBGene00219307',
'WBGene00045410', 'WBGene00010100', 'WBGene00077437', 'WBGene00007674', 'WBGene00023036',
'WBGene00012648', 'WBGene00022486'}
set2_unique = {'WBGene00018193', 'WBGene00021589', 'WBGene00001118', 'WBGene00010755', 'WBGene00020407',
'WBGene00044799', 'WBGene00021654', 'WBGene00012919', 'WBGene00021605'}
set1_unique = {'WBGene00008447', 'WBGene00021018', 'WBGene00012452', 'WBGene00010507', 'WBGene00022730',
'WBGene00012961', 'WBGene00022438', 'WBGene00016635', 'WBGene00044478'}
set1 = DESeqFilter('tests/test_files/test_deseq_set_ops_1.csv')
set2 = DESeqFilter('tests/test_files/test_deseq_set_ops_2.csv')
union_truth = intersection_truth.union(set1_unique.union(set2_unique))
assert set1.union(set2) == union_truth
def test_difference():
set2_unique = {'WBGene00018193', 'WBGene00021589', 'WBGene00001118', 'WBGene00010755', 'WBGene00020407',
'WBGene00044799', 'WBGene00021654', 'WBGene00012919', 'WBGene00021605'}
set1_unique = {'WBGene00008447', 'WBGene00021018', 'WBGene00012452', 'WBGene00010507', 'WBGene00022730',
'WBGene00012961', 'WBGene00022438', 'WBGene00016635', 'WBGene00044478'}
set1 = DESeqFilter('tests/test_files/test_deseq_set_ops_1.csv')
set2 = DESeqFilter('tests/test_files/test_deseq_set_ops_2.csv')
assert set1.difference(set2, inplace=False) == set1_unique
assert set2.difference(set1, inplace=False) == set2_unique
def test_symmetric_difference():
set2_unique = {'WBGene00018193', 'WBGene00021589', 'WBGene00001118', 'WBGene00010755', 'WBGene00020407',
'WBGene00044799', 'WBGene00021654', 'WBGene00012919', 'WBGene00021605'}
set1_unique = {'WBGene00008447', 'WBGene00021018', 'WBGene00012452', 'WBGene00010507', 'WBGene00022730',
'WBGene00012961', 'WBGene00022438', 'WBGene00016635', 'WBGene00044478'}
set1 = DESeqFilter('tests/test_files/test_deseq_set_ops_1.csv')
set2 = DESeqFilter('tests/test_files/test_deseq_set_ops_2.csv')
assert set1.symmetric_difference(set2) == set2.symmetric_difference(set1)
assert set1.symmetric_difference(set2) == set1_unique.union(set2_unique)
def test_deseq_feature_set():
truth = {'WBGene00008447', 'WBGene00021018', 'WBGene00012452', 'WBGene00010507', 'WBGene00022730', 'WBGene00012648',
'WBGene00012961', 'WBGene00022438', 'WBGene00016635', 'WBGene00044478', 'WBGene00021375',
'WBGene00044258', 'WBGene00219304', 'WBGene00194708', 'WBGene00018199', 'WBGene00022486',
'WBGene00019174', 'WBGene00021019', 'WBGene00013816', 'WBGene00045366', 'WBGene00219307',
'WBGene00045410', 'WBGene00010100', 'WBGene00077437', 'WBGene00007674', 'WBGene00023036'}
d = DESeqFilter('tests/test_files/test_deseq_set_ops_1.csv')
assert d.index_set == truth
def test_deseq_feature_string():
truth = {'WBGene00008447', 'WBGene00021018', 'WBGene00012452', 'WBGene00010507', 'WBGene00022730', 'WBGene00012648',
'WBGene00012961', 'WBGene00022438', 'WBGene00016635', 'WBGene00044478', 'WBGene00021375',
'WBGene00044258', 'WBGene00219304', 'WBGene00194708', 'WBGene00018199', 'WBGene00022486',
'WBGene00019174', 'WBGene00021019', 'WBGene00013816', 'WBGene00045366', 'WBGene00219307',
'WBGene00045410', 'WBGene00010100', 'WBGene00077437', 'WBGene00007674', 'WBGene00023036'}
d = DESeqFilter('tests/test_files/test_deseq_set_ops_1.csv')
assert set(d.index_string.split("\n")) == truth
def test_set_ops_multiple_variable_types():
set2_unique = {'WBGene00018193', 'WBGene00021589', 'WBGene00001118', 'WBGene00010755', 'WBGene00020407',
'WBGene00044799', 'WBGene00021654', 'WBGene00012919', 'WBGene00021605'}
set1_unique = {'WBGene00008447', 'WBGene00021018', 'WBGene00012452', 'WBGene00010507', 'WBGene00022730',
'WBGene00012961', 'WBGene00022438', 'WBGene00016635', 'WBGene00044478'}
set1 = CountFilter('tests/test_files/test_deseq_set_ops_1.csv')
set2 = DESeqFilter('tests/test_files/test_deseq_set_ops_2.csv')
assert set1.symmetric_difference(set2) == set2.symmetric_difference(set1)
assert set1.symmetric_difference(set2) == set1_unique.union(set2_unique)
def test_htcount_rpm_negative_threshold():
h = CountFilter("tests/test_files/counted.csv")
with pytest.raises(AssertionError):
h.filter_low_reads(threshold=-3)
def test_htcount_threshold_invalid():
h = CountFilter("tests/test_files/counted.csv")
with pytest.raises(AssertionError):
h.filter_low_reads("5")
def test_htcount_split_by_reads():
h = CountFilter(r"tests/test_files/counted.csv")
high_truth = general.load_csv(r"tests/test_files/counted_above60_rpm.csv", 0)
low_truth = general.load_csv(r"tests/test_files/counted_below60_rpm.csv", 0)
high, low = h.split_by_reads(threshold=60)
assert np.all(high.df == high_truth)
assert np.all(low.df == low_truth)
def test_filter_percentile():
truth = general.load_csv(r'tests/test_files/test_deseq_percentile_0.25.csv', 0)
h = DESeqFilter(r'tests/test_files/test_deseq_percentile.csv')
h.filter_percentile(0.25, 'padj', inplace=True)
h.df.sort_index(inplace=True)
truth.sort_index(inplace=True)
assert np.all(h.df == truth)
def test_split_by_percentile():
truth_below = general.load_csv(r'tests/test_files/test_deseq_percentile_0.25.csv', 0)
truth_above = general.load_csv(r'tests/test_files/test_deseq_percentile_0.75.csv', 0)
h = DESeqFilter(r'tests/test_files/test_deseq_percentile.csv')
below, above = h.split_by_percentile(0.25, 'padj')
for i in [truth_below, truth_above, below.df, above.df]:
i.sort_index(inplace=True)
assert np.all(truth_below == below.df)
assert np.all(truth_above == above.df)
def test_htcount_filter_biotype_multiple():
truth = general.load_csv('tests/test_files/counted_biotype_piRNA_protein_coding.csv', 0)
h = CountFilter("tests/test_files/counted_biotype.csv")
both = h.filter_biotype(['protein_coding', 'piRNA'], ref=__biotype_ref__, inplace=False)
both.df.sort_index(inplace=True)
truth.sort_index(inplace=True)
assert np.all(truth == both.df)
def test_htcount_filter_biotype_multiple_opposite():
truth = general.load_csv('tests/test_files/counted_biotype_piRNA_protein_coding_opposite.csv', 0)
h = CountFilter("tests/test_files/counted_biotype.csv")
neither = h.filter_biotype(['protein_coding', 'piRNA'], ref=__biotype_ref__, inplace=False,
opposite=True)
neither.df.sort_index(inplace=True)
truth.sort_index(inplace=True)
assert np.all(truth == neither.df)
def test_deseq_filter_biotype():
truth_protein_coding = general.load_csv('tests/test_files/test_deseq_biotype_protein_coding.csv', 0)
truth_pirna = general.load_csv('tests/test_files/test_deseq_biotype_piRNA.csv', 0)
d = DESeqFilter("tests/test_files/test_deseq_biotype.csv")
protein_coding = d.filter_biotype(ref=__biotype_ref__, inplace=False)
pirna = d.filter_biotype('piRNA', ref=__biotype_ref__, inplace=False)
pirna.df.sort_index(inplace=True)
protein_coding.df.sort_index(inplace=True)
truth_protein_coding.sort_index(inplace=True)
truth_pirna.sort_index(inplace=True)
assert np.all(truth_protein_coding == protein_coding.df)
assert np.all(truth_pirna == pirna.df)
def test_deseq_filter_biotype_opposite():
truth_no_pirna = general.load_csv(r'tests/test_files/test_deseq_biotype_piRNA_opposite.csv', 0)
d = DESeqFilter("tests/test_files/test_deseq_biotype.csv")
d.filter_biotype('piRNA', ref=__biotype_ref__, opposite=True, inplace=True)
d.df.sort_index(inplace=True)
truth_no_pirna.sort_index(inplace=True)
assert np.all(d.df == truth_no_pirna)
def test_deseq_filter_biotype_multiple():
truth = general.load_csv('tests/test_files/test_deseq_biotype_piRNA_protein_coding.csv', 0)
d = DESeqFilter("tests/test_files/test_deseq_biotype.csv")
both = d.filter_biotype(['protein_coding', 'piRNA'], ref=__biotype_ref__, inplace=False)
both.df.sort_index(inplace=True)
truth.sort_index(inplace=True)
assert np.all(truth == both.df)
def test_deseq_filter_biotype_multiple_opposite():
truth = general.load_csv('tests/test_files/test_deseq_biotype_piRNA_protein_coding_opposite.csv', 0)
d = DESeqFilter("tests/test_files/test_deseq_biotype.csv")
neither = d.filter_biotype(['protein_coding', 'piRNA'], ref=__biotype_ref__, inplace=False,
opposite=True)
neither.df.sort_index(inplace=True)
truth.sort_index(inplace=True)
assert np.all(truth == neither.df)
def test_deseqfilter_union_multiple():
intersection_truth = {'WBGene00021375', 'WBGene00044258', 'WBGene00219304', 'WBGene00194708', 'WBGene00018199',
'WBGene00019174', 'WBGene00021019', 'WBGene00013816', 'WBGene00045366', 'WBGene00219307',
'WBGene00045410', 'WBGene00010100', 'WBGene00077437', 'WBGene00007674', 'WBGene00023036',
'WBGene00012648', 'WBGene00022486'}
set2_unique = {'WBGene00018193', 'WBGene00021589', 'WBGene00001118', 'WBGene00010755', 'WBGene00020407',
'WBGene00044799', 'WBGene00021654', 'WBGene00012919', 'WBGene00021605'}
set1_unique = {'WBGene00008447', 'WBGene00021018', 'WBGene00012452', 'WBGene00010507', 'WBGene00022730',
'WBGene00012961', 'WBGene00022438', 'WBGene00016635', 'WBGene00044478'}
set3_unique = {'WBGene44444444', 'WBGene99999999', 'WBGene98765432'}
set1 = DESeqFilter('tests/test_files/test_deseq_set_ops_1.csv')
set2 = DESeqFilter('tests/test_files/test_deseq_set_ops_2.csv')
set3 = {'WBGene00077437', 'WBGene00007674', 'WBGene00023036', 'WBGene00012648', 'WBGene44444444', 'WBGene99999999',
'WBGene98765432'}
union_truth = intersection_truth.union(set1_unique, set2_unique, set3_unique)
assert set1.union(set2, set3) == union_truth
def test_deseqfilter_intersection_multiple():
intersection_truth = {'WBGene00077437', 'WBGene00007674', 'WBGene00023036',
'WBGene00012648', 'WBGene00022486'}
set1 = DESeqFilter('tests/test_files/test_deseq_set_ops_1.csv')
set2 = DESeqFilter('tests/test_files/test_deseq_set_ops_2.csv')
set3 = {'WBGene00077437', 'WBGene00007674', 'WBGene00023036', 'WBGene00012648', 'WBGene00022486', 'WBGene99999999',
'WBGene98765432'}
assert set1.intersection(set2, set3, inplace=False) == intersection_truth
def test_deseqfilter_difference_multiple():
set2_unique = {'WBGene00021589', 'WBGene00001118', 'WBGene00010755', 'WBGene00020407',
'WBGene00044799', 'WBGene00021654', 'WBGene00012919', 'WBGene00021605'}
set1_unique = {'WBGene00021018', 'WBGene00012452', 'WBGene00010507', 'WBGene00022730',
'WBGene00012961', 'WBGene00022438', 'WBGene00016635', 'WBGene00044478'}
set1 = DESeqFilter('tests/test_files/test_deseq_set_ops_1.csv')
set2 = DESeqFilter('tests/test_files/test_deseq_set_ops_2.csv')
set3 = {'WBGene00018193', 'WBGene00008447', 'WBGene12345678'}
assert set1.difference(set2, set3, inplace=False) == set1_unique
assert set2.difference(set3, set1, inplace=False) == set2_unique
def test_intersection_inplace():
set1_truth = general.load_csv('tests/test_files/test_deseq_set_ops_1_inplace_intersection.csv', 0)
set2_truth = general.load_csv('tests/test_files/test_deseq_set_ops_2_inplace_intersection.csv', 0)
set1 = DESeqFilter('tests/test_files/test_deseq_set_ops_1.csv')
set2 = DESeqFilter('tests/test_files/test_deseq_set_ops_2.csv')
set1_int = set1.__copy__()
set2_int = set2.__copy__()
set1_int.intersection(set2, inplace=True)
set2_int.intersection(set1, inplace=True)
set1_int.df.sort_index(inplace=True)
set2_int.df.sort_index(inplace=True)
set1_truth.sort_index(inplace=True)
set2_truth.sort_index(inplace=True)
assert np.all(set1_truth == set1_int.df)
assert np.all(set2_truth == set2_int.df)
def test_difference_inplace():
set1_truth = general.load_csv('tests/test_files/test_deseq_set_ops_1_inplace_difference.csv', 0)
set2_truth = general.load_csv('tests/test_files/test_deseq_set_ops_2_inplace_difference.csv', 0)
set1 = DESeqFilter('tests/test_files/test_deseq_set_ops_1.csv')
set2 = DESeqFilter('tests/test_files/test_deseq_set_ops_2.csv')
set1_diff = set1.__copy__()
set2_diff = set2.__copy__()
set1_diff.difference(set2, inplace=True)
set2_diff.difference(set1, inplace=True)
set1_diff.df.sort_index(inplace=True)
set2_diff.df.sort_index(inplace=True)
set1_truth.sort_index(inplace=True)
set2_truth.sort_index(inplace=True)
assert np.all(set1_truth == set1_diff.df)
assert np.all(set2_truth == set2_diff.df)
def test_htcount_fold_change():
truth_num_name = f"Mean of {['cond1_rep1', 'cond1_rep2']}"
truth_denom_name = f"Mean of {['cond2_rep1', 'cond2_rep2']}"
truth = general.load_csv(r'tests/test_files/counted_fold_change_truth.csv', 0)
truth = truth.squeeze()
h = CountFilter(r'tests/test_files/counted_fold_change.csv')
fc = h.fold_change(['cond1_rep1', 'cond1_rep2'], ['cond2_rep1', 'cond2_rep2'])
assert truth_num_name == fc.numerator
assert truth_denom_name == fc.denominator
assert np.all(np.isclose(fc.df, truth))
def test_fc_randomization():
truth = general.load_csv('tests/test_files/fc_randomization_truth.csv')
fc1 = FoldChangeFilter("tests/test_files/fc_1.csv", 'a', 'b')
fc2 = FoldChangeFilter("tests/test_files/fc_2.csv", "c", "d")
random_state = np.random.get_state()
res = fc1.randomization_test(fc2)
try:
assert np.all(truth['significant'] == res['significant'])
assert np.isclose(truth.iloc[:, :-1], res.iloc[:, :-1]).all()
except AssertionError:
raise AssertionError(f'Enrichment test failed with the numpy.random state: \n{random_state}')
def test_fcfilter_filter_abs_fc():
truth = general.load_csv('tests/test_files/fcfilter_abs_fold_change_truth.csv', 0)
truth = truth.squeeze()
truth.sort_index(inplace=True)
f = FoldChangeFilter('tests/test_files/counted_fold_change_truth.csv', 'numer', 'denom')
f.filter_abs_log2_fold_change(1)
f.df.sort_index(inplace=True)
print(f.df.values)
print(truth.values)
assert np.all(np.squeeze(f.df.values) == np.squeeze(truth.values))
def test_number_filters_gt():
truth = general.load_csv(r'tests/test_files/test_deseq_gt.csv', 0)
d = DESeqFilter(r'tests/test_files/test_deseq.csv')
filt_1 = d.number_filters('baseMean', '>', 1000, inplace=False)
filt_2 = d.number_filters('baseMean', 'GT', 1000, inplace=False)
filt_3 = d.number_filters('baseMean', 'greater tHAn', 1000, inplace=False)
filt_1.df.sort_index(inplace=True)
filt_2.df.sort_index(inplace=True)
filt_3.df.sort_index(inplace=True)
truth.sort_index(inplace=True)
assert np.all(filt_1.df == filt_2.df)
assert np.all(filt_2.df == filt_3.df)
assert np.all(np.squeeze(truth) == np.squeeze(filt_1.df))
def test_number_filters_lt():
truth = general.load_csv(r'tests/test_files/test_deseq_lt.csv', 0)
d = DESeqFilter(r'tests/test_files/test_deseq.csv')
filt_1 = d.number_filters('lfcSE', 'Lesser than', 0.2, inplace=False)
filt_2 = d.number_filters('lfcSE', 'lt', 0.2, inplace=False)
filt_3 = d.number_filters('lfcSE', '<', 0.2, inplace=False)
filt_1.df.sort_index(inplace=True)
filt_2.df.sort_index(inplace=True)
filt_3.df.sort_index(inplace=True)
truth.sort_index(inplace=True)
assert np.all(filt_1.df == filt_2.df)
assert np.all(filt_2.df == filt_3.df)
assert np.all(np.squeeze(truth) == np.squeeze(filt_1.df))
def test_number_filters_eq():
truth = general.load_csv(r'tests/test_files/counted_eq.csv', 0)
d = CountFilter(r'tests/test_files/counted.csv')
filt_1 = d.number_filters('cond2', 'eQ', 0, inplace=False)
filt_2 = d.number_filters('cond2', '=', 0, inplace=False)
filt_3 = d.number_filters('cond2', 'Equals', 0, inplace=False)
filt_1.df.sort_index(inplace=True)
filt_2.df.sort_index(inplace=True)
filt_3.df.sort_index(inplace=True)
truth.sort_index(inplace=True)
assert np.all(filt_1.df == filt_2.df)
assert np.all(filt_2.df == filt_3.df)
assert np.all(np.squeeze(truth) == np.squeeze(filt_1.df))
def test_number_filters_invalid_input():
d = CountFilter(r'tests/test_files/counted.csv')
with pytest.raises(AssertionError):
d.number_filters('Cond2', 'lt', 5)
with pytest.raises(AssertionError):
d.number_filters('cond2', 'contains', 6)
with pytest.raises(AssertionError):
d.number_filters('cond2', 'equals', '55')
def test_text_filters_eq():
truth = general.load_csv(r'tests/test_files/text_filters_eq.csv', 0)
d = CountFilter(r'tests/test_files/text_filters.csv')
filt_1 = d.text_filters('class', 'eQ', 'B', inplace=False)
filt_2 = d.text_filters('class', '=', 'B', inplace=False)
filt_3 = d.text_filters('class', 'Equals', 'B', inplace=False)
filt_1.df.sort_index(inplace=True)
filt_2.df.sort_index(inplace=True)
filt_3.df.sort_index(inplace=True)
truth.sort_index(inplace=True)
assert np.all(filt_1.df == filt_2.df)
assert np.all(filt_2.df == filt_3.df)
assert np.all(np.squeeze(truth) == np.squeeze(filt_1.df))
def test_text_filters_ct():
truth = general.load_csv(r'tests/test_files/text_filters_ct.csv', 0)
d = CountFilter(r'tests/test_files/text_filters.csv')
filt_1 = d.text_filters('name', 'ct', 'C3.', inplace=False)
filt_2 = d.text_filters('name', 'IN', 'C3.', inplace=False)
filt_3 = d.text_filters('name', 'contaiNs', 'C3.', inplace=False)
filt_1.df.sort_index(inplace=True)
filt_2.df.sort_index(inplace=True)
filt_3.df.sort_index(inplace=True)
truth.sort_index(inplace=True)
assert np.all(filt_1.df == filt_2.df)
assert np.all(filt_2.df == filt_3.df)
assert np.all(np.squeeze(truth) == np.squeeze(filt_1.df))
def test_text_filters_sw():
truth = general.load_csv(r'tests/test_files/text_filters_sw.csv', 0)
d = CountFilter(r'tests/test_files/text_filters.csv')
filt_1 = d.text_filters('name', 'sw', '2R', inplace=False)
filt_2 = d.text_filters('name', 'Starts With', '2R', inplace=False)
filt_1.df.sort_index(inplace=True)
filt_2.df.sort_index(inplace=True)
truth.sort_index(inplace=True)
print(filt_1.df)
assert np.all(filt_1.df == filt_2.df)
assert np.all(np.squeeze(truth) ==
|
np.squeeze(filt_1.df)
|
numpy.squeeze
|
"""
precession. TODO: write me here
"""
import warnings
import numpy as np
import scipy.special
import scipy.integrate
from sympy import elliptic_pi
def roots_vec(p):
"""
Locate roots of polynomial using a vectorized version of numpy.roots. Equivalent to [np.roots(x) for x in p].
Credits: stackoverflow user `pv`, see https://stackoverflow.com/a/35853977
Call
----
roots = roots_vec(p)
Parameters
----------
p: array
Polynomial coefficients.
Returns
-------
roots: array
Polynomial roots.
"""
p = np.atleast_1d(p)
n = p.shape[-1]
A = np.zeros(p.shape[:1] + (n-1, n-1), float)
A[..., 1:, :-1] = np.eye(n-2)
A[..., 0, :] = -p[..., 1:]/p[..., None, 0]
return np.linalg.eigvals(A)
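# Quick check of the vectorized root finder above (illustrative): for x**2 - 3x + 2
# the roots are 2 and 1, so
#   roots_vec([[1.0, -3.0, 2.0]])   # -> array([[2., 1.]]) (ordering not guaranteed)
# which agrees with np.roots([1.0, -3.0, 2.0]) up to ordering.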
def norm_nested(x):
"""
Norm of 2D array of shape (N,3) along last axis.
Call
----
    x = norm_nested(x)
Parameters
----------
x : array
Input array.
Returns
-------
x : array
Normalized array.
"""
return np.linalg.norm(x, axis=1)
def normalize_nested(x):
"""
Normalize 2D array of shape (N,3) along last axis.
Call
----
x = normalize_nested(x)
Parameters
----------
x : array
Input array.
Returns
-------
x : array
Normalized array.
"""
return x/norm_nested(x)[:, None]
def dot_nested(x, y):
"""
Dot product between 2D arrays along last axis.
Call
----
z = dot_nested(x, y)
Parameters
----------
x : array
Input array.
y : array
Input array.
Returns
-------
z : array
Dot product array.
"""
return np.einsum('ij, ij->i', x, y)
def sample_unitsphere(N=1):
"""
Sample points uniformly on a sphere of unit radius. Returns array of shape (N,3).
Call
----
vec = sample_unitsphere(N = 1)
Parameters
----------
N: integer, optional (default: 1)
Number of samples.
Returns
-------
vec: array
        Vector in Cartesian components.
"""
vec = np.random.randn(3, N)
vec /= np.linalg.norm(vec, axis=0)
return vec.T
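# Quick sanity check (illustrative): the returned samples all have unit norm.
#   vecs = sample_unitsphere(5)                      # shape (5, 3)
#   np.allclose(np.linalg.norm(vecs, axis=1), 1.0)   # -> True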
def wraproots(coefficientfunction, *args, **kwargs):
"""
Find roots of a polynomial given coefficients, ordered according to their real part. Complex roots are masked with nans. This is essentially a wrapper of numpy.roots.
Call
----
sols = precession.wraproots(coefficientfunction, *args, **kwargs)
Parameters
----------
coefficientfunction: callable
Function returning the polynomial coefficients ordered from highest to lowest degree.
*args, **kwargs:
Parameters of `coefficientfunction`.
Returns
-------
sols: array
Roots of the polynomial.
"""
coeffs = coefficientfunction(*args, **kwargs)
sols = np.sort_complex(roots_vec(coeffs.T))
sols = np.real(np.where(np.isreal(sols), sols, np.nan))
return sols
@np.vectorize
def ellippi(n, phi, m):
"""
Incomplete elliptic integral of the third kind. At the time of writing, this has not been implemented in scipy yet; here wrapping the sympy implementation. For the complete integral, set phi=np.pi/2.
Call
----
piintegral = precession.ellippi(n, phi, m)
Parameters
----------
n: float
Characteristic of the elliptic integral.
phi: float
Amplitude of the elliptic integral.
m: float
Parameter of the elliptic integral
Returns
-------
piintegral: float
Incomplete elliptic integral of the third kind
"""
return float(elliptic_pi(n, phi, m))
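# Illustrative sketch (added; helper name hypothetical): for n=0 and m=0 the
# integrand reduces to 1, so the incomplete integral equals the amplitude phi.
def _example_ellippi():
    return ellippi(0., np.pi/2, 0.)  # ~1.5708, i.e. pi/2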
def rotate_zaxis(vec, angle):
"""
Rotate a series of arrays about the z axis by a given angle. Input vec has shape (N,3) and input angle has shape (N,).
Call
----
newvec = rotate_zaxis(vec,angle)
Parameters
----------
vec: array
Input array.
angle: float
Rotation angle.
Returns
-------
newvec: array
Rotated array.
"""
newx = vec[:, 0]*np.cos(angle) - vec[:, 1]*np.sin(angle)
newy = vec[:, 0]*np.sin(angle) + vec[:, 1]*np.cos(angle)
newz = vec[:, 2]
newvec = np.transpose([newx, newy, newz])
return newvec
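# Illustrative sketch (added; helper name hypothetical): a quarter turn about
# the z axis maps the x axis onto the y axis.
def _example_rotate_zaxis():
    return rotate_zaxis(np.array([[1., 0., 0.]]), np.pi/2)  # ~array([[0., 1., 0.]])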
def ismonotonic(vec, which):
"""
Check if an array is monotonic. The parameter `which` can take the following values:
- `<` check array is strictly increasing.
- `<=` check array is increasing.
- `>` check array is strictly decreasing.
- `>=` check array is decreasing.
Call
----
check = ismonotonic(vec, which):
Parameters
----------
vec: array
Input array.
which: string
Select function behavior.
Returns
-------
check: boolean
Result
"""
if which == '<':
return np.all(vec[:-1] < vec[1:])
elif which == '<=':
return np.all(vec[:-1] <= vec[1:])
elif which == '>':
return np.all(vec[:-1] > vec[1:])
elif which == '>=':
return np.all(vec[:-1] >= vec[1:])
else:
raise ValueError("`which` needs to be one of the following: `>`, `>=`, `<`, `<=`.")
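# Illustrative sketch (added; helper name hypothetical):
def _example_ismonotonic():
    vec = np.array([1., 2., 2., 3.])
    return ismonotonic(vec, '<='), ismonotonic(vec, '<')  # (True, False)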
# Definitions
def eval_m1(q):
"""
Mass of the heavier black hole in units of the total mass.
Call
----
m1 = eval_m1(q)
Parameters
----------
q: float
Mass ratio: 0<=q<=1.
Returns
-------
m1: float
Mass of the primary (heavier) black hole.
"""
q = np.atleast_1d(q)
m1 = 1/(1+q)
return m1
def eval_m2(q):
"""
Mass of the lighter black hole in units of the total mass.
Call
----
m2 = eval_m2(q)
Parameters
----------
q: float
Mass ratio: 0<=q<=1.
Returns
-------
m2: float
Mass of the secondary (lighter) black hole.
"""
q = np.atleast_1d(q)
m2 = q/(1+q)
return m2
def masses(q):
"""
Masses of the two black holes in units of the total mass.
Call
----
m1,m2 = masses(q)
Parameters
----------
q: float
Mass ratio: 0<=q<=1.
Returns
-------
m1: float
Mass of the primary (heavier) black hole.
m2: float
Mass of the secondary (lighter) black hole.
"""
m1 = eval_m1(q)
m2 = eval_m2(q)
return np.stack([m1, m2])
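# Illustrative sketch (added; helper name hypothetical): the component masses
# always add up to the total mass, which is 1 in these units.
def _example_masses():
    m1, m2 = masses(0.8)
    return m1 + m2  # array([1.])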
def eval_q(m1, m2):
"""
Mass ratio, 0 < q = m2/m1 < 1.
Call
----
q = eval_q(m1,m2)
Parameters
----------
m1: float
Mass of the primary (heavier) black hole.
m2: float
Mass of the secondary (lighter) black hole.
Returns
-------
q: float
Mass ratio: 0<=q<=1.
"""
m1 = np.atleast_1d(m1)
m2 = np.atleast_1d(m2)
q = m2/m1
assert (q < 1).all(), "The convention used in this code is q=m2/m1<1."
return q
def eval_eta(q):
"""
Symmetric mass ratio eta = m1*m2/(m1+m2)^2 = q/(1+q)^2.
Call
----
eta = eval_eta(q)
Parameters
----------
q: float
Mass ratio: 0<=q<=1.
Returns
-------
eta: float
Symmetric mass ratio 0<=eta<=1/4.
"""
q = np.atleast_1d(q)
eta = q/(1+q)**2
return eta
def eval_S1(q, chi1):
"""
Spin angular momentum of the heavier black hole.
Call
----
S1 = eval_S1(q,chi1)
Parameters
----------
q: float
Mass ratio: 0<=q<=1.
chi1: float
Dimensionless spin of the primary (heavier) black hole: 0<=chi1<=1.
Returns
-------
S1: float
Magnitude of the primary spin.
"""
chi1 = np.atleast_1d(chi1)
S1 = chi1*(eval_m1(q))**2
return S1
def eval_S2(q, chi2):
"""
Spin angular momentum of the lighter black hole.
Call
----
S2 = eval_S2(q,chi2)
Parameters
----------
q: float
Mass ratio: 0<=q<=1.
chi2: float
Dimensionless spin of the secondary (lighter) black hole: 0<=chi2<=1.
Returns
-------
S2: float
Magnitude of the secondary spin.
"""
chi2 = np.atleast_1d(chi2)
S2 = chi2*(eval_m2(q))**2
return S2
def spinmags(q, chi1, chi2):
"""
Spins of the black holes in units of the total mass.
Call
----
S1,S2 = spinmags(q,chi1,chi2)
Parameters
----------
q: float
Mass ratio: 0<=q<=1.
chi1: float
Dimensionless spin of the primary (heavier) black hole: 0<=chi1<=1.
chi2: float
Dimensionless spin of the secondary (lighter) black hole: 0<=chi2<=1.
Returns
-------
S1: float
Magnitude of the primary spin.
S2: float
Magnitude of the secondary spin.
"""
S1 = eval_S1(q, chi1)
S2 = eval_S2(q, chi2)
return np.stack([S1, S2])
def eval_L(r, q):
"""
Newtonian angular momentum of the binary.
Call
----
L = eval_L(r,q)
Parameters
----------
r: float
Binary separation.
q: float
Mass ratio: 0<=q<=1.
Returns
-------
L: float
Magnitude of the Newtonian orbital angular momentum.
"""
r = np.atleast_1d(r)
L = eval_m1(q)*eval_m2(q)*r**0.5
return L
def eval_v(r):
"""
Newtonian orbital velocity of the binary.
Call
----
v = eval_v(r)
Parameters
----------
r: float
Binary separation.
Returns
-------
v: float
Newtonian orbital velocity.
"""
r = np.atleast_1d(r)
v = 1/r**0.5
return v
def eval_r(L=None, u=None, q=None):
"""
Orbital separation of the binary. Valid inputs are either (L,q) or (u,q).
Call
----
r = eval_r(L=None,u=None,q=None)
Parameters
----------
L: float, optional (default: None)
Magnitude of the Newtonian orbital angular momentum.
u: float, optional (default: None)
Compactified separation 1/(2L).
q: float, optional (default: None)
Mass ratio: 0<=q<=1.
Returns
-------
r: float
Binary separation.
"""
if L is not None and u is None and q is not None:
L = np.atleast_1d(L)
m1, m2 = masses(q)
r = (L / (m1 * m2))**2
elif L is None and u is not None and q is not None:
u = np.atleast_1d(u)
r = (2*eval_m1(q)*eval_m2(q)*u)**(-2)
else:
raise TypeError("Provide either (L,q) or (u,q).")
return r
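# Illustrative round-trip sketch (added; helper name hypothetical): converting
# L back to r recovers the input separation.
def _example_eval_r_roundtrip():
    L = eval_L(r=10., q=0.8)
    return eval_r(L=L, q=0.8)  # ~array([10.])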
# Limits
def Jlimits_LS1S2(r, q, chi1, chi2):
"""
Limits on the magnitude of the total angular momentum due to the vector relation J=L+S1+S2.
Call
----
Jmin,Jmax = Jlimits_LS1S2(r,q,chi1,chi2)
Parameters
----------
r: float
Binary separation.
q: float
Mass ratio: 0<=q<=1.
chi1: float
Dimensionless spin of the primary (heavier) black hole: 0<=chi1<=1.
chi2: float
Dimensionless spin of the secondary (lighter) black hole: 0<=chi2<=1.
Returns
-------
Jmin: float
Minimum value of the total angular momentum J.
Jmax: float
Maximum value of the total angular momentum J.
"""
S1, S2 = spinmags(q, chi1, chi2)
L = eval_L(r, q)
Jmin = np.maximum.reduce([np.zeros(L.shape), L-S1-S2, np.abs(S1-S2)-L])
Jmax = L+S1+S2
return np.stack([Jmin, Jmax])
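# Illustrative sketch (added; helper name hypothetical): the returned limits
# encode the triangle-like bound max(0, L-S1-S2, |S1-S2|-L) <= J <= L+S1+S2.
def _example_Jlimits():
    Jmin, Jmax = Jlimits_LS1S2(r=10., q=0.8, chi1=1., chi2=1.)
    return Jmin <= Jmax  # array([ True])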
def kappadiscriminant_coefficients(u, chieff, q, chi1, chi2):
"""
Coefficients of the quintic equation in kappa that defines the spin-orbit resonances.
Call
----
coeff5,coeff4,coeff3,coeff2,coeff1,coeff0 = kappadiscriminant_coefficients(u,chieff,q,chi1,chi2)
Parameters
----------
u: float
Compactified separation 1/(2L).
chieff: float
Effective spin.
q: float
Mass ratio: 0<=q<=1.
chi1: float
Dimensionless spin of the primary (heavier) black hole: 0<=chi1<=1.
chi2: float
Dimensionless spin of the secondary (lighter) black hole: 0<=chi2<=1.
Returns
-------
coeff5: float
Coefficient to the x^5 term in polynomial.
coeff4: float
Coefficient to the x^4 term in polynomial.
coeff3: float
Coefficient to the x^3 term in polynomial.
coeff2: float
Coefficient to the x^2 term in polynomial.
coeff1: float
Coefficient to the x^1 term in polynomial.
coeff0: float
Coefficient to the x^0 term in polynomial.
"""
u = np.atleast_1d(u)
q = np.atleast_1d(q)
chieff = np.atleast_1d(chieff)
S1, S2 = spinmags(q, chi1, chi2)
# Machine generated with polycoefficients.nb
coeff5 = -256 * q**3 * ((1 + q))**6 * u
# Machine generated with polycoefficients.nb
coeff4 = 16 * q**2 * ((1 + q))**4 * (((-1 + q**2))**2 + (-16 * ((1 +
q))**2 * (q * (-5 + 3 * q) * S1**2 + (3 + -5 * q) * S2**2) * u**2 +
(40 * q * ((1 + q))**2 * u * chieff + 16 * q**2 * u**2 * chieff**2)))
# Machine generated with polycoefficients.nb
coeff3 = -32 * q * ((1 + q))**4 * (2 * q**6 * S1**2 * u * (-5 + 12 *
S1**2 * u**2) + (2 * S2**2 * u * (-5 + 12 * S2**2 * u**2) + (2 * q**2
* u * (40 * S1**4 * u**2 + (-44 * S2**4 * u**2 + (8 * chieff**2 +
(S1**2 * (-5 + (-8 * S2**2 * u**2 + 40 * u * chieff)) + -2 * S2**2 *
(-5 + 4 * u * chieff * (1 + u * chieff)))))) + (2 * q**3 * (32 *
S1**4 * u**3 + (32 * S2**4 * u**3 + (chieff * (-1 + 8 * u * chieff *
(3 + u * chieff)) + (2 * S2**2 * u * (-1 + u * chieff * (17 + 8 * u *
chieff)) + 2 * S1**2 * u * (-1 + (40 * S2**2 * u**2 + u * chieff *
(17 + 8 * u * chieff))))))) + (q * (chieff + 2 * u * (S1**2 * (1 +
-48 * S2**2 * u**2) + S2**2 * (1 + -2 * u * (12 * S2**2 * u +
chieff)))) + (q**5 * (chieff + 2 * u * (S2**2 + S1**2 * (1 + -2 * u *
(12 * (S1**2 + 2 * S2**2) * u + chieff)))) + -2 * q**4 * u * (5 *
S2**2 + (44 * S1**4 * u**2 + (-8 * (5 * S2**4 * u**2 + (5 * S2**2 * u
* chieff + chieff**2)) + 2 * S1**2 * (-5 + 4 * u * (chieff + u *
(S2**2 + chieff**2))))))))))))
# Machine generated with polycoefficients.nb
coeff2 = -16 * ((1 + q))**2 * (16 * (-1 + q) * q**3 * ((1 + q))**4 *
(10 + (-8 + q) * q) * S1**6 * u**4 + (-16 * ((-1 + q))**3 * ((1 +
q))**4 * S2**6 * u**4 + (-1 * ((-1 + q**2))**2 * S2**4 * u**2 * (((1
+ q))**2 * (-8 + (-20 + q) * q) + (8 * (-4 + q) * q * (1 + q) * u *
chieff + 16 * q**2 * u**2 * chieff**2)) + (-1 * q**2 * (((1 + q) *
S2**2 * u + q * chieff))**2 * ((-1 + q) * ((1 + q))**2 * (-1 + (q +
48 * S2**2 * u**2)) + (8 * q * (1 + q) * (5 + q) * u * chieff + 16 *
q**2 * u**2 * chieff**2)) + (2 * q**2 * ((1 + q))**2 * S1**4 * u**2 *
((-1 + q) * ((1 + q))**2 * ((-1 + q) * (-3 + (30 * q + 4 * q**2)) +
-72 * (2 + (-2 + q) * q) * S2**2 * u**2) + (4 * q * (1 + q) * (-30 +
q * (39 + q * (-19 + 4 * q))) * u * chieff + -8 * q**2 * (6 + (-6 +
q) * q) * u**2 * chieff**2)) + (-4 * q * (-1 * (1 + q) * S2**2 * u +
-1 * q * chieff) * (-1 * ((-1 + q))**2 * ((1 + q))**3 * S2**2 * u *
(-10 + (q + 24 * S2**2 * u**2)) + (-1 * (-1 + q) * q * ((1 + q))**2 *
(-1 + (q + 4 * (1 + 2 * q) * S2**2 * u**2)) * chieff + (-8 * q**2 *
(1 + q) * u * (2 + (q + 2 * (-1 + q) * S2**2 * u**2)) * chieff**2 +
-16 * q**3 * u**2 * chieff**3))) + (q * (1 + q) * S1**2 * ((-1 + q) *
((1 + q))**3 * (((-1 + q))**3 * q + (4 * (-1 + q) * (15 + q * (-29 +
15 * q)) * S2**2 * u**2 + 144 * (1 + 2 * (-1 + q) * q) * S2**4 *
u**4)) + (2 * q * ((1 + q))**2 * u * (((-1 + q))**2 * (-3 + q * (23 +
4 * q)) + 12 * (1 + q) * (1 + q**2) * S2**2 * u**2) * chieff + (8 *
q**2 * (1 + q) * u**2 * (-12 + (-2 * q + (-11 * q**2 + (q**3 + 4 * (3
+ q * (-5 + 3 * q)) * S2**2 * u**2)))) * chieff**2 + -32 * q**3 * (3
+ (-1 + q) * q) * u**3 * chieff**3))) + (S2**2 * (((-1 + q**2))**4 +
(2 * ((-1 + q))**2 * q * ((1 + q))**3 * (4 + 5 * q) * u * chieff + (8
* (-1 + q) * q**2 * ((1 + q))**2 * (-1 + 4 * q) * u**2 * chieff**2 +
32 * q**3 * (-1 + q**2) * u**3 * chieff**3))) + -1 * q**2 * chieff**2
* (1 + q * (8 * u * chieff + q * (-2 + (16 * u * chieff + ((q + 4 * u
* chieff))**2))))))))))))
# Machine generated with polycoefficients.nb
coeff1 = -16 * (1 + q) * (-16 * ((-1 + q))**2 * q**3 * ((1 + q))**5 *
(-5 + 2 * q) * S1**8 * u**5 + (-4 * (-1 + q) * q**2 * ((1 + q))**3 *
S1**6 * u**3 * ((-1 + q) * ((1 + q))**2 * (-1 + (15 * q + (4 * q**2 +
8 * (6 + (-1 + q) * q) * S2**2 * u**2))) + (2 * q * (1 + q) * (20 + q
* (-29 + 12 * q)) * u * chieff + -8 * (-2 + q) * q**2 * u**2 *
chieff**2)) + (-2 * q * (((1 + q) * S2**2 * u + q * chieff))**2 * (-1
* ((-1 + q))**2 * ((1 + q))**3 * S2**2 * u * (-10 + (q + 24 * S2**2 *
u**2)) + (-1 * (-1 + q) * q * ((1 + q))**2 * (-1 + (q + 4 * (1 + 2 *
q) * S2**2 * u**2)) * chieff + (-8 * q**2 * (1 + q) * u * (2 + (q + 2
* (-1 + q) * S2**2 * u**2)) * chieff**2 + -16 * q**3 * u**2 *
chieff**3))) + (-2 * q * ((1 + q))**2 * S1**4 * u * (((-1 + q))**2 *
((1 + q))**3 * (((-1 + q))**2 * q + (2 * (15 + q * (-55 + 2 * q * (9
+ 2 * q))) * S2**2 * u**2 + -72 * (1 + q**2) * S2**4 * u**4)) + ((-1
+ q) * q * ((1 + q))**2 * u * (3 + (-52 * q + (33 * q**2 + (16 * q**3
+ 4 * (-3 + 2 * q**2 * (-7 + 4 * q)) * S2**2 * u**2)))) * chieff +
(-8 * q**2 * (1 + q) * u**2 * (6 + (-16 * q + (18 * q**2 + (-5 * q**3
+ 2 * (-1 + q) * (3 + (-1 + q) * q) * S2**2 * u**2)))) * chieff**2 +
-16 * q**3 * (3 + q * (-5 + 3 * q)) * u**3 * chieff**3))) + (S1**2 *
(-32 * ((-1 + q))**2 * ((1 + q))**5 * (1 + q * (-1 + 6 * q)) * S2**6
* u**5 + (-4 * (-1 + q) * ((1 + q))**3 * S2**4 * u**3 * ((-1 + q) *
((1 + q))**2 * (4 + q * (18 + 5 * q * (-11 + 3 * q))) + (2 * q * (1 +
q) * (-8 + (14 * q + 3 * q**3)) * u * chieff + 8 * q**2 * (1 + q *
(-1 + 3 * q)) * u**2 * chieff**2)) + (2 * ((1 + q))**3 * S2**2 * u *
(-1 * ((-1 + q))**4 * ((1 + q))**2 * (1 + (-12 + q) * q) + (-2 * q *
((-1 + q**2))**2 * (4 + q * (-7 + 4 * q)) * u * chieff + (-8 * q**2 *
(1 + q * (-8 + q * (20 + (-8 + q) * q))) * u**2 * chieff**2 + 16 *
(-2 + q) * q**3 * (-1 + 2 * q) * u**3 * chieff**3))) + 2 * q**2 *
chieff * (-1 * ((-1 + q**2))**4 + (-1 * ((-1 + q))**2 * ((1 + q))**3
* (-1 + q * (18 + 7 * q)) * u * chieff + (4 * q * ((1 + q))**2 * (2 +
q * (-5 + 19 * q)) * u**2 * chieff**2 + 16 * q**2 * (1 + q**2 * (2 +
3 * q)) * u**3 * chieff**3)))))) + -2 * (-1 * (1 + q) * S2**2 * u +
-1 * q * chieff) * (16 * ((-1 + q))**3 * ((1 + q))**4 * S2**6 * u**4
+ (((-1 + q**2))**2 * S2**4 * u**2 * (((1 + q))**2 * (-8 + (-20 + q)
* q) + (8 * (-4 + q) * q * (1 + q) * u * chieff + 16 * q**2 * u**2 *
chieff**2)) + (S2**2 * (-1 * ((-1 + q**2))**4 + (-2 * ((-1 + q))**2 *
q * ((1 + q))**3 * (4 + 5 * q) * u * chieff + (-8 * (-1 + q) * q**2 *
((1 + q))**2 * (-1 + 4 * q) * u**2 * chieff**2 + -32 * q**3 * (-1 +
q**2) * u**3 * chieff**3))) + q**2 * chieff**2 * (1 + q * (8 * u *
chieff + q * (-2 + (16 * u * chieff + ((q + 4 * u *
chieff))**2))))))))))))
# Machine generated with polycoefficients.nb
coeff0 = -16 * (16 * ((-1 + q))**3 * q**3 * ((1 + q))**6 * S1**10 *
u**6 + (-1 * ((-1 + q))**2 * q**2 * ((1 + q))**4 * S1**8 * u**4 *
(((1 + q))**2 * (1 + (-20 * q + (-8 * q**2 + 16 * (-3 + (q + 2 *
q**2)) * S2**2 * u**2))) + (-8 * q * (1 + q) * (-5 + 8 * q) * u *
chieff + 16 * q**2 * u**2 * chieff**2)) + ((-1 + q) * q * ((1 +
q))**3 * S1**6 * u**2 * (q * ((-1 + q**2))**3 + (-4 * (-1 + q) * ((1
+ q))**3 * (-5 + q * (27 + q * (-3 + 8 * q))) * S2**2 * u**2 + (16 *
((-1 + q))**2 * ((1 + q))**3 * (3 + q * (6 + q)) * S2**4 * u**4 + (-2
* (-1 + q) * q * ((1 + q))**2 * u * (1 + (-25 * q + (-12 * q**2 + 4 *
(-1 + (q + 12 * q**2)) * S2**2 * u**2))) * chieff + (8 * q**2 * (1 +
q) * u**2 * (4 + (-18 * q + (11 * q**2 + 4 * (-1 + q**2) * S2**2 *
u**2))) * chieff**2 + 32 * (1 + -2 * q) * q**3 * u**3 *
chieff**3))))) + (((1 + q))**2 * S1**4 * u * (-16 * ((-1 + q))**3 *
((1 + q))**4 * (1 + 3 * q * (2 + q)) * S2**6 * u**5 + (2 * S2**4 *
u**3 * (((-1 + q))**2 * ((1 + q))**4 * (4 + q * (6 + q * (61 + (6 * q
+ 4 * q**2)))) + (4 * ((-1 + q))**2 * q * ((1 + q))**4 * (4 + (q + 4
* q**2)) * u * chieff + -8 * q**2 * ((-1 + q**2))**2 * (1 + q * (4 +
q)) * u**2 * chieff**2)) + (chieff * (2 * ((-1 + q))**4 * q**2 * ((1
+ q))**3 + (((q + -1 * q**3))**2 * (-1 + q * (40 + 23 * q)) * u *
chieff + (8 * q**3 * (1 + q) * (-1 + q * (14 + 5 * (-4 + q) * q)) *
u**2 * chieff**2 + -16 * q**4 * (1 + 6 * (-1 + q) * q) * u**3 *
chieff**3))) + (-1 + q) * (1 + q) * S2**2 * u * (-1 * ((-1 +
q**2))**3 * (-1 + 2 * q * (12 + 5 * q)) + (-2 * (-1 + q) * q * ((1 +
q))**2 * (-4 + q * (29 + q * (-21 + 32 * q))) * u * chieff + (-8 *
q**2 * (1 + q) * (1 + 2 * (-2 + q) * q * (1 + 4 * q)) * u**2 *
chieff**2 + 32 * q**3 * (1 + q * (-1 + 3 * q)) * u**3 *
chieff**3)))))) + ((1 + q) * S1**2 * (16 * ((-1 + q))**3 * ((1 +
q))**5 * (2 + 3 * q) * S2**8 * u**6 + (q**2 * chieff**2 * (((-1 +
q))**4 * ((1 + q))**3 + (2 * q * (5 + 3 * q) * ((-1 + q**2))**2 * u *
chieff + (-8 * q**2 * (1 + q) * (-4 + q * (7 + q)) * u**2 * chieff**2
+ 32 * (1 + -2 * q) * q**3 * u**3 * chieff**3))) + ((-1 + q) * ((1 +
q))**2 * S2**4 * u**2 * ((-10 + (-24 + q) * q) * ((-1 + q**2))**3 +
(2 * (-1 + q) * q * ((1 + q))**2 * (-32 + q * (21 + q * (-29 + 4 *
q))) * u * chieff + (8 * q**2 * (1 + q) * (8 + q * (-14 + (-4 + q) *
q)) * u**2 * chieff**2 + -32 * q**3 * (3 + (-1 + q) * q) * u**3 *
chieff**3))) + (S2**2 * (-1 * ((-1 + q))**6 * ((1 + q))**5 + (-10 *
((-1 + q))**4 * q * ((1 + q))**5 * u * chieff + (-2 * ((-1 + q))**2 *
q**2 * ((1 + q))**3 * (11 + q * (-24 + 11 * q)) * u**2 * chieff**2 +
(16 * q**3 * ((1 + q))**3 * (2 + q * (-3 + 2 * q)) * u**3 * chieff**3
+ 32 * q**4 * (1 + q) * (3 + q * (-5 + 3 * q)) * u**4 * chieff**4))))
+ 4 * ((-1 + q))**2 * ((1 + q))**4 * S2**6 * u**4 * (-8 + q * (-5 +
(-24 * q + (-22 * q**2 + (5 * q**3 + (2 * (-4 + q) * (3 + q) * u *
chieff + 8 * q * u**2 * chieff**2)))))))))) + -1 * (((1 + q) * S2**2
* u + q * chieff))**2 * (16 * ((-1 + q))**3 * ((1 + q))**4 * S2**6 *
u**4 + (((-1 + q**2))**2 * S2**4 * u**2 * (((1 + q))**2 * (-8 + (-20
+ q) * q) + (8 * (-4 + q) * q * (1 + q) * u * chieff + 16 * q**2 *
u**2 * chieff**2)) + (S2**2 * (-1 * ((-1 + q**2))**4 + (-2 * ((-1 +
q))**2 * q * ((1 + q))**3 * (4 + 5 * q) * u * chieff + (-8 * (-1 + q)
* q**2 * ((1 + q))**2 * (-1 + 4 * q) * u**2 * chieff**2 + -32 * q**3
* (-1 + q**2) * u**3 * chieff**3))) + q**2 * chieff**2 * (1 + q * (8
* u * chieff + q * (-2 + (16 * u * chieff + ((q + 4 * u *
chieff))**2))))))))))))
return np.stack([coeff5, coeff4, coeff3, coeff2, coeff1, coeff0])
def kapparesonances(u, chieff, q, chi1, chi2):
"""
Regularized angular momentum of the two spin-orbit resonances. The resonances minimize and maximize kappa for a given value of chieff. The minimum corresponds to deltaphi=pi and the maximum corresponds to deltaphi=0.
Call
----
kappamin,kappamax = kapparesonances(u,chieff,q,chi1,chi2)
Parameters
----------
u: float
Compactified separation 1/(2L).
chieff: float
Effective spin.
q: float
Mass ratio: 0<=q<=1.
chi1: float
Dimensionless spin of the primary (heavier) black hole: 0<=chi1<=1.
chi2: float
Dimensionless spin of the secondary (lighter) black hole: 0<=chi2<=1.
Returns
-------
kappamin: float
Minimum value of the regularized angular momentum kappa.
kappamax: float
Maximum value of the regularized angular momentum kappa.
"""
u = np.atleast_1d(u)
chieff = np.atleast_1d(chieff)
q = np.atleast_1d(q)
chi1 = np.atleast_1d(chi1)
chi2 = np.atleast_1d(chi2)
kapparoots = wraproots(kappadiscriminant_coefficients, u, chieff, q, chi1, chi2)
# There are in principle five solutions, but only two are physical.
def _compute(kapparoots, u, chieff, q, chi1, chi2):
kapparoots = kapparoots[np.isfinite(kapparoots)]
Sroots = Satresonance(kappa=kapparoots, u=np.tile(u, kapparoots.shape), chieff=np.tile(chieff, kapparoots.shape), q=np.tile(q, kapparoots.shape), chi1=np.tile(chi1, kapparoots.shape), chi2=np.tile(chi2, kapparoots.shape))
Smin, Smax = Slimits_S1S2(np.tile(q, kapparoots.shape), np.tile(chi1, kapparoots.shape), np.tile(chi2, kapparoots.shape))
kappares = kapparoots[np.logical_and(Sroots > Smin, Sroots < Smax)]
assert len(kappares) <= 2, "I found more than two resonances, this should not be possible."
# If you didn't find enough solutions, append nans
kappares = np.concatenate([kappares, np.repeat(np.nan, 2-len(kappares))])
return kappares
kappamin, kappamax = np.array(list(map(_compute, kapparoots, u, chieff, q, chi1, chi2))).T
return np.stack([kappamin, kappamax])
def kappainfresonances(chieff, q, chi1, chi2):
"""
Asymptotic regularized angular momentum of the two spin-orbit resonances at infinitely large separation. The resonances minimize and maximize kappa for a given value of chieff. The minimum corresponds to deltaphi=pi and the maximum corresponds to deltaphi=0.
Call
----
kappainfmin,kappainfmax = kappainfresonances(chieff,q,chi1,chi2)
Parameters
----------
chieff: float
Effective spin.
q: float
Mass ratio: 0<=q<=1.
chi1: float
Dimensionless spin of the primary (heavier) black hole: 0<=chi1<=1.
chi2: float
Dimensionless spin of the secondary (lighter) black hole: 0<=chi2<=1.
Returns
-------
kappainfmin: float
Minimum value of the asymptotic angular momentum kappainf.
kappainfmax: float
Maximum value of the asymptotic angular momentum kappainf.
"""
chieff = np.atleast_1d(chieff)
q = np.atleast_1d(q)
S1, S2 = spinmags(q, chi1, chi2)
kappainfmin = np.maximum((chieff - (q**-1-q)*S2)/(1+q), (chieff - (q**-1-q)*S1)/(1+q**-1))
kappainfmax = np.minimum((chieff + (q**-1-q)*S2)/(1+q), (chieff + (q**-1-q)*S1)/(1+q**-1))
return np.stack([kappainfmin, kappainfmax])
def Jresonances(r, chieff, q, chi1, chi2):
"""
Total angular momentum of the two spin-orbit resonances. The resonances minimize and maximize J for a given value of chieff. The minimum corresponds to deltaphi=pi and the maximum corresponds to deltaphi=0.
Call
----
Jmin,Jmax = Jresonances(r,chieff,q,chi1,chi2)
Parameters
----------
r: float
Binary separation.
chieff: float
Effective spin.
q: float
Mass ratio: 0<=q<=1.
chi1: float
Dimensionless spin of the primary (heavier) black hole: 0<=chi1<=1.
chi2: float
Dimensionless spin of the secondary (lighter) black hole: 0<=chi2<=1.
Returns
-------
Jmin: float
Minimum value of the total angular momentum J.
Jmax: float
Maximum value of the total angular momentum J.
"""
u = eval_u(r, q)
kappamin, kappamax = kapparesonances(u, chieff, q, chi1, chi2)
Jmin = eval_J(kappa=kappamin, r=r, q=q)
Jmax = eval_J(kappa=kappamax, r=r, q=q)
return
|
np.stack([Jmin, Jmax])
|
numpy.stack
|
# -*- coding: utf-8 -*-
"""
This script performs post-hoc analysis and visualization of classification
performance for patient subgroups (Schizophrenia Spectrum only: SZ and schizophreniform).
Unless otherwise specified, all results refer to the Schizophrenia Spectrum.
"""
#%%
import sys
sys.path.append(r'D:\My_Codes\LC_Machine_Learning\lc_rsfmri_tools\lc_rsfmri_tools_python')
sys.path.append(r'D:\My_Codes\easylearn\eslearn\statistics')
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
from matplotlib.pyplot import MultipleLocator
import pickle
from lc_binomialtest import binomial_test
#%% Inputs
scale_550_file = r'D:\WorkStation_2018\SZ_classification\Scale\10-24大表.xlsx'
scale_206_file = r'D:\WorkStation_2018\SZ_classification\Scale\北大精分人口学及其它资料\SZ_NC_108_100-WF.csv'
scale_206_drug_file = r'D:\WorkStation_2018\SZ_classification\Scale\北大精分人口学及其它资料\SZ_109_drug.xlsx'
classification_results_pooling_file = r'D:\WorkStation_2018\SZ_classification\Data\ML_data_npy\results_pooling.npy'
classification_results_results_leave_one_site_cv_file = r'D:\WorkStation_2018\SZ_classification\Data\ML_data_npy\results_leave_one_site_cv.npy'
classification_results_feu_file = r'D:\WorkStation_2018\SZ_classification\Data\ML_data_npy\results_unmedicated_and_firstepisode_550.npy'
is_plot = 1
is_savefig = 1
#%% Load and preprocess
scale_550 = pd.read_excel(scale_550_file)
scale_206 = pd.read_csv(scale_206_file)
scale_206_drug = pd.read_excel(scale_206_drug_file)
results_pooling = np.load(classification_results_pooling_file, allow_pickle=True)
results_leave_one_site_cv = np.load(classification_results_results_leave_one_site_cv_file, allow_pickle=True)
results_feu = np.load(classification_results_feu_file, allow_pickle=True)
results_special = results_pooling['special_result']
results_special = pd.DataFrame(results_special)
results_special.iloc[:, 0] = np.int32(results_special.iloc[:, 0])
scale_206['ID'] = scale_206['ID'].str.replace('NC','10')
scale_206['ID'] = scale_206['ID'].str.replace('SZ','20')
scale_206['ID'] = np.int32(scale_206['ID'])
scale_550['folder'] = np.int32(scale_550['folder'])
scale_206_drug['P0001'] = scale_206_drug['P0001'].str.replace('NC','10')
scale_206_drug['P0001'] = scale_206_drug['P0001'].str.replace('SZ','20')
scale_206_drug['P0001'] = np.int32(scale_206_drug['P0001'])
# Filter subjects that have .mat files
scale_550_selected = pd.merge(results_special, scale_550, left_on=0, right_on='folder', how='inner')
scale_206_selected = pd.merge(results_special, scale_206, left_on=0, right_on='ID', how='inner')
scale_206_selected = pd.merge(scale_206_selected, scale_206_drug, left_on=0, right_on='P0001', how='inner')
#%% ---------------------------------Calculate performance for Schizophrenia Spectrum subgroups-------------------------------
## Step 1: Dataset1
duration = 18  # Upper limit of first-episode duration (months)
""" reference:
1. <NAME>, <NAME>, Schooler NR, et al. Comprehensive versus usual
community care for first-episode psychosis: 2-year outcomes from the NIMH
RAISE early treatment program. Am J Psychiatry. 2016;173(4):362-372. doi:10.1176/appi.ajp.2015.15050632.
2. Cognitive Impairment in Never-Medicated Individuals on the Schizophrenia Spectrum. doi:10.1001/jamapsychiatry.2020.0001"
"""
data_firstepisode_SZ_550 = scale_550_selected[(scale_550_selected['诊断']==3) & (scale_550_selected['首发'] == 1) & (scale_550_selected['病程月'] <= duration) & (scale_550_selected['病程月'] >= 6)]
data_not_firstepisode_SZ_550 = scale_550_selected[(scale_550_selected['诊断']==3) & ((scale_550_selected['首发'] == 0) | (scale_550_selected['病程月'] > duration))] # Including the persistent patients
data_schizophreniform_550 = scale_550_selected[(scale_550_selected['诊断']==3) & (scale_550_selected['病程月'] < 6)]
data_shortdurationSZ_550 = scale_550_selected[(scale_550_selected['诊断']==3) & (scale_550_selected['病程月'] <= duration) & (scale_550_selected['病程月'] >= 6)]
data_longdurationSZ_550 = scale_550_selected[(scale_550_selected['诊断']==3) & (scale_550_selected['病程月'] > duration)]
onsetage_all_550 = scale_550_selected['Age_of_first_episode'][scale_550_selected['诊断']==3].dropna()
ind_young_onsetage_550 = onsetage_all_550.index[onsetage_all_550.values <= np.median(onsetage_all_550)]
ind_old_onsetage_550 = onsetage_all_550.index[onsetage_all_550.values > np.median(onsetage_all_550)]
data_young_onset_age_550 = scale_550_selected[scale_550_selected['诊断']==3].loc[ind_young_onsetage_550]
data_old_onset_age_550 = scale_550_selected[scale_550_selected['诊断']==3].loc[ind_old_onsetage_550]
data_medicated_SSD_550 = scale_550_selected[(scale_550_selected['诊断']==3) & (scale_550_selected['用药'] == 1)]
data_unmedicated_SSD_550 = scale_550_selected[(scale_550_selected['诊断']==3) & (scale_550_selected['用药'] == 0) ]
# First episode and never medicated
data_unmedicated_schizophreniform_550 = scale_550_selected[(scale_550_selected['诊断']==3) &
(scale_550_selected['病程月'] < 6) &
(scale_550_selected['用药'] == 0)]
data_unmedicated_SZ_550 = scale_550_selected[(scale_550_selected['诊断']==3) &
(scale_550_selected['病程月'] >= 6) &
(scale_550_selected['用药'] == 0)]
data_firstepisode_unmedicated_SZ_550 = scale_550_selected[(scale_550_selected['诊断']==3) &
(scale_550_selected['首发'] == 1) &
(scale_550_selected['病程月'] <= duration) &
(scale_550_selected['病程月'] >= 6) &
(scale_550_selected['用药'] == 0)]
data_chronic_unmedicated_SZ_550 = scale_550_selected[(scale_550_selected['诊断']==3) &
(scale_550_selected['病程月'] > duration) &
(scale_550_selected['用药'] == 0)]
# data_unmedicated_SSD_550['folder'].to_csv(r'D:\WorkStation_2018\WorkStation_CNN_Schizo\Scale\feu_63.txt', index=False)
## Calculating Accuracy
acc_firstepisode_SZ_550 = np.sum(data_firstepisode_SZ_550[1]-data_firstepisode_SZ_550[3]==0)/len(data_firstepisode_SZ_550)
acc_not_firstepisode_SZ_550 = np.sum(data_not_firstepisode_SZ_550[1]-data_not_firstepisode_SZ_550[3]==0)/len(data_not_firstepisode_SZ_550)
acc_schizophreniform_550 = np.sum(data_schizophreniform_550[1]-data_schizophreniform_550[3]==0)/len(data_schizophreniform_550)
acc_shortduration_550 = np.sum(data_shortdurationSZ_550[1]-data_shortdurationSZ_550[3]==0)/len(data_shortdurationSZ_550)
acc_longduration_550 = np.sum(data_longdurationSZ_550[1]-data_longdurationSZ_550[3]==0)/len(data_longdurationSZ_550)
acc_young_onsetage_550 = np.sum(data_young_onset_age_550[1]-data_young_onset_age_550[3]==0)/len(data_young_onset_age_550)
acc_old_onsetage_550 = np.sum(data_old_onset_age_550[1]-data_old_onset_age_550[3]==0)/len(data_old_onset_age_550)
acc_medicated_SSD_550 = np.sum(data_medicated_SSD_550[1]-data_medicated_SSD_550[3]==0)/len(data_medicated_SSD_550)
acc_ummedicated_SSD_550 = np.sum(data_unmedicated_SSD_550[1]-data_unmedicated_SSD_550[3]==0)/len(data_unmedicated_SSD_550)
acc_unmedicated_schizophreniform_550 = np.sum(data_unmedicated_schizophreniform_550[1]-data_unmedicated_schizophreniform_550[3]==0) / len(data_unmedicated_schizophreniform_550)
acc_unmedicated_SZ_550 = np.sum(data_unmedicated_SZ_550[1]-data_unmedicated_SZ_550[3]==0) / len(data_unmedicated_SZ_550)
acc_firstepisode_unmedicated_SZ_550 = np.sum(data_firstepisode_unmedicated_SZ_550[1]-data_firstepisode_unmedicated_SZ_550[3]==0) / len(data_firstepisode_unmedicated_SZ_550)
acc_chronic_unmedicated_SZ_550 = np.sum(data_chronic_unmedicated_SZ_550[1]-data_chronic_unmedicated_SZ_550[3]==0) / len(data_chronic_unmedicated_SZ_550)
print(f'Accuracy of firstepisode in dataset550 = {acc_firstepisode_SZ_550}')
print(f'Accuracy of none-firstepisode in dataset550 = {acc_not_firstepisode_SZ_550}')
print(f'Accuracy of schizophreniform in dataset550 = {acc_schizophreniform_550}')
print(f'Accuracy of shortduration in dataset550 = {acc_shortduration_550}')
print(f'Accuracy of longduration in dataset550 = {acc_longduration_550}')
print(f'Accuracy of young onsetage of 550 = {acc_young_onsetage_550}')
print(f'Accuracy of old onsetage of 550 = {acc_old_onsetage_550}')
print(f'Accuracy of medicated SSD in dataset550 = {acc_medicated_SSD_550}')
print(f'Accuracy of ummedicated_SSD in dataset550 = {acc_ummedicated_SSD_550}')
print(f'Accuracy of firstepisode unmedicated SZ in dataset550 = {acc_firstepisode_unmedicated_SZ_550}')
print('-'*50)
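# Added sketch (not part of the original analysis): the accuracy pattern above
# can be wrapped in a small helper; per that pattern, columns 1 and 3 of the
# merged frames hold the true and predicted labels.
def calc_subgroup_acc(df):
    return np.sum(df[1] - df[3] == 0) / len(df)
# e.g. calc_subgroup_acc(data_firstepisode_SZ_550) reproduces acc_firstepisode_SZ_550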
# Step 2: Dataset 2
## Preprocessing
scale_206_selected['duration'] = [np.int32(duration) if duration != ' ' else 10000 for duration in scale_206_selected['duration']]
scale_206_selected['firstepisode'] = [np.int32(firstepisode) if firstepisode != ' ' else 10000 for firstepisode in scale_206_selected['firstepisode']]
scale_206_selected['CPZ_eq'] = [np.int32(cpz) if cpz != ' ' else 0 for cpz in scale_206_selected['CPZ_eq']]
scale_206_selected['onsetage'] = [np.int32(age) if age != ' ' else 0 for age in scale_206_selected['onsetage']]
## Filter subgroups
data_firstepisode_206 = scale_206_selected[(scale_206_selected['group']==1) & (scale_206_selected['firstepisode'] == 1) & (scale_206_selected['duration'] <= duration)]
data_notfirstepisode_206 = scale_206_selected[(scale_206_selected['group']==1) & ((scale_206_selected['firstepisode'] == 0) | (scale_206_selected['duration'] > duration))]
data_shortduration_206 = scale_206_selected[(scale_206_selected['group']==1) & (scale_206_selected['duration'] <= duration)]
data_longduration_206 = scale_206_selected[(scale_206_selected['group']==1) & (scale_206_selected['duration'] > duration)]
onsetage = scale_206_selected['onsetage'][scale_206_selected['group']==1]
data_young_onsetage_206 = scale_206_selected[(scale_206_selected['group']==1) & (onsetage <= np.median(onsetage))]
data_old_onsetage_206 = scale_206_selected[(scale_206_selected['group']==1) & (onsetage > np.median(onsetage))]
CPZ_eq = scale_206_selected['CPZ_eq'][scale_206_selected['group']==1]
data_drugless_206 = scale_206_selected[(scale_206_selected['group']==1) & (CPZ_eq <= np.median(CPZ_eq))]
data_drugmore_206 = scale_206_selected[(scale_206_selected['group']==1) & (CPZ_eq > np.median(CPZ_eq))]
## Calculating accuracy
acc_firstepisode_206 = np.sum(data_firstepisode_206[1]-data_firstepisode_206[3]==0)/len(data_firstepisode_206)
acc_notfirstepisode_206 = np.sum(data_notfirstepisode_206[1]-data_notfirstepisode_206[3]==0)/len(data_notfirstepisode_206)
acc_shortduration_206 = np.sum(data_shortduration_206[1]-data_shortduration_206[3]==0)/len(data_shortduration_206)
acc_longduration_206 = np.sum(data_longduration_206[1]-data_longduration_206[3]==0)/len(data_longduration_206)
acc_young_onsetage_206 = np.sum(data_young_onsetage_206[1]-data_young_onsetage_206[3]==0)/len(data_young_onsetage_206)
acc_old_onsetage_206 =
|
np.sum(data_old_onsetage_206[1]-data_old_onsetage_206[3]==0)
|
numpy.sum
|
from .context import qosy as qy
import numpy as np
import scipy.sparse as ss
import scipy.sparse.linalg as ssla
def test_sign():
perm = np.array([0,1,2,3],dtype=int)
expected_sign = 1
sign = qy.tools.sign(perm)
assert(sign == expected_sign)
perm = np.array([1,0,2,3],dtype=int)
expected_sign = -1
sign = qy.tools.sign(perm)
assert(sign == expected_sign)
perm = np.array([1,0,3,2],dtype=int)
expected_sign = 1
sign = qy.tools.sign(perm)
assert(sign == expected_sign)
perm = np.array([2,4,1,3,0,5],dtype=int)
expected_sign = -1
sign = qy.tools.sign(perm)
assert(sign == expected_sign)
def test_sort_sign():
arr = np.array([-1,3,1])
expected_sorted_arr = np.sort(arr)
expected_sign = -1
(sorted_arr, sign) = qy.tools.sort_sign(arr)
assert(np.allclose(sorted_arr, expected_sorted_arr))
assert(sign == expected_sign)
arr = [-1, 3, 5., 1]
expected_sorted_arr = np.sort(arr)
expected_sign = 1
(sorted_arr, sign) = qy.tools.sort_sign(arr)
assert(np.allclose(np.array(sorted_arr), expected_sorted_arr))
assert(sign == expected_sign)
def test_sort_sign_mergesort():
np.random.seed(42)
num_trials = 100
arr_length = 100
for ind_trial in range(num_trials):
arr = 2.0*np.random.rand(arr_length) - 1.0
(sorted_arr1, sign1) = qy.tools.sort_sign(arr, method='insertionsort')
(sorted_arr2, sign2) = qy.tools.sort_sign(arr, method='mergesort')
assert(np.allclose(sorted_arr1, sorted_arr2))
assert(np.isclose(sign1, sign2))
def test_compare():
assert(qy.tools.compare((0,1), (0,)) > 0)
assert(qy.tools.compare((0,1), (0,1)) == 0)
assert(qy.tools.compare((0,1), (1,0)) < 0)
def test_swap():
assert(qy.tools.swap('A B C', 'A', 'B') == 'B A C')
assert(qy.tools.swap('1 2 3', '1', '4') == '4 2 3')
assert(qy.tools.swap('Up Up Dn', 'Up', 'Dn') == 'Dn Dn Up')
assert(qy.tools.swap('Up Up Dn', 'Dn', 'Up') == 'Dn Dn Up')
assert(qy.tools.swap('1 2 3', 'X', 'Y') == '1 2 3')
def test_replace():
assert(qy.tools.replace('ABC', {'A':'AB', 'B':'D', 'C':'AC'}) == 'ABDAC')
assert(qy.tools.replace(' 0 100 10 1 110', {' 0':'_{0}', ' 1' : '_{1}', ' 10' : '_{10}', ' 100' : '_{100}', ' 110' : '_{110}'}) == '_{0}_{100}_{10}_{1}_{110}')
def test_maximal_cliques():
# Toy graph on https://en.wikipedia.org/wiki/Bron%E2%80%93Kerbosch_algorithm
adjacency_lists = [[1,4], [0,2,4], [1,3], [2,4,5], [0,1,3], [3]]
maximal_cliques = qy.tools.maximal_cliques(adjacency_lists)
maximal_cliques_set = set([tuple(clique) for clique in maximal_cliques])
expected_maximal_cliques_set = set([(0,1,4), (1,2), (2,3), (3,4), (3,5)])
assert(maximal_cliques_set == expected_maximal_cliques_set)
def test_connected_components():
# Graph with two connected components, [0,1,2,3] and [4,5]
adjacency_lists = [[1,2], [0,2,3], [0,1], [1], [5], [4]]
c_components = qy.tools.connected_components(adjacency_lists, mode='BFS')
expected_c_comps = set([(0,1,2,3), (4,5)])
assert(len(c_components) == 2)
for c_comp in c_components:
c_comp_tuple = tuple(np.sort(c_comp))
assert(c_comp_tuple in expected_c_comps)
c_components = qy.tools.connected_components(adjacency_lists, mode='DFS')
expected_c_comps = set([(0,1,2,3), (4,5)])
assert(len(c_components) == 2)
for c_comp in c_components:
c_comp_tuple = tuple(np.sort(c_comp))
assert(c_comp_tuple in expected_c_comps)
def test_gram_schmidt():
# Do a simple check by hand.
matrix = np.array([[1., 1., 2.],\
[0., 1., -2.]])
vecs = qy.tools.gram_schmidt(matrix, tol=1e-12)
expected_vecs = np.array([[1., 0.],\
[0., 1.]])
assert(np.allclose(vecs, expected_vecs))
matrix = np.array([[1., 1., 2.],\
[1., 0., -2.]])
vecs = qy.tools.gram_schmidt(matrix, tol=1e-12)
expected_vecs = 1./np.sqrt(2.)*np.array([[1., 1.],\
[1., -1.]])
assert(np.allclose(vecs, expected_vecs))
matrix = np.array([[1., 1., 2.],\
[1., 0., -2.]])
vecs = qy.tools.gram_schmidt(ss.csc_matrix(matrix), tol=1e-12)
expected_vecs = 1./np.sqrt(2.)*np.array([[1., 1.],\
[1., -1.]])
assert(np.allclose(vecs.toarray(), expected_vecs))
# Do a simple complex number check by hand.
matrix = np.array([[1., 1.],\
[1.j, 1.]])
vecs = qy.tools.gram_schmidt(matrix, tol=1e-12)
expected_vecs = 1./np.sqrt(2.)*np.array([[1., 1.],\
[1j, -1j]])
# First vector agrees
assert(np.allclose(vecs[:,0], expected_vecs[:,0]))
# Second vector agrees up to a phase.
overlap = np.vdot(vecs[:,1], expected_vecs[:,1])
assert(np.isclose(np.abs(overlap), 1.0))
n = 10
m = 5
# For random real matrices, check that the
# orthogonalized vectors still span
# the same space.
num_trials = 50
|
np.random.seed(42)
|
numpy.random.seed
|
"""
This module contains functions related to time conversions
"""
# Standard library imports
from math import isclose
from functools import wraps
from time import process_time
# Third party imports
import numpy as np
from numpy import cos, sin, sqrt, abs
from toolz import valmap
from numba import jit
# Local application imports
from .constants import SECONDS_IN_DAY, AU_m
#https://en.wikipedia.org/wiki/Standard_gravitational_parameter
mu_m3s_2__by_name = {
"Sun" : 1.32712440018e20,
"Mercury" : 2.2032e13,
"Venus" :3.24859e14,
"Earth": 3.98604418e14,
"Mars" : 4.282837e13,
"Jupiter" : 1.26686534e17,
"Saturn" : 3.7931187e16,
"Uranus" : 5.793939e15,
"Neptune" : 6.836529e15,
"Pluto" : 8.71e11,
"Ceres" : 6.26325e10,
"Eris" : 1.108e12
}
PERTURBING_PLANETS = mu_m3s_2__by_name.keys() - ["Sun","Ceres","Eris"]
def to_AU_days(mu_m3s_2):
return mu_m3s_2 * SECONDS_IN_DAY*SECONDS_IN_DAY/(AU_m*AU_m*AU_m)
# Gravitational parameters in AU/days
mu_by_name = valmap(to_AU_days,mu_m3s_2__by_name)
mu_Sun=0.0002959122082322128 #mu_by_name["Sun"] = GM
class NoConvergenceError(Exception):
"""Exception raised when the newton method does not converge
Attributes:
root -- value of the root estimate after the final iteration
function_calls -- number of function evaluations performed
iterations -- number of iterations performed
"""
def __init__(self, root, function_calls,iterations, M=-1000, message="No convergence error"):
self.message = message
self.M = M
self.root = root
self.function_calls = function_calls
self.iterations = iterations
super().__init__(self.message)
def kahan_sum(xs):
"""[summary]
Parameters
----------
xs : [type]
[description]
Returns
-------
[type]
[description]
"""
s = 0.; e = 0.
for x in xs:
temp = s
y = x + e
s = temp + y
e = (temp - s) + y
return s
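# Illustrative sketch (added; helper name hypothetical): compensated summation
# stays close to the exactly rounded result from math.fsum, while the naive
# built-in sum drifts.
def _example_kahan_sum():
    import math
    xs = [0.1] * 1000
    return sum(xs), kahan_sum(xs), math.fsum(xs)  # naive sum drifts below 100.0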
class KahanAdder:
"""[summary]
"""
def __init__(self):
self.s = 0.
self.e = 0.
self.prev_s = 0.
def add(self, value):
temp = self.s
y = value + self.e
self.prev_s = self.s
self.s = temp + y
self.e = (temp-self.s) + y
def add_values(self, values):
for value in values:
temp = self.s
y = value + self.e
self.s = temp + y
self.e = (temp-self.s) + y
def result(self):
return self.s
def converged(self, atol=1.e-10):
return
|
abs(self.s - self.prev_s)
|
numpy.abs
|
#!/usr/bin/python
import json
import numpy as np
import pandas as pd
from scipy.optimize import differential_evolution, minimize
from src.models.utils import choose, egreedy, softmax
class ChoiceModel(object):
"""Base class for probabilistic choice models
Contains methods shared across models to
1) simulate choices (`simulate_choices`)
2) compute negative log-likelihood (`compute_nll`)
3) perform parameter estimation (`fit`)
"""
def __init__(self):
super(ChoiceModel, self).__init__()
def simulate_choices(self, parameters):
"""For given parameters, predict choice probabilities and generate choices from them."""
choices = choose(self.predict_choiceprobs(parameters))
return choices
def recover(self, parameters_gen=None, **fit_kwargs):
if parameters_gen is None:
print(
"No `parameters_gen` for simulation given. Trying to estimate some from attached data..."
)
parameters_gen, nll = self.fit(**fit_kwargs)
# Simulate synthetic data
self.choices = self.simulate_choices(parameters=parameters_gen)
# Re-estimate parameters
parameters_rec, nll = self.fit(**fit_kwargs)
# Format result
recovery_df = pd.DataFrame(
{
parameter_name + "_gen": parameter_gen
for parameter_name, parameter_gen in zip(
self.parameter_names, parameters_gen
)
},
index=[0],
)
for parameter_name, parameter_rec in zip(self.parameter_names, parameters_rec):
recovery_df[parameter_name + "_rec"] = parameter_rec
recovery_df["nll"] = nll
return recovery_df
def compute_nll(self, parameters, verbose=False, nonzeroconst=1e-6):
"""Compute negative log-likelihood of the data, given parameters."""
choiceprobs = self.predict_choiceprobs(parameters)
chosenprobs = choiceprobs[
np.arange(choiceprobs.shape[0]).astype(int), self.choices.astype(int)
]
nll = -np.sum(np.log(chosenprobs + nonzeroconst))
if verbose > 1:
print(
"\t",
"Subject",
self.subject,
"\t",
*np.round(parameters, 2),
"\tNLL",
np.round(nll, 2),
end="\r"
)
return nll
def fit(
self, method="minimize", n_runs=1, seed=None, verbose=False, **method_kwargs
):
"""Estimate best fitting parameters using maximum log-likelihood.
Parameters:
-----------
method : str, optional
Optimization method to use. Must be one of ['minimize', 'differential_evolution'], defaults to 'minimize'.
n_runs : int, optional
Number of optimization runs. Should probably be more than 1 if method='minimize'. Defaults to 1.
seed : int, optional
Random seed. Defaults to no seed.
verbose : int, optional
Verbosity toggle. Prints some stuff if > 0. Prints more stuff if > 1... Defaults to 0.
**method_kwargs : optional
Additional keyword arguments to be passed on to the optimizer.
Returns:
-------
tuple
(maximum-likelihood estimates, minimum negative log-likelihood)
"""
best_nll = np.inf
best_x = np.zeros(self.n_parameters) * np.nan
for run in range(n_runs):
if verbose > 0:
print(
"{}\tSubject {}\tRun {} of {} ({:.0f}%)".format(
self.label,
self.subject,
run + 1,
n_runs,
100 * (run + 1) / n_runs,
),
# end="\r",
)
if seed is not None:
if isinstance(self.subject, str):
subject_for_seed = int(self.subject.split("-")[0])
else:
subject_for_seed = self.subject
np.random.seed(seed * subject_for_seed + seed * run)
if method == "minimize":
x0 = [
np.random.uniform(*self.parameter_bounds[p])
for p in range(self.n_parameters)
]
result = minimize(
self.compute_nll,
x0=x0,
bounds=self.parameter_bounds,
**method_kwargs
)
elif method == "differential_evolution":
result = differential_evolution(
self.compute_nll, bounds=self.parameter_bounds, **method_kwargs
)
else:
raise ValueError(
'Unknown method "{}". Use "minimize" or "differential_evolution".'.format(
method
)
)
if result.fun < best_nll:
best_nll = result.fun
best_x = result.x
return best_x, best_nll
class ExpectedUtility(ChoiceModel):
"""Expected Utility model
Attributes:
choices (np.ndarray): Array of choices of type int
label (str, optional): Model label
outcomes (np.ndarray): Array (n_trials x n_alternatives) of option outcomes
probabilities (np.ndarray): Array (n_trials x n_alternatives) of outcome probabilities
parameter_bounds (list): List of tuples of parameter boundaries [(alpha_low, alpha_up), (beta_low, beta_up)]
"""
def __init__(
self,
data,
probability_cols=["p0", "p1"],
outcome_cols=["m0", "m1"],
label="EU",
parameter_names=["alpha", "beta"],
parameter_bounds=[(0, 5), (0, 50)],
):
super(ExpectedUtility, self).__init__()
self.data = data
self.probability_cols = probability_cols
self.probabilities = data[probability_cols].values
self.outcomes = data[outcome_cols].values
self.choices = data["choice"].values
self.label = label
self.parameter_names = parameter_names
self.parameter_bounds = parameter_bounds
self.n_parameters = len(parameter_names)
self.n_trials = len(data)
def predict_choiceprobs(self, parameters):
alpha, beta = parameters
utilities = self.probabilities * self.outcomes ** alpha
choiceprobs = softmax(beta * utilities)
return choiceprobs
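# Illustrative usage sketch (added; the helper name and the toy data frame are
# hypothetical, assuming pandas is imported as pd above):
def _example_expected_utility():
    df = pd.DataFrame({"p0": [0.8, 0.3], "p1": [0.2, 0.7],
                       "m0": [1.0, 5.0], "m1": [4.0, 2.0],
                       "choice": [0, 1]})
    model = ExpectedUtility(df)
    # Choice probabilities under alpha=1, beta=1: softmax over p * m**alpha.
    return model.predict_choiceprobs([1.0, 1.0])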
class WeightedAdditiveDN(ChoiceModel):
"""Weighted additive attribute model with divisive normalization
Attributes:
choices (np.ndarray): Array of choices of type int
label (str, optional): Model label
outcomes (np.ndarray): Array (n_trials x n_alternatives) of option outcomes
probabilities (np.ndarray): Array (n_trials x n_alternatives) of outcome probabilities
parameter_bounds (list): List of tuples of parameter boundaries [(alpha_low, alpha_up), (beta_low, beta_up)]
"""
def __init__(
self,
data,
probability_cols=["p0", "p1"],
outcome_cols=["m0", "m1"],
label="WA_dn",
parameter_names=["wp", "beta"],
parameter_bounds=[(0, 1), (0, 50)],
):
super(WeightedAdditiveDN, self).__init__()
self.data = data
self.probability_cols = probability_cols
self.probabilities = data[probability_cols].values
self.outcomes = data[outcome_cols].values
self.choices = data["choice"].values
self.label = label
self.parameter_names = parameter_names
self.parameter_bounds = parameter_bounds
self.n_parameters = len(parameter_names)
self.n_trials = len(data)
def predict_choiceprobs(self, parameters):
wp, beta = parameters
p_n = self.probabilities / self.probabilities.sum(axis=1, keepdims=True)
m_n = self.outcomes / self.outcomes.sum(axis=1, keepdims=True)
utilities = wp * p_n + (1 - wp) * m_n
choiceprobs = softmax(beta * utilities)
return choiceprobs
class WeightedAdditiveDNBLast(ChoiceModel):
"""Weighted additive attribute model with divisive normalization
Attributes:
choices (np.ndarray): Array of choices of type int
label (str, optional): Model label
outcomes (np.ndarray): Array (n_trials x n_alternatives) of option outcomes
probabilities (np.ndarray): Array (n_trials x n_alternatives) of outcome probabilities
parameter_bounds (list): List of tuples of parameter boundaries [(alpha_low, alpha_up), (beta_low, beta_up)]
"""
def __init__(
self,
data,
probability_cols=["p0", "p1"],
outcome_cols=["m0", "m1"],
last_stage_favours_col=["last_stage_favours"],
label="WA_dn_b-Last",
parameter_names=["wp", "beta", "b_last"],
parameter_bounds=[(0, 1), (0, 200), (-1, 1)],
):
super(WeightedAdditiveDNBLast, self).__init__()
self.data = data
self.probability_cols = probability_cols
self.probabilities = data[probability_cols].values
self.outcomes = data[outcome_cols].values
self.last_favours = (
data[last_stage_favours_col].fillna(2).astype(int).values.ravel()
)
self.choices = data["choice"].values
self.label = label
self.parameter_names = parameter_names
self.parameter_bounds = parameter_bounds
self.n_parameters = len(parameter_names)
self.n_trials = len(data)
def predict_choiceprobs(self, parameters):
wp, beta, b_last = parameters
# Biases
last_bias = np.zeros((self.outcomes.shape[0], self.outcomes.shape[1] + 1))
last_bias[np.arange(len(last_bias)), self.last_favours] = b_last
last_bias = last_bias[:, :2]
p_n = self.probabilities / self.probabilities.sum(axis=1, keepdims=True)
m_n = self.outcomes / self.outcomes.sum(axis=1, keepdims=True)
utilities = wp * p_n + (1 - wp) * m_n
choiceprobs = softmax(beta * (utilities + last_bias))
return choiceprobs
class WeightedAdditiveDNBLastBLonger(ChoiceModel):
"""Weighted additive attribute model with divisive normalization
Attributes:
choices (np.ndarray): Array of choices of type int
label (str, optional): Model label
outcomes (np.ndarray): Array (n_trials x n_alternatives) of option outcomes
probabilities (np.ndarray): Array (n_trials x n_alternatives) of outcome probabilities
parameter_bounds (list): List of tuples of parameter boundaries [(alpha_low, alpha_up), (beta_low, beta_up)]
"""
def __init__(
self,
data,
probability_cols=["p0", "p1"],
outcome_cols=["m0", "m1"],
last_stage_favours_col=["last_stage_favours"],
duration_favours_col=["duration_favours"],
label="WA_dn_b-Last_b-Longer",
parameter_names=["wp", "beta", "b_last", "b_longer"],
parameter_bounds=[(0, 1), (0, 200), (-1, 1), (-1, 1)],
):
super(WeightedAdditiveDNBLastBLonger, self).__init__()
self.data = data
self.probability_cols = probability_cols
self.probabilities = data[probability_cols].values
self.outcomes = data[outcome_cols].values
self.last_favours = (
data[last_stage_favours_col].fillna(2).astype(int).values.ravel()
)
self.longer = data[duration_favours_col].fillna(2).astype(int).values.ravel()
self.choices = data["choice"].values
self.label = label
self.parameter_names = parameter_names
self.parameter_bounds = parameter_bounds
self.n_parameters = len(parameter_names)
self.n_trials = len(data)
def predict_choiceprobs(self, parameters):
wp, beta, b_last, b_longer = parameters
# Biases
longer_bias = np.zeros((self.outcomes.shape[0], self.outcomes.shape[1] + 1))
longer_bias[np.arange(len(longer_bias)), self.longer] = b_longer
longer_bias = longer_bias[:, :2]
last_bias = np.zeros((self.outcomes.shape[0], self.outcomes.shape[1] + 1))
last_bias[np.arange(len(last_bias)), self.last_favours] = b_last
last_bias = last_bias[:, :2]
p_n = self.probabilities / self.probabilities.sum(axis=1, keepdims=True)
m_n = self.outcomes / self.outcomes.sum(axis=1, keepdims=True)
utilities = wp * p_n + (1 - wp) * m_n
choiceprobs = softmax(beta * (utilities + last_bias + longer_bias))
return choiceprobs
class WeightedAdditive(ChoiceModel):
"""Weighted additive attribute model with divisive normalization
Attributes:
choices (np.ndarray): Array of choices of type int
label (str, optional): Model label
outcomes (np.ndarray): Array (n_trials x n_alternatives) of option outcomes
probabilities (np.ndarray): Array (n_trials x n_alternatives) of outcome probabilities
parameter_bounds (list): List of tuples of parameter boundaries [(alpha_low, alpha_up), (beta_low, beta_up)]
"""
def __init__(
self,
data,
probability_cols=["p0", "p1"],
outcome_cols=["m0", "m1"],
label="WA",
parameter_names=["wp", "beta"],
parameter_bounds=[(0, 1), (0, 200)],
):
super(WeightedAdditive, self).__init__()
self.data = data
self.probability_cols = probability_cols
self.probabilities = data[probability_cols].values
self.outcomes = data[outcome_cols].values
self.choices = data["choice"].values
self.label = label
self.parameter_names = parameter_names
self.parameter_bounds = parameter_bounds
self.n_parameters = len(parameter_names)
self.n_trials = len(data)
def predict_choiceprobs(self, parameters):
wp, beta = parameters
utilities = wp * self.probabilities + (1 - wp) * self.outcomes
choiceprobs = softmax(beta * utilities)
return choiceprobs
class WeightedAdditiveBLastBLonger(ChoiceModel):
"""Weighted additive attribute model with divisive normalization
Attributes:
choices (np.ndarray): Array of choices of type int
label (str, optional): Model label
outcomes (np.ndarray): Array (n_trials x n_alternatives) of option outcomes
probabilities (np.ndarray): Array (n_trials x n_alternatives) of outcome probabilities
parameter_bounds (list): List of tuples of parameter boundaries [(alpha_low, alpha_up), (beta_low, beta_up)]
"""
def __init__(
self,
data,
probability_cols=["p0", "p1"],
outcome_cols=["m0", "m1"],
last_stage_favours_col=["last_stage_favours"],
duration_favours_col=["duration_favours"],
label="WA_b-Last_b-Longer",
parameter_names=["wp", "beta", "b_last", "b_longer"],
parameter_bounds=[(0, 1), (0, 200), (-1, 1), (-1, 1)],
):
super(WeightedAdditiveBLastBLonger, self).__init__()
self.data = data
self.probability_cols = probability_cols
self.probabilities = data[probability_cols].values
self.outcomes = data[outcome_cols].values
self.last_favours = (
data[last_stage_favours_col].fillna(2).astype(int).values.ravel()
)
self.longer = data[duration_favours_col].fillna(2).astype(int).values.ravel()
self.choices = data["choice"].values
self.label = label
self.parameter_names = parameter_names
self.parameter_bounds = parameter_bounds
self.n_parameters = len(parameter_names)
self.n_trials = len(data)
def predict_choiceprobs(self, parameters):
wp, beta, b_last, b_longer = parameters
# Biases
longer_bias = np.zeros((self.outcomes.shape[0], self.outcomes.shape[1] + 1))
longer_bias[np.arange(len(longer_bias)), self.longer] = b_longer
longer_bias = longer_bias[:, :2]
last_bias = np.zeros((self.outcomes.shape[0], self.outcomes.shape[1] + 1))
last_bias[np.arange(len(last_bias)), self.last_favours] = b_last
last_bias = last_bias[:, :2]
utilities = wp * self.probabilities + (1 - wp) * self.outcomes
choiceprobs = softmax(beta * (utilities + last_bias + longer_bias))
return choiceprobs
class WeightedAdditiveBLast(ChoiceModel):
"""Weighted additive attribute model with divisive normalization
Attributes:
choices (np.ndarray): Array of choices of type int
label (str, optional): Model label
outcomes (np.ndarray): Array (n_trials x n_alternatives) of option outcomes
probabilities (np.ndarray): Array (n_trials x n_alternatives) of outcome probabilities
parameter_bounds (list): List of tuples of parameter boundaries [(alpha_low, alpha_up), (beta_low, beta_up)]
"""
def __init__(
self,
data,
probability_cols=["p0", "p1"],
outcome_cols=["m0", "m1"],
last_stage_favours_col=["last_stage_favours"],
label="WA_b-Last",
parameter_names=["wp", "beta", "b_last"],
parameter_bounds=[(0, 1), (0, 200), (-1, 1)],
):
super(WeightedAdditiveBLast, self).__init__()
self.data = data
self.probability_cols = probability_cols
self.probabilities = data[probability_cols].values
self.outcomes = data[outcome_cols].values
self.last_favours = (
data[last_stage_favours_col].fillna(2).astype(int).values.ravel()
)
self.choices = data["choice"].values
self.label = label
self.parameter_names = parameter_names
self.parameter_bounds = parameter_bounds
self.n_parameters = len(parameter_names)
self.n_trials = len(data)
def predict_choiceprobs(self, parameters):
wp, beta, b_last = parameters
# Biases
last_bias = np.zeros((self.outcomes.shape[0], self.outcomes.shape[1] + 1))
last_bias[np.arange(len(last_bias)), self.last_favours] = b_last
last_bias = last_bias[:, :2]
utilities = wp * self.probabilities + (1 - wp) * self.outcomes
choiceprobs = softmax(beta * (utilities + last_bias))
return choiceprobs
class ExpectedUtilityDurWeighted(ChoiceModel):
"""Presentation duration weighted Expected Utility model
Attributes:
choices (np.ndarray): Array of choices of type int
label (str, optional): Model label
outcomes (np.ndarray): Array (n_trials x n_alternatives) of option outcomes
probabilities (np.ndarray): Array (n_trials x n_alternatives) of outcome probabilities
parameter_bounds (list): List of tuples of parameter boundaries [(alpha_low, alpha_up), (beta_low, beta_up)]
"""
def __init__(
self,
data,
probability_cols=["p0", "p1"],
outcome_cols=["m0", "m1"],
dur_cols=["g0", "g1"],
label="EU_dur-weighted",
parameter_names=["alpha", "beta", "theta"],
parameter_bounds=[(0, 5), (0, 200), (0, 1)],
):
super(ExpectedUtilityDurWeighted, self).__init__()
self.data = data
self.probability_cols = probability_cols
self.probabilities = data[probability_cols].values
self.durations = data[dur_cols].values
self.outcomes = data[outcome_cols].values
self.choices = data["choice"].values
self.label = label
self.parameter_names = parameter_names
self.parameter_bounds = parameter_bounds
self.n_parameters = len(parameter_names)
self.n_trials = len(data)
def predict_choiceprobs(self, parameters):
alpha, beta, theta = parameters
utilities = self.probabilities * self.outcomes ** alpha
biased_u = self.durations * utilities + (1 - self.durations) * theta * utilities
choiceprobs = softmax(beta * biased_u)
return choiceprobs
class OutcomeCutoff(ChoiceModel):
"""A heuristic model where the higher probability option is chosen unless one outcome reaches a threshold.
Uses epsilon greedy choice rule.
Attributes:
choices (np.ndarray): Array of choices of type int
label (str, optional): Model label
outcomes (np.ndarray): Array (n_trials x n_alternatives) of option outcomes
probabilities (np.ndarray): Array (n_trials x n_alternatives) of outcome probabilities
parameter_bounds (list): List of tuples of parameter boundaries [(alpha_low, alpha_up), (beta_low, beta_up)]
"""
def __init__(
self,
data,
probability_cols=["p0", "p1"],
outcome_cols=["m0", "m1"],
label="mCutoff",
parameter_names=["m_min", "epsilon"],
parameter_bounds=[(0, 10), (0, 0.5)],
):
super(OutcomeCutoff, self).__init__()
self.data = data
self.probability_cols = probability_cols
self.probabilities = data[probability_cols].values
self.outcomes = data[outcome_cols].values
self.choices = data["choice"].values
self.label = label
self.parameter_names = parameter_names
self.parameter_bounds = parameter_bounds
self.n_parameters = len(parameter_names)
self.n_trials = len(data)
def predict_choiceprobs(self, parameters):
m_min, epsilon = parameters
m_threshold_crossed = np.any(self.outcomes > m_min, axis=1)
choiceprobs = np.where(
m_threshold_crossed[:, None],
egreedy(self.outcomes, epsilon),
egreedy(self.probabilities, epsilon),
)
return choiceprobs
class ExpectedUtilityBLastBLonger(ChoiceModel):
"""Expected Utility model
Attributes:
choices (np.ndarray): Array of choices of type int
label (str, optional): Model label
outcomes (np.ndarray): Array (n_trials x n_alternatives) of option outcomes
probabilities (np.ndarray): Array (n_trials x n_alternatives) of outcome probabilities
parameter_bounds (list): List of tuples of parameter boundaries [(alpha_low, alpha_up), (beta_low, beta_up)]
"""
def __init__(
self,
data,
probability_cols=["p0", "p1"],
outcome_cols=["m0", "m1"],
last_stage_favours_col=["last_stage_favours"],
duration_favours_col=["duration_favours"],
label="EU_b-Last_b-Longer",
parameter_names=["alpha", "beta", "b_longer", "b_last"],
parameter_bounds=[(0, 5), (0, 200), (-1, 1), (-1, 1)],
):
super(ExpectedUtilityBLastBLonger, self).__init__()
self.data = data
self.probability_cols = probability_cols
self.probabilities = data[probability_cols].values
self.outcomes = data[outcome_cols].values
self.last_favours = (
data[last_stage_favours_col].fillna(2).astype(int).values.ravel()
)
self.longer = data[duration_favours_col].fillna(2).astype(int).values.ravel()
self.choices = data["choice"].values
self.label = label
self.parameter_names = parameter_names
self.parameter_bounds = parameter_bounds
self.n_parameters = len(parameter_names)
self.n_trials = len(data)
def predict_choiceprobs(self, parameters):
alpha, beta, b_longer, b_last = parameters
utilities = self.probabilities * self.outcomes ** alpha
# Biases
longer_bias = np.zeros((self.outcomes.shape[0], self.outcomes.shape[1] + 1))
longer_bias[np.arange(len(longer_bias)), self.longer] = b_longer
longer_bias = longer_bias[:, :2]
last_bias = np.zeros((self.outcomes.shape[0], self.outcomes.shape[1] + 1))
last_bias[np.arange(len(last_bias)), self.last_favours] = b_last
last_bias = last_bias[:, :2]
choiceprobs = softmax(beta * (utilities + longer_bias + last_bias))
return choiceprobs
class ExpectedUtilityBLast(ChoiceModel):
"""Expected Utility model
Attributes:
choices (np.ndarray): Array of choices of type int
label (str, optional): Model label
outcomes (np.ndarray): Array (n_trials x n_alternatives) of option outcomes
probabilities (np.ndarray): Array (n_trials x n_alternatives) of outcome probabilities
parameter_bounds (list): List of tuples of parameter boundaries [(alpha_low, alpha_up), (beta_low, beta_up)]
"""
def __init__(
self,
data,
probability_cols=["p0", "p1"],
outcome_cols=["m0", "m1"],
last_stage_favours_col=["last_stage_favours"],
label="EU_b-Last",
parameter_names=["alpha", "beta", "b_last"],
parameter_bounds=[(0, 5), (0, 200), (-1, 1)],
):
super(ExpectedUtilityBLast, self).__init__()
self.data = data
self.probability_cols = probability_cols
self.probabilities = data[probability_cols].values
self.outcomes = data[outcome_cols].values
self.last_favours = (
data[last_stage_favours_col].fillna(2).astype(int).values.ravel()
)
self.choices = data["choice"].values
self.label = label
self.parameter_names = parameter_names
self.parameter_bounds = parameter_bounds
self.n_parameters = len(parameter_names)
self.n_trials = len(data)
def predict_choiceprobs(self, parameters):
alpha, beta, b_last = parameters
utilities = self.probabilities * self.outcomes ** alpha
# Bias
last_bias = np.zeros((self.outcomes.shape[0], self.outcomes.shape[1] + 1))
last_bias[np.arange(len(last_bias)), self.last_favours] = b_last
last_bias = last_bias[:, :2]
choiceprobs = softmax(beta * (utilities + last_bias))
return choiceprobs
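# Illustration (not part of the original module): the bias-matrix construction
# used throughout these classes. last_stage_favours is 0 or 1 for the favoured
# alternative and 2 (the fillna default) when neither is favoured; writing
# b_last into a third, throw-away column and dropping it leaves those trials
# unbiased. Values below are hypothetical.
def _demo_last_bias(b_last=0.2):
    last_favours = np.array([0, 1, 2, 2])  # per-trial favoured alternative
    bias = np.zeros((len(last_favours), 3))
    bias[np.arange(len(last_favours)), last_favours] = b_last
    return bias[:, :2]  # rows with last_favours == 2 stay all-zero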
class OutcomeCutoffBLastBLonger(ChoiceModel):
"""A heuristic model where the higher probability option is chosen unless one outcome reaches a threshold.
Uses epsilon greedy choice rule.
Attributes:
choices (np.ndarray): Array of choices of type int
label (str, optional): Model label
outcomes (np.ndarray): Array (n_trials x n_alternatives) of option outcomes
probabilities (np.ndarray): Array (n_trials x n_alternatives) of outcome probabilities
parameter_bounds (list): List of tuples of parameter boundaries [(alpha_low, alpha_up), (beta_low, beta_up)]
"""
def __init__(
self,
data,
probability_cols=["p0", "p1"],
outcome_cols=["m0", "m1"],
last_stage_favours_col=["last_stage_favours"],
duration_favours_col=["duration_favours"],
label="mCutoff_b-Last_b-Longer",
parameter_names=["m_min", "epsilon", "b_longer", "b_last"],
parameter_bounds=[(0, 10), (0, 0.5), (-1, 1), (-1, 1)],
):
super(OutcomeCutoffBLastBLonger, self).__init__()
self.data = data
self.probability_cols = probability_cols
self.probabilities = data[probability_cols].values
self.outcomes = data[outcome_cols].values
self.last_favours = (
data[last_stage_favours_col].fillna(2).astype(int).values.ravel()
)
self.longer = data[duration_favours_col].fillna(2).astype(int).values.ravel()
self.choices = data["choice"].values
self.label = label
self.parameter_names = parameter_names
self.parameter_bounds = parameter_bounds
self.n_parameters = len(parameter_names)
self.n_trials = len(data)
def predict_choiceprobs(self, parameters):
m_min, epsilon, b_longer, b_last = parameters
m_threshold_crossed = np.any(self.outcomes > m_min, axis=1)
# Biases
longer_bias = np.zeros((self.outcomes.shape[0], self.outcomes.shape[1] + 1))
longer_bias[np.arange(len(longer_bias)), self.longer] = b_longer
longer_bias = longer_bias[:, :2]
last_bias = np.zeros((self.outcomes.shape[0], self.outcomes.shape[1] + 1))
last_bias[np.arange(len(last_bias)), self.last_favours] = b_last
last_bias = last_bias[:, :2]
choiceprobs = np.where(
m_threshold_crossed[:, None],
egreedy(self.outcomes + longer_bias + last_bias, epsilon),
egreedy(self.probabilities + longer_bias + last_bias, epsilon),
)
return choiceprobs
class OutcomeCutoffBLast(ChoiceModel):
"""A heuristic model where the higher probability option is chosen unless one outcome reaches a threshold.
Uses epsilon greedy choice rule.
Attributes:
choices (np.ndarray): Array of choices of type int
label (str, optional): Model label
outcomes (np.ndarray): Array (n_trials x n_alternatives) of option outcomes
probabilities (np.ndarray): Array (n_trials x n_alternatives) of outcome probabilities
parameter_bounds (list): List of tuples of parameter boundaries [(alpha_low, alpha_up), (beta_low, beta_up)]
"""
def __init__(
self,
data,
probability_cols=["p0", "p1"],
outcome_cols=["m0", "m1"],
last_stage_favours_col=["last_stage_favours"],
label="mCutoff_b-Last",
parameter_names=["m_min", "epsilon", "b_last"],
parameter_bounds=[(0, 10), (0, 0.5), (-1, 1)],
):
super(OutcomeCutoffBLast, self).__init__()
self.data = data
self.probability_cols = probability_cols
self.probabilities = data[probability_cols].values
self.outcomes = data[outcome_cols].values
self.last_favours = (
data[last_stage_favours_col].fillna(2).astype(int).values.ravel()
)
self.choices = data["choice"].values
self.label = label
self.parameter_names = parameter_names
self.parameter_bounds = parameter_bounds
self.n_parameters = len(parameter_names)
self.n_trials = len(data)
def predict_choiceprobs(self, parameters):
m_min, epsilon, b_last = parameters
m_threshold_crossed = np.any(self.outcomes > m_min, axis=1)
# Biases
last_bias = np.zeros((self.outcomes.shape[0], self.outcomes.shape[1] + 1))
last_bias[np.arange(len(last_bias)), self.last_favours] = b_last
last_bias = last_bias[:, :2]
choiceprobs = np.where(
m_threshold_crossed[:, None],
egreedy(self.outcomes + last_bias, epsilon),
egreedy(self.probabilities + last_bias, epsilon),
)
return choiceprobs
class ProspectTheory(ChoiceModel):
"""Prospect Theory model.
Assumes that objective probabilities are transformed into decision weights (using weighting function with parameter $\gamma$), and outcome utilities are computed with a power-function with parameter $\alpha$. Choice probabilities are derived from subjective expected utilities via a softmax function with inverse temperature parameter $\beta$.
Attributes:
choices (np.ndarray): Array of choices of type int
label (str, optional): Model label
outcomes (np.ndarray): Array (n_trials x n_alternatives) of option outcomes
probabilities (np.ndarray): Array (n_trials x n_alternatives) of outcome probabilities
parameter_bounds (list): List of tuples of parameter boundaries [(alpha_low, alpha_up), (beta_low, beta_up)]
"""
def __init__(
self,
data,
probability_cols=["p0", "p1"],
outcome_cols=["m0", "m1"],
label="PT",
parameter_names=["alpha", "gamma", "beta"],
parameter_bounds=[(0, 5), (0.28, 1), (0, 200)],
):
super(ProspectTheory, self).__init__()
self.data = data
self.probability_cols = probability_cols
self.probabilities = data[probability_cols].values
self.outcomes = data[outcome_cols].values
self.choices = data["choice"].values
self.label = label
self.parameter_names = parameter_names
self.parameter_bounds = parameter_bounds
self.n_parameters = len(parameter_names)
self.n_trials = len(data)
def predict_choiceprobs(self, parameters):
alpha, gamma, beta = parameters
p = self.probabilities
w = p ** gamma / ((p ** gamma + (1 - p) ** gamma) ** (1 / gamma))
SU = w * self.outcomes ** alpha
choiceprobs = softmax(beta * SU)
return choiceprobs
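# Illustration (not part of the original module): the one-parameter probability
# weighting function and power utility from ProspectTheory.predict_choiceprobs,
# evaluated on hypothetical probabilities and outcomes.
def _demo_pt_weighting(alpha=0.9, gamma=0.6):
    p = np.array([0.05, 0.25, 0.5, 0.75, 0.95])
    w = p ** gamma / ((p ** gamma + (1 - p) ** gamma) ** (1 / gamma))
    m = np.array([2.0, 4.0, 6.0, 8.0, 10.0])
    return w, m ** alpha  # decision weights and subjective utilities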
class ProspectTheoryBLast(ChoiceModel):
"""Prospect Theory model.
Assumes that objective probabilities are transformed into decision weights (using weighting function with parameter $\gamma$), and outcome utilities are computed with a power-function with parameter $\alpha$. Choice probabilities are derived from subjective expected utilities via a softmax function with inverse temperature parameter $\beta$.
Attributes:
choices (np.ndarray): Array of choices of type int
label (str, optional): Model label
outcomes (np.ndarray): Array (n_trials x n_alternatives) of option outcomes
probabilities (np.ndarray): Array (n_trials x n_alternatives) of outcome probabilities
parameter_bounds (list): List of tuples of parameter boundaries [(alpha_low, alpha_up), (beta_low, beta_up)]
"""
def __init__(
self,
data,
probability_cols=["p0", "p1"],
outcome_cols=["m0", "m1"],
last_stage_favours_col=["last_stage_favours"],
duration_favours_col=["duration_favours"],
label="PT_b-Last",
parameter_names=["alpha", "gamma", "beta", "b_last"],
parameter_bounds=[(0, 5), (0.28, 1), (0, 200), (-1, 1)],
):
super(ProspectTheoryBLast, self).__init__()
self.data = data
self.probability_cols = probability_cols
self.probabilities = data[probability_cols].values
self.outcomes = data[outcome_cols].values
self.last_favours = (
data[last_stage_favours_col].fillna(2).astype(int).values.ravel()
)
self.longer = data[duration_favours_col].fillna(2).astype(int).values.ravel()
self.choices = data["choice"].values
self.label = label
self.parameter_names = parameter_names
self.parameter_bounds = parameter_bounds
self.n_parameters = len(parameter_names)
self.n_trials = len(data)
def predict_choiceprobs(self, parameters):
alpha, gamma, beta, b_last = parameters
p = self.probabilities
w = p ** gamma / ((p ** gamma + (1 - p) ** gamma) ** (1 / gamma))
SU = w * self.outcomes ** alpha
last_bias = np.zeros((self.outcomes.shape[0], self.outcomes.shape[1] + 1))
last_bias[np.arange(len(last_bias)), self.last_favours] = b_last
last_bias = last_bias[:, :2]
choiceprobs = softmax(beta * (SU + last_bias))
return choiceprobs
class ProspectTheoryBLastBLonger(ChoiceModel):
"""Prospect Theory model.
Assumes that objective probabilities are transformed into decision weights (using weighting function with parameter $\gamma$), and outcome utilities are computed with a power-function with parameter $\alpha$. Choice probabilities are derived from subjective expected utilities via a softmax function with inverse temperature parameter $\beta$.
Attributes:
choices (np.ndarray): Array of choices of type int
label (str, optional): Model label
outcomes (np.ndarray): Array (n_trials x n_alternatives) of option outcomes
probabilities (np.ndarray): Array (n_trials x n_alternatives) of outcome probabilities
parameter_bounds (list): List of tuples of parameter boundaries [(alpha_low, alpha_up), (beta_low, beta_up)]
"""
def __init__(
self,
data,
probability_cols=["p0", "p1"],
outcome_cols=["m0", "m1"],
last_stage_favours_col=["last_stage_favours"],
duration_favours_col=["duration_favours"],
label="PT_b-Last_b-Longer",
parameter_names=["alpha", "gamma", "beta", "b_last", "b_longer"],
parameter_bounds=[(0, 5), (0.28, 1), (0, 200), (-1, 1), (-1, 1)],
):
super(ProspectTheoryBLastBLonger, self).__init__()
self.data = data
self.probability_cols = probability_cols
self.probabilities = data[probability_cols].values
self.outcomes = data[outcome_cols].values
self.last_favours = (
data[last_stage_favours_col].fillna(2).astype(int).values.ravel()
)
self.longer = data[duration_favours_col].fillna(2).astype(int).values.ravel()
self.choices = data["choice"].values
self.label = label
self.parameter_names = parameter_names
self.parameter_bounds = parameter_bounds
self.n_parameters = len(parameter_names)
self.n_trials = len(data)
def predict_choiceprobs(self, parameters):
alpha, gamma, beta, b_last, b_longer = parameters
p = self.probabilities
w = p ** gamma / ((p ** gamma + (1 - p) ** gamma) ** (1 / gamma))
SU = w * self.outcomes ** alpha
# Biases
longer_bias = np.zeros((self.outcomes.shape[0], self.outcomes.shape[1] + 1))
longer_bias[np.arange(len(longer_bias)), self.longer] = b_longer
longer_bias = longer_bias[:, :2]
last_bias = np.zeros((self.outcomes.shape[0], self.outcomes.shape[1] + 1))
last_bias[np.arange(len(last_bias)), self.last_favours] = b_last
last_bias = last_bias[:, :2]
choiceprobs = softmax(beta * (SU + longer_bias + last_bias))
return choiceprobs
class Glickman1Layer(ChoiceModel):
"""Three alternative adaptation from the winning model from Glickman et al., 2019
Assumes that in each fixation, gaze-biased subjective utilities (see PT) are accumulated and all accumulators (irrespective of fixation) are subject to leak over individual fixations.
Parameters
----------
alpha (alpha > 0)
Utility function parameter
gamma (0.28 < gamma < 1)
Probability weighting parameter
beta (beta > 0)
Inverse temperature parameter
lambda (0 < lambda < 1)
Leak parameter (0 = perfect memory, 1 = full leak)
theta (0 < theta < 1)
Gaze bias parameter
"""
def __init__(
self,
data,
probability_cols=["p0", "p1"],
outcome_cols=["m0", "m1"],
label="glickman1layer",
parameter_names=["alpha", "gamma", "beta", "lam", "theta"],
parameter_bounds=[(0, 5), (0.2, 1), (0, 200), (0, 1), (0, 1)],
):
super(Glickman1Layer, self).__init__()
self.data = data
self.probability_cols = probability_cols
self.probabilities = data[probability_cols].values
self.outcomes = data[outcome_cols].values
self.choices = data["choice"].values
self.fixated_alternatives = data["fixated_alternatives"].values
self.fixated_attributes = data["fixated_attributes"].values # 0 = p, 1 = m
self.fixation_durations = data["fixation_durations"].values
self.label = label
self.parameter_names = parameter_names
self.parameter_bounds = parameter_bounds
self.n_parameters = len(parameter_names)
self.n_trials = len(data)
self.n_items = len(probability_cols)
def predict_choiceprobs(self, parameters):
alpha, gamma, beta, lam, theta = parameters
p = self.probabilities
w = p ** gamma / ((p ** gamma + (1 - p) ** gamma) ** (1 / gamma))
SU = w * self.outcomes ** alpha
Y = np.zeros((self.n_trials, self.n_items))
for trial in range(self.n_trials):
# If fixation data present
if isinstance(self.fixation_durations[trial], np.ndarray):
for dur, alt, att in zip(
self.fixation_durations[trial],
self.fixated_alternatives[trial],
self.fixated_attributes[trial],
):
# Option wise gaze discount
theta_vector = np.ones(self.n_items) * theta
theta_vector[alt] = 1.0
Y[trial, :] = (1 - lam) * Y[trial, :] + theta_vector * SU[trial, :]
choiceprobs = softmax(beta * Y)
return choiceprobs
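# Illustration (not part of the original module): the leaky, gaze-discounted
# accumulation from Glickman1Layer.predict_choiceprobs for one hypothetical
# trial with a fixed sequence of fixated alternatives.
def _demo_glickman_accumulation(lam=0.1, theta=0.5):
    SU_trial = np.array([1.2, 0.9])  # hypothetical subjective utilities
    fixated = [0, 1, 0, 0, 1]  # hypothetical fixation sequence
    Y = np.zeros_like(SU_trial)
    for alt in fixated:
        theta_vector = np.ones_like(SU_trial) * theta
        theta_vector[alt] = 1.0  # the fixated option is not discounted
        Y = (1 - lam) * Y + theta_vector * SU_trial
    return Y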
class Glickman2Layer(ChoiceModel):
"""Three alternative adaption from 2-layer model from Glickman et al., 2019
Also assumes that over fixations, subjective utilities (see PT) are accumulated. However, in contrast to the 1-layer model, here, the subjective stimulus attributes (decision weights and subjective utilities) also accumulate across fixations. The gaze-bias acts on the input to these lower-level accumulators (decision weights and subjective utilities), which are then combined *after the gaze bias was applied* in the next level.
Accumulators on both levels are subject to leak.
For a reference, see Glickman et al., 2019 (Fig. 6A)
Parameters
----------
alpha (alpha > 0)
Utility function parameter
gamma (0.28 < gamma < 1)
Probability weighting parameter
beta (beta > 0)
Inverse temperature parameter
lambda (0 < lambda < 1)
Leak parameter (0 = perfect memory, 1 = full leak)
theta (0 < theta < 1)
Gaze bias parameter
"""
def __init__(
self,
data,
probability_cols=["pA", "pB", "pC"],
outcome_cols=["mA", "mB", "mC"],
label="Glickman2Layer",
parameter_names=["alpha", "gamma", "beta", "lam", "theta"],
parameter_bounds=[(0, 5), (0.2, 1), (0, 50), (0, 1), (0, 1)],
):
super(Glickman2Layer, self).__init__()
self.data = data
self.probability_cols = probability_cols
self.probabilities = data[probability_cols].values
self.outcomes = data[outcome_cols].values
self.choices = data["choice"].values
self.fixated_alternatives = data["fixated_alternatives"].values # 0 = p, 1 = m
self.fixated_attributes = data["fixated_attributes"].values
self.fixation_durations = data["fixation_durations"].values
self.label = label
self.parameter_names = parameter_names
self.parameter_bounds = parameter_bounds
self.n_parameters = len(parameter_names)
self.n_trials = len(data)
self.n_items = len(probability_cols)
self.n_attributes = 2
def predict_choiceprobs(self, parameters):
alpha, gamma, beta, lam, theta = parameters
p = self.probabilities
w = p ** gamma / ((p ** gamma + (1 - p) ** gamma) ** (1 / gamma))
m = self.outcomes ** alpha
L1w = np.zeros((self.n_trials, self.n_items))
L1m = np.zeros((self.n_trials, self.n_items))
L2 = np.zeros((self.n_trials, self.n_items))
for trial in range(self.n_trials):
# If fixation data present
if isinstance(self.fixation_durations[trial], np.ndarray):
for dur, alt, att in zip(
self.fixation_durations[trial],
self.fixated_alternatives[trial],
self.fixated_attributes[trial],
):
# AOI wise gaze discount
theta_vector = np.ones((self.n_items, self.n_attributes)) * theta
theta_vector[alt, att] = 1.0
L1w[trial, :] = (1 - lam) * L1w[trial, :] + theta_vector[:, 0] * w[
trial, :
]
L1m[trial, :] = (1 - lam) * L1m[trial, :] + theta_vector[:, 1] * m[
trial, :
]
L2[trial, :] = (1 - lam) * L2[trial, :] + L1w[trial, :] * L1m[
trial, :
]
choiceprobs = softmax(beta * L2)
return choiceprobs
class DiffOfDiffBLastBLonger(ChoiceModel):
""""""
def __init__(
self,
data,
probability_cols=["p0", "p1"],
outcome_cols=["m0", "m1"],
last_stage_favours_col=["last_stage_favours"],
duration_favours_col=["duration_favours"],
label="DiffOfDiff_b-Last_b-Longer",
parameter_names=["wp", "beta", "b_last", "b_longer"],
parameter_bounds=[(0, 1), (0, 200), (-1, 1), (-1, 1)],
):
super(DiffOfDiffBLastBLonger, self).__init__()
self.data = data
self.probability_cols = probability_cols
self.probabilities = data[probability_cols].values
self.outcomes = data[outcome_cols].values
self.last_favours = (
data[last_stage_favours_col].fillna(2).astype(int).values.ravel()
)
self.longer = data[duration_favours_col].fillna(2).astype(int).values.ravel()
self.choices = data["choice"].values
self.label = label
self.parameter_names = parameter_names
self.parameter_bounds = parameter_bounds
self.n_parameters = len(parameter_names)
self.n_trials = len(data)
def predict_choiceprobs(self, parameters):
wp, beta, b_last, b_longer = parameters
# Attribute Differences
d_p = np.vstack(
[
self.probabilities[:, 0] - self.probabilities[:, 1],
self.probabilities[:, 1] - self.probabilities[:, 0],
]
).T
d_m = np.vstack(
[
self.outcomes[:, 0] - self.outcomes[:, 1],
self.outcomes[:, 1] - self.outcomes[:, 0],
]
).T
# Difference of differences
D = wp * d_p + (1 - wp) * d_m
# Biases
longer_bias = np.zeros((self.outcomes.shape[0], self.outcomes.shape[1] + 1))
longer_bias[np.arange(len(longer_bias)), self.longer] = b_longer
longer_bias = longer_bias[:, :2]
last_bias = np.zeros((self.outcomes.shape[0], self.outcomes.shape[1] + 1))
last_bias[np.arange(len(last_bias)), self.last_favours] = b_last
last_bias = last_bias[:, :2]
choiceprobs = softmax(beta * (D + last_bias + longer_bias))
return choiceprobs
class DiffOfDiffBLast(ChoiceModel):
""""""
def __init__(
self,
data,
probability_cols=["p0", "p1"],
outcome_cols=["m0", "m1"],
last_stage_favours_col=["last_stage_favours"],
duration_favours_col=["duration_favours"],
label="DiffOfDiff_b-Last",
parameter_names=["wp", "beta", "b_last"],
parameter_bounds=[(0, 1), (0, 200), (-1, 1)],
):
super(DiffOfDiffBLast, self).__init__()
self.data = data
self.probability_cols = probability_cols
self.probabilities = data[probability_cols].values
self.outcomes = data[outcome_cols].values
self.last_favours = (
data[last_stage_favours_col].fillna(2).astype(int).values.ravel()
)
self.longer = data[duration_favours_col].fillna(2).astype(int).values.ravel()
self.choices = data["choice"].values
self.label = label
self.parameter_names = parameter_names
self.parameter_bounds = parameter_bounds
self.n_parameters = len(parameter_names)
self.n_trials = len(data)
def predict_choiceprobs(self, parameters):
wp, beta, b_last = parameters
# Attribute Differences
d_p = np.vstack(
[
self.probabilities[:, 0] - self.probabilities[:, 1],
self.probabilities[:, 1] - self.probabilities[:, 0],
]
).T
d_m = np.vstack(
[
self.outcomes[:, 0] - self.outcomes[:, 1],
self.outcomes[:, 1] - self.outcomes[:, 0],
]
).T
# Difference of differences
D = wp * d_p + (1 - wp) * d_m
# Biases
last_bias = np.zeros((self.outcomes.shape[0], self.outcomes.shape[1] + 1))
last_bias[np.arange(len(last_bias)), self.last_favours] = b_last
last_bias = last_bias[:, :2]
choiceprobs = softmax(beta * (D + last_bias))
return choiceprobs
class DiffOfDiff(ChoiceModel):
""""""
def __init__(
self,
data,
probability_cols=["p0", "p1"],
outcome_cols=["m0", "m1"],
last_stage_favours_col=["last_stage_favours"],
duration_favours_col=["duration_favours"],
label="DiffOfDiff",
parameter_names=["wp", "beta"],
parameter_bounds=[(0, 1), (0, 200)],
):
super(DiffOfDiff, self).__init__()
self.data = data
self.probability_cols = probability_cols
self.probabilities = data[probability_cols].values
self.outcomes = data[outcome_cols].values
self.last_favours = (
data[last_stage_favours_col].fillna(2).astype(int).values.ravel()
)
self.longer = data[duration_favours_col].fillna(2).astype(int).values.ravel()
self.choices = data["choice"].values
self.label = label
self.parameter_names = parameter_names
self.parameter_bounds = parameter_bounds
self.n_parameters = len(parameter_names)
self.n_trials = len(data)
def predict_choiceprobs(self, parameters):
wp, beta = parameters
# Attribute Differences
d_p = np.vstack(
[
self.probabilities[:, 0] - self.probabilities[:, 1],
self.probabilities[:, 1] - self.probabilities[:, 0],
]
).T
d_m = np.vstack(
[
self.outcomes[:, 0] - self.outcomes[:, 1],
self.outcomes[:, 1] - self.outcomes[:, 0],
]
).T
# Difference of differences
D = wp * d_p + (1 - wp) * d_m
choiceprobs = softmax(beta * D)
return choiceprobs
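# Illustration (not part of the original module): the weighted
# difference-of-differences from DiffOfDiff.predict_choiceprobs on one
# hypothetical trial (one column per alternative).
def _demo_diff_of_diff(wp=0.6):
    p = np.array([[0.8, 0.3]])
    m = np.array([[2.0, 6.0]])
    d_p = np.vstack([p[:, 0] - p[:, 1], p[:, 1] - p[:, 0]]).T
    d_m = np.vstack([m[:, 0] - m[:, 1], m[:, 1] - m[:, 0]]).T
    return wp * d_p + (1 - wp) * d_m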
class TwoStageWithin(ChoiceModel):
""""""
def __init__(
self,
data,
probability_cols=["p0", "p1"],
outcome_cols=["m0", "m1"],
alt_gaze_cols=["g0", "g1"],
att_gaze_cols=["gp", "gm"],
last_stage_favours_col=["last_stage_favours"],
duration_favours_col=["duration_favours"],
label="TwoStageWithin",
parameter_names=["alpha", "theta", "beta", "b_last"],
parameter_bounds=[(0, 3), (0, 1), (0, 100), (-0.1, 0.1)],
):
super(TwoStageWithin, self).__init__()
self.data = data
self.probability_cols = probability_cols
self.probabilities = data[probability_cols].values
self.outcomes = data[outcome_cols].values / 10
self.last_favours = (
data[last_stage_favours_col].fillna(2).astype(int).values.ravel()
)
self.alt_gaze = data[alt_gaze_cols].values
self.att_gaze = data[att_gaze_cols].values
self.choices = data["choice"].values
self.label = label
self.parameter_names = parameter_names
self.parameter_bounds = parameter_bounds
self.n_parameters = len(parameter_names)
self.n_trials = len(data)
def predict_choiceprobs(self, parameters):
alpha, theta, beta, b_last = parameters
eu = self.probabilities * self.outcomes ** alpha
# # Set theta to 1 in attribute-wise presentation trials
# trial_thetas = (
# (1 - self.data["presentation01"]) * theta + (self.data["presentation01"])
# ).values[:, None]
trial_thetas = theta
X = self.alt_gaze * eu + (1 - self.alt_gaze) * trial_thetas * eu
# Biases
        # Set last-favours to 2 (favouring neither alt 1 nor alt 2) in attribute-wise presentation
last_favours = np.where(self.data["by_attribute"], 2, self.last_favours)
last_bias = np.zeros((self.outcomes.shape[0], self.outcomes.shape[1] + 1))
last_bias[np.arange(len(last_bias)), last_favours] = b_last
last_bias = last_bias[:, :2]
choiceprobs = softmax(beta * (X + last_bias))
return choiceprobs
class TwoStageBetween(ChoiceModel):
""""""
def __init__(
self,
data,
probability_cols=["p0", "p1"],
outcome_cols=["m0", "m1"],
alt_gaze_cols=["g0", "g1"],
att_gaze_cols=["gp", "gm"],
last_stage_favours_col=["last_stage_favours"],
duration_favours_col=["duration_favours"],
label="TwoStageBetween",
parameter_names=["wp", "eta", "beta", "b_last"],
parameter_bounds=[(0, 1), (0, 1), (0, 100), (-0.1, 0.1)],
):
super(TwoStageBetween, self).__init__()
self.data = data
self.probability_cols = probability_cols
self.probabilities = data[probability_cols].values
self.outcomes = data[outcome_cols].values
self.last_favours = (
data[last_stage_favours_col].fillna(2).astype(int).values.ravel()
)
self.alt_gaze = data[alt_gaze_cols].values
self.att_gaze = data[att_gaze_cols].values
self.choices = data["choice"].values
self.label = label
self.parameter_names = parameter_names
self.parameter_bounds = parameter_bounds
self.n_parameters = len(parameter_names)
self.n_trials = len(data)
def predict_choiceprobs(self, parameters):
wp, eta, beta, b_last = parameters
pn = self.probabilities / self.probabilities.sum(axis=1, keepdims=True)
mn = self.outcomes / self.outcomes.sum(axis=1, keepdims=True)
# Set theta to 1 in attribute-wise presentation trials
# trial_etas = (
# (1 - self.data["presentation01"]) + (self.data["presentation01"] * eta)
# ).values[:, None]
trial_etas = eta
X = self.att_gaze[:, 0][:, None] * (
wp * pn + trial_etas * (1 - wp) * mn
) + self.att_gaze[:, 1][:, None] * (trial_etas * wp * pn + (1 - wp) * mn)
# Biases
        # Set last-favours to 2 (favouring neither alt 1 nor alt 2) in alternative-wise presentation
last_favours = np.where(~self.data["by_attribute"], 2, self.last_favours)
last_bias = np.zeros((self.outcomes.shape[0], self.outcomes.shape[1] + 1))
last_bias[np.arange(len(last_bias)), last_favours] = b_last
last_bias = last_bias[:, :2]
choiceprobs = softmax(beta * (X + last_bias))
return choiceprobs
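# Illustration (not part of the original module): the attribute-gaze weighting
# from TwoStageBetween.predict_choiceprobs. Probabilities and outcomes are
# normalised within the trial and the non-attended attribute is discounted by
# eta; all values below are hypothetical.
def _demo_between_weighting(wp=0.6, eta=0.5):
    p = np.array([[0.8, 0.3]])
    m = np.array([[2.0, 6.0]])
    g_att = np.array([[0.7, 0.3]])  # gaze share on [probability, outcome]
    pn = p / p.sum(axis=1, keepdims=True)
    mn = m / m.sum(axis=1, keepdims=True)
    x_p = g_att[:, 0][:, None] * (wp * pn + eta * (1 - wp) * mn)
    x_m = g_att[:, 1][:, None] * (eta * wp * pn + (1 - wp) * mn)
    return x_p + x_m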
class TwoStageMixture(ChoiceModel):
""""""
def __init__(
self,
data,
probability_cols=["p0", "p1"],
outcome_cols=["m0", "m1"],
alt_gaze_cols=["g0", "g1"],
att_gaze_cols=["gp", "gm"],
last_stage_favours_col=["last_stage_favours"],
duration_favours_col=["duration_favours"],
label="TwoStageMixture",
parameter_names=[
"alpha",
"theta",
"wp",
"eta",
"s_between",
"w_between_attwise",
"w_between_altwise",
"beta",
"b_last",
],
parameter_bounds=[
(0, 3),
(0, 1),
(0, 1),
(0, 1),
(0, 40),
(0, 1),
(0, 1),
(0, 200),
(-1, 1),
],
):
super(TwoStageMixture, self).__init__()
self.data = data
self.probability_cols = probability_cols
self.probabilities = data[probability_cols].values
self.outcomes = data[outcome_cols].values
self.last_favours = (
data[last_stage_favours_col].fillna(2).astype(int).values.ravel()
)
self.alt_gaze = data[alt_gaze_cols].values
self.att_gaze = data[att_gaze_cols].values
self.choices = data["choice"].values
self.label = label
self.parameter_names = parameter_names
self.parameter_bounds = parameter_bounds
self.n_parameters = len(parameter_names)
self.n_trials = len(data)
def predict_choiceprobs(self, parameters):
(
alpha,
theta,
wp,
eta,
s_between,
w_between_attwise,
w_between_altwise,
beta,
b_last,
) = parameters
# Between alternatives component
pn = self.probabilities / self.probabilities.sum(axis=1, keepdims=True)
mn = self.outcomes / self.outcomes.sum(axis=1, keepdims=True)
# Set theta to 1 in attribute-wise presentation trials
# trial_etas = (
# (1 - self.data["presentation01"]) + (self.data["presentation01"] * eta)
# ).values[:, None]
trial_etas = eta
X_between = self.att_gaze[:, 0][:, None] * (
wp * pn + trial_etas * (1 - wp) * mn
) + self.att_gaze[:, 1][:, None] * (trial_etas * wp * pn + (1 - wp) * mn)
# Within alternative component
eu = self.probabilities * self.outcomes ** alpha
# Set theta to 1 in attribute-wise presentation trials
# trial_thetas = (
# (1 - self.data["presentation01"]) * theta + (self.data["presentation01"])
# ).values[:, None]
trial_thetas = theta
X_within = self.alt_gaze * eu + (1 - self.alt_gaze) * trial_thetas * eu
# Weighted combination
w_between = (
self.data["presentation01"] * w_between_attwise
+ (1 - self.data["presentation01"]) * w_between_altwise
).values[:, None]
X = (1 - w_between) * X_within + w_between * s_between * X_between
# Biases
last_bias = np.zeros((self.outcomes.shape[0], self.outcomes.shape[1] + 1))
last_bias[np.arange(len(last_bias)), self.last_favours] = b_last
last_bias = last_bias[:, :2]
choiceprobs = softmax(beta * (X + last_bias))
return choiceprobs
class TwoStageMixtureNoScaling(ChoiceModel):
""""""
def __init__(
self,
data,
probability_cols=["p0", "p1"],
outcome_cols=["m0", "m1"],
alt_gaze_cols=["g0", "g1"],
att_gaze_cols=["gp", "gm"],
last_stage_favours_col=["last_stage_favours"],
duration_favours_col=["duration_favours"],
label="TwoStageMixtureNoScaling",
parameter_names=[
"alpha",
"theta",
"wp",
"eta",
"w_between_attwise",
"w_between_altwise",
"beta",
"b_last",
],
parameter_bounds=[
(0, 3), # alpha
(0, 1), # theta
(0, 1), # wp
(0, 1), # eta
(0, 1), # w_between_attwise
(0, 1), # w_between_altwise
(0, 100), # beta
(-0.1, 0.1), # b_last
],
):
super(TwoStageMixtureNoScaling, self).__init__()
self.data = data
self.probability_cols = probability_cols
self.probabilities = data[probability_cols].values
self.outcomes = data[outcome_cols].values / 10
self.last_favours = (
data[last_stage_favours_col].fillna(2).astype(int).values.ravel()
)
self.alt_gaze = data[alt_gaze_cols].values
self.att_gaze = data[att_gaze_cols].values
self.choices = data["choice"].values
self.label = label
self.parameter_names = parameter_names
self.parameter_bounds = parameter_bounds
self.n_parameters = len(parameter_names)
self.n_trials = len(data)
def predict_choiceprobs(self, parameters):
(
alpha,
theta,
wp,
eta,
w_between_attwise,
w_between_altwise,
beta,
b_last,
) = parameters
# Between alternatives component
pn = self.probabilities / self.probabilities.sum(axis=1, keepdims=True)
mn = self.outcomes / self.outcomes.sum(axis=1, keepdims=True)
# Set theta to 1 in attribute-wise presentation trials
# trial_etas = (
# (1 - self.data["presentation01"]) + (self.data["presentation01"] * eta)
# ).values[:, None]
trial_etas = eta
X_between = self.att_gaze[:, 0][:, None] * (
wp * pn + trial_etas * (1 - wp) * mn
) + self.att_gaze[:, 1][:, None] * (trial_etas * wp * pn + (1 - wp) * mn)
# Within alternative component
eu = self.probabilities * self.outcomes ** alpha
# Set theta to 1 in attribute-wise presentation trials
# trial_thetas = (
# (1 - self.data["presentation01"]) * theta + (self.data["presentation01"])
# ).values[:, None]
trial_thetas = theta
X_within = self.alt_gaze * eu + (1 - self.alt_gaze) * trial_thetas * eu
# Weighted combination
w_between = (
self.data["presentation01"] * w_between_attwise
+ (1 - self.data["presentation01"]) * w_between_altwise
).values[:, None]
X = (1 - w_between) * X_within + w_between * X_between
# Biases
last_bias = np.zeros((self.outcomes.shape[0], self.outcomes.shape[1] + 1))
last_bias[np.arange(len(last_bias)), self.last_favours] = b_last
last_bias = last_bias[:, :2]
choiceprobs = softmax(beta * (X + last_bias))
return choiceprobs
class LeakyAltwiseDiscount(ChoiceModel):
def __init__(
self,
data,
probability_cols=["p0", "p1"],
outcome_cols=["m0", "m1"],
sequence_col="sequence",
presentation_col="presentation",
label="LeakyAltwiseDiscount",
parameter_names=["alpha", "beta", "omega", "theta"],
parameter_bounds=[(0, 5), (0, 100), (0, 1), (0, 1)],
):
super(LeakyAltwiseDiscount, self).__init__()
self.data = data
self.probability_cols = probability_cols
self.probabilities = data[probability_cols].values
self.outcomes = data[outcome_cols].values
self.sequences = data[sequence_col].values
self.presentations = data[presentation_col].values
self.choices = data["choice"].values
self.label = label
self.parameter_names = parameter_names
self.parameter_bounds = parameter_bounds
self.n_parameters = len(parameter_names)
self.n_trials = len(data)
def predict_choiceprobs(self, parameters):
alpha, beta, omega, theta = parameters
s = self.probabilities * self.outcomes ** alpha
V = np.zeros_like(s)
for trial in range(self.n_trials):
sequence_trial = json.loads(self.sequences[trial])
fixation_durations = np.array(sequence_trial["durations"]) / 1000
fixated_alternatives = np.array(sequence_trial["alternatives"])
for i, (dur, target) in enumerate(
zip(fixation_durations, fixated_alternatives)
):
if target in [0, 1]:
theta_vec = np.ones(s.shape[1]) * theta
theta_vec[target] = 1
else: # target is "all", both alternatives visible
theta_vec = np.ones(s.shape[1])
V[trial, :] = omega ** dur * V[trial, :] + theta_vec * s[trial, :] * dur
choiceprobs = softmax(beta * V)
return choiceprobs
class LeakyAttwiseDiscount(ChoiceModel):
def __init__(
self,
data,
probability_cols=["p0", "p1"],
outcome_cols=["m0", "m1"],
sequence_col="sequence",
presentation_col="presentation",
label="LeakyAttwiseDiscount",
parameter_names=["wp", "beta", "omega", "eta"],
parameter_bounds=[(0, 1), (0, 200), (0, 1), (0, 1)],
):
super().__init__() # super(LeakyAttwiseDiscount, self).__init__()
self.data = data
self.probability_cols = probability_cols
self.probabilities = data[probability_cols].values
self.outcomes = data[outcome_cols].values / 10
self.sequences = data[sequence_col].values
self.presentations = data[presentation_col].values
self.choices = data["choice"].values
self.label = label
self.parameter_names = parameter_names
self.parameter_bounds = parameter_bounds
self.n_parameters = len(parameter_names)
self.n_trials = len(data)
def predict_choiceprobs(self, parameters):
wp, beta, omega, eta = parameters
n_attributes = 2
dp = np.vstack(
[
self.probabilities[:, 0] - self.probabilities[:, 1],
self.probabilities[:, 1] - self.probabilities[:, 0],
]
).T
dm = np.vstack(
[
self.outcomes[:, 0] - self.outcomes[:, 1],
self.outcomes[:, 1] - self.outcomes[:, 0],
]
).T
V =
|
np.zeros_like(self.outcomes)
|
numpy.zeros_like
|
import numpy, warnings, struct
class MetaHead(object):
"""
Parse a packed representation of MetaHead_type from photonics.h.
"""
_struct = struct.Struct('<5s8B32s40s')
size = _struct.size
def __init__(self, buf):
v = self._struct.unpack(buf)
stringify = lambda s: s[:s.index('\0')]
self.File_Format_Label = stringify(v[0])
self.File_Format_Revision = v[1]
self.bitsys = v[3]
self.endian = v[4]
self.level = v[5]
self.Photonics_Version = stringify(v[9])
def pack(self):
v = (self.File_Format_Label, self.File_Format_Revision, 0, self.bitsys,
self.endian, self.level, 0, 0, 0, self.Photonics_Version, "")
return self._struct.pack(*v)
class Header(object):
"""
Parse a packed representation of Header_type from photonics.h.
"""
_struct = struct.Struct('<100s6f3i6f7i25f2i1f1i2q2i')
size = _struct.size
def __init__(self, fh):
v = self._struct.unpack(fh.read(self.size))
self.MetaHead = MetaHead(v[0][:-15])
self.sphere_frac = v[1]
self.impsampl_Lf = v[2]
self.impsampl_Tf = v[3]
self.ref_np = v[5]
self.ref_ng = v[6]
self.record_errors = bool(v[7])
self.source_type = v[8]
self.extended_source = bool(v[9])
self.step = v[10]
self.e = v[11]
self.volume = v[12]
self.angle = v[13]
self.source_rz = (v[14], v[15])
self.geo = v[16]
self.n = v[17:23]
self.limits = []
self.maxes = []
pos = 23
for i in range(6):
self.limits.append(v[pos:pos+2])
pos += 2
for i in range(6):
self.maxes.append(v[pos:pos+2])
pos += 2
self.depth = v[47]
self.d_scale = v[48]
self.t_scale = v[49]
self.lambda_ = v[50]
self.efficiency = v[51]
self.n_photon = v[52]
self.n_entries = v[53]
self.refraction_mode = v[54]
def write(self, fh):
v = [self.MetaHead.pack(), self.sphere_frac, self.impsampl_Lf, self.impsampl_Tf, 0,
self.ref_np, self.ref_ng, self.record_errors, self.source_type, self.extended_source,
self.step, self.e, self.volume, self.angle]
v += list(self.source_rz)
v += [self.geo]
v += list(self.n)
for l in self.limits:
v += list(l)
for l in self.maxes:
v += list(l)
v += [self.depth, self.d_scale, self.t_scale, self.lambda_, self.efficiency,
self.n_photon, self.n_entries, self.refraction_mode, 0]
fh.write(self._struct.pack(*v))
assert(Header.size == 328)
class Efficiency(object):
"""Normalization types from photonics.h"""
NONE = 0x00
RECEIVER = 0x01
SOURCE = 0x02
WAVELENGTH = 0x04
AREA = 0x08
VOLUME = 0x10
N_PHOTON = 0x20
EMISSION = 0x40
USER_DEFINED = 0x80
DIFFERENTIAL = 0x100
N_EVENT = 0x200
class Geometry(object):
SPHERICAL = 1
CYLINDRICAL = 2
CUBIC = 3
class GeometryType(object):
POINTSOURCE = 0
INFINITEMUON = 1
class Parity(object):
EVEN = 0
ODD = 1
# Class for reading IceCube photonics tables
class Table(object):
level = -1
table = None
weights = None
bin_centers = None
bin_widths = None
is_integral = False
filename = None
# Constructor. Creates instance and optionally opens pt file.
def __init__(self, filename=None, normalize=True, mmap=True):
if filename is not None:
self.open_file(filename, mmap=mmap)
if normalize:
try:
self.normalize()
except:
pass
@classmethod
def stack(cls, outfile, *fnames):
import shutil
shutil.copy(fnames[0], outfile)
target = cls()
target._read_standalone(outfile, mmap=True, mode='r+')
for fn in fnames[1:]:
piece = cls()
piece._read_standalone(fn, mmap=True, mode='r')
target.values += piece.values
if piece.ph_header.record_errors and target.ph_header.record_errors:
target.weights += piece.weights
target.ph_header.n_photon += piece.ph_header.n_photon
with open(outfile, 'r+') as fh:
fh.seek(0)
target.ph_header.write(fh)
# Checks consistency of loaded tables.
# For now, only checks shapes of various arrays.
def table_shape_consistent(self):
shapes = set()
shapes.add(self.values.shape)
if self.weights is not None:
shapes.add(self.weights.shape)
shapes.add(tuple([len(i) for i in self.bin_centers]))
shapes.add(tuple([len(i) for i in self.bin_widths]))
if len(shapes) > 1:
return 0
return 1
# Normalize to absolute bin amplitudes
def normalize(self):
eff = self.header['efficiency']
if not (eff & Efficiency.N_PHOTON):
# This table still contains raw weights from photomc
self.values /= self.header['n_photon']
self.weights /= self.header['n_photon']
eff = eff | Efficiency.N_PHOTON
if (eff & Efficiency.DIFFERENTIAL):
# Someone has made this a dP/dt table. Undo their work.
if self.values.ndim != 4:
raise ValueError("This table is weird, man.")
shape = [1]*len(self.values.shape)
shape[-1] = self.values.shape[-1]
dt = self.bin_widths[-1].reshape(shape)
self.values *= dt
eff = eff & ~Efficiency.DIFFERENTIAL
self.header['efficiency'] = eff
# Returns number of dimensions in table.
@property
def ndim(self):
return len(self.shape)
# Returns shape of table.
@property
def shape(self):
if not self.table_shape_consistent():
raise Exception('Shape consistency check failed')
return self.values.shape
def remove_nans_and_infinites(self, dovalues=True, doweights=True):
if self.weights is not None and doweights:
            self.weights[numpy.logical_not(numpy.isfinite(self.values))] = 0
        if dovalues:
            self.values[numpy.logical_not(numpy.isfinite(self.values))] = 0
@property
def normed(self):
"""Has this table been normalized?"""
if self.values.ndim == 4:
normval = self.values[:,:,:,-1]
if (normval[(normval > 0) & \
numpy.isfinite(normval)] == 1).all():
return True
else:
return False
else:
return True
def _read_standalone(self, filename, mmap, mode='r'):
"""
Read using standalone implementation.
"""
header = Header(open(filename))
if header.MetaHead.level == 2:
raise ValueError("I don't know how to read level-2 tables!")
self.values = numpy.squeeze(numpy.memmap(filename, shape=header.n, dtype=numpy.float32, offset=Header.size, mode=mode))
if header.record_errors:
offset = Header.size + self.values.itemsize*self.values.size
self.weights = numpy.squeeze(numpy.memmap(filename, shape=header.n, dtype=numpy.float32, offset=offset, mode=mode))
else:
self.weights = numpy.zeros(self.values.shape, dtype=numpy.float32)
# In keeping with a convention established by photo2numpy,
# tables are either mmap'd in single precision or read in
# to memory completely in double precision
if not mmap:
self.values = self.values.astype(numpy.float64)
self.weights = self.weights.astype(numpy.float64)
else:
warnings.warn("Memory-mapped tables are single-precision. You have been warned.");
self.bin_centers = []
self.bin_widths = []
trafos = [lambda a: a]*len(header.limits)
itrafos = [lambda a: a]*len(header.limits)
if header.geo == Geometry.SPHERICAL:
trafos[2] = lambda a: -numpy.cos(numpy.pi*a/180.)
if header.d_scale == 2:
trafos[0] = numpy.sqrt
itrafos[0] = lambda a: a**2
if header.t_scale == 2:
trafos[-1] = numpy.sqrt
itrafos[-1] = lambda a: a**2
for i in range(len(header.limits)):
steps = header.n[i]+1
if steps == 2:
continue
lo, hi = list(map(trafos[i], header.limits[i]))
edges = itrafos[i](numpy.linspace(lo, hi, steps))
self.bin_centers.append(0.5*(edges[1:]+edges[:-1]))
self.bin_widths.append(numpy.diff(edges))
self.ph_header = header
# Add compatibility
self.level = header.MetaHead.level
self.header = {
'n_photon' : header.n_photon,
'efficiency': header.efficiency,
'geometry' : header.geo,
'zenith' : header.angle,
'z' : header.depth,
'n_group' : header.ref_ng,
'n_phase' : header.ref_np,
}
def open_file(self, filename, convert=False, mmap=False):
self._read_standalone(filename, mmap)
# Level 2 tables get an extra, random element here for
# some reason
if len(self.bin_centers) > self.values.ndim:
self.bin_centers = self.bin_centers[0:self.values.ndim]
if len(self.bin_widths ) > self.values.ndim:
self.bin_widths = self.bin_widths [0:self.values.ndim]
# Check consistency of table shapes and derive type
ndim = self.ndim
if ndim == 3:
            self.is_integral = True
# Convert to standard format unless user doesn't want this
if convert:
self.convert_to_level1()
return 1
def convert_to_level1(self):
if self.level == 0 or self.level == 1:
return 1
# For level 2, some axes are reversed.
if self.level == 2:
self.values = numpy.rollaxis(self.values, 0, 3)
if self.weights is not None:
self.weights = \
numpy.rollaxis(self.weights, 0, 3)
self.bin_centers[2], self.bin_centers[0], \
self.bin_centers[1] = self.bin_centers[0], \
self.bin_centers[1], self.bin_centers[2]
self.bin_widths[2], self.bin_widths[0], \
self.bin_widths[1] = self.bin_widths[0], \
self.bin_widths[1], self.bin_widths[2]
from math import pi
self.bin_centers[1][:] *= 180./pi
self.bin_widths[1][:] *= 180./pi
self.level = 1
return 1
print("Don't know how to convert table with level", self.level)
return 0
def convert_to_level2(self):
if self.level == 2:
return 1
# For level 0/1, some axes are reversed.
if self.level == 0 or self.level == 1:
self.values = numpy.rollaxis(self.values, 2, 0)
if self.weights is not None:
self.weights = numpy.rollaxis(self.weights, \
2, 0)
self.bin_centers[0], self.bin_centers[1], \
self.bin_centers[2] = self.bin_centers[2], \
self.bin_centers[0], self.bin_centers[1]
self.bin_widths[0], self.bin_widths[1], \
self.bin_widths[2] = self.bin_widths[2], \
self.bin_widths[0], self.bin_widths[1]
from math import pi
self.bin_centers[1][:] *= pi/180.
self.bin_widths[1][:] *= pi/180.
self.level = 2
return 1
print("Don't know how to convert table with level", self.level)
return 0
def mirror(self,n_rho=0,n_phi=0):
"""Extend table to rho < 0 and 180 < phi < 360. This may be useful for surpressing edge effects while fitting."""
if n_rho == 0 and n_phi == 0:
return None
if abs(self.bin_widths[1].sum() - 180) > 1e-12:
raise ValueError("Only half-cylindrical tables can \
be mirrored. Perhaps mirror() has already been \
called?")
## XXX only phi mirroring for now
new_shape = list(self.values.shape)
new_shape[0] += n_rho
new_shape[1] += 2*n_phi
target_slice = [slice(None)]*self.values.ndim
source_slice = [slice(None)]*self.values.ndim
target_slice[0] = slice(n_rho, None)
target_slice[1] = slice(n_phi, -n_phi)
# copy values into expanded array
new_values = numpy.empty(new_shape)
new_values[target_slice] = self.values
# replace old values with expanded version
del self.values
self.values = new_values
# copy weights into expanded array
new_weights = numpy.empty(new_shape)
new_weights[target_slice] = self.weights
# replace weights
del self.weights
self.weights = new_weights
# replace bin centers and widths
for lst in (self.bin_centers, self.bin_widths):
for i in (0,1):
new = numpy.empty(new_shape[i])
new[target_slice[i]] = lst[i]
lst[i] = new
# mirror left edge
source_slice[1] = [2*n_phi - 1 - i for i in range(n_phi)]
target_slice[0] = slice(None)
target_slice[1] = list(range(n_phi))
for array in (self.values, self.weights):
array[target_slice] = array[source_slice]
for lst in (self.bin_centers, self.bin_widths):
lst[1][target_slice[1]] = -(lst[1][source_slice[1]])
# mirror right edge
source_slice[1] = [-(2*n_phi - i) for i in range(n_phi)]
target_slice[1] = [-(i+1) for i in range(n_phi)]
for array in (self.values, self.weights):
array[target_slice] = array[source_slice]
for lst in (self.bin_centers, self.bin_widths):
lst[1][target_slice[1]] = 360 - lst[1][source_slice[1]]
# mirror radial slices
# negative radii are mirrored, so in reverse order
source_slice[0] = list(range(2*n_rho - 1, n_rho - 1, -1))
target_slice[0] = list(range(n_rho))
for lst in (self.bin_centers, self.bin_widths):
lst[0][target_slice[0]] = -(lst[0][source_slice[0]])
# mirror the radial slice at each azimuth to negative radii
for i in range(self.bin_centers[1].size):
# find the opposite slice
opposite = 180 + self.bin_centers[1][i]
if opposite > 180: opposite -= 2*(180 - opposite)
elif opposite < 0: opposite *= -1
mcenter = abs(opposite - self.bin_centers[1]).argmin()
source_slice[1] = mcenter
target_slice[1] = i
for array in (self.values, self.weights):
array[target_slice] = array[source_slice]
return None
class FITSTable(Table):
"""
Same content as a photonics table, but using the FITS-based file format
produced by the clsim tabulator.
"""
# A default header. This contains the same keys as the one created in photo2numpy from photospline.
empty_header = {
'n_photons': 0,
'efficiency': Efficiency.NONE,
'geometry': Geometry.SPHERICAL,
'parity': Parity.EVEN,
'zenith': 0.,
'azimuth': 0.,
'z': 0.,
'energy': 0.,
'type': 0,
'level': 1,
'n_group': numpy.nan,
'n_phase': numpy.nan,
}
def __init__(self, binedges, values, weights, header=empty_header):
self.bin_edges = binedges
        self._visible_range = tuple([slice(1, -1)] * len(binedges))
shape = tuple((len(edges)+1 for edges in binedges))
# add under and overflow bins if not present
if values.shape == tuple((len(edges)-1 for edges in binedges)):
full_values = numpy.zeros(shape)
full_values[self._visible_range] = values
values = full_values
if weights is not None:
full_weights = numpy.zeros(shape)
full_weights[self._visible_range] = weights
weights = full_weights
assert values.shape == shape, "Data array has the correct shape"
self._values = values
self._weights = weights
self.bin_centers = [(edges[1:]+edges[:-1])/2. for edges in self.bin_edges]
self.bin_widths = [numpy.diff(edges) for edges in self.bin_edges]
self.header = header
@property
def values(self):
return self._values[self._visible_range]
@property
def weights(self):
if self._weights is None:
return None
else:
return self._weights[self._visible_range]
def __getitem__(self, slice_):
for i in slice_:
if not (isinstance(i, int) or i == slice(None)):
# print(slice_)
print(i, isinstance(i, int), i == slice(None))
raise ValueError("Only dimension-reducing slices are implemented")
edges = [e for i, e in enumerate(self.bin_edges) if slice_[i] == slice(None)]
if self.weights is None:
w = None
else:
w = self.weights[slice_]
return FITSTable(edges, self.values[slice_], w, self.header)
def __iadd__(self, other):
self.raise_if_incompatible(other)
self._values += other._values
if self._weights is not None:
self._weights += other._weights
self.header['n_photons'] += other.header['n_photons']
if 'n_events' in self.header:
self.header['n_events'] += other.header['n_events']
return self
def __idiv__(self, num):
return self.__imul__(1./num)
def __imul__(self, num):
self._values *= num
if self.weights is not None:
self._weights *= num*num
return self
def raise_if_incompatible(self, other):
"""
Check for generalized brokenness.
"""
if not isinstance(other, self.__class__):
raise TypeError("Can't combine a %s with this %s" % (other.__class__.__name__, self.__class__.__name__))
if self._values.shape != other._values.shape:
raise ValueError("Shape mismatch in data arrays!")
nans = self._values.size - numpy.isfinite(self._values).sum()
if nans != 0:
raise ValueError("This table has %d NaN values. You might want to see to that.")
nans = other._values.size - numpy.isfinite(other._values).sum()
if nans != 0:
raise ValueError("Other table has %d NaN values. You might want to see to that.")
for k, v in self.header.items():
if k in ('n_photons', 'n_events'):
continue
if other.header[k] != v:
raise ValueError("Can't combine tables with %s=%s and %s" % (k, v, other.header[k]))
def normalize(self, kind='photon'):
"""
Normalize the table. If *kind* is 'photon', normalize such that the
entries in the table are number of PE detected per Cherenkov photon
emitted between 300 and 600 nm, as in Photonics. If *kind* is 'event',
normalize such that the entries in the table are the number of PE
detected per event (e.g. per minimum-ionizing muon).
"""
if kind == 'photon':
assert not self.header['efficiency'] & Efficiency.N_EVENT
if not self.header['efficiency'] & Efficiency.N_PHOTON:
self /= self.header['n_photons']
self.header['efficiency'] |= Efficiency.N_PHOTON
elif kind == 'event':
assert not self.header['efficiency'] & Efficiency.N_PHOTON
if not self.header['efficiency'] & Efficiency.N_EVENT:
self /= self.header['n_events']
self.header['efficiency'] |= Efficiency.N_EVENT
else:
raise ValueError("Unknown normalization type '%s'" % kind)
def save(self, fname, overwrite=False):
try:
import pyfits
except ImportError:
import astropy.io.fits as pyfits
import os
if os.path.exists(fname):
if overwrite:
os.unlink(fname)
else:
raise IOError("File '%s' exists!" % fname)
data = pyfits.PrimaryHDU(self._values)
data.header.set('TYPE', 'Photon detection probability table')
for k, v in self.header.items():
# work around 8-char limit in FITS keywords
tag = 'hierarch _i3_' + k
data.header.set(tag, v)
hdulist = pyfits.HDUList([data])
if self._weights is not None:
errors = pyfits.ImageHDU(self._weights, name='ERRORS')
hdulist.append(errors)
for i in range(self._values.ndim):
edgehdu = pyfits.ImageHDU(self.bin_edges[i],name='EDGES%d' % i)
hdulist.append(edgehdu)
hdulist.writeto(fname)
@classmethod
def load(cls, fname):
try:
import pyfits
except ImportError:
import astropy.io.fits as pyfits
hdulist = pyfits.open(fname)
data = hdulist[0]
values = data.data
binedges = []
for i in range(values.ndim):
binedges.append(hdulist['EDGES%d' % i].data)
try:
weights = hdulist['ERRORS'].data
except KeyError:
weights = None
header = dict()
for k in map(str.lower, data.header.keys()):
if k.startswith('_i3_'):
header[k[4:]] = data.header[k]
return cls(binedges, values, weights, header)
@classmethod
def stack(cls, outfile, *fnames):
import os
assert(not os.path.exists(outfile))
target = cls.load(fnames[0])
for fn in fnames[1:]:
target += cls.load(fn)
target.save(outfile)
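# Illustration (not part of the original module): building a tiny FITSTable
# from synthetic bin edges and per-bin values, following the __init__
# signature above. All numbers are made up for demonstration.
def _demo_fits_table():
    edges = [numpy.linspace(0, 1, 4), numpy.linspace(0, 180, 5)]
    values = numpy.ones((3, 4))  # one entry per visible bin
    weights = numpy.zeros_like(values)
    tab = FITSTable(edges, values, weights)
    return tab.values.shape  # under-/overflow bins stay hidden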
def melonball(table, weights = None, radius = 1):
"""Set weights inside a given radius to zero."""
if table.header['geometry'] != Geometry.CYLINDRICAL:
raise ValueError("Can't handle non-cylindrical tables")
Rho, Z = numpy.meshgrid_nd(table.bin_centers[0], table.bin_centers[2], lex_order=True)
mask = Rho**2 + Z**2 < radius**2
if weights is None:
weights = table.weights
shape = weights.shape
for i in range(shape[1]):
if weights.ndim == 3:
weights[:,i,:][mask] = 0
else:
for j in range(shape[3]):
weights[:,i,:,j][mask] = 0
def scalp(table, weights = None, low = -820, high = 820):
"""Set weights outside of the given depth range to zero."""
geo = table.header['geometry']
depth = table.header['z']
zenith = table.header['zenith']*numpy.pi/180.0
if geo == Geometry.CYLINDRICAL:
Rho, Phi, L =
|
numpy.meshgrid_nd(*table.bin_centers[:3], lex_order=True)
|
numpy.meshgrid_nd
|
""" Minimum working example of an SME script
"""
import datetime
import os
import os.path
import re
from os.path import dirname, join, realpath
import matplotlib.pyplot as plt
import numpy as np
from astropy import constants as const
from astropy import coordinates as coord
from astropy.io import fits
from astropy.time import Time
from data_sources.StellarDB import StellarDB
from scipy.linalg import lstsq, solve_banded
from scipy.ndimage.filters import gaussian_filter1d, median_filter
from scipy.optimize import least_squares
from tqdm import tqdm
from pysme import sme as SME
from pysme import util
from pysme.abund import Abund
from pysme.gui import plot_plotly
from pysme.iliffe_vector import Iliffe_vector
from pysme.linelist.vald import ValdFile
from pysme.persistence import save_as_idl
from pysme.solve import solve
from pysme.synthesize import synthesize_spectrum
def get_teff_from_spectral_type(spectral_type):
spectral_class = spectral_type[0]
spectral_number = spectral_type[1]
teff_dict = {
"M": {
"0": 3800,
"1": 3600,
"2": 3400,
"3": 3250,
"4": 3100,
"5": 2800,
"6": 2600,
"7": 2500,
"8": 2400,
"9": 2300,
}
}
return teff_dict[spectral_class][spectral_number]
def black_body_curve(teff, wave):
h = const.h.to_value("J s")
c = const.c.to_value("AA/s")
kB = const.k_B.to_value("J/K")
B = 2 * h * c ** 2 / wave ** 5 * (np.exp(h * c / (wave * kB * teff)) - 1) ** -1
return B
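# Illustration (not part of the original script): evaluating the Planck curve
# above for a hypothetical effective temperature over an optical wavelength
# grid in Angstrom.
def _demo_black_body():
    wave = np.linspace(3000, 9000, 7)  # Angstrom
    return black_body_curve(5800, wave)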
def hl_envelopes_idx(s, dmin=1, dmax=1, split=False):
"""
Input :
s: 1d-array, data signal from which to extract high and low envelopes
dmin, dmax: int, optional, size of chunks, use this if the size of the input signal is too big
split: bool, optional, if True, split the signal in half along its mean, might help to generate the envelope in some cases
Output :
lmin,lmax : high/low envelope idx of input signal s
"""
# locals min
lmin = (np.diff(np.sign(np.diff(s))) > 0).nonzero()[0] + 1
# locals max
lmax = (np.diff(np.sign(np.diff(s))) < 0).nonzero()[0] + 1
if split:
# s_mid is zero if s centered around x-axis or more generally mean of signal
s_mid = np.mean(s)
# pre-sorting of locals min based on relative position with respect to s_mid
lmin = lmin[s[lmin] < s_mid]
# pre-sorting of local max based on relative position with respect to s_mid
lmax = lmax[s[lmax] > s_mid]
        # global min of dmin-chunks of local minima
lmin = lmin[
[i + np.argmin(s[lmin[i : i + dmin]]) for i in range(0, len(lmin), dmin)]
]
        # global max of dmax-chunks of local maxima
lmax = lmax[
[i + np.argmax(s[lmax[i : i + dmax]]) for i in range(0, len(lmax), dmax)]
]
return lmin, lmax
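# Illustration (not part of the original script): extracting envelope indices
# of a noisy, amplitude-modulated sine with the helper above.
def _demo_hl_envelopes():
    t = np.linspace(0, 10, 2000)
    s = (1 + 0.5 * np.sin(0.5 * t)) * np.sin(20 * t) + 0.05 * np.random.randn(t.size)
    lmin, lmax = hl_envelopes_idx(s, dmin=5, dmax=5)
    return t[lmin], s[lmin], t[lmax], s[lmax]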
class Plot_Normalization: # pragma: no cover
def __init__(self, wsort, sB, new_wave, contB, iteration=0, title=None):
plt.ion()
self.fig = plt.figure()
self.title = title
suptitle = f"Iteration: {iteration}"
if self.title is not None:
suptitle = f"{self.title}\n{suptitle}"
self.fig.suptitle(suptitle)
self.ax = self.fig.add_subplot(111)
self.line1 = self.ax.plot(wsort, sB, label="Spectrum")[0]
self.line2 = self.ax.plot(new_wave, contB, label="Continuum Fit")[0]
plt.legend()
plt.show()
def plot(self, wsort, sB, new_wave, contB, iteration):
suptitle = f"Iteration: {iteration}"
if self.title is not None:
suptitle = f"{self.title}\n{suptitle}"
self.fig.suptitle(suptitle)
self.line1.set_xdata(wsort)
self.line1.set_ydata(sB)
self.line2.set_xdata(new_wave)
self.line2.set_ydata(contB)
self.fig.canvas.draw()
self.fig.canvas.flush_events()
def close(self):
plt.ioff()
plt.close()
def opt_filter(y, par, par1=None, weight=None, lambda2=-1, maxiter=100):
"""
Optimal filtering of 1D and 2D arrays.
Uses tridiag in 1D case and sprsin and linbcg in 2D case.
Written by N.Piskunov 8-May-2000
Parameters
----------
    y : array
        1d or 2d array to filter
    par : int
        filter width (for a 2d array, the width in x direction (1st index))
    par1 : int
        (2d arrays only) filter width in y direction (2nd index); defaults to par if missing
    weight : array(float)
        an array of the same size(s) as y containing values between 0 and 1
    lambda2 : float
        regularization parameter
    maxiter : int
        maximum number of iterations for filtering of a 2d array
"""
y = np.asarray(y)
if y.ndim not in [1, 2]:
raise ValueError("Input y must have 1 or 2 dimensions")
if par < 1:
par = 1
# 1D case
if y.ndim == 1 or (y.ndim == 2 and (y.shape[0] == 1 or y.shape[1] == 1)):
y = y.ravel()
n = y.size
if weight is None:
weight = np.ones(n)
elif np.isscalar(weight):
weight = np.full(n, weight)
else:
weight = weight[:n]
if lambda2 > 0:
# Apply regularization lambda
aij = np.zeros((5, n))
# 2nd lower subdiagonal
aij[0, 2:] = lambda2
# Lower subdiagonal
aij[1, 1] = -par - 2 * lambda2
aij[1, 2:-1] = -par - 4 * lambda2
aij[1, -1] = -par - 2 * lambda2
# Main diagonal
aij[2, 0] = weight[0] + par + lambda2
aij[2, 1] = weight[1] + 2 * par + 5 * lambda2
aij[2, 2:-2] = weight[2:-2] + 2 * par + 6 * lambda2
aij[2, -2] = weight[-2] + 2 * par + 5 * lambda2
aij[2, -1] = weight[-1] + par + lambda2
# Upper subdiagonal
aij[3, 0] = -par - 2 * lambda2
aij[3, 1:-2] = -par - 4 * lambda2
aij[3, -2] = -par - 2 * lambda2
# 2nd lower subdiagonal
aij[4, 0:-2] = lambda2
# RHS
b = weight * y
f = solve_banded((2, 2), aij, b)
else:
a = np.full(n, -abs(par))
b = np.copy(weight) + abs(par)
b[1:-1] += abs(par)
aba = np.array([a, b, a])
f = solve_banded((1, 1), aba, weight * y)
return f
else:
# 2D case
if par1 is None:
par1 = par
if par == 0 and par1 == 0:
raise ValueError("xwidth and ywidth can't both be 0")
n = y.size
nx, ny = y.shape
lam_x = abs(par)
lam_y = abs(par1)
n = nx * ny
ndiag = 2 * nx + 1
aij = np.zeros((n, ndiag))
aij[nx, 0] = weight[0, 0] + lam_x + lam_y
aij[nx, 1:nx] = weight[0, 1:nx] + 2 * lam_x + lam_y
aij[nx, nx : n - nx] = weight[1 : ny - 1] + 2 * (lam_x + lam_y)
aij[nx, n - nx : n - 1] = weight[ny - 1, 0 : nx - 1] + 2 * lam_x + lam_y
aij[nx, n - 1] = weight[ny - 1, nx - 1] + lam_x + lam_y
aij[nx - 1, 1:n] = -lam_x
aij[nx + 1, 0 : n - 1] = -lam_x
ind = np.arange(ny - 1) * nx + nx + nx * n
aij[ind - 1] = aij[ind - 1] - lam_x
aij[ind] = aij[ind] - lam_x
ind = np.arange(ny - 1) * nx + nx
aij[nx + 1, ind - 1] = 0
aij[nx - 1, ind] = 0
aij[0, nx:n] = -lam_y
aij[ndiag - 1, 0 : n - nx] = -lam_y
rhs = y * weight
model = solve_banded((nx, nx), aij, rhs)
model = np.reshape(model, (ny, nx))
return model
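# Minimal sketch (assumed usage) exercising the 1D branch of opt_filter above on a
# noisy signal, with and without the second-derivative penalty lambda2.
def _opt_filter_demo():
    import numpy as np
    rng = np.random.default_rng(0)
    y = np.sin(np.linspace(0, 4 * np.pi, 500)) + 0.3 * rng.standard_normal(500)
    smooth = opt_filter(y, 20)                    # plain smoothing, tridiagonal solve
    smooth_reg = opt_filter(y, 20, lambda2=10.0)  # adds the 2nd-derivative penalty
    return smooth, smooth_reg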
def middle(
f,
param,
x=None,
iterations=40,
eps=0.001,
poly=False,
weight=1,
lambda2=-1,
mn=None,
mx=None,
):
"""
middle tries to fit a smooth curve that is located
along the "middle" of 1D data array f. Filter size "filter"
together with the total number of iterations determine
the smoothness and the quality of the fit. The total
number of iterations can be controlled by limiting the
maximum number of iterations (iter) and/or by setting
the convergence criterion for the fit (eps)
04-Nov-2000 N.Piskunov wrote.
09-Nov-2011 NP added weights and 2nd derivative constraint as LAM2
Parameters
----------
f : array
1d data array to fit
param : int
smoothing parameter of the optimal filter (or polynomial degree if poly is True)
x : array, optional
abscissa values; defaults to a linspace on [-1, 1]
iterations : int
maximum number of iterations [def: 40]
eps : float
convergence level [def: 0.001]
mn : float
minimum function values to be considered [def: min(f)]
mx : float
maximum function values to be considered [def: max(f)]
lambda2 : float
constraint on the 2nd derivative
weight : array(float)
vector of weights.
"""
mn = mn if mn is not None else np.min(f)
mx = mx if mx is not None else np.max(f)
f = np.asarray(f)
if x is None:
xx = np.linspace(-1, 1, num=f.size)
else:
xx = np.asarray(x)
if poly:
j = (f >= mn) & (f <= mx)
n = np.count_nonzero(j)
if n <= round(param):
return f
fmin = np.min(f[j]) - 1
fmax = np.max(f[j]) + 1
ff = (f[j] - fmin) / (fmax - fmin)
ff_old = ff
else:
fmin = np.min(f) - 1
fmax = np.max(f) + 1
ff = (f - fmin) / (fmax - fmin)
ff_old = ff
n = len(f)
for _ in range(iterations):
if poly:
param = round(param)
if param > 0:
t = median_filter(np.polyval(np.polyfit(xx, ff, param), xx), 3)
tmp = np.polyval(np.polyfit(xx, (t - ff) ** 2, param), xx)
else:
t = np.tile(np.polyfit(xx, ff, param), len(f))
tmp = np.tile(np.polyfit(xx, (t - ff) ** 2, param), len(f))
else:
t = median_filter(opt_filter(ff, param, weight=weight, lambda2=lambda2), 3)
tmp = opt_filter(
weight * (t - ff) ** 2, param, weight=weight, lambda2=lambda2
)
dev = np.sqrt(
|
np.clip(tmp, 0, None)
|
numpy.clip
|
import argparse
from pathlib import Path
import os.path
import numpy as np
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
import tensorflow as tf
tf.logging.set_verbosity(tf.logging.ERROR)
import gym
from gymfc_nf.envs import *
from gymfc_nf.utils.monitor import CheckpointMonitor
from gymfc_nf.utils.log import make_header
from gymfc_nf.policies import PpoBaselinesPolicy
def generate_inputs(num_trials, max_rate, seed):
inputs = []
np.random.seed(seed)
for i in range(num_trials):
inputs.append(
|
np.random.normal(0, max_rate, size=3)
|
numpy.random.normal
|
import random
import numpy as np
def ransac(x_train,y_train,x_test,min_pts,sigma,min_iter):
num,dim=np.shape(x_train)
itr=0
inlier=np.empty((num,min_iter))
while(itr<min_iter):
#Step 1: Randomly select min points for regression
ind=random.sample(range(0,num),min_pts)
xin=x_train[ind,:]
yin=y_train[ind,:]
#step 2: Perform regression
tmp=np.ones((min_pts,1))
Xin=np.hstack((tmp,xin))
iX=np.linalg.pinv(Xin)
Cin=np.dot(iX,yin)
#Step 3: inliers
X_train=np.hstack((np.ones((num,1)),x_train))
Din=np.abs((np.dot(X_train,Cin)-y_train)/np.sqrt(1+np.linalg.norm(Cin)**2))
cnd=(Din<=sigma) # Thresholding
inlier[:,itr]=cnd.flatten()
itr=itr+1
#Step 4: model selection
itrid=np.argmax(np.sum(inlier,axis=0))
ind=inlier[:,itrid]
ind=(ind==1)
num=np.sum(ind)
xin=x_train[ind,:]
yin=y_train[ind,:]
tmp=np.ones((num,1))
Xin=
|
np.hstack((tmp,xin))
|
numpy.hstack
|
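# Independent minimal sketch of the four RANSAC steps commented in the snippet
# above (random sample, least-squares fit, inlier test, model selection); this is
# an illustration only, not the dataset's `ransac` function.
def _ransac_line_sketch(x, y, n_iter=100, sigma=0.1, min_pts=2, seed=0):
    import numpy as np
    rng = np.random.default_rng(seed)
    x, y = np.asarray(x, float), np.asarray(y, float)
    X = np.column_stack((np.ones_like(x), x))
    best = np.zeros(len(x), dtype=bool)
    for _ in range(n_iter):
        idx = rng.choice(len(x), size=min_pts, replace=False)        # Step 1
        coef, *_ = np.linalg.lstsq(X[idx], y[idx], rcond=None)       # Step 2
        inliers = np.abs(X @ coef - y) <= sigma                      # Step 3
        if inliers.sum() > best.sum():                               # Step 4
            best = inliers
    coef, *_ = np.linalg.lstsq(X[best], y[best], rcond=None)         # refit on inliers
    return coef, best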
from __future__ import print_function, division
import numpy as np
from openmdao.api import ExplicitComponent
from openaerostruct.aerodynamics.utils import _assemble_AIC_mtx, _assemble_AIC_mtx_b, _assemble_AIC_mtx_d
try:
from openaerostruct.fortran import OAS_API
fortran_flag = True
data_type = float
except:
fortran_flag = False
data_type = complex
compressible = False
class Forces(ExplicitComponent):
""" Compute aerodynamic forces acting on each section.
Note that the first two inputs and the output have the surface name
prepended on it. E.g., 'def_mesh' on a surface called 'wing' would be
'wing.def_mesh', etc.
Parameters
----------
def_mesh[nx, ny, 3] : numpy array
Array defining the nodal coordinates of the lifting surface.
b_pts[nx-1, ny, 3] : numpy array
Bound points for the horseshoe vortices, found along the 1/4 chord.
circulations[(nx-1)*(ny-1)] : numpy array
Flattened vector of horseshoe vortex strengths calculated by solving
the linear system of AIC_mtx * circulations = rhs, where rhs is
based on the air velocity at each collocation point.
alpha : float
Angle of attack in degrees.
v : float
Freestream air velocity in m/s.
rho : float
Air density in kg/m^3.
Returns
-------
sec_forces[nx-1, ny-1, 3] : numpy array
Contains the sectional forces acting on each panel.
Stored in Fortran order (only relevant with more than one chordwise
panel).
"""
def initialize(self):
self.options.declare('surfaces', types=list)
def setup(self):
self.surfaces = surfaces = self.options['surfaces']
tot_panels = 0
for surface in surfaces:
name = surface['name']
ny = surface['num_y']
nx = surface['num_x']
tot_panels += (nx - 1) * (ny - 1)
self.add_input(name + '_def_mesh', val=np.zeros((nx, ny, 3)), units='m')#, dtype=data_type))
self.add_input(name + '_b_pts', val=np.zeros((nx-1, ny, 3)), units='m')#, dtype=data_type))
self.add_input(name + '_cos_sweep', val=np.zeros((ny-1)), units='m')#, dtype=data_type))
self.add_input(name + '_widths', val=np.ones((ny-1)), units='m')#, dtype=data_type))
self.add_output(name + '_sec_forces', val=np.zeros((nx-1, ny-1, 3)), units='N')#, dtype=data_type))
self.tot_panels = tot_panels
self.add_input('circulations', val=np.zeros((tot_panels)), units='m**2/s')
self.add_input('alpha', val=3.)
self.add_input('M', val=0.1)
self.add_input('v', val=10., units='m/s')
self.add_input('rho', val=3., units='kg/m**3')
self.mtx = np.zeros((tot_panels, tot_panels, 3), dtype=data_type)
self.v = np.zeros((tot_panels, 3), dtype=data_type)
self.declare_partials('*', '*')
if not fortran_flag:
self.declare_partials('*', '*', method='fd')
def compute(self, inputs, outputs):
circ = inputs['circulations']
alpha = inputs['alpha'] * np.pi / 180.
rho = inputs['rho']
cosa = np.cos(alpha)
sina = np.sin(alpha)
# Assemble a different matrix here than the AIC_mtx from above; Note
# that the collocation points used here are the midpoints of each
# bound vortex filament, not the collocation points from above
_assemble_AIC_mtx(self.mtx, inputs, self.surfaces, skip=True)
# Compute the induced velocities at the midpoints of the
# bound vortex filaments
for ind in range(3):
self.v[:, ind] = self.mtx[:, :, ind].dot(circ)
# Add the freestream velocity to the induced velocity so that
# self.v is the total velocity seen at the point
self.v[:, 0] += cosa * inputs['v']
self.v[:, 2] += sina * inputs['v']
i = 0
for surface in self.surfaces:
name = surface['name']
nx = surface['num_x']
ny = surface['num_y']
num_panels = (nx - 1) * (ny - 1)
b_pts = inputs[name + '_b_pts']
if fortran_flag:
sec_forces = OAS_API.oas_api.forcecalc(self.v[i:i+num_panels, :], circ[i:i+num_panels], rho, b_pts)
else:
# Get the vectors for each bound vortex of the horseshoe vortices
bound = b_pts[:, 1:, :] - b_pts[:, :-1, :]
# Cross the obtained velocities with the bound vortex filament
# vectors
cross = np.cross(self.v[i:i+num_panels],
bound.reshape(-1, bound.shape[-1], order='F'))
sec_forces = np.zeros((num_panels, 3), dtype=data_type)
# Compute the sectional forces acting on each panel
for ind in range(3):
sec_forces[:, ind] = \
(inputs['rho'] * circ[i:i+num_panels] * cross[:, ind])
# Reshape the forces into the expected form
forces = sec_forces.reshape((nx-1, ny-1, 3), order='F')
sweep_angle = inputs[name + '_cos_sweep'] / inputs[name + '_widths']
beta = np.sqrt(1 - inputs['M']**2 * sweep_angle**2)
if not compressible:
beta[:] = 1.
for j, B in enumerate(beta):
outputs[name + '_sec_forces'][:, j, :] = forces[:, j, :] / B
i += num_panels
if fortran_flag:
if 0:
def compute_jacvec_product(self, inputs, outputs, d_inputs, d_outputs, mode):
circ = inputs['circulations']
alpha = inputs['alpha'] * np.pi / 180.
rho = inputs['rho']
cosa = np.cos(alpha)
sina = np.sin(alpha)
# Assemble a different matrix here than the AIC_mtx from above; Note
# that the collocation points used here are the midpoints of each
# bound vortex filament, not the collocation points from above
_assemble_AIC_mtx(self.mtx, inputs, self.surfaces, skip=True)
# Compute the induced velocities at the midpoints of the
# bound vortex filaments
for ind in range(3):
self.v[:, ind] = self.mtx[:, :, ind].dot(circ)
# Add the freestream velocity to the induced velocity so that
# self.v is the total velocity seen at the point
self.v[:, 0] += cosa * inputs['v']
self.v[:, 2] += sina * inputs['v']
if mode == 'fwd':
circ = inputs['circulations']
alpha = inputs['alpha'] * np.pi / 180.
if 'alpha' in d_inputs:
alphad = d_inputs['alpha'] * np.pi / 180.
else:
alphad = 0.
if 'circulations' in d_inputs:
circ_d = d_inputs['circulations']
else:
circ_d = np.zeros(circ.shape)
cosa = np.cos(alpha)
sina = np.sin(alpha)
cosad = -sina * alphad
sinad = cosa * alphad
rho = inputs['rho']
v = inputs['v']
mtxd = np.zeros(self.mtx.shape)
# Actually assemble the AIC matrix
_assemble_AIC_mtx_d(mtxd, inputs, d_inputs, self.surfaces, skip=True)
vd = np.zeros(self.v.shape)
# Compute the induced velocities at the midpoints of the
# bound vortex filaments
for ind in range(3):
vd[:, ind] += mtxd[:, :, ind].dot(circ)
vd[:, ind] += self.mtx[:, :, ind].real.dot(circ_d)
# Add the freestream velocity to the induced velocity so that
# self.v is the total velocity seen at the point
if 'v' in d_inputs:
v_d = d_inputs['v']
else:
v_d = 0.
vd[:, 0] += cosa * v_d
vd[:, 2] += sina * v_d
vd[:, 0] += cosad * v
vd[:, 2] += sinad * v
if 'rho' in d_inputs:
rho_d = d_inputs['rho']
else:
rho_d = 0.
i = 0
rho = inputs['rho'].real
for surface in self.surfaces:
name = surface['name']
nx = surface['num_x']
ny = surface['num_y']
num_panels = (nx - 1) * (ny - 1)
b_pts = inputs[name + '_b_pts']
if name+'_b_pts' in d_inputs:
b_pts_d = d_inputs[name + '_b_pts']
else:
b_pts_d = np.zeros(b_pts.shape)
self.compute(inputs, outputs)
sec_forces = outputs[name + '_sec_forces'].real
sec_forces, sec_forcesd = OAS_API.oas_api.forcecalc_d(self.v[i:i+num_panels, :], vd[i:i+num_panels],
circ[i:i+num_panels], circ_d[i:i+num_panels],
rho, rho_d,
b_pts, b_pts_d)
d_outputs[name + '_sec_forces'] += sec_forcesd.reshape((nx-1, ny-1, 3), order='F')
i += num_panels
if mode == 'rev':
circ = inputs['circulations']
alpha = inputs['alpha'] * np.pi / 180.
cosa = np.cos(alpha)
sina = np.sin(alpha)
i = 0
rho = inputs['rho'].real
v = inputs['v']
vb = np.zeros(self.v.shape)
for surface in self.surfaces:
name = surface['name']
nx = surface['num_x']
ny = surface['num_y']
num_panels = (nx - 1) * (ny - 1)
b_pts = inputs[name + '_b_pts']
sec_forcesb = d_outputs[name + '_sec_forces'].reshape((num_panels, 3), order='F')
v_b, circb, rhob, bptsb, _ = OAS_API.oas_api.forcecalc_b(self.v[i:i+num_panels, :], circ[i:i+num_panels], rho, b_pts, sec_forcesb)
if 'circulations' in d_inputs:
d_inputs['circulations'][i:i+num_panels] += circb
vb[i:i+num_panels] = v_b
if 'rho' in d_inputs:
d_inputs['rho'] += rhob
if name + '_b_pts' in d_inputs:
d_inputs[name + '_b_pts'] += bptsb
i += num_panels
sinab = inputs['v'] * np.sum(vb[:, 2])
if 'v' in d_inputs:
d_inputs['v'] += cosa * np.sum(vb[:, 0]) + sina * np.sum(vb[:, 2])
cosab = inputs['v'] * np.sum(vb[:, 0])
ab = np.cos(alpha) * sinab - np.sin(alpha) * cosab
if 'alpha' in d_inputs:
d_inputs['alpha'] += np.pi * ab / 180.
mtxb = np.zeros(self.mtx.shape)
circb = np.zeros(circ.shape)
for i in range(3):
for j in range(self.tot_panels):
mtxb[j, :, i] += circ * vb[j, i]
circb += self.mtx[j, :, i].real * vb[j, i]
if 'circulations' in d_inputs:
d_inputs['circulations'] += circb
_assemble_AIC_mtx_b(mtxb, inputs, d_inputs, self.surfaces, skip=True)
else:
def compute_partials(self, inputs, partials):
circ = inputs['circulations']
alpha = inputs['alpha'] * np.pi / 180.
rho = inputs['rho']
cosa = np.cos(alpha)
sina = np.sin(alpha)
# Assemble a different matrix here than the AIC_mtx from above; Note
# that the collocation points used here are the midpoints of each
# bound vortex filament, not the collocation points from above
_assemble_AIC_mtx(self.mtx, inputs, self.surfaces, skip=True)
# Compute the induced velocities at the midpoints of the
# bound vortex filaments
for ind in range(3):
self.v[:, ind] = self.mtx[:, :, ind].dot(circ)
# Add the freestream velocity to the induced velocity so that
# self.v is the total velocity seen at the point
self.v[:, 0] += cosa * inputs['v']
self.v[:, 2] += sina * inputs['v']
not_real_outputs = {}
i = 0
for surface in self.surfaces:
name = surface['name']
nx = surface['num_x']
ny = surface['num_y']
num_panels = (nx - 1) * (ny - 1)
b_pts = inputs[name + '_b_pts']
sec_forces = OAS_API.oas_api.forcecalc(self.v[i:i+num_panels, :], circ[i:i+num_panels], rho, b_pts)
# Reshape the forces into the expected form
not_real_outputs[name + '_sec_forces'] = sec_forces.reshape((nx-1, ny-1, 3), order='F')
i += num_panels
for surface in self.surfaces:
name = surface['name']
dS = partials[name + '_sec_forces', name + '_cos_sweep'].copy()
d_inputs = {}
sec_forcesb = np.zeros((surface['num_x'] - 1, surface['num_y'] - 1, 3))
sweep_angle = inputs[name + '_cos_sweep'] / inputs[name + '_widths']
beta = np.sqrt(1 - inputs['M']**2 * sweep_angle**2)
if not compressible:
beta[:] = 1.
for k, val in enumerate(sec_forcesb.flatten()):
for key in inputs:
d_inputs[key] = inputs[key].copy()
d_inputs[key][:] = 0.
sec_forcesb[:] = 0.
sec_forcesb = sec_forcesb.flatten()
sec_forcesb[k] = 1.
sec_forcesb = sec_forcesb.reshape(surface['num_x'] - 1, surface['num_y'] - 1, 3)
for i, B in enumerate(beta):
sec_forcesb[:, i, :] /= B
sec_forcesb = sec_forcesb.reshape((-1, 3), order='F')
circ = inputs['circulations']
alpha = inputs['alpha'] * np.pi / 180.
cosa = np.cos(alpha)
sina = np.sin(alpha)
ind = 0
rho = inputs['rho'].real
v = inputs['v']
vb = np.zeros(self.v.shape)
for surface_ in self.surfaces:
name_ = surface_['name']
nx_ = surface_['num_x']
ny_ = surface_['num_y']
num_panels_ = (nx_ - 1) * (ny_ - 1)
if name == name_:
b_pts = inputs[name_ + '_b_pts']
v_b, circb, rhob, bptsb, _ = OAS_API.oas_api.forcecalc_b(self.v[ind:ind+num_panels_, :], circ[ind:ind+num_panels_], rho, b_pts, sec_forcesb)
if 'circulations' in d_inputs:
d_inputs['circulations'][ind:ind+num_panels_] += circb
vb[ind:ind+num_panels_] = v_b
if 'rho' in d_inputs:
d_inputs['rho'] += rhob
if name + '_b_pts' in d_inputs:
d_inputs[name_ + '_b_pts'] += bptsb
ind += num_panels_
sinab = inputs['v'] * np.sum(vb[:, 2])
if 'v' in d_inputs:
d_inputs['v'] += cosa * np.sum(vb[:, 0]) + sina * np.sum(vb[:, 2])
cosab = inputs['v'] * np.sum(vb[:, 0])
ab = np.cos(alpha) * sinab -
|
np.sin(alpha)
|
numpy.sin
|
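# Minimal sketch (illustration only) of the per-panel Kutta-Joukowski evaluation
# used in Forces.compute above: sectional force = rho * circulation * (v x bound
# vortex vector). Array names and shapes here are assumptions for the example.
def _kutta_joukowski_sketch(v_total, circ, bound_vec, rho=1.2):
    import numpy as np
    # v_total:   (n_panels, 3) total velocity at each bound-vortex midpoint
    # circ:      (n_panels,)   horseshoe vortex strengths
    # bound_vec: (n_panels, 3) bound vortex filament vectors
    circ = np.asarray(circ, dtype=float)
    cross = np.cross(np.asarray(v_total, float), np.asarray(bound_vec, float))
    return rho * circ[:, None] * cross            # (n_panels, 3) sectional forces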
import os
import numpy as np
from netCDF4 import Dataset
def load_region(region_id, local=False, return_regions=False):
if local:
_vr = Dataset(
os.path.join(os.path.dirname(os.path.abspath(__file__)), r"data/terrain_parameters/VarslingsOmr_2017.nc"),
"r")
# flip up-down because Meps data is upside down
#_regions = np.flipud(_vr.variables["LokalOmr_2018"][:])
_regions = _vr.variables["LokalOmr_2018"][:]
else:
_vr = Dataset(
os.path.join(os.path.dirname(os.path.abspath(__file__)), r"data/terrain_parameters/VarslingsOmr_2019.nc"),
"r")
# flip up-down because Meps data is upside down
#_regions = np.flipud(_vr.variables["skredomr19_km"][:])
_regions = _vr.variables["skredomr19_km"][:]
print("Missing value: {mv}".format(mv=_vr.variables["skredomr19_km"].missing_value))
_region_bounds = np.where(_regions == region_id) # just to get the bounding box
# get the lower left and upper right corner of a rectangle around the region
y_min, y_max, x_min, x_max = min(_region_bounds[0].flatten()), max(_region_bounds[0].flatten()), \
min(_region_bounds[1].flatten()), max(_region_bounds[1].flatten())
#reg_mask = np.ma.masked_where(_regions[y_min:y_max, x_min:x_max] == region_id, _regions[y_min:y_max, x_min:x_max]).mask
#reg_mask = np.where(_regions[y_min:y_max, x_min:x_max] == region_id, _regions[y_min:y_max, x_min:x_max], np.nan)
reg_mask = np.where(_regions[y_min:y_max, x_min:x_max] == region_id, 1., np.nan)
#reg_mask = np.ma.masked_where(_reg_mask == region_id).mask
_vr.close()
if return_regions:
return _regions, reg_mask, y_min, y_max, x_min, x_max
else:
return reg_mask, y_min, y_max, x_min, x_max
def clip_region(nc_variable, region_mask, t_index, y_min, y_max, x_min, x_max):
s = len(nc_variable.shape)
if s == 2:
#return np.flipud(region_mask * nc_variable[y_min:y_max, x_min:x_max])
return (region_mask * nc_variable[y_min:y_max, x_min:x_max])
elif s == 3:
#return np.flipud(region_mask * nc_variable[t_index, y_min:y_max, x_min:x_max])
return (region_mask * nc_variable[t_index, y_min:y_max, x_min:x_max])
elif s == 4:
#return np.flipud(region_mask * nc_variable[t_index, 0, y_min:y_max, x_min:x_max])
return (region_mask * nc_variable[t_index, 0, y_min:y_max, x_min:x_max])
else:
print('Input array needs to have 2- to 4-dimensions: {0} were given.'.format(s))
if __name__ == "__main__":
import matplotlib.pyplot as plt
regions, region_mask, y_min, y_max, x_min, x_max = load_region(3013, return_regions=True)
print(region_mask, type(region_mask),
|
np.unique(region_mask)
|
numpy.unique
|
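# Minimal sketch (synthetic data) of the bounding-box + NaN-mask pattern used in
# load_region/clip_region above, without needing the NetCDF files.
def _region_mask_sketch(region_id=3):
    import numpy as np
    regions = np.zeros((10, 10))
    regions[4:7, 2:5] = region_id
    rows, cols = np.where(regions == region_id)
    y_min, y_max = rows.min(), rows.max()
    x_min, x_max = cols.min(), cols.max()
    mask = np.where(regions[y_min:y_max, x_min:x_max] == region_id, 1., np.nan)
    return mask, (y_min, y_max, x_min, x_max)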
import numpy as np
import matplotlib.pyplot as plt
import torch
from data_loading import *
from pytorch_utils import *
def get_r2(a,b):
N = len(a)
SS_tot = np.sum((b-
|
np.mean(b)
|
numpy.mean
|
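# Minimal sketch of the usual coefficient-of-determination formula that the
# truncated get_r2 above appears to start: R^2 = 1 - SS_res / SS_tot.
def _r2_sketch(pred, target):
    import numpy as np
    pred, target = np.asarray(pred, float), np.asarray(target, float)
    ss_tot = np.sum((target - np.mean(target)) ** 2)
    ss_res = np.sum((target - pred) ** 2)
    return 1.0 - ss_res / ss_tot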
#!/usr/bin/env python
from __future__ import print_function
import argparse
import sys
import networkx as nx
import re
import numpy as np
import itertools
_s = re.compile(r'\s+')
_p = re.compile(r'(\d+)\s+(\d+)')
def lsqp(atoms):
com = atoms.mean(axis=0)
#u, d, v = np.linalg.svd(atoms-com)
axes = np.zeros((len(atoms), 3))
for i in range(len(atoms)):
p1 = atoms[i]
if i == len(atoms)-1:
p2 = atoms[0]
else:
p2 = atoms[i+1]
a = np.cross(p1, p2)
axes += a
u, d, v = np.linalg.svd(axes)
i = 0
d = -np.dot(v[i], com)
n = -np.array((v[i,0], v[i,1], d))/v[i,2]
return v[i], com, n
def intriangle(triangle, axis, u, p):
# http://www.softsurfer.com/Archive/algorithm_0105/algorithm_0105.htm
p1, p2, p3 = triangle
w0 = p - p1
a = -np.dot(axis, w0)
b = np.dot(axis, u)
if (abs(b) < 0.01): return False
r = a / b
if r < 0.0: return False
if r > 1.0: return False
I = p + u * r
u = p2 - p1
v = p3 - p1
uu = np.dot(u, u)
uv = np.dot(u, v)
vv = np.dot(v, v)
w = I - p1
wu = np.dot(w, u)
wv = np.dot(w, v)
D = uv * uv - uu * vv
s = (uv * wv - vv * wu)/D
if (s < 0 or s > 1): return False
t = (uv * wu - uu * wv)/D
if (t < 0 or (s+t) > 1): return False
return True
def build_topology(psffile):
g = nx.Graph()
flag = 0
for line in open(psffile).readlines():
if flag == 0 and line.strip().endswith('NATOM'):
natom = int(line.strip().split()[0])
g.natom = natom
flag = 1
continue
if flag == 0 and line.strip().endswith('bonds'):
flag = 2
continue
if flag == 1 and not line.strip(): flag = 0
if flag == 2 and not line.strip(): break
if flag == 1:
num, segid, resid, resname, name = _s.split(line)[1:6]
if resname.startswith('TIP3'): continue
if name.startswith('H'): continue
g.add_node(int(num), {'segid': segid, 'resname': resname, 'name': name, 'resid': resid})
if flag == 2:
for pair in _p.findall(line):
num1, num2 = map(int, pair)
if g.has_node(num1) and g.has_node(num2): g.add_edge(num1, num2)
return g
def build_atomtable(psf, crdfile):
crds = {}
flag = 0
for line in open(crdfile).readlines():
if line.startswith('*'): continue
if flag == 0:
flag = 1
continue
if flag == 1 and not line.strip(): break
if flag == 1:
num, resid, resname, name, x, y, z, segid = _s.split(line.strip())[:8]
if resname.startswith('TIP3'): continue
if name.startswith('H'): continue
if psf.node[int(num)]['name'] != name: raise AtomMismatch("%d %s != %d %s" % (int(num), psf.node[int(num)]['name'], int(num), name))
crds[int(num)] = np.array((float(x), float(y), float(z)))
return crds
class AtomMismatch(Exception):
pass
def check_ring_penetration(psf, crd, pbc=[], xtl='rect', verbose=0):
# ring penetration test
# 1. find rings
# 2. build least square plane
# 3. project atoms ring constituent atoms onto the plane and build convex
# 4. find two bonded atoms that are at the opposite side of the plane
# 5. determine the point of intersection is enclosed in the ring
#
molecules = nx.connected_component_subgraphs(psf)
allatoms = np.array([crd[num] for num in psf.nodes()])
atoms_map = np.array([num for num in psf.nodes()])
natoms = len(allatoms)
if pbc:
atoms_map_reverse = {}
for i,num in enumerate(psf.nodes()):
atoms_map_reverse[num] = i
a = float(pbc[0])
b = float(pbc[1])
n = len(allatoms)
if xtl == 'rect':
allatoms = np.tile(allatoms, (9,1))
op = ((a,0),(a,b),(0,b),(-a,b),(-a,0),(-a,-b),(0,-b),(a,-b))
for i in range(8):
x,y = op[i]
allatoms[n*(i+1):n*(i+2),0] += x
allatoms[n*(i+1):n*(i+2),1] += y
atoms_map = np.tile(atoms_map, 9)
if xtl =='hexa':
allatoms = np.tile(allatoms, (7,1))
rot = lambda theta: np.matrix(((np.cos(np.radians(theta)), -np.sin(np.radians(theta))),
(np.sin(np.radians(theta)), np.cos(np.radians(theta)))))
op = (rot(15), rot(75), rot(135), rot(195), rot(255), rot(315))
d = np.array((a, 0))
for i in range(6):
xy = np.dot(d, op[i])
allatoms[n*(i+1):n*(i+2),:2] = allatoms[n*(i+1):n*(i+2),:2] + xy
atoms_map = np.tile(atoms_map, 7)
# print out image atoms
#fp = open('image.pdb', 'w')
#for i,atom in enumerate(allatoms):
# x, y, z = atom
# fp.write("HETATM%5d %-3s %3s %4d %8.3f%8.3f%8.3f 0.00 0.00 \n" % (i, 'C', 'DUM', i, x, y, z))
pen_pairs = []
pen_cycles = []
for m in molecules:
cycles = nx.cycle_basis(m)
if not cycles: continue
for cycle in cycles:
flag = False
atoms = np.array([crd[num] for num in cycle])
if len(set([psf.node[num]['resid'] for num in cycle])) > 1: continue
if verbose:
num = cycle[0]
print('found ring:', psf.node[num]['segid'], psf.node[num]['resid'], psf.node[num]['resname'])
# build least square fit plane
axis, com, n = lsqp(atoms)
# project atoms to the least square fit plane
for i,atom in enumerate(atoms):
w = np.dot(axis, atom-com)*axis + com
atoms[i] = com + (atom - w)
maxd = np.max(np.sqrt(np.sum(
|
np.square(atoms - com)
|
numpy.square
|
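# Minimal sketch (illustration) exercising intriangle() from the snippet above:
# a segment crossing the interior of a unit triangle in the z=0 plane should
# return True.
def _intriangle_demo():
    import numpy as np
    tri = (np.array([0., 0., 0.]), np.array([1., 0., 0.]), np.array([0., 1., 0.]))
    axis = np.array([0., 0., 1.])       # normal of the triangle's plane
    p = np.array([0.2, 0.2, -0.5])      # segment start, below the plane
    u = np.array([0., 0., 1.])          # segment vector, pointing through the plane
    return intriangle(tri, axis, u, p)  # expected: True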
#! /usr/bin/env python
# Author: <NAME> (srinivas . zinka [at] gmail . com)
# Copyright (c) 2014 <NAME>
# License: New BSD License.
""" Program to draw a general conic section ...
for simple theoretical analysis refer:
http://zinka.files.wordpress.com/2010/06/conic-sections.pdf
"""
import matplotlib.pyplot as plt
from numpy import sqrt, arange, pi, cos, sin, set_printoptions, nan
set_printoptions(precision=2, threshold=nan, edgeitems=None,
linewidth=None, suppress=None, nanstr=None, infstr=None)
# ==============================================================================
# Parabola (Tested OK)
# ==============================================================================
# a = 2;
# t = arange(-10, 10, 0.001)
# phi = arange(0, 2 * pi, 0.001)
# rho = (2 * a) / (1 - cos(phi))
# x1 = rho * cos(phi)
# y1 = rho * sin(phi)
#
# plt.figure(1)
# plt.plot(t, sqrt(4 * a * (t + a)), 'k',
# t, -sqrt(4 * a * (t + a)), 'k',
# x1, y1, '--r')
# plt.axis([-20, 20, -20, 20])
# plt.grid(True)
#
# plt.show()
# ==============================================================================
# Ellipse (Tested OK)
# ==============================================================================
# a=6;
# b=4;
# t = arange(-10, 10, 0.001)
# phi = arange(0, 2*pi, 0.001)
# rho = (b**2)/(a-sqrt(a**2-b**2)*cos(phi))
# x1 = rho*cos(phi)
# y1 = rho*sin(phi)
#
# plt.figure(1)
##plt.plot(t, sqrt(b**2*(1-((t-sqrt(a**2-b**2))/(a))**2)),'k',
## t, -sqrt(b**2*(1-((t-sqrt(a**2-b**2))/(a))**2)), 'k',
## +sqrt(a**2-b**2), 0, 'o',
## x1, y1, 'k')
# plt.plot(x1, y1, 'k',
# +sqrt(a**2-b**2), 0, 'o',
# +sqrt(a**2-b**2)-(a**2)/(sqrt(a**2-b**2)), 0, 'd')
# plt.axis([-20, 20, -20, 20])
# plt.grid(True)
# plt.show()
# ==============================================================================
# Hyperbola (Tested OK)
# ==============================================================================
a = 6;
b = 4;
t = arange(-34.5, 20, 0.001)
phi = arange(0, 2 * pi, 0.001)
rho = (b ** 2) / (a - sqrt(a ** 2 + b ** 2) * cos(phi))
x1 = rho * cos(phi)
y1 = rho * sin(phi)
plt.figure(1)
plt.plot(t, sqrt(b ** 2 * (-1 + ((t + sqrt(a ** 2 + b ** 2)) / (a)) ** 2)), 'k',
t, -sqrt(b ** 2 * (-1 + ((t + sqrt(a ** 2 + b ** 2)) / (a)) ** 2)), 'k',
- sqrt(a ** 2 + b ** 2), 0, 'o',
- sqrt(a ** 2 + b ** 2) - (a ** 2) / (sqrt(a ** 2 + b ** 2)), 0, 'd',
-
|
sqrt(a ** 2 + b ** 2)
|
numpy.sqrt
|
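# Minimal sketch of the single focus-centred polar form underlying the three cases
# above: r(phi) = l / (1 - e*cos(phi)), with semi-latus rectum l and eccentricity e
# (e < 1 ellipse, e = 1 parabola, e > 1 hyperbola).
def _conic_polar_sketch(l=4.0, e=1.5, n=2000):
    import numpy as np
    phi = np.linspace(0.0, 2.0 * np.pi, n)
    with np.errstate(divide="ignore"):
        r = l / (1.0 - e * np.cos(phi))   # grows without bound near the asymptote directions
    return r * np.cos(phi), r * np.sin(phi)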
import argparse
import configparser
import logging
import os
import os.path as osp
import numpy as np
import torch
from transformers import BertTokenizer
from bert_model import BertForSequenceEncoder
from models import inference_model
from kgat.train import correct_prediction
from kgat.analysis import analyze_results
from utils.utils_case_study import load_case_study
from utils.utils_misc import set_args_from_config, get_eval_report, print_results
from utils.utils_preprocess import get_train_test_readers
logger = logging.getLogger(__name__)
def eval_model(model, validset_reader, results_eval=None, args=None, epoch=0, writer=None, counters_test=None,
tokens_li=None):
model.eval()
correct_pred = 0.0
preds_all, labs_all, logits_all, filenames_test_all = [], [], [], []
for index, data in enumerate(validset_reader):
inputs, lab_tensor, filenames_test, aux_info, user_embed = data
prob, att_score_li = model(inputs, tokens_li, user_embed)
correct_pred += correct_prediction(prob, lab_tensor)
preds_all += prob.max(1)[1].tolist()
logits_all += prob.tolist()
labs_all += lab_tensor.tolist()
filenames_test_all += filenames_test
preds_np = np.array(preds_all)
labs_np = np.array(labs_all)
logits_np =
|
np.array(logits_all)
|
numpy.array
|
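# Minimal sketch: with preds_np and labs_np assembled as in eval_model above,
# overall accuracy is just the mean agreement between predictions and labels.
def _accuracy_sketch(preds_np, labs_np):
    import numpy as np
    return float(np.mean(np.asarray(preds_np) == np.asarray(labs_np)))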
"""module for pipelined image reconstruction and analysis"""
import logging
import os
from numbers import Integral
from subprocess import call
from textwrap import dedent
import numpy as np
from niftypet import nimpa
from ..lm import dynamic_timings
from ..lm.mmrhist import mmrhist
from ..prj import mmrrec
from . import obtain_image
from .mmrimg import image_affine
log = logging.getLogger(__name__)
def mmrchain(
datain, # all input data in a dictionary
scanner_params, # all scanner parameters in one dictionary
# containing constants, transaxial and axial
# LUTs.
outpath=None, # output path for results
fout=None, # full file name (any folders and extensions are disregarded)
frames=None, # definition of time frames, default: ['fluid', [0, 0]]
mu_h=None, # hardware mu-map.
mu_o=None, # object mu-map.
tAffine=None, # affine transformations for the mu-map for
# each time frame separately.
itr=4, # number of OSEM iterations
fwhm=0., # Gaussian Post-Smoothing FWHM
psf=None, # Resolution Modelling
recmod=-1, # reconstruction mode: -1: undefined, chosen
# automatically. 3: attenuation and scatter
# correction, 1: attenuation correction
# only, 0: no correction (randoms only).
histo=None, # input histogram (from list-mode data);
# if not given, it will be performed.
decay_ref_time=None, # decay corrects relative to the reference
# time provided; otherwise corrects to the scan
# start time.
trim=False,
trim_scale=2,
trim_interp=0, # interpolation for upsampling used in PVC
trim_memlim=True, # reduced use of memory for machines
# with limited memory (slow though)
pvcroi=None, # ROI used for PVC. If undefined no PVC
# is performed.
pvcreg_tool='niftyreg', # the registration tool used in PVC
store_rois=False, # stores the image of PVC ROIs
# as defined in pvcroi.
pvcpsf=None,
pvcitr=5,
fcomment='', # text comment used in the file name of
# generated image files
ret_sinos=False, # return prompt, scatter and randoms
# sinograms for each reconstruction
ret_histo=False, # return histogram (LM processing output) for
# each image frame
store_img=True,
store_img_intrmd=False,
store_itr=None, # store any reconstruction iteration in
# the list. ignored if the list is empty.
del_img_intrmd=False,
):
if frames is None:
frames = ['fluid', [0, 0]]
if mu_h is None:
mu_h = []
if mu_o is None:
mu_o = []
if pvcroi is None:
pvcroi = []
if pvcpsf is None:
pvcpsf = []
if store_itr is None:
store_itr = []
# decompose all the scanner parameters and constants
Cnt = scanner_params['Cnt']
# -------------------------------------------------------------------------
# HISTOGRAM PRECEDES FRAMES
if histo is not None and 'psino' in histo:
frames = ['fluid', [histo['t0'], histo['t1']]]
else:
histo = None
log.warning(
'the given histogram does not contain a prompt sinogram--will generate a histogram.')
# FRAMES
# check for the provided dynamic frames
if isinstance(frames, list):
# Can be given in three ways:
# * a 1D list (duration of each frame is listed)
# * a more concise 2D list--repetition and duration lists in
# each entry. Must start with the 'def' entry.
# * a 2D list with fluid timings: must start with the string
# 'fluid' or 'timings'. a 2D list with consecutive lists
# describing start and end of the time frame, [t0, t1];
# The number of time frames for this option is unlimited,
# provided the t0 and t1 are within the acquisition times.
# 2D starting with entry 'fluid' or 'timings'
if (isinstance(frames[0], str) and frames[0] in ('fluid', 'timings')
and all(isinstance(t, list) and len(t) == 2 for t in frames[1:])):
t_frms = frames[1:]
# if 2D definitions, starting with entry 'def':
elif (isinstance(frames[0], str) and frames[0] == 'def'
and all(isinstance(t, list) and len(t) == 2 for t in frames[1:])):
# get total time and list of all time frames
dfrms = dynamic_timings(frames)
t_frms = dfrms[1:]
# if 1D:
elif all(isinstance(t, Integral) for t in frames):
# get total time and list of all time frames
dfrms = dynamic_timings(frames)
t_frms = dfrms[1:]
else:
log.error('osemdyn: frames definitions are not given\
in the correct list format: 1D [15,15,30,30,...]\
or 2D list [[2,15], [2,30], ...]')
else:
log.error(
'provided dynamic frames definitions are incorrect (should be a list of definitions).')
raise TypeError('Wrong data type for dynamic frames')
# number of dynamic time frames
nfrm = len(t_frms)
# -------------------------------------------------------------------------
# -------------------------------------------------------------------------
# create folders for results
if outpath is None:
petdir = os.path.join(datain['corepath'], 'reconstructed')
fmudir = os.path.join(datain['corepath'], 'mumap-obj')
pvcdir = os.path.join(datain['corepath'], 'PRCL')
else:
petdir = os.path.join(outpath, 'PET')
fmudir = os.path.join(outpath, 'mumap-obj')
pvcdir = os.path.join(outpath, 'PRCL')
if fout is not None:
# > get rid of folders
fout = os.path.basename(fout)
# > get rid of extension
fout = fout.split('.')[0]
# folder for co-registered mu-maps (for motion compensation)
fmureg = os.path.join(fmudir, 'registered')
# folder for affine transformation MR/CT->PET
petaff = os.path.join(petdir, 'faffine')
# folder for reconstructed images (dynamic or static depending on number of frames).
if nfrm > 1:
petimg = os.path.join(petdir, 'multiple-frames')
pvcdir = os.path.join(pvcdir, 'multiple-frames')
elif nfrm == 1:
petimg = os.path.join(petdir, 'single-frame')
pvcdir = os.path.join(pvcdir, 'single-frame')
else:
raise TypeError('Unrecognised/confusing time frames!')
# create now the folder
nimpa.create_dir(petimg)
# create folder
nimpa.create_dir(petdir)
# -------------------------------------------------------------------------
# -------------------------------------------------------------------------
# MU-MAPS
# get the mu-maps, if given; otherwise will use blank mu-maps.
if tAffine is not None:
muod = obtain_image(mu_o, imtype='object mu-map')
else:
muod = obtain_image(mu_o, Cnt=Cnt, imtype='object mu-map')
# hardware mu-map
muhd = obtain_image(mu_h, Cnt, imtype='hardware mu-map')
# choose the mode of reconstruction based on the provided (or not) mu-maps
if muod['exists'] and muhd['exists'] and recmod == -1:
recmod = 3
elif (muod['exists'] or muhd['exists']) and recmod == -1:
recmod = 1
log.warning('partial mu-map: scatter correction is switched off.')
else:
if recmod == -1:
recmod = 0
log.warning(
'no mu-map provided: scatter and attenuation corrections are switched off.')
# -------------------------------------------------------------------------
# import pdb; pdb.set_trace()
# output dictionary
output = {}
output['recmod'] = recmod
output['frames'] = t_frms
output['#frames'] = nfrm
# if affine transformation is given
# the baseline mu-map in NIfTI file or dictionary has to be given
if tAffine is None:
log.info('using the provided mu-map the same way for all frames.')
else:
if len(tAffine) != nfrm:
raise ValueError("the number of affine transformations in the list"
" has to be the same as the number of dynamic frames")
elif not isinstance(tAffine, list):
raise ValueError("tAffine has to be a list of either 4x4 numpy arrays"
" of affine transformations or a list of file path strings")
elif 'fim' not in muod:
raise NameError("when tAffine is given, the object mu-map has to be"
" provided either as a dictionary or NIfTI file")
# check if all are file path strings to the existing files
if all(isinstance(t, str) for t in tAffine):
if all(os.path.isfile(t) for t in tAffine):
# the internal list of affine transformations
faff_frms = tAffine
log.info('using provided paths to affine transformations for each dynamic frame.')
else:
raise IOError('not all provided paths are valid!')
# check if all are numpy arrays
elif all(isinstance(t, (np.ndarray, np.generic)) for t in tAffine):
# create the folder for dynamic affine transformations
nimpa.create_dir(petaff)
faff_frms = []
for i in range(nfrm):
fout_ = os.path.join(petaff, 'affine_frame(' + str(i) + ').txt')
np.savetxt(fout_, tAffine[i], fmt='%3.9f')
faff_frms.append(fout_)
log.info('using provided numpy arrays affine transformations for each dynamic frame.')
else:
raise ValueError(
'Affine transformations for each dynamic frame could not be established.')
# -------------------------------------------------------------------------------------
# get ref image for mu-map resampling
# -------------------------------------------------------------------------------------
if 'fmuref' in muod:
fmuref = muod['fmuref']
log.info('reusing the reference mu-map from the object mu-map dictionary.')
else:
# create folder if doesn't exists
nimpa.create_dir(fmudir)
# ref file name
fmuref = os.path.join(fmudir, 'muref.nii.gz')
# ref affine
B = image_affine(datain, Cnt, gantry_offset=False)
# ref image (blank)
im = np.zeros((Cnt['SO_IMZ'], Cnt['SO_IMY'], Cnt['SO_IMX']), dtype=np.float32)
# store ref image
nimpa.array2nii(im, B, fmuref)
log.info('generated a reference mu-map in:\n{}'.format(fmuref))
# -------------------------------------------------------------------------------------
output['fmuref'] = fmuref
output['faffine'] = faff_frms
# output list of intermediate file names for mu-maps and PET images
# (useful for dynamic imaging)
if tAffine is not None: output['fmureg'] = []
if store_img_intrmd:
output['fpeti'] = []
if fwhm > 0:
output['fsmoi'] = []
# > number of 3D sinograms
if Cnt['SPN'] == 1:
snno = Cnt['NSN1']
elif Cnt['SPN'] == 11:
snno = Cnt['NSN11']
else:
raise ValueError('unrecognised span: {}'.format(Cnt['SPN']))
# dynamic images in one numpy array
dynim = np.zeros((nfrm, Cnt['SO_IMZ'], Cnt['SO_IMY'], Cnt['SO_IMY']), dtype=np.float32)
# if asked, output only scatter+randoms sinogram for each frame
if ret_sinos and itr > 1 and recmod > 2:
dynmsk = np.zeros((nfrm, Cnt['NSEG0'], Cnt['NSANGLES'], Cnt['NSBINS']), dtype=np.float32)
dynrsn = np.zeros((nfrm, snno, Cnt['NSANGLES'], Cnt['NSBINS']), dtype=np.float32)
dynssn = np.zeros((nfrm, snno, Cnt['NSANGLES'], Cnt['NSBINS']), dtype=np.float32)
dynpsn =
|
np.zeros((nfrm, snno, Cnt['NSANGLES'], Cnt['NSBINS']), dtype=np.float32)
|
numpy.zeros
|
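# Illustration (made-up values) of the three accepted `frames` forms described in
# the FRAMES comments of mmrchain above.
def _frames_examples():
    one_d = [15, 15, 30, 30]                        # 1D: durations of consecutive frames
    two_d_def = ['def', [4, 15], [2, 30]]           # 2D 'def': [repetitions, duration] entries
    two_d_timings = ['timings', [0, 30], [30, 90]]  # 2D 'fluid'/'timings': explicit [t0, t1] windows
    return one_d, two_d_def, two_d_timings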
import math
import pprint
import matplotlib.pyplot as plt
import numpy
from scipy.io.wavfile import write
import keys
import notes
from notes import natural, sharp, flat
note_map = notes.create_note_map()
sample_freq = 44100
note_duration = 0.5
note_spacing = 0.3
note_linspace = numpy.linspace(0, 1, int(sample_freq * note_duration))
@numpy.vectorize
def note_fade_function(t):
return ((1 - t) ** 3) * t
note_fade_vector = note_fade_function(numpy.linspace(0, 1, int(sample_freq * note_duration)))
def write_music(note_list):
#create the array that will hold all of the audio data from start to end
audio_data = numpy.zeros(int(sample_freq * (note_spacing * (len(note_list) - 1) + note_duration)))
for i, note in enumerate(note_list):
#create the function which will convert the time input into a value at that time, based on the frequency for this note
note_freq = note_map[note] * 2 * math.pi
reference_freq = note_map['C#4'] * 2 * math.pi
@numpy.vectorize
def sample_function(t):
#notes with a higher frequency should have a smaller amplitude
amplitude = math.log(reference_freq / note_freq + 1)
#i got this by playing around with it - we play the main note, then a weak version of the same note an octave down
return amplitude * (math.sin(note_freq * t) + 0.1 * math.sin(note_freq * 0.5 * t) + 0.01 * math.sin(note_freq * 0.25 * t))
#apply the sample function to the linear space. this will give a sampled sine wave
note_data = sample_function(numpy.linspace(0, note_duration, int(sample_freq * note_duration))) * note_fade_vector
#add this note data to the audio data
note_begin = int(note_spacing * sample_freq * i)
audio_data[note_begin:note_begin + len(note_data)] += note_data
scaled = numpy.int16(audio_data/numpy.max(
|
numpy.abs(audio_data)
|
numpy.abs
|
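# Minimal sketch of the peak-normalise-and-convert step that the truncated line
# above appears to build: scale float audio into the int16 range before writing
# a WAV file.
def _to_int16_sketch(audio_data):
    import numpy as np
    peak = np.max(np.abs(audio_data))
    return np.int16(audio_data / peak * 32767)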
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Computes Approximation error.
"""
import os
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
import tensorflow.compat.v1 as tf
def w_star(d, F, V): # pylint: disable=invalid-name
r"""Best d-dimensional linear model.
Args:
d: int, top d projection of the left singular vectors.
F: S \times S matrix, left singular vectors.
V: S \times 1 matrix, values of the states.
Returns:
weight vector: array of size d
"""
return
|
np.linalg.pinv(F[:, :d])
|
numpy.linalg.pinv
|
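# Minimal sketch (illustration) of the least-squares weights w* = pinv(F[:, :d]) @ V
# that the truncated w_star above appears to compute, run on random stand-in data.
def _w_star_sketch(S=20, d=3, seed=0):
    import numpy as np
    rng = np.random.default_rng(seed)
    F = np.linalg.qr(rng.standard_normal((S, S)))[0]   # orthonormal stand-in for the left singular vectors
    V = rng.standard_normal((S, 1))                    # S x 1 state values
    return np.linalg.pinv(F[:, :d]) @ V                # weight vector, shape (d, 1)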
import os
import re
import sys
import time
import matplotlib as mpl
import matplotlib.pyplot as plt
from matplotlib.ticker import MultipleLocator, FormatStrFormatter
import matplotlib.gridspec as gridspec
from itertools import permutations, product
import numpy as np
import pandas as pd
from palettable.colorbrewer.qualitative import Paired_12
import seaborn as sns
from scipy.optimize import curve_fit
import utils
mpl.rcParams['text.usetex'] = True
mpl.rcParams['text.latex.preamble'] = [r'\usepackage{amsmath}']
DATA_DIR = '/home/johnmcbride/projects/Scales/Data_compare/Data_for_figs/'
FIGS_DIR = '/home/johnmcbride/projects/Scales/Data_compare/Figs/'
REAL_DIR = '/home/johnmcbride/projects/Scales/Data_compare/Processed/Real'
BIASES = ['none', 'S#1_n1', 'S#1_n2',#'',
'distI_n1', 'distI_n2', 'distI_n3', 'distW',#'',
'distI_n1_S#1_n1', 'distI_n1_S#1_n2', 'distI_n2_S#1_n1', 'distI_n2_S#1_n2',
'distW_S#1_n1', 'distW_S#1_n2', 'distW_S#2_n2', 'distW_S#2_n3',
'hs_n1_w05', 'hs_n1_w10', 'hs_n1_w15', 'hs_n1_w20',
'hs_n2_w05', 'hs_n2_w10', 'hs_n2_w15', 'hs_n2_w20',
'hs_n3_w05', 'hs_n3_w10', 'hs_n3_w15', 'hs_n3_w20',
'hs_r3_w05', 'hs_r3_w10', 'hs_r3_w15', 'hs_r3_w20'] + \
[f"im5_r{r:3.1f}_w{w:02d}" for r in [0, 0.5, 1, 2] for w in [5,10,15,20]] + \
[f"Nim5_r0.0_w{w:02d}" for w in [5,10,15,20]] + \
[f"Nhs_n1_w{w:02d}" for w in [5,10,15,20]] + \
[f"Nhs_n2_w{w:02d}" for w in [10,20]] + \
[f"Nhs_n3_w{w:02d}" for w in [10,20]]
BIAS_GROUPS = ['none', 'S#1', 'HS',
'distW', 'distW_S#1', 'distW_S#2',
'distI', 'distI_S#1']
BIAS_GROUPS = ['none', 'HS',
'S#1', 'distW',
'distW_S#1', 'distW_S#2',
'distI', 'distI_S#1', 'im5', 'AHS']
groups = ['none'] + ['S#1']*2 + ['distI']*3 + ['distW'] + ['distI_S#1']*4 + \
['distW_S#1']*2 + ['distW_S#2']*2 + ['HS']*12 + ['im5']*24 + ['HS']*8
BIAS_KEY = {BIASES[i]:groups[i] for i in range(len(BIASES))}
def plot_MC_dist(fName, X='pair_ints', out=False, f=False, cum=False):
df = pd.read_feather(fName)
if f:
sns.distplot(df[X], bins=100)
else:
if cum:
sns.distplot(utils.extract_floats_from_string(df[X]), bins=100, hist_kws=dict(cumulative=True), kde_kws=dict(cumulative=True))
else:
sns.distplot(utils.extract_floats_from_string(df[X]), bins=100)
if out:
return df
def plot_MC_kde(fName, X='pair_ints', out=False, f=False, ax='None'):
df = pd.read_feather(fName)
if f:
sns.kdeplot(df[X])
else:
sns.kdeplot(utils.extract_floats_from_string(df[X]))
if out:
return df
def rename_biases(df):
df.loc[df.bias=='distI_1_0', 'bias'] = 'distI_n1'
df.loc[df.bias=='distI_2_0', 'bias'] = 'distI_n2'
df.loc[df.bias=='distI_3_0', 'bias'] = 'distI_n3'
df.loc[df.bias=='distI_0_1', 'bias'] = 'S#1_n1'
df.loc[df.bias=='distI_0_2', 'bias'] = 'S#1_n2'
df.loc[df.bias=='distI_1_1', 'bias'] = 'distI_n1_S#1_n1'
df.loc[df.bias=='distI_1_2', 'bias'] = 'distI_n1_S#1_n2'
df.loc[df.bias=='distI_2_1', 'bias'] = 'distI_n2_S#1_n1'
df.loc[df.bias=='distI_2_2', 'bias'] = 'distI_n2_S#1_n2'
df.loc[df.bias=='opt_c', 'bias'] = 'distW'
df.loc[df.bias=='opt_c_I1', 'bias'] = 'distW_S#1_n1'
df.loc[df.bias=='opt_c_I2', 'bias'] = 'distW_S#1_n2'
df.loc[df.bias=='opt_c_s2', 'bias'] = 'distW_S#2_n2'
df.loc[df.bias=='opt_c_s3', 'bias'] = 'distW_S#2_n3'
return df
def rename_bias_groups(df):
df.loc[df.bias_group=='distI+small', 'bias_group'] = 'distI_S#1'
df.loc[df.bias_group=='distW+I', 'bias_group'] = 'distW_S#1'
df.loc[df.bias_group=='distW+S', 'bias_group'] = 'distW_S#2'
df.loc[df.bias_group=='small', 'bias_group'] = 'S#1'
df.loc[df.bias_group=='hs', 'bias_group'] = 'HS'
return df
def plot_violin(df, cat='pair_ints', X='bias_group', Y='JSD', kind='violin'):
df = rename_bias_groups(df)
violin_order = get_violin_order(df, X, Y)
sns.catplot(x=X, y=Y, data=df.loc[df.cat==cat], kind=kind, order=violin_order)
# sns.catplot(x=X, y=Y, data=df.loc[df.cat==cat], kind='violin', order=[0.0, 50.0, 60., 70., 80., 90., 100.])
# sns.catplot(x=X, y=Y, data=df.loc[df.cat==cat], kind='boxen', order=[0.0, 50.0, 60., 70., 80., 90., 100.])
# sns.catplot(x=X, y=Y, data=df.loc[df.cat==cat], kind='boxen', order=[400., 450., 500., 550., 1200.])
def get_violin_order(df, X, Y):
groups = np.array(df[X].unique())
min_J = [df.loc[(df[X]==g)&(df.cat=='pair_ints'),Y].min() for g in groups]
if 'fr' in Y:
violin_order = groups[np.argsort(min_J)[::-1]]
else:
violin_order = groups[np.argsort(min_J)]
return violin_order
def df_distplot_with_constraints(df, bias, MI, MA, q, cat='pair_ints', ret=0):
if 'hs' in bias:
cut = df.loc[(df.min_int>MI)&(df.max_int<MA), bias].quantile(1.-q)
print(cut)
tmp_df = df.loc[(df[bias]>cut)&(df.min_int>MI)&(df.max_int<MA)]
sns.distplot(utils.extract_floats_from_string(tmp_df.loc[:,cat]), bins=100, label=bias)
else:
cut = df.loc[(df.min_int>MI)&(df.max_int<MA), bias].quantile(q)
tmp_df = df.loc[(df[bias]<cut)&(df.min_int>MI)&(df.max_int<MA)]
sns.distplot(utils.extract_floats_from_string(tmp_df.loc[:,cat]), bins=100, label=bias)
plt.legend(loc='best')
if ret:
return tmp_df
def get_files_and_labels_from_idx(df, idx, kde=True, hist=False):
fNames = []
labels = []
for i in idx:
if kde:
fNames.append(df.loc[i, 'kde_path'])
labels.append("kde: int=[{0[0]}-{0[1]}]; beta={0[2]}".format(df.loc[i, ['min_int', 'max_int', 'beta']]))
if hist:
fNames.append(df.loc[i, 'hist_path'])
labels.append("hist: int=[{0[0]}-{0[1]}]; beta={0[2]}".format(df.loc[i, ['min_int', 'max_int', 'beta']]))
return fNames, labels
def plot_harmonic_similarity_distributions(df_grid, df_real, cat='Continent', leg=True, n=5, m=1):
fig, ax = plt.subplots(4,1)
ax = ax.reshape(ax.size)
# fig, ax = plt.subplots(1,1)
# ax = [ax]
# plt.subplots_adjust(hspace=0.8)
for i, lbl in enumerate([f'hs_n{m}_w{x:02d}' for x in range(5,25,5)]):
# for i, lbl in enumerate([f'hs_n{m}_w{x:02d}' for x in range(10,15,5)]):
sns.distplot(df_grid[lbl], label='no_constraint', ax=ax[i], color='k')
for c in df_real[cat].unique():
sns.kdeplot(df_real.loc[(df_real[cat]==c)&(df_real.n_notes==n), lbl], ax=ax[i], label=c)
if leg and not i:
# ax[i].legend(loc='best', frameon=False)
# ax[i].legend(bbox_to_anchor=(0.39, 1.3), frameon=False, ncol=7)
handles, labels = ax[i].get_legend_handles_labels()
ax[i].get_legend().set_visible(False)
else:
ax[i].get_legend().set_visible(False)
ax[i].set_ylabel('Prob')
if leg:
fig.legend(handles, labels, loc='upper center', frameon=False, ncol=4)
fig.savefig(f"{FIGS_DIR}/harm_sim_dist_notes{n}_ver{m}.png")
fig.savefig(f"{FIGS_DIR}/harm_sim_dist_notes{n}_ver{m}.pdf")
def plot_dists_by_npy_file(files, labels, real=True, kde=True, hist=False, n=7):
fig, ax = plt.subplots()
if hist or sum([1 for f in files if 'hist' in f]):
ax2 = ax.twinx()
if real:
if kde:
data = np.load(os.path.join(REAL_DIR, f"n_{n}_kde.npy"))
ax.plot(data[:,0], data[:,1], label='real_kde')
if hist:
data = np.load(os.path.join(REAL_DIR, f"n_{n}_hist.npy"))
ax2.plot(data[:,0], data[:,1], label='real_hist')
for i, f in enumerate(files):
data = np.load(f)
if 'hist' in f:
ax2.plot(data[:,0], data[:,1], label=labels[i])
else:
ax.plot(data[:,0], data[:,1], label=labels[i])
ax.legend(loc='best')
plt.show()
def set_xticks(ax, xMaj, xMin, xForm):
ax.xaxis.set_major_locator(MultipleLocator(xMaj))
ax.xaxis.set_major_formatter(FormatStrFormatter(xForm))
ax.xaxis.set_minor_locator(MultipleLocator(xMin))
def plot_dist_by_cat(df, X='scale', cat='Continent', lim=(-5,1250), bins=120):
uni_cat = np.array(sorted(df.loc[:,cat].unique()))
if cat=='n_notes':
uni_cat = np.array([4,5,6,7,8,9])
n_cat = uni_cat.size
if n_cat <=6:
fig, ax = plt.subplots(3,2, sharex=True)
elif n_cat <=12:
fig, ax = plt.subplots(4,3, sharex=True)
else:
print(n_cat, ' too large')
fig2, ax2 = plt.subplots()
ax = ax.reshape(ax.size)
for i, uni in enumerate(uni_cat):
idx = df.loc[df.loc[:,cat]==uni,:].index
if not isinstance(df.loc[idx[0],X], str):#
Xarr = df.loc[idx,X]
Xarr2 = [a for a in df.loc[idx,X] if 0<a<1200]
else:
Xarr = [b for a in df.loc[idx,X].apply(lambda x: [float(y) for y in x.split(';')]) for b in a]
Xarr2 = [b for a in df.loc[idx,X].apply(lambda x: [float(y) for y in x.split(';')]) for b in a if 0<b<1200]
sns.distplot(Xarr, bins=bins, label=str(uni), ax=ax[i])
sns.kdeplot(Xarr2, label=str(uni), ax=ax2)
ax[i].legend(loc='best')
ax2.legend(loc='best')
ticks = np.arange(0, (int(lim[1]/100)+1)*100, 100)
ax[-1].set_xlim(lim)
# ax[-1].xaxis.set_ticks(np.arange(0,1300,100))
ax2.set_xlim(lim)
# ax2.xaxis.set_ticks(np.arange(0,1300,100))
plt.show()
def plot_score_histograms(df):
fig, ax = plt.subplots()
uni_cat = np.array([4,5,6,7,8,9])
for n in uni_cat:
sns.distplot(df.loc[df.n_notes==n, 'harm_sim'], label=str(n), kde=True, bins=40, ax=ax)
ax.legend(loc='best')
plt.show()
# This was used for creating a figure for my CSLM seminar
def plot_similar_cultures(df, X='scale', cat='Continent', lim=(-5,1250)):
groups = [ ['Western', 'East Asia', 'South Asia'],
['Western', 'Oceania'],
['Western', 'South America'],
['South East Asia', 'Africa'],
['Western', 'Middle East']]
fig, ax = plt.subplots(3,2, sharex=True)
plt.subplots_adjust(wspace=0.3, hspace=0.2)
ax = ax.reshape(ax.size)
extra_ax = []
for i, group in enumerate(groups):
# idx = df.loc[df.loc[:,cat].apply(lambda x: x in uni),:].index
for j, uni in enumerate(group):
idx = df.loc[df.loc[:,cat]==uni,:].index
Xarr = [b for a in df.loc[idx,X].apply(lambda x: [float(y) for y in x.split(';')]) for b in a]
sns.distplot(Xarr, bins=120, ax=ax[i], label=str(uni), kde=False, norm_hist=True)
ax[i].legend(loc='best', frameon=False)
ax[0].set_ylabel('Probability')
ax[2].set_ylabel('Probability')
ax[4].set_ylabel('Probability')
ax[4].set_xlabel('Notes in scale (cents)')
ax[5].set_xlabel('Notes in scale (cents)')
# ticks = np.arange(0, (int(lim[1]/100)+1)*100, 400)
# ax[-1].xaxis.set_ticks(ticks)
ax[-1].set_xlim(lim)
set_xticks(ax[-1], 200, 100, '%d')
# plt.savefig('Figs/culture_scale_comparison.png')
plt.show()
# This was used for creating a figure for my paper
def plot_similar_cultures_2(df, X='scale', cat='Continent', lim=(-5,1250)):
groups = [ [], ['Western', 'East Asia', 'South Asia', 'Middle East'],
['Oceania', 'South America', 'South East Asia', 'Africa']]
fig, ax = plt.subplots(3,1, sharex=True)
fig2, ax2 = plt.subplots(8,1, sharex=True)
plt.subplots_adjust(wspace=0.3, hspace=0.2)
ax = ax.reshape(ax.size)
ax2 = ax2.reshape(ax2.size)
extra_ax = []
lbls = ['All', 'Theory', 'Instrument']
cols = sns.color_palette('colorblind')
for i, group in enumerate(groups):
# idx = df.loc[df.loc[:,cat].apply(lambda x: x in uni),:].index
if i:
for j, uni in enumerate(group):
idx = df.loc[df.loc[:,cat]==uni,:].index
Xarr = [b for a in df.loc[idx,X].apply(lambda x: [float(y) for y in x.split(';')]) for b in a]
sns.distplot(Xarr, bins=120, ax=ax2[j+(i-1)*4], label=f"{str(uni):15s} N={len(idx)}", kde=False, norm_hist=True, color=cols[j+(i-1)*4])
sns.kdeplot(Xarr, ax=ax[i], label=f"{str(uni):15s} N={len(idx)}", clip=(5, 1150), color=cols[j+(i-1)*4])
ax2[j+(i-1)*4].legend(loc='upper right', frameon=False)
else:
for j, g in enumerate(groups[:]):
if j:
idx = df.loc[df.loc[:,cat].apply(lambda x: x in g),:].index
else:
idx = df.index
Xarr = [b for a in df.loc[idx,X].apply(lambda x: [float(y) for y in x.split(';')]) for b in a]
# sns.distplot(Xarr, bins=120, ax=ax[i], label=lbls[j], kde=False, norm_hist=True)
sns.kdeplot(Xarr, ax=ax[i], label=f"{lbls[j]:15s} N={len(idx)}", clip=(5, 1150))
ax[i].legend(loc='best', frameon=False)
ax[0].set_ylabel('Probability')
ax[1].set_ylabel('Probability')
ax[2].set_ylabel('Probability')
ax[2].set_xlabel('Intervals size (cents)')
# ax[5].set_xlabel('Notes in scale (cents)')
# ticks = np.arange(0, (int(lim[1]/100)+1)*100, 400)
# ax[-1].xaxis.set_ticks(ticks)
ax[-1].set_xlim(lim)
set_xticks(ax[-1], 200, 100, '%d')
# plt.savefig('Figs/culture_scale_comparison.png')
fig.savefig(os.path.join(FIGS_DIR, 'database_intervals_kde.png'))
fig.savefig(os.path.join(FIGS_DIR, 'database_intervals_kde.pdf'))
fig2.savefig(os.path.join(FIGS_DIR, 'database_intervals_hist.png'))
fig2.savefig(os.path.join(FIGS_DIR, 'database_intervals_hist.pdf'))
plt.show()
# This was used for creating a figure for my CSLM seminar
def plot_comparison_ints_by_n(df, X='pair_ints', cat='n_notes', lim=(-5, 605)):
uni_cat = np.array([4,5,6,7,8,9])
fig2, ax2 = plt.subplots(3,2, sharex=True)
plt.subplots_adjust(wspace=0.4, hspace=0.3)
ax2 = ax2.reshape(ax2.size)[[0,2,4,1,3,5]]
ticks = np.arange(0, (int(lim[1]/100)+1)*100, 100)
ax2[-1].xaxis.set_ticks(ticks)
ax2[-1].set_xlim(lim)
ax2[0].set_ylabel('Probability')
ax2[1].set_ylabel('Probability')
ax2[2].set_ylabel('Probability')
ax2[2].set_xlabel('Interval size (cents)')
ax2[5].set_xlabel('Interval size (cents)')
for i, uni in enumerate(uni_cat):
idx = df.loc[df.loc[:,cat]==uni,:].index
Xarr = [b for a in df.loc[idx,X].apply(lambda x: [float(y) for y in x.split(';')]) for b in a]
sns.distplot(Xarr, bins=120, label="my dataset", ax=ax2[i])
ax2[i].set_title("N = {0}".format(uni))
ax2[5].legend(loc='best', frameon=False)
plt.savefig('Figs/data_set_intervals.png')
plt.show()
fig, ax = plt.subplots(3,2, sharex=True)
ax = ax.reshape(ax.size)[[0,2,4,1,3,5]]
plt.subplots_adjust(wspace=0.4, hspace=0.3)
ax[-1].set_xlim(lim)
ax[-1].xaxis.set_ticks(ticks)
ax[0].set_ylabel('Probability')
ax[1].set_ylabel('Probability')
ax[2].set_ylabel('Probability')
ax[2].set_xlabel('Interval size (cents)')
ax[5].set_xlabel('Interval size (cents)')
for i, uni in enumerate(uni_cat):
idx = df.loc[df.loc[:,cat]==uni,:].index
Xarr = [b for a in df.loc[idx,X].apply(lambda x: [float(y) for y in x.split(';')]) for b in a]
# sns.kdeplot(Xarr, label="my dataset", ax=ax[i])
sns.kdeplot(Xarr, ax=ax[i])
ax[i].set_title("N = {0}".format(uni))
# ax[5].legend(loc='upper right', frameon=False)
for i, n in enumerate(uni_cat):
data = np.load("Data_for_figs/unrestricted_ints_hist_n{0}.npy".format(n))
ax[i].plot(data[:,0], data[:,1], '-', label="any intervals")
ax[5].legend(loc='best', frameon=False)
plt.savefig('Figs/data_model_comparison_1.png')
for i, n in enumerate(uni_cat):
data = np.load("Data_for_figs/restricted_ints_hist_n{0}.npy".format(n))
ax[i].plot(data[:,0], data[:,1], '-', label="constrained")
ax[5].legend(loc='best', frameon=False)
plt.savefig('Figs/data_model_comparison_2.png')
plt.show()
# This was used for creating a figure for my CSLM seminar
def plot_comparison_ints_by_n_bias(df, X='pair_ints', cat='n_notes', lim=(-5, 605)):
uni_cat = np.array([4,5,6,7,8,9])
fig, ax = plt.subplots(3,2, sharex=True, sharey=True)
ax = ax.reshape(ax.size)[[0,2,4,1,3,5]]
for i, uni in enumerate(uni_cat):
idx = df.loc[df.loc[:,cat]==uni,:].index
Xarr = [b for a in df.loc[idx,X].apply(lambda x: [float(y) for y in x.split(';')]) for b in a]
sns.distplot(Xarr, bins=120, label="my dataset", ax=ax[i])
ax[i].set_title("N = {0}".format(uni))
ax[5].legend(loc='best', frameon=False)
for i, n in enumerate(uni_cat):
data = np.load("Data_for_figs/biased_ints_hist_n{0}.npy".format(n))
ax[i].plot(data[:,0], data[:,1], '-', label="bias model")
ax[5].legend(loc='best', frameon=False)
ticks = np.arange(0, (int(lim[1]/100)+1)*100, 100)
ax[0].set_ylabel('Probability')
ax[1].set_ylabel('Probability')
ax[2].set_ylabel('Probability')
ax[2].set_xlabel('Interval size (cents)')
ax[5].set_xlabel('Interval size (cents)')
ax[-1].set_xlim(lim)
ax[-1].set_ylim(0,0.015)
# ax[-1].xaxis.set_ticks(np.arange(0,1300,100))
plt.savefig('Figs/data_model_comparison_3.png')
plt.show()
def plot_comparison_scale_by_n(df, X='scale', cat='n_notes', lim=(-5,1250)):
uni_cat = np.array([4,5,6,7,8,9])
fig2, ax2 = plt.subplots(3,2, sharex=True)
plt.subplots_adjust(wspace=0.4, hspace=0.3)
ax2 = ax2.reshape(ax2.size)[[0,2,4,1,3,5]]
ticks = np.arange(0, (int(lim[1]/100)+1)*100, 100)
ax2[-1].xaxis.set_ticks(ticks)
ax2[-1].set_xlim(lim)
ax2[0].set_ylabel('Probability')
ax2[1].set_ylabel('Probability')
ax2[2].set_ylabel('Probability')
ax2[2].set_xlabel('Notes in scale (cents)')
ax2[5].set_xlabel('Notes in scale (cents)')
for i, uni in enumerate(uni_cat):
idx = df.loc[df.loc[:,cat]==uni,:].index
Xarr = [b for a in df.loc[idx,X].apply(lambda x: [float(y) for y in x.split(';')]) for b in a]
sns.distplot(Xarr, bins=120, label="my dataset", ax=ax2[i])
ax2[i].set_title("N = {0}".format(uni))
ax2[5].legend(loc='best', frameon=False)
plt.savefig('Figs/data_set_intervals.png')
plt.show()
fig, ax = plt.subplots(3,2, sharex=True, sharey=True)
ax = ax.reshape(ax.size)[[0,2,4,1,3,5]]
plt.subplots_adjust(wspace=0.4, hspace=0.3)
ax[-1].set_xlim(lim)
ax[-1].xaxis.set_ticks(ticks)
ax[-1].set_ylim(0,0.005)
ax[0].set_ylabel('Probability')
ax[1].set_ylabel('Probability')
ax[2].set_ylabel('Probability')
ax[2].set_xlabel('Notes in scale (cents)')
ax[5].set_xlabel('Notes in scale (cents)')
for i, uni in enumerate(uni_cat):
idx = df.loc[df.loc[:,cat]==uni,:].index
Xarr = [b for a in df.loc[idx,X].apply(lambda x: [float(y) for y in x.split(';')]) for b in a]
# sns.kdeplot(Xarr, label="my dataset", ax=ax[i])
sns.distplot(Xarr, ax=ax[i], bins=120)
ax[i].set_title("N = {0}".format(uni))
# ax[5].legend(loc='upper right', frameon=False)
for i, n in enumerate(uni_cat):
data = np.load("Data_for_figs/unrestricted_scale_hist_n{0}.npy".format(n))
ax[i].plot(data[:,0], data[:,1], '-', label="any intervals")
ax[5].legend(loc='best', frameon=False)
plt.savefig('Figs/data_model_scale_comparison_1.png')
for i, n in enumerate(uni_cat):
data = np.load("Data_for_figs/restricted_scale_hist_n{0}.npy".format(n))
ax[i].plot(data[:,0], data[:,1], '-', label="constrained")
ax[5].legend(loc='best', frameon=False)
plt.savefig('Figs/data_model_scale_comparison_2.png')
plt.show()
def plot_comparison_scale_by_n_bias(df, X='scale', cat='n_notes', lim=(-5,1250)):
uni_cat = np.array([4,5,6,7,8,9])
fig, ax = plt.subplots(3,2, sharex=True, sharey=True)
ax = ax.reshape(ax.size)[[0,2,4,1,3,5]]
for i, uni in enumerate(uni_cat):
idx = df.loc[df.loc[:,cat]==uni,:].index
Xarr = [b for a in df.loc[idx,X].apply(lambda x: [float(y) for y in x.split(';')]) for b in a]
sns.distplot(Xarr, bins=120, label="my dataset", ax=ax[i])
ax[i].set_title("N = {0}".format(uni))
ax[5].legend(loc='best', frameon=False)
for i, n in enumerate(uni_cat):
data = np.load("Data_for_figs/biased_scale_hist_n{0}.npy".format(n))
ax[i].plot(data[:,0], data[:,1], '-', label="bias model")
ax[5].legend(loc='best', frameon=False)
ticks = np.arange(0, (int(lim[1]/100)+1)*100, 100)
ax[0].set_ylabel('Probability')
ax[1].set_ylabel('Probability')
ax[2].set_ylabel('Probability')
ax[2].set_xlabel('Notes in scale (cents)')
ax[5].set_xlabel('Notes in scale (cents)')
ax[-1].set_xlim(lim)
ax[-1].set_ylim(0,0.005)
# ax[-1].xaxis.set_ticks(np.arange(0,1300,100))
plt.savefig('Figs/data_model_scale_comparison_3.png')
plt.show()
def subplot_x_y(n):
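    # Pick a (rows, cols) subplot grid just large enough for n panels: perfect
    # squares give a square grid, otherwise rows/cols grow alternately until
    # rows*cols >= n, e.g. subplot_x_y(6) -> (3, 2), subplot_x_y(7) -> (4, 2).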
if n == 1:
return [1]*2
elif n**0.5 == int(n**0.5):
return [int(n**0.5)]*2
else:
x = int(n**0.5)
y = x + 1
switch = 0
while n > x*y:
if switch:
x += 1
switch = 0
else:
y += 1
switch = 1
return y, x
def plot_best_pair_dist(df_best, df_m, df_real, X='pair_ints', n=7):
sub_y, sub_x = subplot_x_y(len(df_best))
fig, ax = plt.subplots(sub_y, sub_x, sharex=True, sharey=True)
try:
ax = ax.reshape(ax.size)
except:
ax = [ax]
for i in range(len(ax)):
sns.distplot(utils.extract_floats_from_string(df_real.loc[df_real.n_notes==n, X]), bins=100, ax=ax[i])
df = pd.read_feather(df_m.loc[df_best.loc[i, f"idx_{n}"], 'fName'] )
sns.distplot(utils.extract_floats_from_string(df[X]), bins=100, ax=ax[i])
# ax[i].set_title(df_best.loc[i, 'bias'])
def simple_fit(X, Y, fit_fn='None'):
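    # Fit a straight line (default) or the supplied fit_fn to (X, Y) with
    # scipy.optimize.curve_fit, seeding the parameters from the extreme points,
    # and return a smooth (xnew, ynew) curve plus the fitted parameters.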
min_idx = np.argmin(X)
max_idx = np.argmax(X)
dX = X[max_idx] - X[min_idx]
dY = Y[max_idx] - Y[min_idx]
if fit_fn == 'None':
fit_fn = lambda x, m, a: m*x + a
popt, pcov = curve_fit(fit_fn, X, Y, p0=[dY/dX, Y[max_idx]])
else:
popt, pcov = curve_fit(fit_fn, X, Y, p0=[dY**2/dX**2, dY/dX, Y[max_idx]])
xnew = np.linspace(X[min_idx], X[max_idx], 10)
ynew = fit_fn(xnew, *popt)
return xnew, ynew, popt
def plot_JSD_vs_scales(df, X='JSD', Y='fr_20', bias_group='HS', n=5, fit=False):
df = rename_bias_groups(df)
df = rename_biases(df)
biases = [b for b in BIASES if BIAS_KEY[b]==bias_group]
sub_y, sub_x = subplot_x_y(len(biases))
sub_y, sub_x = 6,4
fig, ax = plt.subplots(sub_y, sub_x, sharex=True, sharey=True)
try:
ax = ax.reshape(ax.size)
except:
ax = [ax]
for i, bias in enumerate(biases):
if not len(bias):
continue
if n:
sns.scatterplot(x=X, y=Y, data=df.loc[(df.n_notes==n)&(df.bias_group==bias_group)], ax=ax[i], alpha=0.5)
sns.scatterplot(x=X, y=Y, data=df.loc[(df.n_notes==n)&(df.bias==bias)], ax=ax[i])
if fit:
x_fit, y_fit, popt = simple_fit(df.loc[(df.n_notes==n)&(df.bias==bias), X], df.loc[(df.n_notes==n)&(df.bias==bias), Y])
ax[i].plot(x_fit, y_fit)
ax[i].text(0.2, .20, f"m={popt[0]:7.5f}", transform=ax[i].transAxes)
else:
sns.scatterplot(x=X, y=Y, data=df, ax=ax[i], alpha=0.5)
sns.scatterplot(x=X, y=Y, data=df.loc[(df.bias==bias)], ax=ax[i])
print(bias)
ax[i].set_title(''.join(bias.split('_')))
def plot_JSD_vs_scales_bias_group(df, X='JSD', Y='fr_20', save=False, n=5):
df = rename_bias_groups(df)
df = rename_biases(df)
fig, ax = plt.subplots(4,3, sharex=True, sharey=True, figsize=(10,24))
plt.subplots_adjust(hspace=0.30) #wspace=0.3, hspace=0.2)
ax = ax.reshape(ax.size)
if 'cat' in df.columns:
df = df.loc[df.cat=='pair_ints']
for i, bias in enumerate(BIAS_GROUPS):
if n:
sns.scatterplot(x=X, y=Y, data=df.loc[df.n_notes==n], ax=ax[i], alpha=0.5)
sns.scatterplot(x=X, y=Y, data=df.loc[(df.n_notes==n)&(df.bias_group==bias)], ax=ax[i])
else:
sns.scatterplot(x=X, y=Y, data=df, ax=ax[i], alpha=0.5)
sns.scatterplot(x=X, y=Y, data=df.loc[(df.bias_group==bias)], ax=ax[i])
ax[i].set_title(bias)
# if i%2 == 0:
# ax[i].set_ylabel(r'$f_{real}$')
# plt.tight_layout()
# ax[0].set_xlim(df[X].min()*0.8, df[X].max()*1.2)
# ax[0].set_ylim(df[Y].min()*0.8, df[Y].max()*1.2)
if save:
plt.savefig(FIGS_DIR + 'model_comparison.pdf', bbox_inches='tight', pad_inches=0)
plt.savefig(FIGS_DIR + 'model_comparison.png')
def plot_scale_histograms(df, df_real, i, nbin=100, X='scale', neg=1.0):
fig, ax = plt.subplots()
n = df.loc[i, 'n_notes']
bins =
|
np.linspace(0, 1200, num=nbin+1)
|
numpy.linspace
|
"""This module contains tests of the simulations module."""
import os
import math
import numba
import pickle
import numpy as np
from numba import cuda
import numpy.testing as npt
from scipy.stats import normaltest, kstest
from numba.cuda.random import (
create_xoroshiro128p_states,
xoroshiro128p_normal_float64,
)
from .. import gradients, simulations, substrates, utils
def load_example_gradient():
T = 80e-3
gradient = np.zeros((1, 100, 3))
gradient[0, 1:11, 0] = 1
gradient[0, -11:-1, 0] = -1
dt = T / (gradient.shape[1] - 1)
return gradient, dt
def test__cuda_dot_product():
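    # Compare the device-side dot product against numpy's np.dot for 100
    # random vector pairs, one pair per kernel launch.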
@cuda.jit()
def test_kernel(a, b, dp):
thread_id = cuda.grid(1)
if thread_id >= a.shape[0]:
return
dp[thread_id] = simulations._cuda_dot_product(a[thread_id, :], b[thread_id, :])
return
np.random.seed(123)
for _ in range(100):
a = (np.random.random(3) - 0.5)[np.newaxis, :]
b = (np.random.random(3) - 0.5)[np.newaxis, :]
dp = np.zeros(1)
stream = cuda.stream()
test_kernel[1, 128, stream](a, b, dp)
stream.synchronize()
npt.assert_almost_equal(dp[0],
|
np.dot(a[0], b[0])
|
numpy.dot
|
from dataset import LandmarksDataset, SiameseDataset
from loss import ContrastiveLoss
import torch
import torchvision
from torch import optim, nn
from torchvision import datasets, models, transforms
from torch.autograd import Variable
from torch.utils.data import DataLoader, Subset
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.neighbors import KNeighborsClassifier, NeighborhoodComponentsAnalysis
from sklearn.decomposition import PCA
def load_net(num_classes=2):
model_conv = torchvision.models.resnet18(pretrained=True)
# Disable params in original model
for param in model_conv.parameters():
param.requires_grad = False
# Parameters of newly constructed modules have requires_grad=True by default
    num_ftrs = model_conv.fc.in_features  # depends on the architecture (512 for resnet18)
model_conv.fc = nn.Sequential(nn.Linear(num_ftrs, int(num_ftrs / 2)), \
nn.Sigmoid(), \
nn.Linear(int(num_ftrs / 2), num_classes))
# Set model_conv to net
net = model_conv
return net, model_conv.fc.parameters()
class Trainer:
"""CNN Trainer with train and validation split support.
Examples:
Trainer can train and test, given both datasets.
trainer = Trainer(train_dataset, test_dataset, model)
trainer.train(num_epochs=10)
trainer.test()
Attributes:
"""
def __init__(self, dataset, model, model_parameters, batch_size=16, doValidation=True,
                 lr=0.001, lrs_step_size=10, lrs_gamma=0.1, shuffle=True, validation_split=0.2):
# Load data
self.dataset = dataset
if doValidation:
self.train_dataset, self.val_dataset = self.split_dataset(dataset, validation_split=validation_split)
self.siamese_train_dataset, self.siamese_val_dataset = SiameseDataset(self.train_dataset), SiameseDataset(self.val_dataset)
self.train_loader = self.load_data(self.siamese_train_dataset, batch_size=batch_size, shuffle=shuffle)
self.val_loader = self.load_data(self.siamese_val_dataset, batch_size=batch_size, shuffle=shuffle)
else:
self.train_dataset = self.dataset
self.siamese_train_dataset = SiameseDataset(self.train_dataset)
self.train_loader = self.load_data(self.siamese_train_dataset, batch_size=batch_size, shuffle=shuffle)
# Load model
self.model, self.model_parameters = model, model_parameters
self.margin = 0.5
self.criterion = ContrastiveLoss(margin=self.margin) # For Siamese Learning
self.optimizer = optim.Adam(self.model_parameters, lr=lr)
self.lr_scheduler = torch.optim.lr_scheduler.StepLR(self.optimizer, step_size=lrs_step_size, gamma=lrs_gamma)
# Hyperparameters
self.batch_size = batch_size
# Set flags
self.doValidation = doValidation
def split_dataset(self, dataset, shuffle_val_split=True, validation_split=0.2, random_seed=42):
"""Splits dataset into train and validation"""
# Creating data indices for training and validation splits:
dataset_size = len(dataset)
indices = list(range(dataset_size))
split = int(np.floor(validation_split * dataset_size))
if shuffle_val_split:
|
np.random.seed(random_seed)
|
numpy.random.seed
|
import scipy.io
from tqdm import tqdm
import pickle
import numpy as np
import sys
import math
from scipy.linalg import lstsq
from sklearn.model_selection import KFold, permutation_test_score
import argparse
import os
import helper
from scipy.stats import spearmanr
from sklearn.linear_model import Ridge, RidgeCV
import scipy.stats as stats
# import statsmodels.api as sm
def pad_along_axis(array, target_length, axis=0):
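    # Zero-pad `array` along `axis` up to `target_length`; arrays that are
    # already long enough are returned unchanged.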
pad_size = target_length - array.shape[axis]
axis_nb = len(array.shape)
if pad_size < 0:
return array
npad = [(0, 0) for x in range(axis_nb)]
npad[axis] = (0, pad_size)
b = np.pad(array, pad_width=npad, mode='constant', constant_values=0)
return b
def get_voxel_number(args, CHUNK_SIZE, i):
return args.batch_num * CHUNK_SIZE + i
def get_dimensions(data):
return int(data[0])+1, int(data[1]), int(data[2])
def all_activations_for_all_sentences(modified_activations, volmask, embed_matrix, args, radius=5, kfold_split=5, alpha=1):
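    # Searchlight decoding: for every nonzero voxel in this batch's chunk,
    # build a spherical mask of the given radius, gather the masked activations
    # for each sentence, and either run RSA against the embedding distance
    # matrix or fit the linear decoding model.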
global temp_file_name
ACTIVATION_SHAPE = (240, 515)
print("getting activations for all sentences...")
res_per_spotlight = []
predictions = []
rankings = []
llhs = []
# pvalues = []
alphas = []
a,b,c = volmask.shape
nonzero_pts = np.transpose(np.nonzero(volmask))
true_spotlights = []
CHUNK = helper.chunkify(nonzero_pts, args.batch_num, args.total_batches)
CHUNK_SIZE = len(CHUNK)
# iterate over spotlight
print("for each spotlight...")
index=0
nn_matrix = calculate_dist_matrix(embed_matrix) if args.rsa else None
for pt in tqdm(CHUNK):
# SPHERE MASK BELOW
sphere_mask = np.zeros((a,b,c))
x1,y1,z1 = pt
# points_glm.append(pt)
for i in range(-radius, radius+1):
for j in range(-radius, radius+1):
for k in range(-radius, radius+1):
xp = x1 + i
yp = y1 + j
zp = z1 + k
pt2 = [xp,yp,zp]
if 0 <= xp and 0 <= yp and 0 <= zp and xp < a and yp < b and zp < c:
dist = math.sqrt(i ** 2 + j ** 2 + k ** 2)
if pt2 in nonzero_pts and dist <= radius:
sphere_mask[x1+i][y1+j][z1+k] = 1
# SPHERE MASK ABOVE
spotlights = []
spotlight_mask = []
# iterate over each sentence
for sentence_act in modified_activations:
spot = sentence_act[sphere_mask.astype(bool)]
remove_nan = np.nan_to_num(spot).astype(np.float32)
spotlights.append(remove_nan)
# spotlight_mask.append(sphere_mask.astype(bool))
print(np.array(spotlights).shape)
true_spotlights.append(spotlights)
# boolean_masks.append(spotlight_mask)
## DECODING BELOW
if args.rsa:
res = rsa(nn_matrix, np.array(spotlights))
else:
res, pred, llh, rank, alpha = linear_model(embed_matrix, spotlights, args, kfold_split, alpha)
predictions.append(pred)
llhs.append(llh)
rankings.append(rank)
# pvalues.append(pval)
alphas.append(alpha)
# print("RES for SPOTLIGHT #", index, ": ", res)
# print("RANK : " + str(rank))
res_per_spotlight.append(res)
index+=1
## DECODING ABOVE
return res_per_spotlight, llhs, rankings, alphas
def standardize(X):
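    # Column-wise z-score; nan_to_num guards against zero-variance columns
    # turning into NaNs after the division.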
return np.nan_to_num((X - np.mean(X, axis=0)) / np.std(X, axis=0))
def calculate_dist_matrix(matrix_embeddings):
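    # Pack the pairwise cosine similarities between embedding rows into a
    # condensed vector of length n*(n-1)/2 (lower triangle, row by row).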
n = matrix_embeddings.shape[0]
mat = np.zeros(shape=(n*(n-1)//2,))
cosine_sim = lambda x, y: np.dot(x, y) / (np.linalg.norm(x, ord=2) * np.linalg.norm(y, ord=2))
it = 0
for i in range(n):
for j in range(i):
mat[it] = cosine_sim(matrix_embeddings[i], matrix_embeddings[j])
it += 1
return mat
def rsa(embed_matrix, spotlights):
spotlight_mat = calculate_dist_matrix(spotlights)
corr, _ = spearmanr(spotlight_mat, embed_matrix)
return corr
def find_log_pdf(arr, sigmas):
val = stats.norm.logpdf(arr, 0, sigmas)
# return np.ma.masked_invalid(val).sum()
return np.nansum(val)
def vectorize_llh(pred, data, sigmas):
residuals =
|
np.subtract(data, pred)
|
numpy.subtract
|
from skimage import io
import numpy as np
import matplotlib.pyplot as plt
import random
import math
# Python3 code for generating points on a 3-D line
# using Bresenham's Algorithm
# source: https://www.geeksforgeeks.org/bresenhams-algorithm-for-3-d-line-drawing/
def bresenham_3d(x1, y1, z1, x2, y2, z2):
ListOfPoints = []
ListOfPoints.append((x1, y1, z1))
dx = abs(x2 - x1)
dy = abs(y2 - y1)
dz = abs(z2 - z1)
if (x2 > x1):
xs = 1
else:
xs = -1
if (y2 > y1):
ys = 1
else:
ys = -1
if (z2 > z1):
zs = 1
else:
zs = -1
    # Driving axis is X-axis
if (dx >= dy and dx >= dz):
p1 = 2 * dy - dx
p2 = 2 * dz - dx
while (x1 != x2):
x1 += xs
if (p1 >= 0):
y1 += ys
p1 -= 2 * dx
if (p2 >= 0):
z1 += zs
p2 -= 2 * dx
p1 += 2 * dy
p2 += 2 * dz
ListOfPoints.append((x1, y1, z1))
    # Driving axis is Y-axis
elif (dy >= dx and dy >= dz):
p1 = 2 * dx - dy
p2 = 2 * dz - dy
while (y1 != y2):
y1 += ys
if (p1 >= 0):
x1 += xs
p1 -= 2 * dy
if (p2 >= 0):
z1 += zs
p2 -= 2 * dy
p1 += 2 * dx
p2 += 2 * dz
ListOfPoints.append((x1, y1, z1))
    # Driving axis is Z-axis
else:
p1 = 2 * dy - dz
p2 = 2 * dx - dz
while (z1 != z2):
z1 += zs
if (p1 >= 0):
y1 += ys
p1 -= 2 * dz
if (p2 >= 0):
x1 += xs
p2 -= 2 * dz
p1 += 2 * dy
p2 += 2 * dx
ListOfPoints.append((x1, y1, z1))
return ListOfPoints
img_names = ["AICS-10_48_4.ome.tif"]
# 5: <NAME>
# 0: observed membrane
# 7: <NAME> (membrane)
img = io.imread("./images/" + img_names[0])
cell = img[:, 7, :, :]
cell_filled_in = img[:, 5, :, :]
import numpy as np
from scipy import signal
# # first build the smoothing kernel
# sigma = 0.01 # width of kernel
# x = np.arange(-3,4,1) # coordinate arrays -- make sure they contain 0!
# y = np.arange(-3,4,1)
# z = np.arange(-3,4,1)
# xx, yy, zz = np.meshgrid(x,y,z)
# kernel = np.exp(-(xx**2 + yy**2 + zz**2)/(2*sigma**2))
#
#
# cell = signal.convolve(cell, kernel, mode="same")
print(cell.shape)
zs, ys, xs = np.nonzero(cell)
all_cell_point_locations = []
for x, y, z in zip(xs, ys, zs):
all_cell_point_locations.append((x, y, z))
all_cell_point_locations_np = np.asarray(all_cell_point_locations)
print(all_cell_point_locations_np.shape)
mean = np.sum(all_cell_point_locations_np, axis=0) / all_cell_point_locations_np.shape[0]
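# The block below appears to set up a Fibonacci-sphere (golden-angle) sampling:
# indices spaced by the golden-ratio turn fraction give roughly uniform
# directions around the cell centroid computed above.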
num_points = 1000
turn_fraction = (1.0 + math.sqrt(5.0)) / 2.0
indices =
|
np.arange(0, num_points, dtype=float)
|
numpy.arange
|
#!/usr/bin/env python
import numpy as np
import sys
import scipy
from functools import partial
import bovy_coords as bc
import myutils
import os
import math
import gzip
import argparse
#############################################################################
#Copyright (c) 2013 - 2014, <NAME>
#All rights reserved.
#
#Redistribution and use in source and binary forms, with or without
#modification, are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# The name of the author may not be used to endorse or promote products
# derived from this software without specific prior written permission.
#
#THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
#"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
#LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
#A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
#HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
#INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
#BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
#OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
#AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
#LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY
#WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
#POSSIBILITY OF SUCH DAMAGE.
#############################################################################
'''
NAME:
mgc3_lib
PURPOSE:
Library containing class definitions necessary for mgc3 pole counting
HISTORY:
2014-04-11 - Included optional flag to select method when extracting stars associated to poles
2014-04-04 - Standardize for distribution
2013-11-21 - Added ngc3 pole counts attribute to pole-grid object (mgc3, without radial velocity term)
2013-10-xx - Fractional area per pole computation added
2013-09-20 - Added gc3 pole counts attribute to pole-grid object
2013-09-xx - Written. <NAME> (CIDA,IA-UNAM Ensenada)
VERSION:
0.4
'''
global d2r
d2r=np.pi/180.
def read_inputcat_for_mgc3(filename,pardic=None):
#Open file
if '.gz' in filename:
inputfile=gzip.open(filename,'r')
filename=filename.replace('.gz','')
else: inputfile=open(filename,'r')
if 'csv' in filename:
obsdata = scipy.genfromtxt(inputfile,delimiter=',')
else:
obsdata = scipy.genfromtxt(inputfile,comments='#')
#Deal with one-line files
if np.ndim(obsdata)==1: obsdata=np.reshape(obsdata,(1,obsdata.size))
#Do cuts
mask = obsdata[:,0]==obsdata[:,0] #Initialize mask to all-True-vector
if pardic:
for NAUX in range(1,pardic['NAUX']+1,1):
mykey_col='AUX%d_col' % (NAUX)
mykey_valo='AUX%d_o' % (NAUX)
mykey_valf='AUX%d_f' % (NAUX)
#Skip if col=998
if pardic[mykey_col]!=998:
print(' Cutting input catalogue with %.1f<=%s[%d]<%.1f' % (pardic[mykey_valo],mykey_col,pardic[mykey_col]+1,pardic[mykey_valf]))
#Create mask
mask_i = (obsdata[:,pardic[mykey_col]]>pardic[mykey_valo]) & (obsdata[:,pardic[mykey_col]]<=pardic[mykey_valf])
#Combine masks
mask = mask & mask_i
#Apply mask
obsdata=obsdata[mask,:]
#Return data
return (obsdata,filename)
def equatorial2galactic_helio(alpha,delta):
#This coordinate transformation assumes ra,dec are in the FK4 (B1950) equinox
d2r=np.pi/180.
lo,bo=33.*d2r,62.6*d2r
alphao=282.25*d2r
#Shortcuts
sinbo,cosbo=np.sin(bo),np.cos(bo)
sindelta,cosdelta=np.sin(delta),np.cos(delta)
sinalpha1,cosalpha1=np.sin(alpha-alphao),np.cos(alpha-alphao)
#Get latitude
sinb=sindelta*cosbo - cosdelta*sinalpha1*sinbo
cosb=np.sqrt(1-sinb**2) #This is ok since cosb>0 for all b in [-pi/2,+pi/2]
b=np.arctan2(sinb,cosb)
#Get longitude
sinl1=(1./cosb)*(sindelta*sinbo+cosdelta*sinalpha1*cosbo)
cosl1=(1./cosb)*(cosdelta*cosalpha1)
#tangent of half-angle
tanlhalf=sinl1/(1.+cosl1)
l=2*np.arctan(tanlhalf)
l=l % (2*np.pi)
l=lo + l
return (l,b)
def print_sample_parfile():
s='''#=============================================================================================
#Parameter file (NOTE: Column numbers are Fortran-style, i.e. 1 is the first column)
#=============================================================================================
#deg = If True, lat,lon in degrees. If False, radians
#lon_col = Longitude column
#lat_col = Latitude column
#coo_glactc = If True, lat,lon AND corresponding proper motions
# assumed to be galactic (l,b), if not, assumed to be equatorial (RA,DEC)
#par_col = Parallax column
#par_muas = If True units for parallax assumed to be muas, if False mas
#---------------------------------
#pm_lon = Column for proper motion in longitude direction
#pm_lon_col = Column for proper motion in longitude direction
#pm_lat_col = Column for proper motion in latitude direction
#vrad_col = Radial Velocity column
#pm_muas = If True units for proper motions assumed to be muas/yr, if False mas/yr
#---------------------------------
#tol_r = r-tolerance for mgc3 pole-counting
#tol_v = v-tolerance for mgc3 pole-counting
#tol_deg = If True, tolerances are assumed to be in degrees. If False, radians
#grid_step = Step (in same units as tol) for pole grid
#grid_lon_o = Initial lon for pole grid
#grid_lon_f = Final lon for pole grid
#grid_lat_o = Initial lat for pole grid
#grid_lat_f = Final lat for pole grid
#---------------------------------
#AUX1_col = Auxiliary column. Catalogue data with AUX1_o<AUX1<AUX1_f will be selected
#AUX1_o = Any number of AUX? cols can be used.
#AUX1_f = For multiple AUX? columns, the criteria is combined with boolean AND
#----------------Coordinate params------------------
deg True
lon_col 1
lat_col 2
coo_glactc True
par_col 3
par_muas True
#----------------Proper motion params---------------
pm_lon_col 4
pm_lon_red True
pm_lat_col 5
pm_muas True
vrad_col 6
#----------------mGC3 params------------------------
tol_r 2.
tol_v 2.
tol_deg True
grid_step 2.0 #All grid parameters should be in degrees
grid_lon_o 0.
grid_lon_f 360.
grid_lat_o 0.
grid_lat_f 90.
#---------------Additional pars---------------------
#AUX1_col 7 #Auxiliary column. Only catalogue data with AUX1_o<AUX1_col<AUX1_f will be used
#AUX1_o 0. #Any number of AUX? cols can be used.
#AUX1_f 20. #For multiple AUX? columns, the criteria is combined with boolean AND
#----------------end------------------------'''
ofile=open('mgc3_sample.par','w')
ofile.write(s)
ofile.close()
class print_parfile_action(argparse.Action):
def __call__(self, parser, namespace, values, option_string=None):
print_sample_parfile()
sys.exit('Sample parfile mgc3_sample.par created')
#Parse parameter file
def parse_pars(parfile):
pf=scipy.genfromtxt(parfile,comments='#',dtype=str)
naux,dic=0,{}
for line in pf:
if 'col' in line[0]:
dic[line[0]]=int(line[1])-1 #So 1st column corresponds to 0 (input is human-readable or F-style)
if 'AUX' in line[0]: naux=naux+1 #Count how many auxiliary criteria are defined
elif 'tol' in line[0] and 'deg' not in line[0]: dic[line[0]]=float(line[1])
elif 'grid_' in line[0]: dic[line[0]]=float(line[1])
elif 'AUX' in line[0]: dic[line[0]]=float(line[1])
else: dic[line[0]]= line[1].lower() == 'true' #Convert to boolean
    #Save number of auxiliary cols in dictionary
dic['NAUX']=naux
return dic
class my_constants(object):
#kpc km/s km/s km/s muas.kpc yr km/s
def __init__(self,rsun=8.5,Ugc_hel=10.3,Vgc_hel=232.6,Wgc_hel=5.9,Ap=1000.,Av=4.74047): #Schoenrich & Binney 2010
self.rsun=rsun
self.Ugc_hel=Ugc_hel
self.Vgc_hel=Vgc_hel
self.Wgc_hel=Wgc_hel
self.vsun=np.sqrt(self.Ugc_hel**2 + self.Vgc_hel**2 + self.Wgc_hel**2)
self.d2r=np.pi/180.
self.Ap=Ap
self.Av=Av
#---------------------------------------------------------------------------------------
# ini_polegrid: Initialize pole grid
#---------------------------------------------------------------------------------------
class pole_grid(my_constants):
'''This class creates a pole-grid object with pole-counts initialized to zero
Parameters
----------
poles : float or [array, array], optional
Pole grid parameters:
* If float, it is the grid spacing in degrees
* If [array,array], pole grid longitudes and latitudes in degrees (longs,lats=poles)
cst : my_constants object instance - optional. my_constants attributes are inherited by this class.
        This object contains the values for relevant constants for
        mGC3 calculations, i.e. rsun, (U,V,W)_GC, Ap, Av. If not explicitly provided by the user
        it will take an instance of the my_constants class using the default values
Returns
-------
P : object of pole-grid class
'''
def __init__(self,poles=2.,cst=None,pole_grid_dic=None,verbose=True):
#Inherit my_constants object attributes
if cst:
my_constants.__init__(self,rsun=cst.rsun,Ugc_hel=cst.Ugc_hel,Vgc_hel=cst.Vgc_hel,Wgc_hel=cst.Wgc_hel,Ap=cst.Ap,Av=cst.Av)
else:
my_constants.__init__(self)
#Initialize empty arrays
self.l,self.b=np.array([]),np.array([])
self.mgc3hel,self.np_mgc3, self.np_gc3=
|
np.array([])
|
numpy.array
|
import numpy as np
from sl1m.constants_and_tools import *
from numpy import array, asmatrix, matrix, zeros, ones
from numpy import array, dot, stack, vstack, hstack, asmatrix, identity, cross, concatenate
from numpy.linalg import norm
#~ from scipy.spatial import ConvexHull
#~ from hpp_bezier_com_traj import *
#~ from qp import solve_lp
from sl1m.planner import *
from sl1m.plot_plytopes import *
from sl1m.planner_l1 import *
floor2 = [[-3., 0.4 , 0. ], [-2.7 , 0.4, 0. ], [-2.7 , 0.1, 0. ], [-03., 0.1, 0. ], ]
floor = [[-0.23, 0.54 , 0. ], [-0.1 , 0.54, 0. ], [-0.1 , -0.46, 0. ], [-0.23, -0.46, 0. ], ]
step1 = [[ 0.05, 0.54 , 0.1 ], [0.25 , 0.54, 0.1], [0.25 , -0.46, 0.1 ], [ 0.05, -0.46, 0.1 ], ]
step2 = [[ 0.35, 0.54 , 0.2 ], [0.55 , 0.54, 0.2], [0.55 , -0.46, 0.2 ], [ 0.35, -0.46, 0.2 ], ]
step3 = [[ 0.65, 0.54 , 0.3 ], [0.85 , 0.54, 0.3], [0.85 , -0.46, 0.3 ], [ 0.65, -0.46, 0.3 ], ]
step4 = [[ 0.95, 0.54 , 0.4 ], [1.15 , 0.54, 0.4], [1.15 , -0.46, 0.4 ], [ 0.95, -0.46, 0.4 ], ]
step5 = [[ 1.25, 0.54 , 0.5 ], [1.45 , 0.54, 0.5], [1.45 , -0.46, 0.5 ], [ 1.25, -0.46, 0.5 ], ]
step6 = [[ 1.55, 0.54 , 0.6 ], [1.75 , 0.54, 0.6], [1.75 , -0.46, 0.6 ], [ 1.55, -0.46, 0.6 ], ]
#~ step7 = [[ 1.51, 0.94 , 0.6 ], [2.51 , 0.94, 0.6], [2.51 , -1.06, 0.6 ], [ 1.51, -1.06, 0.6 ], ]
step7 = [[ 1.51,-0.46 , 0.6 ], [1.81 , -0.46, 0.6], [1.81 , -0.76, 0.6 ], [ 1.51, -0.76, 0.6 ], ]
bridge = [[ 1.51, -0.46 , 0.6 ], [1.51 , -0.76, 0.6], [-1.49, -0.76, 0.6 ], [-1.49, -0.46, 0.6 ], ]
#~ platfo = [[-1.49, -0.06 , 0.6 ], [-1.49, -1.06, 0.6], [-2.49, -1.06, 0.6 ], [-2.49, -0.06, 0.6 ], ]
platfo = [[-1.49, -0.35, 0.6 ], [-1.49, -1.06, 0.6], [-2.49, -1.06, 0.6 ], [-2.49, -0.35, 0.6 ], ]
#~ step8 = [[-1.49, -0.06 , 0.45], [-1.49, 0.24, 0.45],[-2.49, 0.24, 0.45], [-2.49, -0.06, 0.45], ]
#~ step9 = [[-1.49, 0.24 , 0.30], [-1.49, 0.54, 0.30],[-2.49, 0.54, 0.30], [-2.49, 0.24, 0.30], ]
#~ step10= [[-1.49, 0.54 , 0.15], [-1.49, 0.84, 0.15],[-2.49, 0.84, 0.15], [-2.49, 0.54, 0.15], ]
slope = [[-1.49, -0.06 , 0.6 ], [-1.49, 1.5, 0.], [-2.49, 1.5, 0. ], [-2.49, -0.06, 0.6 ], ]
rub2 = [[ -2.11, 0.19 , 0.05 ], [-2.45 , 0.19, 0.05 ], [ -2.45, 0.45, 0.05 ], [-2.11, 0.45, 0.05 ], ]
rub3 = [[ -1.91, -0.05 , 0.1 ], [-2.25 , -0.05, 0.1 ], [ -2.25, 0.18, 0.1 ], [-1.91, 0.18, 0.1 ], ]
rub4 = [[ -1.69, 0.19 , 0.15 ], [-2.03 , 0.19, 0.15 ], [ -2.03, 0.45, 0.15 ], [-1.69, 0.45, 0.15 ], ]
rub5 = [[ -1.49, -0.05 , 0.2 ], [-1.83 , -0.05, 0.2 ], [ -1.83, 0.18, 0.2 ], [-1.49, 0.18, 0.2 ], ]
rub6 = [[ -1.29, 0.19 , 0.2 ], [-1.63 , 0.19, 0.2 ], [ -1.63, 0.45, 0.2 ], [-1.29, 0.45, 0.2 ], ]
rub7 = [[ -1.09, -0.05 , 0.15 ], [-1.43 , -0.05, 0.15], [ -1.43, 0.18, 0.15], [-1.09, 0.18, 0.15 ], ]
rub75 = [[ -0.89, 0.19 , 0.1 ], [-1.23 , 0.19, 0.1], [ -1.23, 0.45, 0.1], [-0.89, 0.45, 0.1 ], ]
rub8 = [[ -0.89, -0.05 , 0.025 ], [-1.02 , -0.05, 0.025], [ -1.02, 0.18, 0.025], [-0.89, 0.18, 0.025 ], ]
rub9 = [[ -0.35, -0.05 , 0.025 ], [-0.86 , -0.05, 0.025], [-0.86, 0.45, 0.025 ], [ -0.35, 0.45, 0.025], ]
rub8 = [[ -0.89, -0.05 , 0.05 ], [-1.02 , -0.05, 0.05], [ -1.02, 0.18, 0.05], [-0.89, 0.18, 0.05 ], ]
rub9 = [[ -0.45, -0.05 , 0.05 ], [-0.86 , -0.05, 0.05], [-0.86, 0.45, 0.05 ], [ -0.45, 0.45, 0.05], ]
all_surfaces = [floor2, floor, step1, step2, step3, step4,step5,step6, step7, bridge, platfo, rub8, rub9,rub7, rub75, rub6, rub5, rub4, rub3, rub2]
arub9 = array(rub9).T
arub8 = array(rub8).T
arub75 = array(rub75).T
arub7 = array(rub7).T
arub6 =
|
array(rub6)
|
numpy.array
|
# @author: <NAME>, <NAME>, <NAME>
import copy
from collections import Counter
from . import config
config.init(0)
import numpy
import numpy as np
from mpi4py import MPI
class determine_block_params():
"""Computes the parameters for each chunk to be read by MPI process
Parameters
----------
comm : object
MPI communicator object
pgrid : tuple
Cartesian grid configuration
shape : tuple
Data shape
"""
def __init__(self, comm, pgrid, shape):
if type(comm) == int:
self.rank = comm
else:
self.rank = comm.rank
self.pgrid = pgrid
self.shape = shape
def determine_block_index_range_asymm(self):
'''Determines the start and end indices for the Data block for each rank'''
chunk_ind = np.unravel_index(self.rank, self.pgrid)
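        # Each rank owns roughly n // k items per dimension; the first n % k
        # ranks along a dimension absorb one extra item each, which is what
        # the min(i, n % k) offset accounts for.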
start_inds = [i * (n // k) + min(i, n % k) for n, k, i in zip(self.shape, self.pgrid, chunk_ind)]
end_inds = [(i + 1) * (n // k) + min((i + 1), n % k) - 1 for n, k, i in zip(self.shape, self.pgrid, chunk_ind)]
return start_inds, end_inds
def determine_block_shape_asymm(self):
'''Determines the shape for the Data block for each rank'''
start_inds, end_inds = self.determine_block_index_range_asymm()
return [(j - i + 1) for (i, j) in zip(start_inds, end_inds)]
class data_operations():
"""Performs various operations on the data
Parameters
----------
data : ndarray
Data to operate on"""
def __init__(self, data):
self.ten = data
def cutZero(self, thresh=1e-8):
"""Prunes zero columns from the data"""
tenS = list(self.ten.shape)
dim = len(tenS)
axSum = []
axSel = []
axInd = []
for curD in range(dim):
axisList = list(range(len(self.ten.shape)))
axisList.pop(curD)
axSum.append(numpy.sum(self.ten, axis=tuple(axisList)))
axSel.append(axSum[-1] > thresh)
# Move Axis to front and index
self.ten = self.ten.swapaxes(curD, 0)
self.ten = self.ten[axSel[-1]]
self.ten = self.ten.swapaxes(0, curD)
# Build Reconstruction Index
axInd.append(list(numpy.nonzero(axSel[-1])[0]))
axInd[-1].append(tenS[curD])
return (self.ten, axInd)
def recZero(self, indexList):
# Note indexList is partially destroyed
tenS = []
sliceList = []
for curI, curList in enumerate(indexList):
tenS.append(curList.pop(-1))
            sliceList.append(slice(0, self.ten.shape[curI], 1))
sliceObj = tuple(sliceList)
tenR =
|
numpy.zeros(tenS, dtype=self.ten.dtype)
|
numpy.zeros
|
"""
All tests are copied from AllenNLP.
"""
import torch
import pytest
import numpy
from pytorch_fast_elmo import ScalarMix
def test_scalar_mix_can_run_forward():
mixture = ScalarMix(3)
tensors = [torch.randn([3, 4, 5]) for _ in range(3)]
for k in range(3):
mixture.scalar_parameters[k].data[0] = 0.1 * (k + 1)
mixture.gamma.data[0] = 0.5
result = mixture(tensors)
weights = [0.1, 0.2, 0.3]
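    # ScalarMix softmax-normalises its scalar parameters before mixing, so the
    # expected output is gamma times the softmax-weighted sum of the tensors.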
normed_weights =
|
numpy.exp(weights)
|
numpy.exp
|