| prompt | completion | api |
|---|---|---|
| stringlengths 19 to 879k | stringlengths 3 to 53.8k | stringlengths 8 to 59 |
#!/usr/bin/env python
u"""
<NAME>' - 03/2022
TEST:
Estimate and Remove the contribution of a "Linear Ramp" to the Wrapped Phase
of a Differential InSAR Interferogram.
usage: rm_phase_ramp.py [-h] [--par PAR] in_interf
positional arguments:
in_interf Input Interferogram - Absolute Path
optional arguments:
-h, --help show this help message and exit
--par PAR, -P PAR Interferogram Parameter File
NOTE: In this implementation of the algorithm, a first guess or preliminary
estimate of the parameters defining the ramp must be provided by the user.
These parameters include the number of phase cycles characterizing the ramp
in the X and Y (columns and rows) directions of the input raster.
A GRID SEARCH around the user-defined first guess is performed to obtain the
best estimate of the ramp parameters.
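For example, with a first guess of 3 cycles along X and 2 along Y, a search
radius of 2 and a step of 0.1, the X axis is sampled from 1.0 to 5.0 and the
Y axis from 0.1 to 4.0 in 0.1 increments, and the candidate ramp with the
smallest mean absolute phase residual is selected.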
PYTHON DEPENDENCIES:
argparse: Parser for command-line options, arguments and sub-commands
https://docs.python.org/3/library/argparse.html
numpy: The fundamental package for scientific computing with Python
https://numpy.org/
matplotlib: Visualization with Python
https://matplotlib.org/
tqdm: Progress Bar in Python.
https://tqdm.github.io/
datetime: Basic date and time types
https://docs.python.org/3/library/datetime.html#module-datetime
py_gamma: GAMMA's Python integration with the py_gamma module
UPDATE HISTORY:
"""
# - Python dependencies
from __future__ import print_function
import os
import argparse
import datetime
import numpy as np
from tqdm import tqdm
import matplotlib.pyplot as plt
from pathlib import Path
# - GAMMA's Python integration with the py_gamma module
import py_gamma as pg
from utils.read_keyword import read_keyword
from utils.make_dir import make_dir
def estimate_phase_ramp(dd_phase_complex: np.ndarray, cycle_r: int,
cycle_c: int, slope_r: int = 1, slope_c: int = 1,
s_radius: float = 2, s_step: float = 0.1) -> dict:
"""
Estimate a phase ramp from the provided input interferogram
:param dd_phase_complex: interferogram phase expressed as complex array
:param cycle_r: phase ramp number of cycles along rows
:param cycle_c: phase ramp number of cycles along columns
:param slope_r: phase ramp slope sign - rows axis
:param slope_c: phase ramp slope sign - columns axis
:param s_radius: grid search domain radius
:param s_step: grid search step
:return: Python dictionary containing the results of the grid search
"""
# - Generate synthetic field domain
array_dim = dd_phase_complex.shape
n_rows = array_dim[0]
n_columns = array_dim[1]
raster_mask = np.ones(array_dim)
raster_mask[np.isnan(dd_phase_complex)] = 0
# - Integration Domain used to define the phase ramp
xx_m, yy_m = np.meshgrid(np.arange(n_columns), np.arange(n_rows))
if s_radius > 0:
if cycle_r - s_radius <= 0:
n_cycle_r_vect_f = np.arange(s_step, cycle_r + s_radius + s_step,
s_step)
else:
n_cycle_r_vect_f = np.arange(cycle_r - s_radius,
cycle_r + s_radius + s_step,
s_step)
if cycle_c - s_radius <= 0:
n_cycle_c_vect_f = np.arange(s_step, cycle_c + s_radius + s_step,
s_step)
else:
n_cycle_c_vect_f = np.arange(cycle_c - s_radius,
cycle_c + s_radius + s_step,
s_step)
# - Create Grid Search Domain
error_array_f = np.zeros([len(n_cycle_r_vect_f), len(n_cycle_c_vect_f)])
for r_count, n_cycle_r in tqdm(enumerate(list(n_cycle_r_vect_f)),
total=len(n_cycle_r_vect_f), ncols=60):
for c_count, n_cycle_c in enumerate(list(n_cycle_c_vect_f)):
synth_real = slope_c * (2 * np.pi / n_columns) \
* n_cycle_c * xx_m
synth_imag = slope_r * (2 * np.pi / n_rows) \
* n_cycle_r * yy_m
synth_phase_plane = synth_real + synth_imag
synth_complex = np.exp(1j * synth_phase_plane)
# - Compute Complex Conjugate product between the synthetic
# - phase ramp and the input interferogram.
dd_phase_complex_corrected \
= np.angle(dd_phase_complex * np.conj(synth_complex))
# - Compute the Mean Absolute value of the phase residuals
# - > Mean Absolute Error
error = np.abs(dd_phase_complex_corrected)
                mae = np.nansum(error)
"""This module contains definitions and data structures for 2-, 4-, and 8-valued logic operations.
8 logic values are defined as integer constants.
* For 2-valued logic: ``ZERO`` and ``ONE``
* 4-valued logic adds: ``UNASSIGNED`` and ``UNKNOWN``
* 8-valued logic adds: ``RISE``, ``FALL``, ``PPULSE``, and ``NPULSE``.
The bits in these constants have the following meaning:
* bit 0: Final/settled binary value of a signal
* bit 1: Initial binary value of a signal
* bit 2: Activity or transitions are present on a signal
Special meaning is given to values where bits 0 and 1 differ, but bit 2 (activity) is 0.
These values are interpreted as ``UNKNOWN`` or ``UNASSIGNED`` in 4-valued and 8-valued logic.
In general, 2-valued logic only considers bit 0, 4-valued logic considers bits 0 and 1, and 8-valued logic
considers all 3 bits.
The only exception is constant ``ONE=0b11`` which has two bits set for all logics including 2-valued logic.
"""
import math
from collections.abc import Iterable
import numpy as np
from . import numba, hr_bytes
ZERO = 0b000
"""Integer constant ``0b000`` for logic-0. ``'0'``, ``0``, ``False``, ``'L'``, and ``'l'`` are interpreted as ``ZERO``.
"""
UNKNOWN = 0b001
"""Integer constant ``0b001`` for unknown or conflict. ``'X'``, or any other value is interpreted as ``UNKNOWN``.
"""
UNASSIGNED = 0b010
"""Integer constant ``0b010`` for unassigned or high-impedance. ``'-'``, ``None``, ``'Z'``, and ``'z'`` are
interpreted as ``UNASSIGNED``.
"""
ONE = 0b011
"""Integer constant ``0b011`` for logic-1. ``'1'``, ``1``, ``True``, ``'H'``, and ``'h'`` are interpreted as ``ONE``.
"""
PPULSE = 0b100
"""Integer constant ``0b100`` for positive pulse, meaning initial and final values are 0, but there is some activity
on a signal. ``'P'``, ``'p'``, and ``'^'`` are interpreted as ``PPULSE``.
"""
RISE = 0b101
"""Integer constant ``0b110`` for a rising transition. ``'R'``, ``'r'``, and ``'/'`` are interpreted as ``RISE``.
"""
FALL = 0b110
"""Integer constant ``0b101`` for a falling transition. ``'F'``, ``'f'``, and ``'\\'`` are interpreted as ``FALL``.
"""
NPULSE = 0b111
"""Integer constant ``0b111`` for negative pulse, meaning initial and final values are 1, but there is some activity
on a signal. ``'N'``, ``'n'``, and ``'v'`` are interpreted as ``NPULSE``.
"""
def interpret(value):
"""Converts characters, strings, and lists of them to lists of logic constants defined above.
:param value: A character (string of length 1), Boolean, Integer, None, or Iterable.
Iterables (such as strings) are traversed and their individual characters are interpreted.
:return: A logic constant or a (possibly multi-dimensional) list of logic constants.
"""
if isinstance(value, Iterable) and not (isinstance(value, str) and len(value) == 1):
return list(map(interpret, value))
if value in [0, '0', False, 'L', 'l']:
return ZERO
if value in [1, '1', True, 'H', 'h']:
return ONE
if value in [None, '-', 'Z', 'z']:
return UNASSIGNED
if value in ['R', 'r', '/']:
return RISE
if value in ['F', 'f', '\\']:
return FALL
if value in ['P', 'p', '^']:
return PPULSE
if value in ['N', 'n', 'v']:
return NPULSE
return UNKNOWN
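# Illustrative evaluations of interpret(), derived from the rules above:
#   interpret('1')          -> ONE
#   interpret('01XR')       -> [ZERO, ONE, UNKNOWN, RISE]
#   interpret([None, '/'])  -> [UNASSIGNED, RISE]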
_bit_in_lut = np.array([2 ** x for x in range(7, -1, -1)], dtype='uint8')
@numba.njit
def bit_in(a, pos):
return a[pos >> 3] & _bit_in_lut[pos & 7]
class MVArray:
"""An n-dimensional array of m-valued logic values.
This class wraps a numpy.ndarray of type uint8 and adds support for encoding and
interpreting 2-valued, 4-valued, and 8-valued logic values.
Each logic value is stored as an uint8, manipulations of individual values are cheaper than in
:py:class:`BPArray`.
:param a: If a tuple is given, it is interpreted as desired shape. To make an array of ``n`` vectors
compatible with a simulator ``sim``, use ``(len(sim.interface), n)``. If a :py:class:`BPArray` or
:py:class:`MVArray` is given, a deep copy is made. If a string, a list of strings, a list of characters,
or a list of lists of characters are given, the data is interpreted best-effort and the array is
initialized accordingly.
:param m: The arity of the logic. Can be set to 2, 4, or 8. If None is given, the arity of a given
:py:class:`BPArray` or :py:class:`MVArray` is used, or, if the array is initialized differently, 8 is used.
"""
def __init__(self, a, m=None):
self.m = m or 8
assert self.m in [2, 4, 8]
# Try our best to interpret given a.
if isinstance(a, MVArray):
self.data = a.data.copy()
"""The wrapped 2-dimensional ndarray of logic values.
* Axis 0 is PI/PO/FF position, the length of this axis is called "width".
* Axis 1 is vector/pattern, the length of this axis is called "length".
"""
self.m = m or a.m
elif hasattr(a, 'data'): # assume it is a BPArray. Can't use isinstance() because BPArray isn't declared yet.
self.data = np.zeros((a.width, a.length), dtype=np.uint8)
self.m = m or a.m
for i in range(a.data.shape[-2]):
self.data[...] <<= 1
self.data[...] |= np.unpackbits(a.data[..., -i-1, :], axis=1)[:, :a.length]
if a.data.shape[-2] == 1:
self.data *= 3
elif isinstance(a, int):
self.data = np.full((a, 1), UNASSIGNED, dtype=np.uint8)
elif isinstance(a, tuple):
self.data = np.full(a, UNASSIGNED, dtype=np.uint8)
else:
if isinstance(a, str): a = [a]
self.data = np.asarray(interpret(a), dtype=np.uint8)
self.data = self.data[:, np.newaxis] if self.data.ndim == 1 else np.moveaxis(self.data, -2, -1)
# Cast data to m-valued logic.
if self.m == 2:
self.data[...] = ((self.data & 0b001) & ((self.data >> 1) & 0b001) | (self.data == RISE)) * ONE
elif self.m == 4:
self.data[...] = (self.data & 0b011) & ((self.data != FALL) * ONE) | ((self.data == RISE) * ONE)
elif self.m == 8:
self.data[...] = self.data & 0b111
self.length = self.data.shape[-1]
self.width = self.data.shape[-2]
def __repr__(self):
return f'<MVArray length={self.length} width={self.width} m={self.m} mem={hr_bytes(self.data.nbytes)}>'
def __str__(self):
return str([self[idx] for idx in range(self.length)])
def __getitem__(self, vector_idx):
"""Returns a string representing the desired vector."""
chars = ["0", "X", "-", "1", "P", "R", "F", "N"]
return ''.join(chars[v] for v in self.data[:, vector_idx])
def __len__(self):
return self.length
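# Example usage (illustrative):
#   v = MVArray(["0X", "1-"])    # two 8-valued vectors of width 2
#   v[0]                  -> '0X'
#   len(v)                -> 2
#   MVArray("R", m=2)[0]  -> '1'  (in 2-valued logic RISE collapses to its final value)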
def mv_cast(*args, m=8):
return [a if isinstance(a, MVArray) else MVArray(a, m=m) for a in args]
def mv_getm(*args):
return max([a.m for a in args if isinstance(a, MVArray)] + [0]) or 8
def _mv_not(m, out, inp):
np.bitwise_xor(inp, 0b11, out=out) # this also exchanges UNASSIGNED <-> UNKNOWN
if m > 2:
np.putmask(out, (inp == UNKNOWN), UNKNOWN) # restore UNKNOWN
def mv_not(x1, out=None):
"""A multi-valued NOT operator.
:param x1: An :py:class:`MVArray` or data the :py:class:`MVArray` constructor accepts.
:param out: Optionally an :py:class:`MVArray` as storage destination. If None, a new :py:class:`MVArray`
is returned.
:return: An :py:class:`MVArray` with the result.
"""
m = mv_getm(x1)
x1 = mv_cast(x1, m=m)[0]
out = out or MVArray(x1.data.shape, m=m)
_mv_not(m, out.data, x1.data)
return out
def _mv_or(m, out, *ins):
if m > 2:
any_unknown = (ins[0] == UNKNOWN) | (ins[0] == UNASSIGNED)
for inp in ins[1:]: any_unknown |= (inp == UNKNOWN) | (inp == UNASSIGNED)
any_one = (ins[0] == ONE)
for inp in ins[1:]: any_one |= (inp == ONE)
out[...] = ZERO
np.putmask(out, any_one, ONE)
for inp in ins:
np.bitwise_or(out, inp, out=out, where=~any_one)
np.putmask(out, (any_unknown & ~any_one), UNKNOWN)
else:
out[...] = ZERO
for inp in ins: np.bitwise_or(out, inp, out=out)
def mv_or(x1, x2, out=None):
"""A multi-valued OR operator.
:param x1: An :py:class:`MVArray` or data the :py:class:`MVArray` constructor accepts.
:param x2: An :py:class:`MVArray` or data the :py:class:`MVArray` constructor accepts.
:param out: Optionally an :py:class:`MVArray` as storage destination. If None, a new :py:class:`MVArray`
is returned.
:return: An :py:class:`MVArray` with the result.
"""
m = mv_getm(x1, x2)
x1, x2 = mv_cast(x1, x2, m=m)
out = out or MVArray(np.broadcast(x1.data, x2.data).shape, m=m)
_mv_or(m, out.data, x1.data, x2.data)
return out
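# Example (illustrative): mv_or("01X", "0X0")[0] evaluates to '01X' --
# 0|0 = 0, 1|X = 1 (a ONE dominates), X|0 = X (otherwise unknowns propagate).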
def _mv_and(m, out, *ins):
if m > 2:
any_unknown = (ins[0] == UNKNOWN) | (ins[0] == UNASSIGNED)
for inp in ins[1:]: any_unknown |= (inp == UNKNOWN) | (inp == UNASSIGNED)
any_zero = (ins[0] == ZERO)
for inp in ins[1:]: any_zero |= (inp == ZERO)
out[...] = ONE
np.putmask(out, any_zero, ZERO)
for inp in ins:
np.bitwise_and(out, inp | 0b100, out=out, where=~any_zero)
if m > 4: np.bitwise_or(out, inp & 0b100, out=out, where=~any_zero)
np.putmask(out, (any_unknown & ~any_zero), UNKNOWN)
else:
out[...] = ONE
for inp in ins: np.bitwise_and(out, inp, out=out)
def mv_and(x1, x2, out=None):
"""A multi-valued AND operator.
:param x1: An :py:class:`MVArray` or data the :py:class:`MVArray` constructor accepts.
:param x2: An :py:class:`MVArray` or data the :py:class:`MVArray` constructor accepts.
:param out: Optionally an :py:class:`MVArray` as storage destination. If None, a new :py:class:`MVArray`
is returned.
:return: An :py:class:`MVArray` with the result.
"""
m = mv_getm(x1, x2)
x1, x2 = mv_cast(x1, x2, m=m)
out = out or MVArray(np.broadcast(x1.data, x2.data).shape, m=m)
_mv_and(m, out.data, x1.data, x2.data)
return out
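# Example (illustrative): mv_and("01X", "0X1")[0] evaluates to '0XX' --
# 0&0 = 0 (a ZERO dominates), 1&X = X, X&1 = X (otherwise unknowns propagate).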
def _mv_xor(m, out, *ins):
if m > 2:
any_unknown = (ins[0] == UNKNOWN) | (ins[0] == UNASSIGNED)
for inp in ins[1:]: any_unknown |= (inp == UNKNOWN) | (inp == UNASSIGNED)
out[...] = ZERO
for inp in ins:
np.bitwise_xor(out, inp & 0b011, out=out)
if m > 4: np.bitwise_or(out, inp & 0b100, out=out)
np.putmask(out, any_unknown, UNKNOWN)
else:
out[...] = ZERO
        for inp in ins: np.bitwise_xor(out, inp, out=out)
# NumPy is the fundamental package for scientific computing with Python.
# It contains among other things:
# a powerful N-dimensional array object
import numpy as np
import matplotlib.pyplot as plt
import oct2py as op
import itertools
from mpl_toolkits.mplot3d import axes3d
def DrawHexagon(P):
    # Close the hexagon by appending the first vertex and plot its outline in blue ('-b').
    T = np.vstack([P[0:P.shape[0] - 1], P[0, :]])
    ax.plot(T[:, 0], T[:, 1], T[:, 2], '-b')
    return
def LinkDist(B, P):
    # Pair each base point with a platform point (the platform index lags the
    # base index by one, wrapping around) and print the end points of each virtual link.
    for i in range(0, 7):
        if i == 0:
            L = (B[i, :], P[6, :])
            ax.plot(B[i, :], P[5, :])
            print(L)
        else:
            L = (B[i, :], P[i - 1, :])
            # ax.plot(B[i, :], P[i - 1, :], '-b')
            print(L)
    return
'''def RotateZ(LPoints,T):
Temp=np.array(LPoints,)
print (Temp)
return ;'''
Theta = np.arange(0 , 360 , 120) #arrange into [0 120 240]
#print(Theta)
DTheta = 16.22
BRad = 141.78 #Base Radius
PRad = 101.27 #Platform Radius
ConnRod = 218.2587
LinkRod = 20
Height = 200
Pos = 3 #x y z
PX = np.arange(-10 , 11)#mm
PY = np.arange(-10 , 11) #mm
PZ=np.arange(-5,6,0.5) #mm
RX=np.arange(-5,6,0.5) #degree
RY=np.arange(-5,6,0.5) #degree
RZ=np.arange(-5,6,0.5)
BPC=np.zeros( (7,3) )#Base Plateform Coordinate
MPC=np.zeros( (7,3) ) #Moving Plateform Coordinate
LPC=np.zeros( (6,3) ) #Link Point Coordinate
BC=np.radians(np.sort(np.array([Theta ,Theta+DTheta]).ravel())) #BASE CIRCLE
#print (BC)
#PLATFORM CIRCLE
PC=np.radians(np.sort(np.array([Theta+60 ,Theta+DTheta+60]).ravel()))
#print (PC)
#Calculate the Base Platform Point and Base Point Position
BPC[0:6,:]=np.transpose(np.array([BRad * np.cos(BC),BRad * np.sin(BC),np.zeros((6))]))
#print (BPC);
#Height of Moving Platform
MPC[0:6,:]=np.transpose(np.array([PRad * np.cos(PC),PRad * np.sin(PC),Height * np.ones(6)]))
MPC[6,2]=Height
#print (MPC)
#Display the points in 3D
Point=(np.vstack([np.hstack([BPC]),np.hstack([MPC])]))#Base Points and Plateform Points
#print (Point)
fig=plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.set_xlim(-500,500)
ax.set_ylim(-500,500)
ax.set_zlim(-500,500)
ax.set_xlabel('X')
ax.set_ylabel('Y')
ax.set_zlabel('Z')
ax.scatter(Point[:,0],Point[:,1],Point[:,2])
#plt.plot(Point[:,0],Point[:,1],Point[:,2],'-o')
DrawHexagon(BPC)
DrawHexagon(MPC)
VirLink = LinkDist(BPC[0:7,:], MPC[0:7,:])
PlaneTheta=Theta+DTheta/2
#print (PlaneTheta)
PlaneNormal=np.transpose(np.array([BRad*np.cos(np.radians(PlaneTheta)),BRad*np.sin(np.radians(PlaneTheta)),np.zeros(3)]))
#print (PlaneNormal)
MotorPlane=np.zeros((3,5))
#print (MotorPlane)
'''for i=1:3
MotorPlane(i,:)=createPlane(BPC(i*2-1,:),PlaneNormal(i,:));
drawPlane3d(MotorPlane(i,:));
end'''
for i in range(0,3):
print (BPC[i*2,:])
print(PlaneNormal[i,:])
Phi=np.arange(0,90,0.25)
#print (Phi)
LC=np.radians(Phi)
#LPL=[ LinkRod.*cos(LC)' zeros(1,numel(Phi))' LinkRod.*sin(LC)' ];
LPL = np.transpose(np.array([LinkRod*np.cos(LC), np.zeros_like(LC), LinkRod*np.sin(LC)]))
from torch.utils.data import Dataset
# import joblib
import numpy as np
import torch
from settings import segment_length, segment_slide
import _pickle as pickle
def load_file(filename):
with open(filename,'rb') as f:
data=pickle.load(f)
return data
def normalize(x, mean=[0.3, 0.6], std=[0.09, 0.13]):
mean_tensor = x.new_empty(x.size()).fill_(0)
mean_tensor.index_fill_(1, torch.tensor(0, device=x.device), mean[0])
mean_tensor.index_fill_(1, torch.tensor(1, device=x.device), mean[1])
std_tensor = x.new_empty(x.size()).fill_(0)
std_tensor.index_fill_(1, torch.tensor(0, device=x.device), std[0])
std_tensor.index_fill_(1, torch.tensor(1, device=x.device), std[1])
output = x - mean_tensor
output = torch.div(output, std_tensor)
return output
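# Illustrative behaviour (channel 0 = red, channel 1 = infrared for inputs of shape (N, 2, L)):
#   x = torch.full((1, 2, 4), 0.3)
#   normalize(x)   # channel 0 becomes 0.0, channel 1 becomes (0.3 - 0.6) / 0.13 ~= -2.31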
class TriageDataset(Dataset):
def __init__(self, data, labels=None, stft_fun=None, transforms=None, aug_raw=[], normalize=False, sample_by=None):
self.data = data
self.aug_raw = aug_raw
self.normalize = normalize
self.labels = labels
self.stft_fun = stft_fun
self.transforms = transforms
self.sample_by = sample_by
if self.sample_by:
self.unique_ids = self.data[self.sample_by].unique()
# self.client = MongoClient('mongodb://%s:%s@%s' % (Mongo.user.value, Mongo.password.value,Mongo.host.value),
# connect=False)
def __len__(self):
if self.sample_by:
return len(self.unique_ids)
return self.data.shape[0]
def __getitem__(self, item):
if self.sample_by:
rows = self.data.loc[self.data[self.sample_by] == self.unique_ids[item], :].reset_index()
sampled_row=torch.randperm(rows.shape[0],).numpy()[0]
row=rows.iloc[sampled_row, :]
else:
row = self.data.iloc[item, :]
# ppg = joblib.load(row['filename'])
ppg = load_file(row['filename'])
red, infrared = np.array(ppg["red"]), np.array(ppg["infrared"])
for aug in self.aug_raw:
red, infrared = aug(red, infrared)
if self.stft_fun:
red_stft = self.stft_fun(red)
infrared_stft = self.stft_fun(infrared)
x_stft = np.stack([red_stft, infrared_stft])
x_stft = torch.tensor(x_stft)
if self.normalize:
red = (red - 0.3) / 0.09
infrared = (infrared - 0.6) / 0.13
x = np.stack([red, infrared])
x = torch.tensor(x)
        if self.stft_fun and self.transforms:
x_stft = self.transforms(x_stft)
if self.labels is None:
if self.stft_fun is None:
return x
return x, x_stft
else:
y = row[self.labels]
if self.stft_fun is None:
return x, torch.tensor([y, ])
return x, x_stft, torch.tensor([y, ])
class TriagePairs(Dataset):
def __init__(self, data, id_var='id', position_var='position', stft_fun=None, transforms=None, aug_raw=[],
overlap=False, normalize=False, pretext='sample'):
super().__init__()
self.data = data
self.id_var = id_var
self.position_var = position_var
self.ids = data[id_var].unique()
self.stft_fun = stft_fun
self.transforms = transforms
self.raw_aug = aug_raw
self.normalize = normalize
self.pretext = pretext
if overlap:
self.position_distance = 0
else:
self.position_distance = int(segment_length / segment_slide)
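        # Example (illustrative): if segment_length is four times segment_slide,
        # position_distance = 4, so under the 'sample' pretext the second segment is
        # drawn at least 4 positions away from the first and cannot overlap it.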
def __len__(self):
return len(self.ids)
def __getitem__(self, item):
rows = self.data.loc[self.data[self.id_var] == self.ids[item], :].reset_index()
sampled_row = torch.randperm(rows.shape[0], ).numpy()[0]
sample_position = rows.iloc[sampled_row][self.position_var]
if self.pretext == "augment":
sample_position2 = sample_position
elif self.pretext == "sample":
rows2 = rows.loc[(rows[self.position_var] < (sample_position - self.position_distance)) | (
rows[self.position_var] > (sample_position + self.position_distance))].reset_index()
sampled_row2 = torch.randperm(rows2.shape[0] ).numpy()[0]
sample_position2 = rows2.iloc[sampled_row2][self.position_var]
else:
raise NotImplementedError(f"Pretext task {self.pretext} not implemented")
sample_rows = rows.loc[rows[self.position_var].isin([sample_position, sample_position2])]
# ppg1 = joblib.load(sample_rows.iloc[0]['filename'])
ppg1 = load_file(sample_rows.iloc[0]['filename'])
if self.pretext == "augment":
ppg2 = ppg1
elif self.pretext == "sample":
# ppg2 = joblib.load(sample_rows.iloc[1]['filename'])
ppg2 = load_file(sample_rows.iloc[1]['filename'])
red1, infrared1 = ppg1["red"], ppg1["infrared"]
red2, infrared2 = ppg2["red"], ppg2["infrared"]
for aug in self.raw_aug:
red1, infrared1 = aug(red1, infrared1)
red2, infrared2 = aug(red2, infrared2)
if self.normalize:
red1 = (np.array(red1) - 0.3) / 0.09
infrared1 = (np.array(infrared1) - 0.6) / 0.13
red2 = (np.array(red2) - 0.3) / 0.09
infrared2 = (np.array(infrared2) - 0.6) / 0.13
x1 = torch.tensor(np.stack([red1, infrared1]))
x2 = torch.tensor(np.stack([red2, infrared2]))
if self.stft_fun:
stft_red1 = self.stft_fun(red1)
stft_infrared1 = self.stft_fun(infrared1)
stft_red2 = self.stft_fun(red2)
stft_infrared2 = self.stft_fun(infrared2)
stft_x1 = torch.tensor(np.stack([stft_red1, stft_infrared1]))
            stft_x2 = torch.tensor(np.stack([stft_red2, stft_infrared2]))
import numpy as np
import os, sys
import time
import vtktools  # Fluidity's vtktools module (provides the vtu class used below); assumed available
# PART 1 - READING IN SNAPSHOTS AND WRITING POD COEFFICIENTS ----------------------------------
def read_in_snapshots_and_write_out_POD_coeffs(snapshot_data_location, snapshot_file_base, nTime, nDim, field_name, G, cumulative_tol):
# read in snapshots from vtu files ------------------------------------------------------------
print('reading in snapshots from vtu files')
nNodes = get_nNodes_from_vtu(snapshot_data_location, snapshot_file_base )
snapshots_matrix = np.zeros((nDim*nNodes, nTime))
velocity = np.zeros((nNodes, nDim))
for iTime in range(nTime):
# iTime+1 excludes the initial condition (sometimes zero)
filename = snapshot_data_location + snapshot_file_base + str(iTime+1) + '.vtu'
vtu_data = vtktools.vtu(filename)
velocity = vtu_data.GetField(field_name)[:,0:nDim] # as 2D data appears as 3D data in vtu file
#snapshots_matrix[:nNodes,iTime] = velocity[:,0]
#snapshots_matrix[nNodes:2*nNodes,iTime] = velocity[:,1]
#if nDim==3:
# snapshots_matrix[2*nNodes:,iTime] = velocity[:,2]
snapshots_matrix[:,iTime] = velocity.reshape((nDim*nNodes),order='F')
# take SVD and output POD coeffs -----------------------------------------------------------
print('finding the POD coefficients of the snapshots')
# get basis functions and singular values (sing values probably not needed here)
s_values, basis_functions = get_POD_functions(snapshots_matrix, nPOD, cumulative_tol, nNodes)
# calculate POD coefficients
print('shape of basis_functions and snapshots', basis_functions.shape, snapshots_matrix.shape)
POD_coeffs = np.dot(np.transpose(basis_functions), snapshots_matrix)
print( 'shape of POD coeffs', POD_coeffs.shape)
# output POD coefficients to file
np.savetxt('POD_coeffs.csv', POD_coeffs , delimiter=',')
np.savetxt('basis_functions.csv', basis_functions , delimiter=',')
return
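# Minimal sketch of the POD round trip used in this script (a hypothetical helper,
# not part of the original workflow; the real get_POD_functions() also truncates
# the basis using nPOD / cumulative_tol):
def _pod_round_trip_demo():
    snapshots = np.random.rand(12, 5)                 # stands in for (nDim*nNodes, nTime)
    basis, s_values, _ = np.linalg.svd(snapshots, full_matrices=False)
    basis = basis[:, :3]                              # keep the 3 leading POD modes
    pod_coeffs = np.dot(basis.T, snapshots)           # POD coefficients, as in PART 1 above
    return np.dot(basis, pod_coeffs)                  # low-rank reconstruction, as in PART 2 below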
# PART 2 - READING IN PREDICTIONS OF POD COEFFICIENTS AND WRITING RESULTS --------------------
def read_in_ML_predictions_and_write_results(snapshot_data_location, snapshot_file_base):
# read in, apply inverse SVD and print out results -------------------------------------------
POD_coeffs_prediction = np.loadtxt('POD_coeffs.csv', delimiter=',') # _prediction
basis_functions = np.loadtxt('basis_functions.csv', delimiter=',')
    prediction = np.dot(basis_functions, POD_coeffs_prediction)
""" To conver dicom images into images needed for keras"""
from __future__ import print_function
import os
import numpy as np
import SimpleITK as sitk
import settings_dist
import cv2
import random
import time
def get_data_from_dir(data_dir):
"""
    From a given folder (in the BraTS 2016 folder organization), returns the different
    volumes corresponding to t1, t1c, flair, t2 and the ground-truth segmentation.
    """
print ("Loading from", data_dir)
img_path = os.path.dirname(data_dir)
img_dir_fn = os.path.basename(data_dir)
t1_fn = ""
t1c_fn = ""
flair_fn = ""
t2_fn = ""
truth_fn = ""
fldr1_list = os.listdir(data_dir)
for fldr1 in fldr1_list:
fldr1_fn = os.path.join(img_path,img_dir_fn, fldr1)
if os.path.isdir(fldr1_fn):
fldr2_list = os.listdir(fldr1_fn)
for fldr2 in fldr2_list:
fn, ext = os.path.splitext(fldr2)
if ext == '.mha':
protocol_series = fldr1.split('.')[4]
protocol = protocol_series.split('_')[0]
if protocol == 'MR':
series = protocol_series.split('_')[1]
if series == 'T2':
t2_fn = os.path.join(img_path,img_dir_fn, fldr1, fldr2)
if series == 'Flair':
flair_fn = os.path.join(img_path, img_dir_fn, fldr1, fldr2)
if series == 'T1c':
t1c_fn = os.path.join(img_path,img_dir_fn, fldr1, fldr2)
if series == 'T1':
t1_fn = os.path.join(img_path,img_dir_fn, fldr1, fldr2)
else:
truth_fn = os.path.join(img_path,img_dir_fn, fldr1, fldr2)
    #does the data have all the needed inputs (T1c, T1, Flair, T2 and truth)? Only then use the case
isComplete = False
if len(t1c_fn)>0 and len(t1_fn) and len(flair_fn)>0 and len(t2_fn)>0 \
and len(truth_fn)>0:
isComplete = True
print (" T1 :", os.path.basename(t1_fn))
print (" T1c:", os.path.basename(t1c_fn))
print (" FLr:", os.path.basename(flair_fn))
print (" T2 :", os.path.basename(t2_fn))
print (" Tru:", os.path.basename(truth_fn))
# Read data
try:
t1 = sitk.ReadImage(t1_fn)
except Exception as e:
print (e)
t1 = sitk.Image()
try:
t1c = sitk.ReadImage(t1c_fn)
except Exception as e:
print (e)
t1c = sitk.Image()
try:
fl = sitk.ReadImage(flair_fn)
except Exception as e:
print (e)
fl = sitk.Image()
try:
t2 = sitk.ReadImage(t2_fn)
except Exception as e:
print (e)
t2 = sitk.Image()
try:
msk = sitk.ReadImage(truth_fn);
msk.SetOrigin(t1.GetOrigin())
msk.SetDirection(t1.GetDirection())
msk.SetSpacing(t1.GetSpacing())
except Exception as e:
print (e)
msk = sitk.Image()
return (t1, t1c, fl, t2, msk, isComplete);
def preprocessSITK(img, img_rows, img_cols, resize_factor=1):
"""
crops, rescales, does the bias field correction on an sitk image
----
Input: sitk image
Output: sitk image
"""
si_img = img.GetSize()
sp_img = img.GetSpacing()
#crop to the desired size:
low_boundary = [int((si_img[0]-img_rows)/2),int((si_img[1]-img_cols)/2), 0]
upper_boundary = [int((si_img[0]-img_rows+1)/2),int((si_img[1]-img_cols+1)/2),0]
pr_img = sitk.Crop(img, low_boundary, upper_boundary)
if not resize_factor==1:
pr_img = sitk.Shrink(pr_img,[resize_factor, resize_factor, 1])
print ("Resizing to", pr_img.GetSize())
return pr_img
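# Worked example: a 240x240 slice cropped to img_rows = img_cols = 128 uses
# low_boundary = [56, 56, 0] and upper_boundary = [56, 56, 0] (56 + 128 + 56 = 240);
# with resize_factor = 2 the cropped slice is additionally shrunk to 64x64.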
def normalize(img_arr):
"""
intensity preprocessing
"""
#new_img_arr = (img_arr-np.min(img_arr))/(np.max(img_arr)-np.min(img_arr))*255
new_img_arr = (img_arr-np.mean(img_arr))/np.std(img_arr)
return new_img_arr
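# Example: an image with mean 100 and standard deviation 20 maps an intensity of
# 120 to +1.0 and an intensity of 80 to -1.0 (zero mean, unit variance).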
def create_datasets_4(img_path, img_rows, img_cols, img_slices, slice_by=5, resize_factor = 1, out_path='.'):
"""
    creates training data with 4 inputs and 5 outputs (1-necrosis, 2-edema,
    3-non-enhancing tumor, 4-enhancing tumor, 5-rest of the brain)
"""
img_list = os.listdir(img_path)
slices_per_case = 155
n_labels = 4
n_inputs = 4
    img_rows_ss = img_rows // resize_factor
    img_cols_ss = img_cols // resize_factor
#training
tr_n_cases = 273 # max number of cases in tcia
tr_n_slices = slices_per_case*tr_n_cases
tr_label_counts = np.zeros(n_labels+2)
tr_img_shape = (tr_n_slices, img_rows_ss, img_cols_ss, n_inputs)
tr_msk_shape = (tr_n_slices, img_rows_ss, img_cols_ss, n_labels)
    tr_imgs = np.ndarray(tr_img_shape, dtype=np.float64)
    tr_msks = np.ndarray(tr_msk_shape, dtype=np.float64)
#testing
te_n_cases = 60
te_n_slices = slices_per_case*te_n_cases
te_img_shape = (te_n_slices, img_rows_ss, img_cols_ss, n_inputs)
te_msk_shape = (te_n_slices, img_rows_ss, img_cols_ss, n_labels)
    te_imgs = np.ndarray(te_img_shape, dtype=np.float64)
    te_msks = np.ndarray(te_msk_shape, dtype=np.float64)
i = 0
print('-'*30)
print('Creating training images...')
print('-'*30)
tr_i = 0
te_i = 0
slicesTr = 0
slicesTe = 0
curr_sl_tr = 0
curr_sl_te = 0
curr_cs_te = 0
for i, img_dir_fn in enumerate(img_list):
data_dir = os.path.join(img_path,img_dir_fn)
# skip if is not a folder
if not os.path.isdir(data_dir):
continue
# find out which on is in training
is_tr = True;
if i % 5 == 0:
is_tr = False
print (i, "Train:", is_tr, "", end='')
(t1p, t1, fl, t2, msk, isComplete) = get_data_from_dir(data_dir)
#preprocess:crop and rescale
t1 = preprocessSITK(t1, img_rows, img_cols, resize_factor)
t1p = preprocessSITK(t1p,img_rows, img_cols, resize_factor)
fl = preprocessSITK(fl, img_rows, img_cols, resize_factor)
t2 = preprocessSITK(t2, img_rows, img_cols, resize_factor)
msk = preprocessSITK(msk,img_rows, img_cols, resize_factor)
#preprocess: rescale intensity to 0 mean and 1 standard deviation
t1Arr = normalize(sitk.GetArrayFromImage(t1).astype('float'))
t1pArr = normalize(sitk.GetArrayFromImage(t1p).astype('float'))
flArr = normalize(sitk.GetArrayFromImage(fl).astype('float'))
t2Arr = normalize(sitk.GetArrayFromImage(t2).astype('float'))
imgArr = np.zeros((slices_per_case, img_rows_ss, img_cols_ss,n_inputs))
imgArr[:,:,:,0] = t1Arr
imgArr[:,:,:,1] = t2Arr
imgArr[:,:,:,2] = flArr
imgArr[:,:,:,3] = t1pArr
mskArr = np.zeros((slices_per_case, img_rows_ss, img_cols_ss,n_labels))
mskArrTmp = sitk.GetArrayFromImage(msk)
mskArr[:,:,:,0] = (mskArrTmp==1).astype('float')
mskArr[:,:,:,1] = (mskArrTmp==2).astype('float')
mskArr[:,:,:,2] = (mskArrTmp==3).astype('float')
mskArr[:,:,:,3] = (mskArrTmp==4).astype('float')
n_slice = 0
minSlice = 0
maxSlice = slices_per_case
for curr_slice in range(slices_per_case):#leasionSlices:
n_slice +=1
            # is the slice in a training case but not used for training, or in a testing case;
            # only every slice_by-th slice is actually used
if n_slice % slice_by == 0:
print ('.', sep='', end='')
is_used = True
else:
is_used = False
imgSl = imgArr[curr_slice,:,:,:]
mskSl = mskArr[curr_slice,:,:,:]
# set slice
if is_tr:
# regular training slices
if is_used:
if curr_sl_tr % 2 == 0:
tr_imgs[curr_sl_tr,:,:,:] = imgSl
tr_msks[curr_sl_tr,:,:,:] = mskSl
else: # flip
tr_imgs[curr_sl_tr,:,:,:] = cv2.flip(imgSl,1).reshape(imgSl.shape)
tr_msks[curr_sl_tr,:,:,:] = cv2.flip(mskSl,1).reshape(mskSl.shape)
curr_sl_tr += 1
else:
if is_used:
te_imgs[curr_sl_te,:,:,:] = imgSl
te_msks[curr_sl_te,:,:,:] = mskSl
curr_sl_te += 1
#new line needed for the ... simple progress bar
print ('\n')
if is_tr:
tr_i += 1
slicesTr += maxSlice - minSlice+1
else:
te_i += 1
slicesTe += maxSlice - minSlice+1
print('Done loading ',slicesTr, slicesTe, curr_sl_tr, curr_sl_te)
### just write the actually added slices
tr_imgs = tr_imgs[0:curr_sl_tr,:,:,:]
tr_msks = tr_msks[0:curr_sl_tr,:,:,:]
np.save(os.path.join(out_path,'imgs_train.npy'), tr_imgs)
np.save(os.path.join(out_path,'msks_train.npy'), tr_msks)
te_imgs = te_imgs[0:curr_sl_te,:,:,:]
te_msks = te_msks[0:curr_sl_te,:,:,:]
np.save(os.path.join(out_path,'imgs_test.npy'), te_imgs)
np.save(os.path.join(out_path,'msks_test.npy'), te_msks)
print('Saving to .npy files done.')
print('Train ', curr_sl_tr)
print('Test ', curr_sl_te)
def load_data(data_path, prefix = "_train"):
imgs_train = np.load(os.path.join(data_path, 'imgs'+prefix+'.npy'), mmap_mode='r', allow_pickle=False)
msks_train = np.load(os.path.join(data_path, 'msks'+prefix+'.npy'), mmap_mode='r', allow_pickle=False)
return imgs_train, msks_train
def update_channels(imgs, msks, input_no=3, output_no=3, mode=1, CHANNEL_LAST=True):
"""
changes the order or which channels are used to allow full testing. Uses both
Imgs and msks as input since different things may be done to both
---
mode: int between 1-3
"""
imgs = imgs.astype('float32')
msks = msks.astype('float32')
if CHANNEL_LAST:
shp = imgs.shape
new_imgs = np.zeros((shp[0],shp[1],shp[2],input_no))
new_msks = np.zeros((shp[0],shp[1],shp[2],output_no))
if mode==1:
new_imgs[:,:,:,0] = imgs[:,:,:,2] # flair
new_msks[:,:,:,0] = msks[:,:,:,0]+msks[:,:,:,1]+msks[:,:,:,2]+msks[:,:,:,3]
#print('-'*10,' Whole tumor', '-'*10)
elif mode == 2:
#core (non enhancing)
new_imgs[:,:,:,0] = imgs[:,:,:,0] # t1 post
new_msks[:,:,:,0] = msks[:,:,:,3]
#print('-'*10,' Predicing enhancing tumor', '-'*10)
elif mode == 3:
#core (non enhancing)
new_imgs[:,:,:,0] = imgs[:,:,:,1]# t2 post
new_msks[:,:,:,0] = msks[:,:,:,0]+msks[:,:,:,2]+msks[:,:,:,3]# active core
#print('-'*10,' Predicing active Core', '-'*10)
else:
new_msks[:,:,:,0] = msks[:,:,:,0]+msks[:,:,:,1]+msks[:,:,:,2]+msks[:,:,:,3]
else:
shp = imgs.shape
        new_imgs = np.zeros((shp[0], input_no, shp[2], shp[3]))
import torch
from lib.utils import log, he_normal, removeSmallIslands, combineLabels
from lib.utils import softmax2onehot, sigmoid2onehot
import os, time, json
from torch import nn
from datetime import datetime
import numpy as np
from lib.metric import Metric
class BaseModel(nn.Module):
def __init__(self):
super(BaseModel, self).__init__()
def initialize(self, device, output="", model_state=""):
"""Sets the device, output path, and loads model's parameters if needed
Args:
`device`: Device where the computations will be performed. "cuda:0"
            `output`: Path where the output will be saved. If no output path is
            given, nothing is expected to be saved; if something does try to
            save, it will probably throw an error.
`model_state`: Path to load stored parameters.
"""
# Bring the model to GPU
self.device = device
self.out_path = output
self.to(self.device)
# Load or initialize weights
if model_state != "":
print("Loading previous model")
self.load_state_dict(torch.load(model_state, map_location=self.device))
else:
def weight_init(m):
if isinstance(m, torch.nn.Conv2d) or isinstance(m, torch.nn.Conv3d):
he_normal(m.weight)
torch.nn.init.zeros_(m.bias)
self.apply(weight_init)
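    # Usage sketch (hypothetical subclass and paths):
    #   model = MyUNet()                                   # some subclass of BaseModel
    #   model.initialize("cuda:0", output="results/")      # fresh He-normal initialization
    #   model.initialize("cpu", model_state="results/model-best.pt")   # resume from a checkpoint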
def fit(self, tr_loader, val_loader, epochs, val_interval,
loss, val_metrics, opt):
"""Trains the NN.
Args:
`tr_loader`: DataLoader with the training set.
`val_loader`: DataLoader with the validaiton set.
`epochs`: Number of epochs to train the model. If 0, no train.
`val_interval`: After how many epochs to perform validation.
`loss`: Name of the loss function.
`val_metrics`: Which metrics to measure at validation time.
`opt`: Optimizer.
"""
t0 = time.time()
e = 1
# Expected classes of our dataset
measure_classes = {0: "background", 1: "contra", 2: "R_hemisphere"}
# Which classes will be reported during validation
        measure_classes_mean = np.array([1, 2])
import os
import torch
import yaml
import numpy as np
from PIL import Image
from skimage import io
import torch.nn.functional as F
def pil_loader(path):
# open path as file to avoid ResourceWarning (https://github.com/python-pillow/Pillow/issues/835)
with open(path, 'rb') as f:
img = Image.open(f)
return img.convert('RGB')
def tif_loader(path):
aDiv = np.array(io.imread(path))
aDivMax = aDiv.max()
aDivMin = aDiv.min()
aDiv = (aDiv - aDivMin) / (aDivMax - aDivMin)
#aDiv = aDiv/aDivMax
h, w = aDiv.shape
aDiv = np.stack([aDiv, aDiv, aDiv], axis=2)
aDiv = np.asarray(aDiv, np.float32)
return aDiv
def default_loader(path):
return pil_loader(path)
#return tif_loader(path)
def tensor_img_to_npimg(tensor_img):
"""
Turn a tensor image with shape CxHxW to a numpy array image with shape HxWxC
:param tensor_img:
:return: a numpy array image with shape HxWxC
"""
if not (torch.is_tensor(tensor_img) and tensor_img.ndimension() == 3):
raise NotImplementedError("Not supported tensor image. Only tensors with dimension CxHxW are supported.")
npimg = np.transpose(tensor_img.numpy(), (1, 2, 0))
npimg = npimg.squeeze()
assert isinstance(npimg, np.ndarray) and (npimg.ndim in {2, 3})
return npimg
# Change the values of tensor x from range [0, 1] to [-1, 1]
def normalize(x):
#return x.mul_(2).add_(-1)
return x.mul_(2).add_(-1)
def transfer2tensor(x):
    '''
    Convert the ndarray of a tif image into a normalized float tensor.
    Args:
        x: input image as an ndarray (raw 32-bit intensities)
    Returns:
        a torch.FloatTensor
    '''
x = np.asarray(x, dtype=np.float32)
x_norm = x/4294967295
x_tensor = torch.from_numpy(x_norm)
x_tensor = x_tensor.float()
#x_tensor = torch.tensor(x_tensor.clone().detach(), dtype=torch.float32)
return x_tensor.mul_(2).add_(-0.6)
def same_padding(images, ksizes, strides, rates):
assert len(images.size()) == 4
batch_size, channel, rows, cols = images.size()
out_rows = (rows + strides[0] - 1) // strides[0]
out_cols = (cols + strides[1] - 1) // strides[1]
effective_k_row = (ksizes[0] - 1) * rates[0] + 1
effective_k_col = (ksizes[1] - 1) * rates[1] + 1
padding_rows = max(0, (out_rows-1)*strides[0]+effective_k_row-rows)
padding_cols = max(0, (out_cols-1)*strides[1]+effective_k_col-cols)
# Pad the input
padding_top = int(padding_rows / 2.)
padding_left = int(padding_cols / 2.)
padding_bottom = padding_rows - padding_top
padding_right = padding_cols - padding_left
paddings = (padding_left, padding_right, padding_top, padding_bottom)
images = torch.nn.ZeroPad2d(paddings)(images)
return images
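# Worked example: a 5x5 image with ksizes=[3, 3], strides=[1, 1] and rates=[1, 1]
# gives out_rows = out_cols = 5, an effective kernel of 3 and padding_rows = padding_cols = 2,
# i.e. one row/column of zeros on every side (TensorFlow-style 'same' padding).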
def extract_image_patches(images, ksizes, strides, rates, padding='same'):
"""
Extract patches from images and put them in the C output dimension.
:param padding:
:param images: [batch, channels, in_rows, in_cols]. A 4-D Tensor with shape
:param ksizes: [ksize_rows, ksize_cols]. The size of the sliding window for
each dimension of images
:param strides: [stride_rows, stride_cols]
:param rates: [dilation_rows, dilation_cols]
:return: A Tensor
"""
assert len(images.size()) == 4
assert padding in ['same', 'valid']
batch_size, channel, height, width = images.size()
if padding == 'same':
images = same_padding(images, ksizes, strides, rates)
elif padding == 'valid':
pass
else:
raise NotImplementedError('Unsupported padding type: {}.\
Only "same" or "valid" are supported.'.format(padding))
unfold = torch.nn.Unfold(kernel_size=ksizes,
dilation=rates,
padding=0,
stride=strides)
patches = unfold(images)
return patches # [N, C*k*k, L], L is the total number of such blocks
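# Illustrative shapes: a (1, 3, 8, 8) batch with ksizes=[4, 4], strides=[4, 4],
# rates=[1, 1] and padding='same' needs no extra padding and returns patches of
# shape (1, 3*4*4, 4) -- four non-overlapping 4x4 blocks per image.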
# def random_bbox(config, batch_size):
# """Generate a random tlhw with configuration.
#
# Args:
# config: Config should have configuration including img
#
# Returns:
# tuple: (top, left, height, width)
#
# """
# img_height, img_width, _ = config['image_shape']
# h, w = config['mask_shape']
# margin_height, margin_width = config['margin']
# maxt = img_height - margin_height - h
# maxl = img_width - margin_width - w
# bbox_list = []
# if config['mask_batch_same']:
# t = np.random.randint(margin_height, maxt)
# l = np.random.randint(margin_width, maxl)
# bbox_list.append((t, l, h, w))
# bbox_list = bbox_list * batch_size
# else:
# for i in range(batch_size):
# t = np.random.randint(margin_height, maxt)
# l = np.random.randint(margin_width, maxl)
# bbox_list.append((t, l, h, w))
#
# return torch.tensor(bbox_list, dtype=torch.int64)
def random_bbox(config, batch_size):
"""Generate a random tlhw with configuration.
Args:
config: Config should have configuration including img
Returns:
tuple: (top, left, height, width)
"""
img_height, img_width, _ = config['image_shape']
h, w = config['mask_shape']
margin_height, margin_width = config['margin']
maxt = img_height - margin_height - h
maxl = img_width - margin_width - w
bbox_list = []
if config['mask_batch_same']:
#t = np.random.randint(margin_height, maxt)
#l = np.random.randint(margin_width, maxl)
t = (img_height - h)//2 ## center mask
l = (img_width - w) //2
bbox_list.append((t, l, h, w))
bbox_list = bbox_list * batch_size
else:
for i in range(batch_size):
# t = np.random.randint(margin_height, maxt)
# l = np.random.randint(margin_width, maxl)
t = (img_height - h) // 2 ## center mask
l = (img_width - w) // 2
bbox_list.append((t, l, h, w))
return torch.tensor(bbox_list, dtype=torch.int64)
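# Example: with config['image_shape'] = [256, 256, 3] and config['mask_shape'] = [128, 128]
# every returned bbox is (64, 64, 128, 128), i.e. a 128x128 hole centred in the image.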
def test_random_bbox():
image_shape = [256, 256, 3]
mask_shape = [128, 128]
margin = [0, 0]
bbox = random_bbox(image_shape)
return bbox
def bbox2mask(bboxes, height, width, max_delta_h, max_delta_w):
batch_size = bboxes.size(0)
mask = torch.zeros((batch_size, 1, height, width), dtype=torch.float32)
for i in range(batch_size):
bbox = bboxes[i]
#delta_h = np.random.randint(max_delta_h // 2 + 1)
#delta_w = np.random.randint(max_delta_w // 2 + 1)
delta_h = 0
delta_w = 0
mask[i, :, bbox[0] + delta_h:bbox[0] + bbox[2] - delta_h, bbox[1] + delta_w:bbox[1] + bbox[3] - delta_w] = 1.
return mask
def test_bbox2mask():
image_shape = [256, 256, 3]
mask_shape = [128, 128]
margin = [0, 0]
max_delta_shape = [32, 32]
bbox = random_bbox(image_shape)
mask = bbox2mask(bbox, image_shape[0], image_shape[1], max_delta_shape[0], max_delta_shape[1])
return mask
def local_patch(x, bbox_list):
assert len(x.size()) == 4
patches = []
for i, bbox in enumerate(bbox_list):
t, l, h, w = bbox
patches.append(x[i, :, t:t + h, l:l + w])
return torch.stack(patches, dim=0)
def mask_image(x, bboxes, config):
height, width, _ = config['image_shape']
max_delta_h, max_delta_w = config['max_delta_shape']
mask = bbox2mask(bboxes, height, width, max_delta_h, max_delta_w)
if x.is_cuda:
mask = mask.cuda()
if config['mask_type'] == 'hole':
#print(x.shape)
#print('Mask ', mask.shape)
result = x * (1. - mask)
elif config['mask_type'] == 'mosaic':
# TODO: Matching the mosaic patch size and the mask size
mosaic_unit_size = config['mosaic_unit_size']
downsampled_image = F.interpolate(x, scale_factor=1. / mosaic_unit_size, mode='nearest')
upsampled_image = F.interpolate(downsampled_image, size=(height, width), mode='nearest')
result = upsampled_image * mask + x * (1. - mask)
else:
raise NotImplementedError('Not implemented mask type.')
return result, mask
def spatial_discounting_mask(config):
"""Generate spatial discounting mask constant.
Spatial discounting mask is first introduced in publication:
Generative Image Inpainting with Contextual Attention, Yu et al.
Args:
config: Config should have configuration including HEIGHT, WIDTH,
DISCOUNTED_MASK.
Returns:
tf.Tensor: spatial discounting mask
"""
gamma = config['spatial_discounting_gamma']
height, width = config['mask_shape']
shape = [1, 1, height, width]
if config['discounted_mask']:
mask_values = np.ones((height, width))
for i in range(height):
for j in range(width):
mask_values[i, j] = max(
gamma ** min(i, height - i),
gamma ** min(j, width - j))
mask_values = np.expand_dims(mask_values, 0)
mask_values = np.expand_dims(mask_values, 0)
else:
mask_values = np.ones(shape)
spatial_discounting_mask_tensor = torch.tensor(mask_values, dtype=torch.float32)
if config['cuda']:
spatial_discounting_mask_tensor = spatial_discounting_mask_tensor.cuda()
return spatial_discounting_mask_tensor
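# Example: gamma = 0.9 with a 5x5 mask weights the border pixels with 1.0 and the
# centre pixel with 0.9**2 = 0.81, so reconstruction errors near the hole boundary
# count more than those deep inside the hole.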
def reduce_mean(x, axis=None, keepdim=False):
if not axis:
axis = range(len(x.shape))
for i in sorted(axis, reverse=True):
x = torch.mean(x, dim=i, keepdim=keepdim)
return x
def reduce_std(x, axis=None, keepdim=False):
if not axis:
axis = range(len(x.shape))
for i in sorted(axis, reverse=True):
x = torch.std(x, dim=i, keepdim=keepdim)
return x
def reduce_sum(x, axis=None, keepdim=False):
if not axis:
axis = range(len(x.shape))
for i in sorted(axis, reverse=True):
x = torch.sum(x, dim=i, keepdim=keepdim)
return x
def flow_to_image(flow):
"""Transfer flow map to image.
Part of code forked from flownet.
"""
out = []
maxu = -999.
maxv = -999.
minu = 999.
minv = 999.
maxrad = -1
for i in range(flow.shape[0]):
u = flow[i, :, :, 0]
v = flow[i, :, :, 1]
idxunknow = (abs(u) > 1e7) | (abs(v) > 1e7)
u[idxunknow] = 0
v[idxunknow] = 0
        maxu = max(maxu, np.max(u))
import numpy
import numpy.matlib
import copy
import pandas
import wave
import struct
import os
import math
import logging
import audioop
import ctypes
import multiprocessing
import warnings
import scipy
from scipy import ndimage
import scipy.stats as stats
from scipy.fftpack import fft
from scipy.signal import decimate
from scipy.signal import lfilter
from scipy.fftpack.realtransforms import dct
def read_sph(input_file_name, mode='p'):
"""
Read a SPHERE audio file
:param input_file_name: name of the file to read
:param mode: specifies the following (\* =default)
.. note::
- Scaling:
- 's' Auto scale to make data peak = +-1 (use with caution if reading in chunks)
- 'r' Raw unscaled data (integer values)
- 'p' Scaled to make +-1 equal full scale
- 'o' Scale to bin centre rather than bin edge (e.g. 127 rather than 127.5 for 8 bit values,
can be combined with n+p,r,s modes)
- 'n' Scale to negative peak rather than positive peak (e.g. 128.5 rather than 127.5 for 8 bit values,
can be combined with o+p,r,s modes)
- Format
- 'l' Little endian data (Intel,DEC) (overrides indication in file)
- 'b' Big endian data (non Intel/DEC) (overrides indication in file)
- File I/O
- 'f' Do not close file on exit
- 'd' Look in data directory: voicebox('dir_data')
- 'w' Also read the annotation file \*.wrd if present (as in TIMIT)
- 't' Also read the phonetic transcription file \*.phn if present (as in TIMIT)
- NMAX maximum number of samples to read (or -1 for unlimited [default])
- NSKIP number of samples to skip from start of file (or -1 to continue from previous read when FFX
is given instead of FILENAME [default])
    :return: a tuple such that (Y, FS)
.. note::
- Y data matrix of dimension (samples,channels)
- FS sample frequency in Hz
- WRD{\*,2} cell array with word annotations: WRD{\*,:)={[t_start t_end],'text'} where times are in seconds
only present if 'w' option is given
- PHN{\*,2} cell array with phoneme annotations: PHN{\*,:)={[t_start t_end],'phoneme'} where times
are in seconds only present if 't' option is present
- FFX Cell array containing
1. filename
2. header information
1. first header field name
2. first header field value
3. format string (e.g. NIST_1A)
4.
1. file id
2. current position in file
3. dataoff byte offset in file to start of data
4. order byte order (l or b)
5. nsamp number of samples
6. number of channels
7. nbytes bytes per data value
8. bits number of bits of precision
9. fs sample frequency
10. min value
11. max value
12. coding 0=PCM,1=uLAW + 0=no compression, 0=shorten,20=wavpack,30=shortpack
13. file not yet decompressed
5. temporary filename
If no output parameters are specified,
header information will be printed.
The code to decode shorten-encoded files, is
not yet released with this toolkit.
"""
codings = dict([('pcm', 1), ('ulaw', 2)])
compressions = dict([(',embedded-shorten-', 1),
(',embedded-wavpack-', 2),
(',embedded-shortpack-', 3)])
byteorder = 'l'
endianess = dict([('l', '<'), ('b', '>')])
if not mode == 'p':
mode = [mode, 'p']
k = list((m >= 'p') & (m <= 's') for m in mode)
# scale to input limits not output limits
mno = all([m != 'o' for m in mode])
sc = ''
if k[0]:
sc = mode[0]
# Get byte order (little/big endian)
if any([m == 'l' for m in mode]):
byteorder = 'l'
elif any([m == 'b' for m in mode]):
byteorder = 'b'
ffx = ['', '', '', '', '']
if isinstance(input_file_name, str):
if os.path.exists(input_file_name):
fid = open(input_file_name, 'rb')
elif os.path.exists("".join((input_file_name, '.sph'))):
input_file_name = "".join((input_file_name, '.sph'))
fid = open(input_file_name, 'rb')
else:
raise Exception('Cannot find file {}'.format(input_file_name))
ffx[0] = input_file_name
elif not isinstance(input_file_name, str):
ffx = input_file_name
else:
fid = input_file_name
# Read the header
if ffx[3] == '':
        fid.seek(0, 0)  # go to the beginning of the file
l1 = fid.readline().decode("utf-8")
l2 = fid.readline().decode("utf-8")
if not (l1 == 'NIST_1A\n') & (l2 == ' 1024\n'):
logging.warning('File does not begin with a SPHERE header')
ffx[2] = l1.rstrip()
hlen = int(l2[3:7])
hdr = {}
while True: # Read the header and fill a dictionary
st = fid.readline().decode("utf-8").rstrip()
if st[0] != ';':
elt = st.split(' ')
if elt[0] == 'end_head':
break
if elt[1][0] != '-':
logging.warning('Missing ''-'' in SPHERE header')
break
if elt[1][1] == 's':
hdr[elt[0]] = elt[2]
elif elt[1][1] == 'i':
hdr[elt[0]] = int(elt[2])
else:
hdr[elt[0]] = float(elt[2])
if 'sample_byte_format' in list(hdr.keys()):
if hdr['sample_byte_format'][0] == '0':
bord = 'l'
else:
bord = 'b'
if (bord != byteorder) & all([m != 'b' for m in mode]) \
& all([m != 'l' for m in mode]):
byteorder = bord
icode = 0 # Get encoding, default is PCM
if 'sample_coding' in list(hdr.keys()):
icode = -1 # unknown code
for coding in list(codings.keys()):
if hdr['sample_coding'].startswith(coding):
# is the signal compressed
# if len(hdr['sample_coding']) > codings[coding]:
if len(hdr['sample_coding']) > len(coding):
for compression in list(compressions.keys()):
if hdr['sample_coding'].endswith(compression):
icode = 10 * compressions[compression] \
+ codings[coding] - 1
break
else: # if the signal is not compressed
icode = codings[coding] - 1
break
# initialize info of the files with default values
info = [fid, 0, hlen, ord(byteorder), 0, 1, 2, 16, 1, 1, -1, icode]
# Get existing info from the header
if 'sample_count' in list(hdr.keys()):
info[4] = hdr['sample_count']
if not info[4]: # if no info sample_count or zero
# go to the end of the file
            fid.seek(0, 2)  # go to the end of the file
# get the sample count
info[4] = int(math.floor((fid.tell() - info[2]) / (info[5] * info[6]))) # get the sample_count
if 'channel_count' in list(hdr.keys()):
info[5] = hdr['channel_count']
if 'sample_n_bytes' in list(hdr.keys()):
info[6] = hdr['sample_n_bytes']
if 'sample_sig_bits' in list(hdr.keys()):
info[7] = hdr['sample_sig_bits']
if 'sample_rate' in list(hdr.keys()):
info[8] = hdr['sample_rate']
if 'sample_min' in list(hdr.keys()):
info[9] = hdr['sample_min']
if 'sample_max' in list(hdr.keys()):
info[10] = hdr['sample_max']
ffx[1] = hdr
ffx[3] = info
info = ffx[3]
ksamples = info[4]
if ksamples > 0:
fid = info[0]
if (icode >= 10) & (ffx[4] == ''): # read compressed signal
# need to use a script with SHORTEN
raise Exception('compressed signal, need to unpack in a script with SHORTEN')
info[1] = ksamples
# use modes o and n to determine effective peak
pk = 2 ** (8 * info[6] - 1) * (1 + (float(mno) / 2 - int(all([m != 'b'
for m in
mode]))) / 2 **
info[7])
fid.seek(1024) # jump after the header
nsamples = info[5] * ksamples
if info[6] < 3:
if info[6] < 2:
logging.debug('Sphere i1 PCM')
y = numpy.fromfile(fid, endianess[byteorder]+"i1", -1)
if info[11] % 10 == 1:
if y.shape[0] % 2:
y = numpy.frombuffer(audioop.ulaw2lin(
numpy.concatenate((y, numpy.zeros(1, 'int8'))), 2),
numpy.int16)[:-1]/32768.
else:
y = numpy.frombuffer(audioop.ulaw2lin(y, 2), numpy.int16)/32768.
pk = 1.
else:
y = y - 128
else:
logging.debug('Sphere i2')
y = numpy.fromfile(fid, endianess[byteorder]+"i2", -1)
else: # non verifie
if info[6] < 4:
y = numpy.fromfile(fid, endianess[byteorder]+"i1", -1)
y = y.reshape(nsamples, 3).transpose()
y = (numpy.dot(numpy.array([1, 256, 65536]), y) - (numpy.dot(y[2, :], 2 ** (-7)).astype(int) * 2 ** 24))
else:
y = numpy.fromfile(fid, endianess[byteorder]+"i4", -1)
if sc != 'r':
if sc == 's':
if info[9] > info[10]:
info[9] = numpy.min(y)
info[10] = numpy.max(y)
sf = 1 / numpy.max(list(list(map(abs, info[9:11]))), axis=0)
else:
sf = 1 / pk
y = sf * y
if info[5] > 1:
y = y.reshape(ksamples, info[5])
else:
y = numpy.array([])
if mode != 'f':
fid.close()
info[0] = -1
if not ffx[4] == '':
pass # VERIFY SCRIPT, WHICH CASE IS HANDLED HERE
return y.astype(numpy.float32), int(info[8]), int(info[6])
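# Usage sketch (hypothetical file name):
#   sig, fs, sampwidth = read_sph('utterance.sph', 'p')   # sig scaled to +/-1, e.g. fs = 8000 Hz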
def read_wav(input_file_name):
"""
:param input_file_name:
:return:
"""
wfh = wave.open(input_file_name, "r")
(nchannels, sampwidth, framerate, nframes, comptype, compname) = wfh.getparams()
raw = wfh.readframes(nframes * nchannels)
out = struct.unpack_from("%dh" % nframes * nchannels, raw)
sig = numpy.reshape(numpy.array(out), (-1, nchannels)).squeeze()
wfh.close()
return sig.astype(numpy.float32), framerate, sampwidth
def read_pcm(input_file_name):
"""Read signal from single channel PCM 16 bits
:param input_file_name: name of the PCM file to read.
:return: the audio signal read from the file in a ndarray encoded on 16 bits, None and 2 (depth of the encoding in bytes)
"""
with open(input_file_name, 'rb') as f:
        f.seek(0, 2)  # go to the end of the file
# get the sample count
sample_count = int(f.tell() / 2)
        f.seek(0, 0)  # go to the beginning of the file
data = numpy.asarray(struct.unpack('<' + 'h' * sample_count, f.read()))
return data.astype(numpy.float32), None, 2
def read_audio(input_file_name, framerate=None):
""" Read a 1 or 2-channel audio file in SPHERE, WAVE or RAW PCM format.
The format is determined from the file extension.
If the sample rate read from the file is a multiple of the one given
as parameter, we apply a decimation function to subsample the signal.
:param input_file_name: name of the file to read from
:param framerate: frame rate, optional, if lower than the one read from the file, subsampling is applied
:return: the signal as a numpy array and the sampling frequency
"""
if framerate is None:
raise TypeError("Expected sampling frequency required in sidekit.frontend.io.read_audio")
ext = os.path.splitext(input_file_name)[-1]
if ext.lower() == '.sph':
sig, read_framerate, sampwidth = read_sph(input_file_name, 'p')
elif ext.lower() == '.wav' or ext.lower() == '.wave':
sig, read_framerate, sampwidth = read_wav(input_file_name)
elif ext.lower() == '.pcm' or ext.lower() == '.raw':
sig, read_framerate, sampwidth = read_pcm(input_file_name)
read_framerate = framerate
else:
raise TypeError("Unknown extension of audio file")
# Convert to 16 bit encoding if needed
sig *= (2**(15-sampwidth))
if framerate > read_framerate:
print("Warning in read_audio, up-sampling function is not implemented yet!")
elif read_framerate % float(framerate) == 0 and not framerate == read_framerate:
print("downsample")
sig = decimate(sig, int(read_framerate / float(framerate)), n=None, ftype='iir', axis=0)
return sig.astype(numpy.float32), framerate
def rasta_filt(x):
"""Apply RASTA filtering to the input signal.
:param x: the input audio signal to filter.
cols of x = critical bands, rows of x = frame
same for y but after filtering
default filter is single pole at 0.94
"""
x = x.T
numerator = numpy.arange(.2, -.3, -.1)
denominator = numpy.array([1, -0.94])
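    # The resulting band-pass filter is
    #   H(z) = (0.2 + 0.1 z^-1 - 0.1 z^-3 - 0.2 z^-4) / (1 - 0.94 z^-1),
    # i.e. a 4-tap FIR differencer followed by a single pole at 0.94.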
# Initialize the state. This avoids a big spike at the beginning
# resulting from the dc offset level in each band.
# (this is effectively what rasta/rasta_filt.c does).
# Because Matlab uses a DF2Trans implementation, we have to
# specify the FIR part to get the state right (but not the IIR part)
y = numpy.zeros(x.shape)
zf = numpy.zeros((x.shape[0], 4))
for i in range(y.shape[0]):
y[i, :4], zf[i, :4] = lfilter(numerator, 1, x[i, :4], axis=-1, zi=[0, 0, 0, 0])
# .. but don't keep any of these values, just output zero at the beginning
y = numpy.zeros(x.shape)
# Apply the full filter to the rest of the signal, append it
for i in range(y.shape[0]):
y[i, 4:] = lfilter(numerator, denominator, x[i, 4:], axis=-1, zi=zf[i, :])[0]
return y.T
def cms(features, label=None, global_mean=None):
"""Performs cepstral mean subtraction
:param features: a feature stream of dimension dim x nframes
where dim is the dimension of the acoustic features and nframes the
number of frames in the stream
:param label: a logical vector
:param global_mean: pre-computed mean to use for feature normalization if given
:return: a feature stream
"""
# If no label file as input: all speech are speech
if label is None:
label = numpy.ones(features.shape[0]).astype(bool)
if label.sum() == 0:
mu = numpy.zeros((features.shape[1]))
if global_mean is not None:
mu = global_mean
else:
mu = numpy.mean(features[label, :], axis=0)
features -= mu
def cmvn(features, label=None, global_mean=None, global_std=None):
"""Performs mean and variance normalization
:param features: a feature stream of dimension dim x nframes
where dim is the dimension of the acoustic features and nframes the
number of frames in the stream
:param global_mean: pre-computed mean to use for feature normalization if given
:param global_std: pre-computed standard deviation to use for feature normalization if given
:param label: a logical verctor
:return: a sequence of features
"""
# If no label file as input: all speech are speech
if label is None:
label = numpy.ones(features.shape[0]).astype(bool)
if global_mean is not None and global_std is not None:
mu = global_mean
stdev = global_std
features -= mu
features /= stdev
elif not label.sum() == 0:
mu = numpy.mean(features[label, :], axis=0)
stdev = numpy.std(features[label, :], axis=0)
features -= mu
features /= stdev
def stg(features, label=None, win=301):
"""Performs feature warping on a sliding window
:param features: a feature stream of dimension dim x nframes
where dim is the dimension of the acoustic features and nframes the
number of frames in the stream
:param label: label of selected frames to compute the Short Term Gaussianization, by default, al frames are used
:param win: size of the frame window to consider, must be an odd number to get a symetric context on left and right
:return: a sequence of features
"""
# If no label file as input: all speech are speech
if label is None:
label = numpy.ones(features.shape[0]).astype(bool)
speech_features = features[label, :]
add_a_feature = False
if win % 2 == 1:
# one feature per line
nframes, dim = numpy.shape(speech_features)
# If the number of frames is not enough for one window
if nframes < win:
# if the number of frames is not odd, duplicate the last frame
# if nframes % 2 == 1:
if not nframes % 2 == 1:
nframes += 1
add_a_feature = True
speech_features = numpy.concatenate((speech_features, [speech_features[-1, ]]))
win = nframes
# create the output feature stream
stg_features = numpy.zeros(numpy.shape(speech_features))
# Process first window
r = numpy.argsort(speech_features[:win, ], axis=0)
r = numpy.argsort(r, axis=0)
        arg = (r[: (win - 1) // 2] + 0.5) / win
        stg_features[: (win - 1) // 2, :] = stats.norm.ppf(arg, 0, 1)
# process all following windows except the last one
for m in range(int((win - 1) / 2), int(nframes - (win - 1) / 2)):
idx = list(range(int(m - (win - 1) / 2), int(m + (win - 1) / 2 + 1)))
foo = speech_features[idx, :]
            r = numpy.sum(foo < foo[(win - 1) // 2], axis=0) + 1
arg = (r - 0.5) / win
stg_features[m, :] = stats.norm.ppf(arg, 0, 1)
# Process the last window
r = numpy.argsort(speech_features[list(range(nframes - win, nframes)), ], axis=0)
r = numpy.argsort(r, axis=0)
        arg = (r[(win + 1) // 2: win, :] + 0.5) / win
stg_features[list(range(int(nframes - (win - 1) / 2), nframes)), ] = stats.norm.ppf(arg, 0, 1)
else:
# Raise an exception
raise Exception('Sliding window should have an odd length')
# wrapFeatures = np.copy(features)
if add_a_feature:
stg_features = stg_features[:-1]
features[label, :] = stg_features
def cep_sliding_norm(features, win=301, label=None, center=True, reduce=False):
"""
    Performs cepstral mean subtraction and standard deviation normalization
    in a sliding window. The MFCC matrix is modified in place.
:param features: the MFCC, a numpy array
:param win: the size of the sliding windows
:param label: vad label if available
:param center: performs mean subtraction
:param reduce: performs standard deviation division
"""
if label is None:
label = numpy.ones(features.shape[0]).astype(bool)
if numpy.sum(label) <= win:
if reduce:
cmvn(features, label)
else:
cms(features, label)
else:
d_win = win // 2
df = pandas.DataFrame(features[label, :])
r = df.rolling(window=win, center=True)
mean = r.mean().values
std = r.std().values
mean[0:d_win, :] = mean[d_win, :]
mean[-d_win:, :] = mean[-d_win-1, :]
std[0:d_win, :] = std[d_win, :]
std[-d_win:, :] = std[-d_win-1, :]
if center:
features[label, :] -= mean
if reduce:
features[label, :] /= std
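# Illustrative usage sketch (added for clarity): sliding-window mean/variance
# normalization of a dummy cepstral stream; `feat` is modified in place.
def _demo_cep_sliding_norm():
    feat = numpy.random.randn(1000, 13) + 5.0  # dummy cepstral features
    cep_sliding_norm(feat, win=301, center=True, reduce=True)
    return feat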
def pre_emphasis(input_sig, pre):
"""Pre-emphasis of an audio signal.
:param input_sig: the input vector of signal to pre emphasize
:param pre: value that defines the pre-emphasis filter.
"""
if input_sig.ndim == 1:
return (input_sig - numpy.c_[input_sig[numpy.newaxis, :][..., :1],
input_sig[numpy.newaxis, :][..., :-1]].squeeze() * pre)
else:
return input_sig - numpy.c_[input_sig[..., :1], input_sig[..., :-1]] * pre
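# Illustrative example (added for clarity): pre_emphasis implements
# y[n] = x[n] - pre * x[n - 1] (with x[-1] taken as x[0]).
def _demo_pre_emphasis():
    sig = numpy.sin(2 * numpy.pi * 440. * numpy.arange(8000) / 8000.)  # dummy 1-second tone at 8 kHz
    return pre_emphasis(sig, 0.97)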
"""Generate a new array that chops the given array along the given axis
into overlapping frames.
This method has been implemented by <NAME>,
as part of the talk box toolkit
example::
segment_axis(arange(10), 4, 2)
array([[0, 1, 2, 3],
( [2, 3, 4, 5],
[4, 5, 6, 7],
[6, 7, 8, 9]])
:param a: the array to segment
:param length: the length of each frame
:param overlap: the number of array elements by which the frames should overlap
:param axis: the axis to operate on; if None, act on the flattened array
:param end: what to do with the last frame, if the array is not evenly
divisible into pieces. Options are:
- 'cut' Simply discard the extra values
- 'wrap' Copy values from the beginning of the array
- 'pad' Pad with a constant value
:param endvalue: the value to use for end='pad'
:return: a ndarray
The array is not copied unless necessary (either because it is unevenly
strided and being flattened or because end is set to 'pad' or 'wrap').
"""
if axis is None:
a = numpy.ravel(a) # may copy
axis = 0
l = a.shape[axis]
if overlap >= length:
raise ValueError("frames cannot overlap by more than 100%")
if overlap < 0 or length <= 0:
raise ValueError("overlap must be nonnegative and length must" +
"be positive")
if l < length or (l - length) % (length - overlap):
if l > length:
roundup = length + (1 + (l - length) // (length - overlap)) * (length - overlap)
rounddown = length + ((l - length) // (length - overlap)) * (length - overlap)
else:
roundup = length
rounddown = 0
assert rounddown < l < roundup
assert roundup == rounddown + (length - overlap) or (roundup == length and rounddown == 0)
a = a.swapaxes(-1, axis)
if end == 'cut':
a = a[..., :rounddown]
l = a.shape[0]
elif end in ['pad', 'wrap']: # copying will be necessary
s = list(a.shape)
s[-1] = roundup
b = numpy.empty(s, dtype=a.dtype)
b[..., :l] = a
if end == 'pad':
b[..., l:] = endvalue
elif end == 'wrap':
b[..., l:] = a[..., :roundup - l]
a = b
a = a.swapaxes(-1, axis)
if l == 0:
raise ValueError("Not enough data points to segment array " +
"in 'cut' mode; try 'pad' or 'wrap'")
assert l >= length
assert (l - length) % (length - overlap) == 0
n = 1 + (l - length) // (length - overlap)
s = a.strides[axis]
new_shape = a.shape[:axis] + (n, length) + a.shape[axis + 1:]
new_strides = a.strides[:axis] + ((length - overlap) * s, s) + a.strides[axis + 1:]
try:
return numpy.ndarray.__new__(numpy.ndarray, strides=new_strides,
shape=new_shape, buffer=a, dtype=a.dtype)
except TypeError:
a = a.copy()
# Shape doesn't change but strides does
new_strides = a.strides[:axis] + ((length - overlap) * s, s) + a.strides[axis + 1:]
return numpy.ndarray.__new__(numpy.ndarray, strides=new_strides,
shape=new_shape, buffer=a, dtype=a.dtype)
def speech_enhancement(X, Gain, NN):
"""This program is only to process the single file seperated by the silence
section if the silence section is detected, then a counter to number of
buffer is set and pre-processing is required.
Usage: SpeechENhance(wavefilename, Gain, Noise_floor)
:param X: input audio signal
:param Gain: default value is 0.9, suggestion range 0.6 to 1.4,
higher value means more subtraction or noise redcution
:param NN:
:return: a 1-dimensional array of boolean that
is True for high energy frames.
Copyright 2014 <NAME> and <NAME>
"""
    if X.shape[0] < 512:  # too short to process; ideally this should raise an exception
return X
    num1 = 40  # disable buffer number
Alpha = 0.75 # original value is 0.9
FrameSize = 32 * 2 # 256*2
FrameShift = int(FrameSize / NN) # FrameSize/2=128
nfft = FrameSize # = FrameSize
Fmax = int(numpy.floor(nfft / 2) + 1) # 128+1 = 129
# arising hamming windows
Hamm = 1.08 * (0.54 - 0.46 * numpy.cos(2 * numpy.pi * numpy.arange(FrameSize) / (FrameSize - 1)))
y0 = numpy.zeros(FrameSize - FrameShift) # 128 zeros
Eabsn = numpy.zeros(Fmax)
Eta1 = Eabsn
###################################################################
# initial parameter for noise min
mb = numpy.ones((1 + FrameSize // 2, 4)) * FrameSize / 2 # 129x4 set four buffer * FrameSize/2
im = 0
Beta1 = 0.9024 # seems that small value is better;
pxn = numpy.zeros(1 + FrameSize // 2) # 1+FrameSize/2=129 zeros vector
###################################################################
old_absx = Eabsn
x = numpy.zeros(FrameSize)
x[FrameSize - FrameShift:FrameSize] = X[
numpy.arange(numpy.min((int(FrameShift), X.shape[0])))]
if x.shape[0] < FrameSize:
EOF = 1
return X
EOF = 0
Frame = 0
###################################################################
# add the pre-noise estimates
for i in range(200):
Frame += 1
fftn = fft(x * Hamm) # get its spectrum
absn = numpy.abs(fftn[0:Fmax]) # get its amplitude
# add the following part from noise estimation algorithm
pxn = Beta1 * pxn + (1 - Beta1) * absn # Beta=0.9231 recursive pxn
im = (im + 1) % 40 # noise_memory=47; im=0 (init) for noise level estimation
if im:
mb[:, 0] = numpy.minimum(mb[:, 0], pxn) # 129 by 4 im<>0 update the first vector from PXN
else:
mb[:, 1:] = mb[:, :3] # im==0 every 47 time shift pxn to first vector of mb
mb[:, 0] = pxn
# 0-2 vector shifted to 1 to 3
pn = 2 * numpy.min(mb, axis=1) # pn = 129x1po(9)=1.5 noise level estimate compensation
# over_sub_noise= oversubtraction factor
        # end of noise detection algorithm
x[:FrameSize - FrameShift] = x[FrameShift:FrameSize]
index1 = numpy.arange(FrameShift * Frame, numpy.min((FrameShift * (Frame + 1), X.shape[0])))
In_data = X[index1] # fread(ifp, FrameShift, 'short');
if In_data.shape[0] < FrameShift: # to check file is out
EOF = 1
break
else:
x[FrameSize - FrameShift:FrameSize] = In_data # shift new 128 to position 129 to FrameSize location
# end of for loop for noise estimation
# end of prenoise estimation ************************
x = numpy.zeros(FrameSize)
x[FrameSize - FrameShift:FrameSize] = X[numpy.arange(numpy.min((int(FrameShift), X.shape[0])))]
if x.shape[0] < FrameSize:
EOF = 1
return X
EOF = 0
Frame = 0
X1 = numpy.zeros(X.shape)
Frame = 0
while EOF == 0:
Frame += 1
xwin = x * Hamm
fftx = fft(xwin, nfft) # FrameSize FFT
absx = numpy.abs(fftx[0:Fmax]) # Fmax=129,get amplitude of x
argx = fftx[:Fmax] / (absx + numpy.spacing(1)) # normalize x spectrum phase
absn = absx
# add the following part from rainer algorithm
pxn = Beta1 * pxn + (1 - Beta1) * absn # s Beta=0.9231 recursive pxn
im = int((im + 1) % (num1 * NN / 2)) # original =40 noise_memory=47; im=0 (init) for noise level estimation
if im:
mb[:, 0] = numpy.minimum(mb[:, 0], pxn) # 129 by 4 im<>0 update the first vector from PXN
else:
mb[:, 1:] = mb[:, :3] # im==0 every 47 time shift pxn to first vector of mb
mb[:, 0] = pxn
pn = 2 * numpy.min(mb, axis=1) # pn = 129x1po(9)=1.5 noise level estimate compensation
Eabsn = pn
Gaina = Gain
temp1 = Eabsn * Gaina
Eta1 = Alpha * old_absx + (1 - Alpha) * numpy.maximum(absx - temp1, 0)
new_absx = (absx * Eta1) / (Eta1 + temp1) # wiener filter
old_absx = new_absx
ffty = new_absx * argx # multiply amplitude with its normalized spectrum
        y = numpy.real(numpy.fft.ifft(numpy.concatenate((ffty,
                                                         numpy.conj(ffty[numpy.arange(Fmax - 2, 0, -1)])))))
y[:FrameSize - FrameShift] = y[:FrameSize - FrameShift] + y0
y0 = y[FrameShift:FrameSize] # keep 129 to FrameSize point samples
x[:FrameSize - FrameShift] = x[FrameShift:FrameSize]
index1 = numpy.arange(FrameShift * Frame, numpy.min((FrameShift * (Frame + 1), X.shape[0])))
In_data = X[index1] # fread(ifp, FrameShift, 'short');
z = 2 / NN * y[:FrameShift] # left channel is the original signal
z /= 1.15
z = numpy.minimum(z, 32767)
z = numpy.maximum(z, -32768)
index0 = numpy.arange(FrameShift * (Frame - 1), FrameShift * Frame)
if not all(index0 < X1.shape[0]):
idx = 0
while (index0[idx] < X1.shape[0]) & (idx < index0.shape[0]):
X1[index0[idx]] = z[idx]
idx += 1
else:
X1[index0] = z
if In_data.shape[0] == 0:
EOF = 1
else:
x[numpy.arange(FrameSize - FrameShift, FrameSize + In_data.shape[0] - FrameShift)] = In_data
X1 = X1[X1.shape[0] - X.shape[0]:]
# }
# catch{
# }
return X1
def vad_percentil(log_energy, percent):
"""
:param log_energy:
:param percent:
:return:
"""
thr = numpy.percentile(log_energy, percent)
return log_energy > thr, thr
def vad_energy(log_energy,
distrib_nb=3,
nb_train_it=8,
flooring=0.0001, ceiling=1.0,
alpha=2):
# center and normalize the energy
log_energy = (log_energy - numpy.mean(log_energy)) / numpy.std(log_energy)
# Initialize a Mixture with 2 or 3 distributions
world = Mixture()
# set the covariance of each component to 1.0 and the mean to mu + meanIncrement
world.cst = numpy.ones(distrib_nb) / (numpy.pi / 2.0)
world.det = numpy.ones(distrib_nb)
world.mu = -2 + 4.0 * numpy.arange(distrib_nb) / (distrib_nb - 1)
world.mu = world.mu[:, numpy.newaxis]
world.invcov = numpy.ones((distrib_nb, 1))
# set equal weights for each component
world.w = numpy.ones(distrib_nb) / distrib_nb
world.cov_var_ctl = copy.deepcopy(world.invcov)
# Initialize the accumulator
accum = copy.deepcopy(world)
# Perform nbTrainIt iterations of EM
for it in range(nb_train_it):
accum._reset()
# E-step
world._expectation(accum, log_energy)
# M-step
world._maximization(accum, ceiling, flooring)
# Compute threshold
threshold = world.mu.max() - alpha * numpy.sqrt(1.0 / world.invcov[world.mu.argmax(), 0])
# Apply frame selection with the current threshold
label = log_energy > threshold
return label, threshold
def vad_snr(sig, snr, fs=16000, shift=0.01, nwin=256):
"""Select high energy frames based on the Signal to Noise Ratio
of the signal.
Input signal is expected encoded on 16 bits
:param sig: the input audio signal
:param snr: Signal to noise ratio to consider
:param fs: sampling frequency of the input signal in Hz. Default is 16000.
:param shift: shift between two frames in seconds. Default is 0.01
:param nwin: number of samples of the sliding window. Default is 256.
"""
overlap = nwin - int(shift * fs)
sig /= 32768.
sig = speech_enhancement(numpy.squeeze(sig), 1.2, 2)
# Compute Standard deviation
sig += 0.1 * numpy.random.randn(sig.shape[0])
std2 = segment_axis(sig, nwin, overlap, axis=None, end='cut', endvalue=0).T
std2 = numpy.std(std2, axis=0)
std2 = 20 * numpy.log10(std2) # convert the dB
# APPLY VAD
label = (std2 > numpy.max(std2) - snr) & (std2 > -75)
return label
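# Illustrative usage sketch (added for clarity): vad_snr expects a signal on a
# 16-bit scale; the white noise below is only a stand-in for real audio.
def _demo_vad_snr():
    sig = numpy.random.randn(32000) * 1000.  # dummy 2-second signal at 16 kHz
    label = vad_snr(sig, snr=40, fs=16000, shift=0.01, nwin=256)
    # `label` is a boolean array with one value per analysis frame
    return label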
def label_fusion(label, win=3):
"""Apply a morphological filtering on the label to remove isolated labels.
In case the input is a two channel label (2D ndarray of boolean of same
length) the labels of two channels are fused to remove
overlaping segments of speech.
:param label: input labels given in a 1D or 2D ndarray
:param win: parameter or the morphological filters
"""
channel_nb = len(label)
if channel_nb == 2:
overlap_label = numpy.logical_and(label[0], label[1])
label[0] = numpy.logical_and(label[0], ~overlap_label)
label[1] = numpy.logical_and(label[1], ~overlap_label)
for idx, lbl in enumerate(label):
cl = ndimage.grey_closing(lbl, size=win)
label[idx] = ndimage.grey_opening(cl, size=win)
return label
def hz2mel(f, htk=True):
"""Convert an array of frequency in Hz into mel.
:param f: frequency to convert
:return: the equivalence on the mel scale.
"""
if htk:
return 2595 * numpy.log10(1 + f / 700.)
else:
f = numpy.array(f)
        # Mel fn to match Slaney's Auditory Toolbox mfcc.m
f_0 = 0.
f_sp = 200. / 3.
brkfrq = 1000.
brkpt = (brkfrq - f_0) / f_sp
logstep = numpy.exp(numpy.log(6.4) / 27)
linpts = f < brkfrq
z = numpy.zeros_like(f)
# fill in parts separately
z[linpts] = (f[linpts] - f_0) / f_sp
z[~linpts] = brkpt + (numpy.log(f[~linpts] / brkfrq)) / numpy.log(logstep)
if z.shape == (1,):
return z[0]
else:
return z
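# Illustrative example (added for clarity): the two mel definitions differ;
# for instance at 440 Hz the HTK formula gives roughly 549 mel, while the
# Slaney (Auditory Toolbox) variant gives 6.6 (its linear region is 200/3 Hz per mel).
def _demo_hz2mel():
    freqs = numpy.array([440., 1000., 4000.])
    return hz2mel(freqs, htk=True), hz2mel(freqs, htk=False)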
def mel2hz(z, htk=True):
"""Convert an array of mel values in Hz.
    :param z: ndarray of mel values to convert to Hz.
:return: the equivalent values in Hertz.
"""
if htk:
return 700. * (10**(z / 2595.) - 1)
else:
z = numpy.array(z, dtype=float)
f_0 = 0
f_sp = 200. / 3.
brkfrq = 1000.
brkpt = (brkfrq - f_0) / f_sp
logstep = numpy.exp(numpy.log(6.4) / 27)
linpts = (z < brkpt)
f = numpy.zeros_like(z)
# fill in parts separately
f[linpts] = f_0 + f_sp * z[linpts]
f[~linpts] = brkfrq * numpy.exp(numpy.log(logstep) * (z[~linpts] - brkpt))
if f.shape == (1,):
return f[0]
else:
return f
def hz2bark(f):
"""
Convert frequencies (Hertz) to Bark frequencies
:param f: the input frequency
:return:
"""
return 6. * numpy.arcsinh(f / 600.)
def bark2hz(z):
"""
Converts frequencies Bark to Hertz (Hz)
:param z:
:return:
"""
return 600. * numpy.sinh(z / 6.)
def compute_delta(features,
win=3,
method='filter',
filt=numpy.array([.25, .5, .25, 0, -.25, -.5, -.25])):
"""features is a 2D-ndarray each row of features is a a frame
:param features: the feature frames to compute the delta coefficients
:param win: parameter that set the length of the computation window.
The size of the window is (win x 2) + 1
:param method: method used to compute the delta coefficients
can be diff or filter
    :param filt: definition of the filter to use in "filter" mode; the default
        is filt=numpy.array([.25, .5, .25, 0, -.25, -.5, -.25])
:return: the delta coefficients computed on the original features.
"""
# First and last features are appended to the begining and the end of the
# stream to avoid border effect
x = numpy.zeros((features.shape[0] + 2 * win, features.shape[1]), dtype=numpy.float32)
x[:win, :] = features[0, :]
x[win:-win, :] = features
x[-win:, :] = features[-1, :]
delta = numpy.zeros(x.shape, dtype=numpy.float32)
if method == 'diff':
filt = numpy.zeros(2 * win + 1, dtype=numpy.float32)
filt[0] = -1
filt[-1] = 1
for i in range(features.shape[1]):
delta[:, i] = numpy.convolve(features[:, i], filt)
return delta[win:-win, :]
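# Illustrative usage sketch (added for clarity): delta features are usually
# stacked next to the static coefficients.
def _demo_compute_delta():
    cep = numpy.random.randn(50, 13).astype(numpy.float32)  # dummy cepstra: 50 frames x 13 coefficients
    delta = compute_delta(cep, win=3, method='filter')
    return numpy.hstack((cep, delta))  # (50, 26) matrix of static + delta features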
def pca_dct(cep, left_ctx=12, right_ctx=12, p=None):
"""Apply DCT PCA as in [McLaren 2015] paper:
<NAME> and <NAME>, 'Improved Speaker Recognition
Using DCT coefficients as features' in ICASSP, 2015
A 1D-dct is applied to the cepstral coefficients on a temporal
sliding window.
The resulting matrix is then flatten and reduced by using a Principal
Component Analysis.
:param cep: a matrix of cepstral cefficients, 1 line per feature vector
:param left_ctx: number of frames to consider for left context
:param right_ctx: number of frames to consider for right context
:param p: a PCA matrix trained on a developpment set to reduce the
dimension of the features. P is a portait matrix
"""
y = numpy.r_[numpy.resize(cep[0, :], (left_ctx, cep.shape[1])),
cep,
numpy.resize(cep[-1, :], (right_ctx, cep.shape[1]))]
ceps = framing(y, win_size=left_ctx + 1 + right_ctx).transpose(0, 2, 1)
dct_temp = (dct_basis(left_ctx + 1 + right_ctx, left_ctx + 1 + right_ctx)).T
if p is None:
p = numpy.eye(dct_temp.shape[0] * cep.shape[1], dtype=numpy.float32)
return (numpy.dot(ceps.reshape(-1, dct_temp.shape[0]),
dct_temp).reshape(ceps.shape[0], -1)).dot(p)
def shifted_delta_cepstral(cep, d=1, p=3, k=7):
"""
Compute the Shifted-Delta-Cepstral features for language identification
:param cep: matrix of feature, 1 vector per line
:param d: represents the time advance and delay for the delta computation
:param k: number of delta-cepstral blocks whose delta-cepstral
coefficients are stacked to form the final feature vector
:param p: time shift between consecutive blocks.
return: cepstral coefficient concatenated with shifted deltas
"""
y = numpy.r_[numpy.resize(cep[0, :], (d, cep.shape[1])),
cep,
numpy.resize(cep[-1, :], (k * 3 + d, cep.shape[1]))]
delta = compute_delta(y, win=d, method='diff')
sdc = numpy.empty((cep.shape[0], cep.shape[1] * k))
idx = numpy.zeros(delta.shape[0], dtype='bool')
for ii in range(k):
idx[d + ii * p] = True
for ff in range(len(cep)):
sdc[ff, :] = delta[idx, :].reshape(1, -1)
idx = numpy.roll(idx, 1)
return numpy.hstack((cep, sdc))
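# Illustrative usage sketch (added for clarity): a common SDC configuration for
# language identification is N-d-P-k = 7-1-3-7.
def _demo_shifted_delta_cepstral():
    cep = numpy.random.randn(200, 7)  # dummy 7-dimensional cepstra
    sdc = shifted_delta_cepstral(cep, d=1, p=3, k=7)
    return sdc  # 7 static + 7 * 7 shifted delta coefficients = 56 columns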
def trfbank(fs, nfft, lowfreq, maxfreq, nlinfilt, nlogfilt, midfreq=1000):
"""Compute triangular filterbank for cepstral coefficient computation.
:param fs: sampling frequency of the original signal.
:param nfft: number of points for the Fourier Transform
:param lowfreq: lower limit of the frequency band filtered
:param maxfreq: higher limit of the frequency band filtered
:param nlinfilt: number of linear filters to use in low frequencies
:param nlogfilt: number of log-linear filters to use in high frequencies
:param midfreq: frequency boundary between linear and log-linear filters
:return: the filter bank and the central frequencies of each filter
"""
# Total number of filters
nfilt = nlinfilt + nlogfilt
# ------------------------
# Compute the filter bank
# ------------------------
# Compute start/middle/end points of the triangular filters in spectral
# domain
frequences = numpy.zeros(nfilt + 2, dtype=numpy.float32)
if nlogfilt == 0:
linsc = (maxfreq - lowfreq) / (nlinfilt + 1)
frequences[:nlinfilt + 2] = lowfreq + numpy.arange(nlinfilt + 2) * linsc
elif nlinfilt == 0:
low_mel = hz2mel(lowfreq)
max_mel = hz2mel(maxfreq)
mels = numpy.zeros(nlogfilt + 2)
# mels[nlinfilt:]
melsc = (max_mel - low_mel) / (nfilt + 1)
mels[:nlogfilt + 2] = low_mel + numpy.arange(nlogfilt + 2) * melsc
# Back to the frequency domain
frequences = mel2hz(mels)
else:
# Compute linear filters on [0;1000Hz]
linsc = (min([midfreq, maxfreq]) - lowfreq) / (nlinfilt + 1)
frequences[:nlinfilt] = lowfreq + numpy.arange(nlinfilt) * linsc
# Compute log-linear filters on [1000;maxfreq]
low_mel = hz2mel(min([1000, maxfreq]))
max_mel = hz2mel(maxfreq)
mels = numpy.zeros(nlogfilt + 2, dtype=numpy.float32)
melsc = (max_mel - low_mel) / (nlogfilt + 1)
# Verify that mel2hz(melsc)>linsc
while mel2hz(melsc) < linsc:
# in this case, we add a linear filter
nlinfilt += 1
nlogfilt -= 1
frequences[:nlinfilt] = lowfreq + numpy.arange(nlinfilt) * linsc
low_mel = hz2mel(frequences[nlinfilt - 1] + 2 * linsc)
max_mel = hz2mel(maxfreq)
mels = numpy.zeros(nlogfilt + 2, dtype=numpy.float32)
melsc = (max_mel - low_mel) / (nlogfilt + 1)
mels[:nlogfilt + 2] = low_mel + numpy.arange(nlogfilt + 2) * melsc
# Back to the frequency domain
frequences[nlinfilt:] = mel2hz(mels)
heights = 2. / (frequences[2:] - frequences[0:-2])
# Compute filterbank coeff (in fft domain, in bins)
fbank = numpy.zeros((nfilt, int(numpy.floor(nfft / 2)) + 1), dtype=numpy.float32)
# FFT bins (in Hz)
n_frequences = numpy.arange(nfft) / (1. * nfft) * fs
for i in range(nfilt):
low = frequences[i]
cen = frequences[i + 1]
hi = frequences[i + 2]
        lid = numpy.arange(numpy.floor(low * nfft / fs) + 1, numpy.floor(cen * nfft / fs) + 1, dtype=int)
        left_slope = heights[i] / (cen - low)
        rid = numpy.arange(numpy.floor(cen * nfft / fs) + 1,
                           min(numpy.floor(hi * nfft / fs) + 1, nfft), dtype=int)
right_slope = heights[i] / (hi - cen)
fbank[i][lid] = left_slope * (n_frequences[lid] - low)
fbank[i][rid[:-1]] = right_slope * (hi - n_frequences[rid[:-1]])
return fbank, frequences
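# Illustrative usage sketch (added for clarity): a 24-filter mel filterbank for
# 16 kHz audio and a 512-point FFT.
def _demo_trfbank():
    fbank, center_freqs = trfbank(fs=16000, nfft=512, lowfreq=0, maxfreq=8000,
                                  nlinfilt=0, nlogfilt=24)
    return fbank  # shape (24, 257): one row per filter, one column per FFT bin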
def mel_filter_bank(fs, nfft, lowfreq, maxfreq, widest_nlogfilt, widest_lowfreq, widest_maxfreq,):
"""Compute triangular filterbank for cepstral coefficient computation.
:param fs: sampling frequency of the original signal.
:param nfft: number of points for the Fourier Transform
:param lowfreq: lower limit of the frequency band filtered
:param maxfreq: higher limit of the frequency band filtered
:param widest_nlogfilt: number of log filters
:param widest_lowfreq: lower frequency of the filter bank
    :param widest_maxfreq: higher frequency of the filter bank
:return: the filter bank and the central frequencies of each filter
"""
# ------------------------
# Compute the filter bank
# ------------------------
# Compute start/middle/end points of the triangular filters in spectral
# domain
widest_freqs = numpy.zeros(widest_nlogfilt + 2, dtype=numpy.float32)
low_mel = hz2mel(widest_lowfreq)
max_mel = hz2mel(widest_maxfreq)
mels = numpy.zeros(widest_nlogfilt+2)
melsc = (max_mel - low_mel) / (widest_nlogfilt + 1)
mels[:widest_nlogfilt + 2] = low_mel + numpy.arange(widest_nlogfilt + 2) * melsc
# Back to the frequency domain
widest_freqs = mel2hz(mels)
# Select filters in the narrow band
sub_band_freqs = numpy.array([fr for fr in widest_freqs if lowfreq <= fr <= maxfreq], dtype=numpy.float32)
heights = 2./(sub_band_freqs[2:] - sub_band_freqs[0:-2])
nfilt = sub_band_freqs.shape[0] - 2
# Compute filterbank coeff (in fft domain, in bins)
    fbank = numpy.zeros((nfilt, int(numpy.floor(nfft / 2)) + 1), dtype=numpy.float32)
# FFT bins (in Hz)
nfreqs = numpy.arange(nfft) / (1. * nfft) * fs
for i in range(nfilt):
low = sub_band_freqs[i]
cen = sub_band_freqs[i+1]
hi = sub_band_freqs[i+2]
        lid = numpy.arange(numpy.floor(low * nfft / fs) + 1, numpy.floor(cen * nfft / fs) + 1, dtype=int)
import numpy as np
from tabulate import tabulate
import matplotlib.pyplot as plt
from scipy.stats import chi2
from .core import *
# Set the font size
plt.rcParams.update({'font.size': 14})
class JointModel(ReadData, ModelFit, BaseFunc):
def __init__(self, df, formula, poly_orders=(), optim_meth='default', model_select=False):
"""
Basic function for fitting the joint models
Parameters:
- df: The dataset of interest. It should be a 'pandas' DataFrame
- formula: A formula showing the response, subject id, design
matrices. It is a string following the rules defined in R,
for example, 'y|id|t~x1+x2|x1+x3'.
[1] On the left hand side of '~', there are 3 headers of the
data, partitioned by '|':
- y: The response vector for all the subjects
- id: The ID number which identifying different subjects
- t: The vector of time, which constructs the polynomials
for modelling the means, innovation variances, and
generalised auto regressive parameters.
[2] On the right hand side of '~', there are two parts
partitioned by '|':
                    - x1+x2: '+' joins two headers, which are the covariates
for the mean. There can be more headers and operators.
Package 'patsy' is used to achieve that.
- x1+x3: Similar to the left part, except that they are
for the innovation variances.
- poly_orders: A tuple of length 3 or length 0. If the length is 3,
it specifies the polynomial orders of time for the mean, innovation
variance, and generalised auto regressive parameters. If the length
is 0, then the model selection procedures might be used.
- optim_meth: The optimisation algorithm used. There are 2 options:
[1] 'default': use profile likelihood estimation to update
the 3 types of parameters.
[2] 'BFGS': use BFGS method to update all 3 parameters together
If not specified, 'default' would be used
- model_select: True or False. To do model selection or not.
"""
# Read data to get the design matrices, response, and ID numbers
ReadData.__init__(self, df, formula, poly_orders)
# Get functions of use
BaseFunc.__init__(self, self.mat_X, self.mat_Z,
                          self.mat_W, self.vec_y, self.vec_n)
self.formula = formula
self.poly_orders = poly_orders
self.optim_meth = optim_meth
        # Check if the model selection procedure is required
if (isinstance(self.poly_orders, tuple) and
len(self.poly_orders) == 3 and
all(isinstance(n, int) for n in self.poly_orders)):
if model_select == False:
# just fit the model
pass
else:
# if the request of selection is given and the polynomial
# orders are given correctly, then traverse under the
# polynomial orders
ReadData.__init__(self, df, formula, ())
self._traverse_models()
self.mat_X, self.mat_Z, self.mat_W = self._new_design(
self.poly_orders)
else:
# if the given polynomial orders is not in the correct form
self.poly_orders = self._model_select()
self.mat_X, self.mat_Z, self.mat_W = self._new_design(
self.poly_orders)
ModelFit.__init__(self, self.mat_X, self.mat_Z,
                          self.mat_W, self.vec_y, self.vec_n, self.optim_meth)
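    # Illustrative usage sketch (added for clarity; `df` is a hypothetical
    # pandas DataFrame with columns y, id, t, x1, x2, x3):
    #   model = JointModel(df, 'y|id|t~x1+x2|x1+x3', poly_orders=(3, 2, 2))
    #   model.summary()
    #   model.wald_test()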
def summary(self):
"""
Return the estimated values, maximum log likelihood, and BIC
"""
print('Model:')
print(self.formula, self.poly_orders)
print('Mean Parameters:')
print(self._bta)
print('Innovation Variance Parameters:')
print(self._lmd)
print('Autoregressive Parameters:')
print(self._gma)
print('Maximum of log-likelihood:', self.max_log_lik)
print('BIC:', self.bic)
print('')
def wald_test(self):
"""
Do the hypothesis test for each parameter
"""
i_11, i_22, i_33 = self._info_mat()
inv_11 = np.linalg.inv(i_11)
inv_22 = np.linalg.inv(i_22)
inv_33 = np.linalg.inv(i_33)
print('<NAME>')
print('Mean Parameters:')
table = []
for i in range(self._num_bta):
est = self._bta[i]
test = est**2 / inv_11[i, i]
p_val = chi2.sf(test, 1)
table.append(['beta' + str(i), est, test, p_val])
print(tabulate(table, headers=[
'', 'Estimate', 'chi-square', 'p-value']))
print('')
print('Innovation Variance Parameters:')
table = []
for i in range(self._num_lmd):
est = self._lmd[i]
test = est**2 / inv_22[i, i]
p_val = chi2.sf(test, 1)
table.append(['lambda' + str(i), est, test, p_val])
print(tabulate(table, headers=[
'', 'Estimate', 'chi-square', 'p-value']))
print('')
print('Autoregressive Parameters:')
table = []
for i in range(self._num_gma):
est = self._gma[i]
test = est**2 / inv_33[i, i]
p_val = chi2.sf(test, 1)
table.append(['gamma' + str(i), est, test, p_val])
print(tabulate(table, headers=[
'', 'Estimate', 'chi-square', 'p-value']))
print('')
def _new_design(self, poly_orders):
"""
Get the design matrices when doing model selection
"""
lhs = np.ones((len(self.vec_t), poly_orders[0] + 1))
for i in range(1, poly_orders[0] + 1):
lhs[:, i] = np.power(self.vec_t, i)
mat_X = np.hstack((lhs, self.mat_X))
lhs = np.ones((len(self.vec_t), poly_orders[1] + 1))
for i in range(1, poly_orders[1] + 1):
lhs[:, i] = np.power(self.vec_t, i)
mat_Z = np.hstack((lhs, self.mat_Z))
mat_W = self._build_mat_W(poly_orders[2] + 1)
return mat_X, mat_Z, mat_W
def _model_select(self):
"""
        Model selection method for finding the best triple of polynomial orders
"""
n_max = np.max(self.vec_n)
mat_X, mat_Z, mat_W = self._new_design((0, n_max - 1, n_max - 1))
mf = ModelFit(mat_X, mat_Z, mat_W, self.vec_y, self.vec_n)
temp = mf.bic
p_star = 0
for i in range(1, n_max):
# fit the model with such polynomial orders
poly_orders = (i, n_max - 1, n_max - 1)
mat_X, mat_Z, mat_W = self._new_design(poly_orders)
mf = ModelFit(mat_X, mat_Z, mat_W, self.vec_y, self.vec_n)
# find the smallest BIC and p*
if mf.bic < temp:
temp = mf.bic
p_star = i
mat_X, mat_Z, mat_W = self._new_design((n_max - 1, 0, n_max - 1))
mf = ModelFit(mat_X, mat_Z, mat_W, self.vec_y, self.vec_n)
temp = mf.bic
d_star = 0
for j in range(1, n_max):
# fit the model with such polynomial orders
poly_orders = (n_max - 1, j, n_max - 1)
mat_X, mat_Z, mat_W = self._new_design(poly_orders)
mf = ModelFit(mat_X, mat_Z, mat_W, self.vec_y, self.vec_n)
# find the smallest BIC and d*
if mf.bic < temp:
temp = mf.bic
d_star = j
mat_X, mat_Z, mat_W = self._new_design((n_max - 1, n_max - 1, 0))
mf = ModelFit(mat_X, mat_Z, mat_W, self.vec_y, self.vec_n)
temp = mf.bic
q_star = 0
for k in range(1, n_max):
# fit the model with such polynomial orders
poly_orders = (n_max - 1, n_max - 1, k)
mat_X, mat_Z, mat_W = self._new_design(poly_orders)
mf = ModelFit(mat_X, mat_Z, mat_W, self.vec_y, self.vec_n)
# find the smallest BIC and q*
if mf.bic < temp:
temp = mf.bic
q_star = k
return (p_star, d_star, q_star)
def _traverse_models(self):
p_max, d_max, q_max = self.poly_orders
mat_X, mat_Z, mat_W = self._new_design((0, 0, 0))
mf = ModelFit(mat_X, mat_Z, mat_W, self.vec_y, self.vec_n)
temp = mf.bic
for i in range(p_max + 1):
for j in range(d_max + 1):
for k in range(q_max + 1):
print(i, j, k)
mat_X, mat_Z, mat_W = self._new_design((i, j, k))
mf = ModelFit(mat_X, mat_Z, mat_W, self.vec_y, self.vec_n)
if mf.bic < temp:
temp = mf.bic
self.poly_orders = (i, j, k)
def _bootstrap_data(self):
"""
Generate bootstrap sample from data
"""
        index_boot = np.random.choice(self.m, self.m, replace=True)
import skimage
from skimage.io import imread, imsave
from skimage import measure
import matplotlib.pyplot as plt
from skimage.segmentation import quickshift, felzenszwalb, slic
from skimage.segmentation import mark_boundaries
from skimage.filters import gaussian, median
from skimage.morphology import disk
import numpy as np
from scipy.stats import mode
from skimage.color import rgb2grey
from skimage.transform import rescale, resize
from skimage.util import view_as_blocks, view_as_windows
from skimage.restoration import denoise_bilateral, denoise_tv_chambolle
from skimage.morphology import binary_erosion, binary_dilation, binary_opening, binary_closing
import os
from numpy import max
def window_region_merge_color(input_im, b, view_func=view_as_windows):
block_size = (b, b, 3)
pad_width = []
reshape_size = b * b
for i in range(len(block_size)):
if input_im.shape[i] % block_size[i] != 0:
after_width = block_size[i] - (input_im.shape[i] % block_size[i])
else:
after_width = 0
pad_width.append((0, after_width))
input_im = np.pad(input_im, pad_width=pad_width, mode='constant')
view = view_func(input_im, block_size)
flatten_view = np.transpose(view, axes=(0, 1, 2, 5, 4, 3))
flatten_view = flatten_view.reshape(flatten_view.shape[0], flatten_view.shape[1], 3, reshape_size)
return np.squeeze(mode(flatten_view, axis=3)[0])
def window_region_color(input_im, b, view_func=view_as_windows, calc_func=max):
block_size = (b, b, 3)
pad_width = []
reshape_size = b * b
for i in range(len(block_size)):
if input_im.shape[i] % block_size[i] != 0:
after_width = block_size[i] - (input_im.shape[i] % block_size[i])
else:
after_width = 0
pad_width.append((0, after_width))
    input_im = np.pad(input_im, pad_width=pad_width, mode='constant')
# 9.2.2 EM algorithm for the multivariate Gaussian mixture model
#%%
# Libraries used in section 9.2.2
import numpy as np
from scipy.stats import multivariate_normal # multivariate Gaussian distribution
import matplotlib.pyplot as plt
#%%
## Set up the true distribution
# Number of dimensions (fixed)
D = 2
# Number of clusters
K = 3
# True means of the K clusters
mu_truth_kd = np.array(
[[5.0, 35.0],
[-20.0, -10.0],
[30.0, -20.0]]
)
# True covariance matrices of the K clusters
sigma2_truth_kdd = np.array(
[[[250.0, 65.0], [65.0, 270.0]],
[[125.0, -45.0], [-45.0, 175.0]],
[[210.0, -15.0], [-15.0, 250.0]]]
)
# True mixing coefficients
pi_truth_k = np.array([0.45, 0.25, 0.3])
#%%
# x-axis values for plotting
x_1_line = np.linspace(
np.min(mu_truth_kd[:, 0] - 3 * np.sqrt(sigma2_truth_kdd[:, 0, 0])),
np.max(mu_truth_kd[:, 0] + 3 * np.sqrt(sigma2_truth_kdd[:, 0, 0])),
num=300
)
# y-axis values for plotting
x_2_line = np.linspace(
np.min(mu_truth_kd[:, 1] - 3 * np.sqrt(sigma2_truth_kdd[:, 1, 1])),
np.max(mu_truth_kd[:, 1] + 3 * np.sqrt(sigma2_truth_kdd[:, 1, 1])),
num=300
)
# Grid of points for plotting
x_1_grid, x_2_grid = np.meshgrid(x_1_line, x_2_line)
# Points at which the densities are evaluated
x_point_arr = np.stack([x_1_grid.flatten(), x_2_grid.flatten()], axis=1)
# Keep the grid shape for reshaping the densities
x_dim = x_1_grid.shape
print(x_dim)
# Compute the true mixture density
model_dens = 0
for k in range(K):
    # Density of cluster k
tmp_dens = multivariate_normal.pdf(
x=x_point_arr, mean=mu_truth_kd[k], cov=sigma2_truth_kdd[k]
)
    # Linear combination of the K densities
    model_dens += pi_truth_k[k] * tmp_dens
#%%
# Plot the true distribution
plt.figure(figsize=(12, 9))
plt.contour(x_1_grid, x_2_grid, model_dens.reshape(x_dim)) # true distribution
plt.suptitle('Mixture of Gaussians', fontsize=20)
plt.title('K=' + str(K), loc='left')
plt.xlabel('$x_1$')
plt.ylabel('$x_2$')
plt.colorbar() # colorbar for the contours
plt.show()
#%%
# Number of (observed) data points
N = 250
# Generate the latent variables
z_truth_nk = np.random.multinomial(n=1, pvals=pi_truth_k, size=N)
# Extract the cluster indices
_, z_truth_n = np.where(z_truth_nk == 1)
# Generate the (observed) data
x_nd = np.array([
np.random.multivariate_normal(
mean=mu_truth_kd[k], cov=sigma2_truth_kdd[k], size=1
).flatten() for k in z_truth_n
])
#%%
# Scatter plot of the observed data
plt.figure(figsize=(12, 9))
for k in range(K):
    k_idx, = np.where(z_truth_n == k) # indices of the data in cluster k
    plt.scatter(x=x_nd[k_idx, 0], y=x_nd[k_idx, 1], label='cluster:' + str(k + 1)) # observed data
plt.contour(x_1_grid, x_2_grid, model_dens.reshape(x_dim), linestyles='--') # true distribution
plt.suptitle('Mixture of Gaussians', fontsize=20)
plt.title('$N=' + str(N) + ', K=' + str(K) +
', \pi=(' + ', '.join([str(pi) for pi in pi_truth_k]) + ')$', loc='left')
plt.xlabel('$x_1$')
plt.ylabel('$x_2$')
plt.colorbar() # colorbar for the contours
plt.show()
#%%
## Initialization
# Generate initial values for the mean parameters
mu_kd = np.array([
np.random.uniform(
low=np.min(x_nd[:, d]), high=np.max(x_nd[:, d]), size=K
) for d in range(D)
]).T
# Specify initial values for the covariance matrices
sigma2_kdd = np.array([np.identity(D) * 1000 for _ in range(K)])
# Generate initial values for the mixing coefficients
pi_k = np.random.rand(K)
pi_k /= np.sum(pi_k) # normalize
# Log-likelihood with the initial values: Eq. (9.14)
term_dens_nk = np.array(
    [pi_k[k] * multivariate_normal.pdf(x=x_nd, mean=mu_kd[k], cov=sigma2_kdd[k]) for k in range(K)]
).T
L = np.sum(np.log(np.sum(term_dens_nk, axis=1)))
#%%
# Compute the mixture density given by the initial values
init_dens = 0
for k in range(K):
    # Density of cluster k
    tmp_dens = multivariate_normal.pdf(
        x=x_point_arr, mean=mu_kd[k], cov=sigma2_kdd[k]
    )
    # Linear combination of the K densities
    init_dens += pi_k[k] * tmp_dens
# Plot the distribution given by the initial values
plt.figure(figsize=(12, 9))
plt.contour(x_1_grid, x_2_grid, model_dens.reshape(x_dim),
            alpha=0.5, linestyles='dashed') # true distribution
#plt.contour(x_1_grid, x_2_grid, init_dens.reshape(x_dim)) # estimated distribution (contour lines)
plt.contourf(x_1_grid, x_2_grid, init_dens.reshape(x_dim), alpha=0.6) # estimated distribution (filled)
plt.suptitle('Mixture of Gaussians', fontsize=20)
plt.title('iter:' + str(0) + ', K=' + str(K), loc='left')
plt.xlabel('$x_1$')
plt.ylabel('$x_2$')
plt.colorbar() # colorbar for the contours
plt.show()
#%%
## Inference
# Number of iterations
MaxIter = 100
# Containers for tracking the parameter updates
trace_L_i = [L]
trace_gamma_ink = [np.tile(np.nan, reps=(N, K))]
trace_mu_ikd = [mu_kd.copy()]
trace_sigma2_ikdd = [sigma2_kdd.copy()]
trace_pi_ik = [pi_k.copy()]
# Maximum likelihood estimation
for i in range(MaxIter):
    # E-step: compute the responsibilities: Eq. (9.13)
    gamma_nk = np.array(
        [pi_k[k] * multivariate_normal.pdf(x=x_nd, mean=mu_kd[k], cov=sigma2_kdd[k]) for k in range(K)]
    ).T
    gamma_nk /= np.sum(gamma_nk, axis=1, keepdims=True) # normalize
    # Expected number of data points assigned to each cluster: Eq. (9.18)
    N_k = np.sum(gamma_nk, axis=0)
    for k in range(K):
        # M-step: ML solution for the mean parameters: Eq. (9.17)
        mu_kd[k] = np.dot(gamma_nk[:, k], x_nd) / N_k[k]
        # ML solution for the covariance matrices: Eq. (9.19)
        term_x_nd = x_nd - mu_kd[k]
        sigma2_kdd[k] = np.dot(gamma_nk[:, k] * term_x_nd.T, term_x_nd) / N_k[k]
    # ML solution for the mixing coefficients: Eq. (9.22)
    pi_k = N_k / N
    # Log-likelihood: Eq. (9.14)
    tmp_dens_nk = np.array(
        [pi_k[k] * multivariate_normal.pdf(x=x_nd, mean=mu_kd[k], cov=sigma2_kdd[k]) for k in range(K)]
    ).T
    L = np.sum(np.log(np.sum(tmp_dens_nk, axis=1)))
    # Record the results of iteration i
trace_L_i.append(L)
trace_gamma_ink.append(gamma_nk.copy())
trace_mu_ikd.append(mu_kd.copy())
trace_sigma2_ikdd.append(sigma2_kdd.copy())
trace_pi_ik.append(pi_k.copy())
    # Progress check
print(str(i + 1) + ' (' + str(np.round((i + 1) / MaxIter * 100, 1)) + '%)')
#%%
## Check the inference results
# Colormaps for the K clusters
colormap_list = ['Blues', 'Oranges', 'Greens']
# Cluster index with the largest responsibility
z_n = np.argmax(gamma_nk, axis=1)
# Responsibility (probability) of the assigned cluster
prob_z_n = gamma_nk[np.arange(N), z_n]
# Compute the mixture density given by the final estimates
res_dens = 0
for k in range(K):
    # Density of cluster k
tmp_dens = multivariate_normal.pdf(
x=x_point_arr, mean=mu_kd[k], cov=sigma2_kdd[k]
)
    # Linear combination of the K densities
    res_dens += pi_k[k] * tmp_dens
# Plot the distribution given by the final estimates
plt.figure(figsize=(12, 9))
plt.contour(x_1_grid, x_2_grid, model_dens.reshape(x_dim),
            alpha=0.5, linestyles='dashed') # true distribution
plt.scatter(x=mu_truth_kd[:, 0], y=mu_truth_kd[:, 1],
            color='red', s=100, marker='x') # true means
plt.contourf(x_1_grid, x_2_grid, res_dens.reshape(x_dim), alpha=0.5) # estimated distribution (filled)
for k in range(K):
    k_idx, = np.where(z_n == k) # indices of the data in cluster k
    cm = plt.get_cmap(colormap_list[k]) # colormap of cluster k
    plt.scatter(x=x_nd[k_idx, 0], y=x_nd[k_idx, 1],
                c=[cm(p) for p in prob_z_n[k_idx]], label='cluster:' + str(k + 1)) # clusters colored by responsibility
#plt.contour(x_1_grid, x_2_grid, res_dens.reshape(x_dim)) # estimated distribution (contour lines)
#plt.colorbar() # contour values (for the contour version)
plt.suptitle('Mixture of Gaussians:Maximum Likelihood', fontsize=20)
plt.title('$iter:' + str(MaxIter) +
', L=' + str(np.round(L, 1)) +
', N=' + str(N) +
', \pi=[' + ', '.join([str(pi) for pi in np.round(pi_k, 3)]) + ']$',
loc='left')
plt.xlabel('$x_1$')
plt.ylabel('$x_2$')
plt.legend()
plt.show()
#%%
## Plot the log-likelihood trace
plt.figure(figsize=(12, 9))
plt.plot(np.arange(MaxIter + 1), np.array(trace_L_i))
plt.xlabel('iteration')
plt.ylabel('value')
plt.suptitle('Maximum Likelihood', fontsize=20)
plt.title('Log Likelihood', loc='left')
plt.grid() # grid lines
plt.show()
#%%
## Check the traces of the parameter updates
# Plot the trace of mu
plt.figure(figsize=(12, 9))
plt.hlines(y=mu_truth_kd, xmin=0, xmax=MaxIter + 1, label='true val',
           color='red', linestyles='--') # true values
for k in range(K):
for d in range(D):
plt.plot(np.arange(MaxIter+1), np.array(trace_mu_ikd)[:, k, d],
label='k=' + str(k + 1) + ', d=' + str(d + 1))
plt.xlabel('iteration')
plt.ylabel('value')
plt.suptitle('Maximum Likelihood', fontsize=20)
plt.title('$\mu$', loc='left')
plt.legend() # legend
plt.grid() # grid lines
plt.show()
#%%
# Plot the trace of Sigma
plt.figure(figsize=(12, 9))
plt.hlines(y=sigma2_truth_kdd, xmin=0, xmax=MaxIter + 1, label='true val',
           color='red', linestyles='--') # true values
for k in range(K):
for d1 in range(D):
for d2 in range(D):
plt.plot(np.arange(MaxIter + 1), np.array(trace_sigma2_ikdd)[:, k, d1, d2],
alpha=0.5, label='k=' + str(k + 1) + ', d=' + str(d1 + 1) + ', d''=' + str(d2 + 1))
plt.xlabel('iteration')
plt.ylabel('value')
plt.suptitle('Maximum Likelihood', fontsize=20)
plt.title('$\Sigma$', loc='left')
plt.legend() # legend
plt.grid() # grid lines
plt.show()
#%%
# Plot the trace of pi
plt.figure(figsize=(12, 9))
plt.hlines(y=pi_truth_k, xmin=0, xmax=MaxIter + 1, label='true val',
           color='red', linestyles='--') # true values
for k in range(K):
    plt.plot(np.arange(MaxIter + 1), np.array(trace_pi_ik)[:, k],
             label='k=' + str(k + 1))
#!/usr/bin/env python3
import numpy as np
import math
from typing import List
class HighLevelPlanner():
def __init__(self,
num_runs: int = 10,
num_goals_per_run: int = 5,
world_box: List[float] = [-15, -15, 0, 15, 15, 8],
min_distance_between_consecutive_goals: int = 15,
switch_goal_distance: float = 3.0,
):
self.num_runs = num_runs
self.switch_goal_distance = switch_goal_distance
self.goals_matrix = np.zeros([num_runs, num_goals_per_run, 3], dtype=np.float32)
self.velocities_matrix = np.zeros([num_runs, num_goals_per_run], dtype=np.float32)
np.random.seed(0)
self.world_box = np.asarray(world_box, dtype=np.float32)
self.world_box[:3] = self.world_box[:3] + 1.0
self.world_box[3:] = self.world_box[3:] - 1.0
# Need bigger world box than actual size so that some goals are outside.
self.world_box_dim_factors = np.zeros([3], dtype=np.float32)
self.world_box_dim_factors[0] = (self.world_box[3] - self.world_box[0]) / 2.0
self.world_box_dim_factors[1] = (self.world_box[4] - self.world_box[1]) / 2.0
self.world_box_dim_factors[2] = (self.world_box[5] - self.world_box[2])
self.distance_to_avoid_corners = np.sqrt(self.world_box_dim_factors[0]**2 + self.world_box_dim_factors[1]**2 + (self.world_box_dim_factors[2]/2)**2)*0.9
self.num_goals_per_run = num_goals_per_run
self.internal_state = 0 # Will be used to keep track of goal numbers
self.goal_reached_number = 0
self.populate_goals_matrix(num_runs, num_goals_per_run, min_distance_between_consecutive_goals)
def populate_goals_matrix(self, num_runs, num_goals_per_run, min_distance_between_consecutive_goals):
for run_idx in range(num_runs):
for goal_idx in range(num_goals_per_run):
good_goal = False
while not good_goal:
random_goal_position = self.draw_random_position()
if (goal_idx==0): # if first goal of episode we only care is far from center [0, 0, 0]
goals_relative_position = random_goal_position
else: # else we care two consecutive goals are not too close to each other
goals_relative_position = random_goal_position - self.goals_matrix[run_idx, goal_idx-1, :]
if(np.linalg.norm(goals_relative_position) > min_distance_between_consecutive_goals and np.linalg.norm(goals_relative_position) < self.distance_to_avoid_corners):
good_goal = True
self.goals_matrix[run_idx, goal_idx, :] = random_goal_position
def draw_random_position(self):
random_values = np.random.rand(3)
random_values[0:2] = random_values[0:2]*2 - 1 # Uniform distribution from -1 and 1
return np.multiply(random_values, self.world_box_dim_factors)
def get_current_goal(self, drone_position, num_run):
goal_relative_position = self.goals_matrix[num_run, self.internal_state, :] - drone_position
        goal_relative_position_norm = np.linalg.norm(goal_relative_position)
import copy
import h5py
from pathlib import Path
import pandas as pd
from util import print_datetime, parseIiter, array2string, load_dict_from_hdf5_group, dict_to_list
import numpy as np
from sklearn.metrics import calinski_harabasz_score, silhouette_score
from sklearn.metrics.cluster import adjusted_rand_score
from sklearn.cluster import AgglomerativeClustering
from sklearn.preprocessing import StandardScaler
import umap
import matplotlib
from matplotlib import pyplot as plt
import matplotlib.patches as patches
from matplotlib.colors import BoundaryNorm, ListedColormap
from matplotlib.colorbar import ColorbarBase
from matplotlib.colors import LinearSegmentedColormap
import seaborn as sns
from scipy.stats import pearsonr
# sns.set_style("white")
# plt.rcParams['font.family'] = "Liberation Sans"
# plt.rcParams['font.size'] = 16
# plt.rcParams['pdf.fonttype'] = 42
# plt.rcParams['svg.fonttype'] = 'none'
from load_data import load_expression
from model import SpiceMix
from pathlib import Path
class SpiceMixResult:
"""Provides methods to interpret a SpiceMix result.
"""
def __init__(self, path2dataset, result_filename, neighbor_suffix=None, expression_suffix=None, showHyperparameters=False):
self.path2dataset = Path(path2dataset)
self.result_filename = result_filename
print(f'Result file = {self.result_filename}')
self.load_progress()
self.load_hyperparameters()
self.load_dataset()
self.num_repli = len(self.dataset["replicate_names"])
self.use_spatial = [True] * self.num_repli
self.load_parameters()
self.weight_columns = np.array([f'Metagene {metagene}' for metagene in range(self.hyperparameters["K"])])
self.columns_exprs = np.array(self.dataset["gene_sets"][self.dataset["replicate_names"][0]])
self.data = pd.DataFrame(index=range(sum(self.dataset["Ns"])))
self.data[['x', 'y']] = np.concatenate([load_expression(self.path2dataset / 'files' / f'coordinates_{replicate}.txt') for replicate in self.dataset["replicate_names"]], axis=0)
# self.data['cell type'] = np.concatenate([
# np.loadtxt(self.path2dataset / 'files' / f'celltypes_{repli}.txt', dtype=str)
# for repli in self.replicate_names
# ], axis=0)
self.data["replicate"] = sum([[replicate] * N for replicate, N in zip(self.dataset["replicate_names"], self.dataset["Ns"])], [])
print(self.columns_exprs)
self.columns_exprs = [" ".join(symbols) for symbols in self.columns_exprs]
print(self.columns_exprs)
self.data[self.columns_exprs] = np.concatenate(self.dataset["unscaled_YTs"], axis=0)
if "labels" in self.dataset:
self.dataset["labels"] = dict_to_list(self.dataset["labels"])
self.data["label"] = np.concatenate(self.dataset["labels"])
self.colors = {}
self.orders = {}
self.metagene_order = np.arange(self.hyperparameters["K"])
def load_hyperparameters(self):
with h5py.File(self.result_filename, 'r') as f:
self.hyperparameters = load_dict_from_hdf5_group(f, 'hyperparameters/')
def load_progress(self):
with h5py.File(self.result_filename, 'r') as f:
self.progress = load_dict_from_hdf5_group(f, 'progress/')
self.progress["Q"] = dict_to_list(self.progress["Q"])
def load_parameters(self):
with h5py.File(self.result_filename, 'r') as f:
self.parameters = load_dict_from_hdf5_group(f, 'parameters/')
self.parameters["sigma_x_inverse"] = dict_to_list(self.parameters["sigma_x_inverse"])
self.parameters["M"] = dict_to_list(self.parameters["M"])
self.parameters["sigma_yx_inverses"] = dict_to_list(self.parameters["sigma_yx_inverses"])
self.parameters["prior_x_parameter"] = dict_to_list(self.parameters["prior_x_parameter"])
def load_dataset(self):
with h5py.File(self.result_filename, 'r') as f:
self.dataset = load_dict_from_hdf5_group(f, 'dataset/')
self.dataset["Es"] = {int(node): adjacency_list for node, adjacency_list in self.dataset["Es"].items()}
self.dataset["unscaled_YTs"] = dict_to_list(self.dataset["unscaled_YTs"])
self.dataset["YTs"] = dict_to_list(self.dataset["YTs"])
for replicate_index, replicate_name in enumerate(self.dataset["gene_sets"]):
self.dataset["gene_sets"][replicate_name] = np.loadtxt(Path(self.path2dataset) / "files" / f"genes_{replicate_name}.txt", dtype=str)
# np.char.decode(self.dataset["gene_sets"][replicate_name], encoding="utf-8")
replicate_index = str(replicate_index)
if "labels" in self.dataset:
self.dataset["labels"][replicate_index] = np.char.decode(self.dataset["labels"][replicate_index], encoding="utf-8")
self.dataset["Ns"], self.dataset["Gs"] = zip(*map(np.shape, self.dataset["unscaled_YTs"]))
self.dataset["max_genes"] = max(self.dataset["Gs"])
self.dataset["total_edge_counts"] = [sum(map(len, E.values())) for E in self.dataset["Es"].values()]
self.dataset["replicate_names"] = [replicate_name.decode("utf-8") for replicate_name in self.dataset["replicate_names"]]
# self.scaling = [G / self.dataset["max_genes"] * self.hyperparameters["K"] / YT.sum(axis=1).mean() for YT, G in zip(self.dataset["YTs"], self.dataset["Gs"])]
if "scaling" not in self.dataset:
self.dataset["scaling"] = [G / self.dataset["max_genes"] * self.hyperparameters["K"] / YT.sum(axis=1).mean() for YT, G in zip(self.dataset["YTs"], self.dataset["Gs"])]
def plot_convergence(self, ax, **kwargs):
label = kwargs.pop("label", "")
with h5py.File(self.result_filename, 'r') as f:
Q_values = load_dict_from_hdf5_group(f, 'progress/Q')
iterations = np.fromiter(map(int, Q_values.keys()), dtype=int)
selected_Q_values = np.fromiter((Q_values[step][()] for step in iterations.astype(str)), dtype=float)
Q = np.full(iterations.max() - iterations.min() + 1, np.nan)
Q[iterations - iterations.min()] = selected_Q_values
print(f'Found {iterations.max()} iterations from {self.result_filename}')
ax.set_title('Q Score')
ax.set_xlabel('Iteration')
ax.set_ylabel('$\Delta$Q')
ax.set_yscale('log')
ax.set_ylim(10**-1, 10**3)
ax.legend()
for interval, linestyle in zip([1, 5], ['-', ':']):
dQ = (Q[interval:] - Q[:-interval]) / interval
ax.plot(np.arange(iterations.min(), iterations.max() + 1 - interval) + interval / 2 + 1, dQ, linestyle=linestyle, label="{}-iteration $\Delta$Q ({})".format(interval, label), **kwargs)
def load_latent_states(self, iiter=-1):
with h5py.File(self.result_filename, 'r') as f:
# iiter = parseIiter(f[f'latent_states/XT/{self.replicate_names[0]}'], iiter)
print(f'Iteration {iiter}')
self.weights = load_dict_from_hdf5_group(f, "weights/")
self.weights = dict_to_list(self.weights)
# XTs = [f[f'latent_states/XT/{repli}/{iiter}'][()] for repli in self.replicate_names]
# XTs = [XT/ YT for XT, YT in zip(XTs, self.dataset["YTs"])]
self.data[self.weight_columns] = np.concatenate([self.weights[replicate_index][iiter] / scale for replicate_index, scale in zip(range(self.num_repli), self.dataset["scaling"])])
def determine_optimal_clusters(self, ax, K_range, metric="callinski_harabasz"):
XTs = self.data[self.weight_columns].values
XTs = StandardScaler().fit_transform(XTs)
K_range = np.array(K_range)
if metric == "callinski_harabasz":
scores = np.fromiter((calinski_harabasz_score(XTs, self.determine_clusters(K)) for K in K_range), dtype=float)
elif metric == "silhouette":
scores = np.fromiter((silhouette_score(XTs, self.determine_clusters(K)) for K in K_range), dtype=float)
optimal_K = K_range[scores.argmax()]
print(f'optimal K = {optimal_K}')
labels = self.determine_clusters(optimal_K)
num_clusters = len(set(labels) - {-1})
print(f'#clusters = {num_clusters}, #-1 = {(labels == -1).sum()}')
ax.scatter(K_range, scores, marker='x', color=np.where(K_range == optimal_K, 'C1', 'C0'))
def determine_clusters(self, K, features=None, replicate=None):
data = self.data
if replicate:
replicate_mask = (data["replicate"] == replicate)
data = data.loc[replicate_mask]
if not features:
features = self.weight_columns
XTs = data[features].values
cluster_labels = AgglomerativeClustering(
n_clusters=K,
linkage='ward',
).fit_predict(XTs)
if replicate:
self.data.loc[replicate_mask, 'cluster_raw'] = cluster_labels
self.data.loc[replicate_mask, 'cluster'] = list(map(str, cluster_labels))
else:
self.data.loc[:, 'cluster_raw'] = cluster_labels
self.data.loc[:, 'cluster'] = list(map(str, cluster_labels))
return cluster_labels
def annotateClusters(self, clusteri2a):
self.data['cluster'] = [clusteri2a[cluster_name] for cluster_name in self.data['cluster_raw']]
def assignColors(self, key, mapping):
assert set(mapping.keys()) >= set(self.data[key])
self.colors[key] = copy.deepcopy(mapping)
def assignOrder(self, key, order):
categories = set(self.data[key])
assert set(order) >= categories and len(order) == len(set(order))
order = list(filter(lambda category: category in categories, order))
self.orders[key] = np.array(order)
def UMAP(self, **kwargs):
XTs = self.data[self.weight_columns].values
XTs = StandardScaler().fit_transform(XTs)
XTs = umap.UMAP(**kwargs).fit_transform(XTs)
self.data[[f'UMAP {i+1}' for i in range(XTs.shape[1])]] = XTs
def plot_feature(self, ax, key, key_x='UMAP 1', key_y='UMAP 2', replicate=None, show_colorbar=True, **kwargs):
        # We overlay the latent states on the spatial coordinates.
        # SpiceMix metagenes are expected to show clearer spatial patterns with less background expression.
segmentdata = copy.deepcopy(plt.get_cmap('Reds')._segmentdata)
segmentdata['red' ][0] = (0., 1., 1.)
segmentdata['green'][0] = (0., 1., 1.)
segmentdata['blue' ][0] = (0., 1., 1.)
cmap = LinearSegmentedColormap('', segmentdata=segmentdata, N=256)
if isinstance(replicate, int):
replicate = self.dataset["replicate_names"][replicate]
if replicate:
data = self.data.groupby('replicate').get_group(replicate)
else:
data = self.data
if data[key].dtype == 'O':
kwargs.setdefault('hue_order', self.orders.get(key, None))
kwargs.setdefault('palette', self.colors.get(key, None))
sns.scatterplot(ax=ax, data=data, x=key_x, y=key_y, hue=key, **kwargs)
else:
kwargs.setdefault('cmap', cmap)
sca = ax.scatter(data[key_x], data[key_y], c=data[key], **kwargs)
if show_colorbar:
cbar = plt.colorbar(sca, ax=ax, pad=.01, shrink=1, aspect=40)
ax.set_xticklabels([])
ax.set_yticklabels([])
ax.legend(bbox_to_anchor=(1.04,1), loc="upper left")
ax.tick_params(axis='both', labelsize=10)
def plot_aggregated_feature(self, ax, keys, key_x="x", key_y="y", replicate=None, show_colorbar=True, **kwargs):
if isinstance(replicate, int):
replicate = self.dataset["replicate_names"][replicate]
if replicate:
data = self.data.groupby('replicate').get_group(replicate)
else:
data = self.data
sca = ax.scatter(data[key_x], data[key_y], c=data[keys].sum(axis="columns"), **kwargs)
if show_colorbar:
cbar = plt.colorbar(sca, ax=ax, pad=.01, shrink=1, aspect=40)
ax.set_xticklabels([])
ax.set_yticklabels([])
ax.tick_params(axis='both', labelsize=10)
def plot_metagenes(self, axes, replicate, *args, **kwargs):
keys = np.array(self.weight_columns)
keys = keys[self.metagene_order]
self.plot_multifeature(axes, keys, replicate, **kwargs)
def plot_multifeature(self, axes, keys, replicate, key_x='x', key_y='y', show_colorbar=True, *args, **kwargs):
"""Plot multiple SpiceMixResult features on the provided axes for a given replicate.
"""
for ax, key in zip(axes.flat, keys):
self.plot_feature(ax, key, key_x, key_y, replicate, show_colorbar=show_colorbar, *args, **kwargs)
ax.set_title(key)
def plot_multireplicate(self, axes, key, key_x="x", key_y="y", palette_option="husl", *args, **kwargs):
categories = self.data[key].unique()
category_map = {category: index for index, category in enumerate(categories)}
num_categories = len(categories)
palette = sns.color_palette(palette_option, num_categories)
sns.set_palette(palette)
colormap = ListedColormap(palette)
bounds = np.linspace(0, num_categories, num_categories + 1)
norm = BoundaryNorm(bounds, colormap.N)
for ax, replicate in zip(axes.flat, self.dataset["replicate_names"]):
if replicate not in self.data["replicate"].values:
ax.axis('off')
continue
subdata = self.data.groupby("replicate").get_group(replicate).groupby(key)
for subkey, group in subdata:
group.plot(ax=ax, kind='scatter', x='x', y='y', label=subkey, color=colormap(category_map[subkey]), **kwargs)
ax.set_xticklabels([])
ax.set_yticklabels([])
ax.set_title(replicate)
ax.get_legend().remove()
ax.set(adjustable='box', aspect='equal')
legend_axis = axes.flat[-1]
legend_axis.set_title("Legend")
legend_axis.imshow(np.arange(num_categories)[:, np.newaxis], cmap=colormap, aspect=1)
legend_axis.set_xticks([])
legend_axis.set_yticks(np.arange(num_categories))
legend_axis.set_yticklabels(categories)
plt.tight_layout()
def calculate_metagene_correlations(self, replicate, benchmark, comparison_features):
correlations = pd.DataFrame(index=self.weight_columns, columns=comparison_features)
replicate_data = self.data.groupby("replicate").get_group(replicate)
for feature in comparison_features:
feature_values = benchmark[feature]
for metagene in self.weight_columns:
correlation = pearsonr(replicate_data[metagene].values, feature_values.values)[0]
correlations.loc[metagene, feature] = correlation
return correlations
def calculate_ari_score(self, replicate=None):
data = self.data
if replicate:
data = data[data["replicate"] == replicate]
label_values, label_indices, label_encoded = np.unique(data["label"], return_index=True, return_inverse=True)
cluster_values, cluster_indices, cluster_encoded = np.unique(data["cluster"], return_index=True, return_inverse=True)
ari = adjusted_rand_score(label_encoded, cluster_encoded)
return ari
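    # Illustrative usage sketch (added for clarity; the paths are hypothetical):
    #   result = SpiceMixResult('path/to/dataset', 'path/to/result.hdf5')
    #   result.load_latent_states(iiter=-1)
    #   result.determine_clusters(K=8)
    #   print(result.calculate_ari_score())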
def plot_ari_versus_clusters(self, ax, K_range):
"""Plot ARI score as a function of the number of clusters used in K-means clustering.
"""
XTs = self.data[self.weight_columns].values
XTs = StandardScaler().fit_transform(XTs)
K_range = | np.array(K_range) | numpy.array |
#! /usr/bin/env python
from __future__ import division
from __future__ import print_function
from builtins import range
import numpy as np
import math
import os.path
import dens
import plot2d as p2d
import tqdm # progress bar
import platform
import argparse
import warnings
XRAY_WAVELENGTH = 1.54
TRANSFORM_MONOCLINIC = True
def initialize():
parser = argparse.ArgumentParser(description='Calculate 3d Structure Factor')
    parser.add_argument('-i', '--input', default='', type=str, help='Input topology and trajectory basename')
    parser.add_argument('-top', '--topology', default='', type=str, help='Input topology filename')
parser.add_argument('-traj', '--trajectory', default='', type=str, help='Input trajectory filename')
parser.add_argument('--cscale', default=1, type=float, help='Scale color map on plots')
parser.add_argument('--lcscale', default=1, type=float, help='Scale color map on log plots')
parser.add_argument('-fi', '--first_frame', default=0, type=int, help='frame to start at')
parser.add_argument('-e', '--end_frame', default=-1, type=int, help='frame to end at')
parser.add_argument("-tf","--traj_format",default="gromacs",type=str,help='format of MD trajectory file to load')
parser.add_argument('-fr', '--force_recompute', default=0, type=int, help='force recomputing SF (if >=1) or '
'trajectory and SF(if >=2)')
# random trajectory parameters
parser.add_argument('-RC', '--random_counts', default=0, type=int, help='set this to specify number of random '
'particles to use')
parser.add_argument('-RT', '--random_timesteps', default=1, type=int, help='set this to specify number of random '
'timesteps to use')
parser.add_argument('-RL', '--random_label', default="R3", type=str, help='set this to specify the element type of '
'the random particle')
parser.add_argument('-LX', '--lattice_x', default=0, type=int, help='set this to specify the number of lattice '
'points in the X direction')
parser.add_argument('-LY', '--lattice_y', default=1, type=int, help='set this to specify the number of lattice '
'points in the Y direction')
parser.add_argument('-LZ', '--lattice_z', default=1, type=int, help='set this to specify the number of lattice '
'points in the Z direction')
parser.add_argument('-LL', '--lattice_label', default="R3", type=str, help='set this to specify the element label of'
' lattice particles')
parser.add_argument('-SR', '--spatial_resolution', default=1.0, type=float,help='set this to specify the spatial '
'resolution for the density grid')
parser.add_argument('-RN', '--random_noise', default=0, type=int,help='set this to a positive value to use random '
'noise for testing. A conventional trajectory must still be loaded. The number of timesteps to '
'be used will be scaled by this integer')
parser.add_argument('-RS', '--random_seed', default=1, type=int, help='Set the random seed from the command line')
    parser.add_argument('-NBR', '--number_bins_rad', default=0, type=int,help='Set this to a nonzero value to use that '
'many radial bins. These bins will be scaled such that they contain roughly the same number of '
'points.')
parser.add_argument('-ct', '--cell_theta', default=120, type=float, help="choose cell theta (in degrees)")
parser.add_argument('-nocbar', '--nocolorbar', action="store_true", help="Choose whether to plot colorbar")
    parser.add_argument('-scale_factor', default=3.1, type=float, help='Maximum colorbar value on final simulated '
'XRD pattern')
    parser.add_argument('-manuscript_format', action="store_true", help='Format plots as they appear in Coscia et al. '
'Manuscript.')
return parser
if __name__ == "__main__":
args = initialize().parse_args()
location = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__))) # Directory this script is in
warnings.simplefilter("ignore", RuntimeWarning)
theta = args.cell_theta*math.pi/180.0 # theta for monoclinic unit cell
ucell = np.array([[1, 0, 0], [np.cos(theta), np.sin(theta), 0], [0, 0, 1]])
    np.random.seed(args.random_seed)
p2d.NBINSRAD = args.number_bins_rad
p2d.theta = theta
dens.theta = theta
if args.random_noise > 0:
dens.RANDOM_NOISE = args.random_noise
if args.nocolorbar:
p2d.colorbar = False
top_extension = {"gromacs": ".gro", "namd": ".psf"}
traj_extension = {"gromacs": ".trr", "namd": ".dcd"}
if len(args.input) > 0:
basename = args.input
top_file = args.input+top_extension[args.traj_format.lower()]
traj_file = args.input+traj_extension[args.traj_format.lower()]
else:
top_file = args.topology
traj_file = args.trajectory
basename = args.topology.rsplit('.', 1)[0]
print("running on", platform.system(), platform.release(), platform.version())
if platform.system() == "Windows": # path separators
fd = "\\"
else:
import load_traj as lt
fd = "/"
label = "out_"+basename
tfname = label+"_traj"
sfname = label+"_sf"
Nlat = args.lattice_x * args.lattice_y * args.lattice_z
if Nlat > 0:
boxsize = 100.0
Nx = args.lattice_x
Ny = args.lattice_y
Nz = args.lattice_z
Nsteps = 1
dims = np.ones((Nsteps, 3))*boxsize
coords = np.zeros((Nsteps, Nlat, 3))
cbufx = np.linspace(0, boxsize, Nx, endpoint=False)
cbufy = np.linspace(0, boxsize, Ny, endpoint=False)
cbufz = np.linspace(0, boxsize, Nz, endpoint=False)
cbx, cby, cbz = np.meshgrid(cbufx, cbufy, cbufz)
coords[0, :, 0] = cbx.reshape((Nlat))
coords[0, :, 1] = cby.reshape((Nlat))
coords[0, :, 2] = cbz.reshape((Nlat))
sfname = 'lattice_'+str(Nx)+"_"+str(Ny)+"_"+str(Nz)
name = np.zeros(Nlat, dtype=object)
mass = np.zeros(Nlat)
typ = np.zeros(Nlat, dtype=object)
typ[:]=args.lattice_label
print("saving...")
np.savez_compressed(sfname, dims=dims, coords=coords, name=name, typ=typ)
rad = dens.load_radii("%s/radii.txt" % location) # load radii definitions from file
print("computing SF...")
dens.compute_sf(coords, dims, typ, sfname, rad, ucell, args.spatial_resolution) # compute time-averaged 3d structure factor and save to sfname.npz
elif args.random_counts > 0: # create a random trajectory
spcheck = 0 # check if simulating a single particle.
Rboxsize = 100.0
BUFFsize = 10000000
sfname = 'RND'
print("generating random trajectory...")
Rsteps = args.random_timesteps
Ratoms = args.random_counts
        dims = np.ones((Rsteps, 3))
import tensorflow as tf
from datasets import gtsrb
import numpy as np
import cv2
def normalize(x):
one_size = cv2.resize(x, (32, 32)).astype(np.float32) / 255
    return one_size - np.mean(one_size)
import numpy as np # pip3 install numpy
import scipy # pip3 install scipy
import scipy.ndimage as snd
import reikna.fft, reikna.cluda # pip3 install pyopencl/pycuda, reikna
from PIL import Image, ImageTk, ImageDraw # pip3 install pillow
try: import tkinter as tk
except: import Tkinter as tk
from fractions import Fraction
import copy, re, itertools, json, csv
import os, sys, subprocess, datetime, time
import warnings
warnings.filterwarnings('ignore', '.*output shape of zoom.*') # suppress warning from snd.zoom()
P2, PIXEL_BORDER = 0,0 # 4,2 3,1 2,1 0,0
X2, Y2 = 9,9 # 10,9 9,8 8,8 1<<9=512
PIXEL = 1 << P2; SIZEX, SIZEY = 1 << (X2-P2), 1 << (Y2-P2)
# PIXEL, PIXEL_BORDER = 1,0; SIZEX, SIZEY = 1280//PIXEL, 720//PIXEL # 720p HD
# PIXEL, PIXEL_BORDER = 1,0; SIZEX, SIZEY = 1920//PIXEL, 1080//PIXEL # 1080p HD
MIDX, MIDY = int(SIZEX / 2), int(SIZEY / 2)
DEF_R = max(min(SIZEX, SIZEY) // 4 //5*5, 13)
EPSILON = 1e-10
ROUND = 10
FPS_FREQ = 20
STATUS = []
is_windows = (os.name == 'nt')
class Board:
def __init__(self, size=[0,0]):
self.names = ['', '', '']
self.params = {'R':DEF_R, 'T':10, 'b':[1], 'm':0.1, 's':0.01, 'kn':1, 'gn':1}
self.cells = np.zeros(size)
@classmethod
def from_values(cls, names, params, cells):
self = cls()
self.names = names.copy() if names is not None else None
self.params = params.copy() if params is not None else None
self.cells = cells.copy() if cells is not None else None
return self
@classmethod
def from_data(cls, data):
self = cls()
self.names = [data.get('code',''), data.get('name',''), data.get('cname','')]
self.params = data.get('params')
if self.params:
self.params = self.params.copy()
self.params['b'] = Board.st2fracs(self.params['b'])
self.cells = data.get('cells')
if self.cells:
if type(self.cells) in [tuple, list]:
self.cells = ''.join(self.cells)
self.cells = Board.rle2arr(self.cells)
return self
def to_data(self, is_shorten=True):
rle_st = Board.arr2rle(self.cells, is_shorten)
params2 = self.params.copy()
params2['b'] = Board.fracs2st(params2['b'])
data = {'code':self.names[0], 'name':self.names[1], 'cname':self.names[2], 'params':params2, 'cells':rle_st}
return data
def params2st(self):
params2 = self.params.copy()
params2['b'] = '[' + Board.fracs2st(params2['b']) + ']'
return ','.join(['{}={}'.format(k,str(v)) for (k,v) in params2.items()])
def long_name(self):
# return ' | '.join(filter(None, self.names))
return '{0} - {1} {2}'.format(*self.names)
@staticmethod
def arr2rle(A, is_shorten=True):
''' RLE = Run-length encoding:
http://www.conwaylife.com/w/index.php?title=Run_Length_Encoded
http://golly.sourceforge.net/Help/formats.html#rle
https://www.rosettacode.org/wiki/Run-length_encoding#Python
0=b=. 1=o=A 1-24=A-X 25-48=pA-pX 49-72=qA-qX 241-255=yA-yO '''
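        # Hedged example of the mapping above (input values are assumptions):
        #   A = [[1.0, 1.0], [1.0, 0.0]]  ->  V = [[255, 255], [255, 0]]
        #   arr2rle(A) returns '2yO$yO!' (the trailing '.' of the last row is dropped)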
V = np.rint(A*255).astype(int).tolist() # [[255 255] [255 0]]
code_arr = [ [' .' if v==0 else ' '+chr(ord('A')+v-1) if v<25 else chr(ord('p')+(v-25)//24) + chr(ord('A')+(v-25)%24) for v in row] for row in V] # [[yO yO] [yO .]]
if is_shorten:
rle_groups = [ [(len(list(g)),c.strip()) for c,g in itertools.groupby(row)] for row in code_arr] # [[(2 yO)] [(1 yO) (1 .)]]
for row in rle_groups:
if row[-1][1]=='.': row.pop() # [[(2 yO)] [(1 yO)]]
st = '$'.join(''.join([(str(n) if n>1 else '')+c for n,c in row]) for row in rle_groups) + '!' # "2 yO $ 1 yO"
else:
st = '$'.join(''.join(row) for row in code_arr) + '!'
# print(sum(sum(r) for r in V))
return st
@staticmethod
def rle2arr(st):
rle_groups = re.findall('(\d*)([p-y]?[.boA-X$])', st.rstrip('!')) # [(2 yO)(1 $)(1 yO)]
code_list = sum([[c] * (1 if n=='' else int(n)) for n,c in rle_groups], []) # [yO yO $ yO]
code_arr = [l.split(',') for l in ','.join(code_list).split('$')] # [[yO yO] [yO]]
V = [ [0 if c in ['.','b'] else 255 if c=='o' else ord(c)-ord('A')+1 if len(c)==1 else (ord(c[0])-ord('p'))*24+(ord(c[1])-ord('A')+25) for c in row if c!='' ] for row in code_arr] # [[255 255] [255]]
# lines = st.rstrip('!').split('$')
# rle = [re.findall('(\d*)([p-y]?[.boA-X])', row) for row in lines]
# code = [ sum([[c] * (1 if n=='' else int(n)) for n,c in row], []) for row in rle]
# V = [ [0 if c in ['.','b'] else 255 if c=='o' else ord(c)-ord('A')+1 if len(c)==1 else (ord(c[0])-ord('p'))*24+(ord(c[1])-ord('A')+25) for c in row ] for row in code]
maxlen = len(max(V, key=len))
A = np.array([row + [0] * (maxlen - len(row)) for row in V])/255 # [[1 1] [1 0]]
# print(sum(sum(r) for r in V))
return A
@staticmethod
def fracs2st(B):
return ','.join([str(f) for f in B])
@staticmethod
def st2fracs(st):
return [Fraction(st) for st in st.split(',')]
def clear(self):
self.cells.fill(0)
def add(self, part, shift=[0,0]):
# assert self.params['R'] == part.params['R']
h1, w1 = self.cells.shape
h2, w2 = part.cells.shape
h, w = min(h1, h2), min(w1, w2)
i1, j1 = (w1 - w)//2 + shift[1], (h1 - h)//2 + shift[0]
i2, j2 = (w2 - w)//2, (h2 - h)//2
# self.cells[j:j+h, i:i+w] = part.cells[0:h, 0:w]
vmin = np.amin(part.cells)
for y in range(h):
for x in range(w):
if part.cells[j2+y, i2+x] > vmin:
self.cells[(j1+y)%h1, (i1+x)%w1] = part.cells[j2+y, i2+x]
return self
def transform(self, tx, mode='RZSF', is_world=False):
if 'R' in mode and tx['rotate'] != 0:
self.cells = snd.rotate(self.cells, tx['rotate'], reshape=not is_world, order=0, mode='wrap' if is_world else 'constant')
if 'Z' in mode and tx['R'] != self.params['R']:
# print('* {} / {}'.format(tx['R'], self.params['R']))
shape_orig = self.cells.shape
self.cells = snd.zoom(self.cells, tx['R'] / self.params['R'], order=0)
if is_world:
self.cells = Board(shape_orig).add(self).cells
self.params['R'] = tx['R']
if 'F' in mode and tx['flip'] != -1:
if tx['flip'] in [0,1]: self.cells = np.flip(self.cells, axis=tx['flip'])
elif tx['flip'] == 2: self.cells[:, :-MIDX-1:-1] = self.cells[:, :MIDX]
elif tx['flip'] == 3: self.cells[:, :-MIDX-1:-1] = self.cells[::-1, :MIDX]
if 'S' in mode and tx['shift'] != [0, 0]:
self.cells = snd.shift(self.cells, tx['shift'], order=0, mode='wrap')
# self.cells = np.roll(self.cells, tx['shift'], (1, 0))
return self
def add_transformed(self, part, tx):
part = copy.deepcopy(part)
self.add(part.transform(tx, mode='RZF'), tx['shift'])
return self
def crop(self):
vmin = np.amin(self.cells)
coords = np.argwhere(self.cells > vmin)
y0, x0 = coords.min(axis=0)
y1, x1 = coords.max(axis=0) + 1
self.cells = self.cells[y0:y1, x0:x1]
return self
class Automaton:
kernel_core = {
0: lambda r: (4 * r * (1-r))**4, # polynomial (quad4)
1: lambda r: np.exp( 4 - 1 / (r * (1-r)) ), # exponential / gaussian bump (bump4)
2: lambda r, q=1/4: (r>=q)*(r<=1-q), # step (stpz1/4)
3: lambda r, q=1/4: (r>=q)*(r<=1-q) + (r<q)*0.5 # staircase (life)
}
field_func = {
0: lambda n, m, s: np.maximum(0, 1 - (n-m)**2 / (9 * s**2) )**4 * 2 - 1, # polynomial (quad4)
1: lambda n, m, s: np.exp( - (n-m)**2 / (2 * s**2) ) * 2 - 1, # exponential / gaussian (gaus)
2: lambda n, m, s: (np.abs(n-m)<=s) * 2 - 1 # step (stpz)
}
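    # Hedged example: with the gaussian field_func (index 1) and parameters m, s,
    # a neighborhood sum n == m gives growth +1, while n far from m tends to -1.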
def __init__(self, world):
self.world = world
self.world_FFT = np.zeros(world.cells.shape)
        self.potential_FFT = np.zeros(world.cells.shape)
import os
from os import listdir
from os.path import isfile,join,isdir
import numpy as np
from skimage import io
import timeit
from tkinter import *
from tkinter import filedialog
import platform
def Order_Holograms_After_Reconstruction(folderPath):
'''
Organizes the reconstruction folders into an array that can be called. The first axis is z-slices and the second axis is the time points. [z,t]
'''
#Finds folder names
Folder_Names = np.array([(f,float(f.name)) for f in os.scandir(folderPath)])
#Orders folder names
Ascending_Order = Folder_Names[Folder_Names[:,1].argsort()]
    #Creates a 2D array (z-slices x time points) to hold the ordered time-point file names for each z-slice
folder_names_combined = np.zeros((len(Ascending_Order),len(listdir(Ascending_Order[0,0].path)))).astype(np.unicode_)
for z_slice in range(len(Ascending_Order)):
        #Finds file names of each time point for the current z-slice
File_Names = [File_Names for File_Names in listdir(Ascending_Order[z_slice,0].path) if isfile(join(Ascending_Order[z_slice,0],File_Names))]
#Splits apart the folder names ('-10.000' becomes ['-10','000'] )
z_slice_time_stamp_names = np.array([File_Names[time_point].split('.') for time_point in range(len(File_Names))])
        #Puts the file names in ascending numeric order
folder_names_combined[z_slice] = np.array([z_slice_time_stamp_names[z_slice_time_stamp_names[:,0].astype(np.float).argsort()][t_slice][0]+'.'+z_slice_time_stamp_names[z_slice_time_stamp_names[:,0].astype(np.float).argsort()][t_slice][1] for t_slice in range(len(File_Names))])
#Create a list of all file paths and folder names
Organized_Files = []
for x in range(len(folder_names_combined[:,0])):
for y in range(len(folder_names_combined[0,:])):
Organized_Files.append(Ascending_Order[x,0].path+'/'+str(folder_names_combined[x,y]))
#Converts the Organized_Files list into an array that is reshaped from a 1D array into a 2D array with rows equal to the number of z-slices and columns equal to the number of time points
Organized_Files = np.array(Organized_Files).reshape((len(folder_names_combined[:,0]),len(folder_names_combined[0,:])))
return Organized_Files,Ascending_Order
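# Hedged usage sketch (the path below is hypothetical): the first return value is
# indexed as [z_slice, time_point]; the second holds the z-slice folders in ascending order.
#   files, z_slices = Order_Holograms_After_Reconstruction('/path/to/reconstructions')
#   first_image = io.imread(files[0, 0])   # lowest z-slice, first time point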
def Error_Checking():
'''
    Determine whether any errors in the selected input folder, output folder, or file names would prevent the program from running to completion.
    Errors_Found holds five error flags:
    0 - The input folder exists, contains only properly named subfolders, and all files in the subfolders are properly named tif or tiff files.
    1 - The output folder exists
    2 - Z Projection name is valid
    3 - Z Location name is valid
    4 - A projection output method has been selected
    If all of these checks pass, then Error_Count will be 0.
'''
Errors_Found = np.zeros(5)
'''
All error symbols are drawn to allow the program to properly remove them should the error be fixed. Without creating them, a tkinter variable error occurs.
'''
Overall_Error_Message_Canvas = canvas.create_window(Start_Processing_Horizontal.get(),Overal_Error_Spacing.get(),window=Overall_Error_Message)
Input_Error_Label_Canvas = canvas.create_window(Error_Symbol_Horizontal.get(),Input_Spacing.get(),window=Input_Error_Label)
Output_Error_Label_Canvas = canvas.create_window(Error_Symbol_Horizontal.get(),Output_Spacing.get(),window=Output_Error_Label)
Z_Value_Error_Label_Canvas = canvas.create_window(Error_Symbol_Horizontal.get(),Z_Value_Spacing.get(),window=Z_Value_Error_Label)
Z_Loc_Error_Label_Canvas = canvas.create_window(Error_Symbol_Horizontal.get(),Z_Loc_Spacing.get(),window=Z_Loc_Error_Label)
Input_Folder_Error_Canvas = canvas.create_window(Input_Output_Error_Horizontal.get(),Z_Value_Error_Spacing.get(),window=Input_Folder_Error)
Input_Subfolder_Error_Canvas = canvas.create_window(Input_Output_Error_Horizontal.get(),Z_Value_Error_Spacing.get(),window=Input_Subfolder_Error)
Input_Reconstruction_Files_Error_Canvas = canvas.create_window(Input_Output_Error_Horizontal.get(),Z_Value_Error_Spacing.get(),window=Input_Reconstruction_Files_Error)
Output_Directory_Folder_Error_Canvas = canvas.create_window(Input_Output_Error_Horizontal.get(),Z_Loc_Error_Spacing.get(),window=Output_Directory_Folder_Error)
Z_Value_Filename_Error_Canvas = canvas.create_window(Output_Filenames_Horizontal.get(),Z_Value_Error_Spacing.get(),window=Z_Value_Filename_Error)
Z_Loc_Filename_Error_Canvas = canvas.create_window(Output_Filenames_Horizontal.get(),Z_Loc_Error_Spacing.get(),window=Z_Loc_Filename_Error)
Projection_Output_Method_Error_Canvas = canvas.create_window(Error_Symbol_Horizontal.get(),Pipeline_Menu_Spacing.get(),window=Projection_Output_Method_Error)
Projection_Output_Method_Error_Message_Canvas = canvas.create_window(Output_Filenames_Horizontal.get(),Pipeline_Menu_Spacing.get(),window=Projection_Output_Method_Error_Message)
#Error symbol moves if Z Location is the selected method
if Pipeline_Method_Selection.get() == 'Z Location Value':
Z_Loc_Error_Label_Canvas = canvas.create_window(Error_Symbol_Horizontal.get(),Z_Value_Spacing.get(),window=Z_Loc_Error_Label)
'''
Check if reconstructions in the input folder selected are formatted properly
Proper format for reconstruction folder:
Main folder:
Any valid name
Must contain only sub-folders with names that are numbers such as -20.000 or 0.000 that correspond to the z-slices created during the reconstruction
            Beware of hidden folders/files; these will trigger the "Reconstruction directory does not contain valid Folders" error.
To check if this is the cause:
1)Open command prompt and change the directory to the folder being selected. (typically cd [folder address])
2)Print all directories with:
2a) Windows: "dir"
2b) Mac/Linux: "ls" (this is a lower case L)
3)If anything other than the expected folders appear on the list, this is the cause. Otherwise check inside sub-folders
Sub-folders:
Name can only be a number with two sections separated by a period that correspond to the z-slices that were reconstructed. Example folder names are "10.000", "10.250", "-20.000", and "0.000"
Image files:
Only tif/tiff files allowed inside of the sub-folders
Name must be an integer that corresponds to the time point where it was reconstructed from
Can't be placed outside of the sub-folders
'''
if not isdir(Input_Directory_Text.get()):
Errors_Found[0] = 1
Input_Folder_Error_Check.set(1)
if isdir(Input_Directory_Text.get()):
Input_Folder_Error_Check.set(0)
if Input_Folder_Error_Check.get() == 0:
try:
Folder_Names = np.array([(f,float(f.name)) for f in os.scandir(Input_Directory_Text.get())])
except ValueError:
Errors_Found[0] = 1
Input_Subfolder_Error_Check.set(1)
else:
if len(Folder_Names) == 0:
Errors_Found[0] = 1
Input_Subfolder_Error_Check.set(1)
if len(Folder_Names) > 0:
Descending_Order = Folder_Names[(-Folder_Names[:,1]).argsort()]
Input_Subfolder_Error_Check.set(0)
if Input_Subfolder_Error_Check.get() == 0:
Folder_Names = np.array([(f,float(f.name)) for f in os.scandir(Input_Directory_Text.get())])
Descending_Order = Folder_Names[(-Folder_Names[:,1]).argsort()]
for folders in range(len(Descending_Order)):
try:
[[File_Names.split('.')[0],File_Names.split('.')[1]] for File_Names in listdir(Descending_Order[folders,0].path) if isfile(join(Descending_Order[folders,0],File_Names))]
except:
Input_Reconstruction_Files_Error_Check.set(1)
else:
File_Names = [[File_Names.split('.')[0],File_Names.split('.')[1]] for File_Names in listdir(Descending_Order[folders,0].path) if isfile(join(Descending_Order[folders,0],File_Names))]
if len(File_Names) == 0:
Input_Reconstruction_Files_Error_Check.set(1)
if len(File_Names)>0:
filename_check = np.zeros(len(File_Names))
filetype_check = np.zeros(len(File_Names)).astype(np.unicode_)
issues_found = 0
for x in range(len(File_Names)):
proceed = 0
try:
filename_check[x] = File_Names[x][0]
except ValueError:
issues_found = 1
Input_Reconstruction_Files_Error_Check.set(1)
break
if File_Names[x][1] != 'tif' and File_Names[x][1] != 'tiff':
issues_found = 1
Input_Reconstruction_Files_Error_Check.set(1)
break
if issues_found==1:
Errors_Found[0] = 1
Input_Reconstruction_Files_Error_Check.set(1)
break
if issues_found == 0:
Input_Reconstruction_Files_Error_Check.set(0)
if Input_Folder_Error_Check.get() == 0:
canvas.delete(Input_Folder_Error_Canvas)
if Input_Subfolder_Error_Check.get() == 0:
canvas.delete(Input_Subfolder_Error_Canvas)
if Input_Reconstruction_Files_Error_Check.get() == 0:
canvas.delete(Input_Reconstruction_Files_Error_Canvas)
if Input_Folder_Error_Check.get() == 0 and Input_Subfolder_Error_Check.get() == 0 and Input_Reconstruction_Files_Error_Check.get() == 0:
canvas.delete(Input_Error_Label_Canvas)
'''
Check if the output directory exists
'''
if not isdir(Output_Directory_Text.get()):
Errors_Found[1] = 1
if isdir(Output_Directory_Text.get()):
canvas.delete(Output_Error_Label_Canvas)
canvas.delete(Output_Directory_Folder_Error_Canvas)
'''
Check that Z Projection filename is valid
'''
if Z_Project_Value_Name.get() == '(Required)' and (Pipeline_Method_Selection.get() == 'Z Projection Value' or Pipeline_Method_Selection.get() == 'Both'):
Errors_Found[2] = 1
try:
with open(Output_Directory_Text.get()+Z_Project_Value_Name.get()+'.tif','w') as file:
pass
except:
Errors_Found[2] = 1
else:
os.remove(Output_Directory_Text.get()+Z_Project_Value_Name.get()+'.tif')
'''
Check that Z Location filename is valid
'''
if Z_Project_Loc_Name.get() == '(Required)' and (Pipeline_Method_Selection.get() == 'Z Location Value' or Pipeline_Method_Selection.get() == 'Both'):
Errors_Found[3] = 1
try:
with open(Output_Directory_Text.get()+Z_Project_Loc_Name.get()+'.tif','w') as file:
pass
except:
Errors_Found[3] = 1
else:
os.remove(Output_Directory_Text.get()+Z_Project_Loc_Name.get()+'.tif')
if Pipeline_Method_Selection.get() == 'Select One':
Errors_Found[4] = 1
'''
Count the number of errors
'''
Error_Count = np.count_nonzero(Errors_Found!=0)
'''
Delete the error messages that are not needed.
'''
if Errors_Found[0] == 0:
canvas.delete(Input_Error_Label_Canvas)
if Errors_Found[1] == 0:
canvas.delete(Output_Error_Label_Canvas)
if Errors_Found[2] == 0:
canvas.delete(Z_Value_Error_Label_Canvas)
canvas.delete(Z_Value_Filename_Error_Canvas)
if Errors_Found[3] == 0:
canvas.delete(Z_Loc_Error_Label_Canvas)
canvas.delete(Z_Loc_Filename_Error_Canvas)
if Errors_Found[4] == 0:
canvas.delete(Projection_Output_Method_Error_Canvas)
canvas.delete(Projection_Output_Method_Error_Message_Canvas)
if Error_Count == 0:
canvas.delete(Overall_Error_Message_Canvas)
GUI.update()
Save_Option_Window()
def Z_Projection():
'''
start - The starting point for reporting how long the processes have taken
Reconstruction_Files - The files that have their paths denoted and are sorted by increasing value of Z
    Z_Slice_Values[:,1] - The z-slice values, used for locating which slice the projected value came from
    Z_Proj_Loc - An array of the z slice where the projected (min/max) value is for each (x,y) column
    Reconstruction files are first organized and processed one time point at a time. The z-stack is built
    for the current time point and, depending upon the method chosen, the projected value (min or max), the
    location of that value, or both are saved in a 2D array.
'''
start = timeit.default_timer()
#Compiles the folderpath and z-slice values for the reconstruction folder given into an array with the format of [z,t]
Reconstruction_Files,Z_Slice_Values = Order_Holograms_After_Reconstruction(Input_Directory_Text.get())
#Load first tif to get size of image
Shape_Finding = io.imread(Reconstruction_Files[0][0])
#Saving output(s) as hyperstack(s) (t,x,y)
if Save_As_Hyperstack_Check.get() == 1:
#Create empty output arrays dependent upon method chosen
if Pipeline_Method_Selection.get() == 'Both':
Z_Proj_Value = np.zeros((Reconstruction_Files.shape[1],Shape_Finding.shape[0],Shape_Finding.shape[1]),'<f4')
Z_Proj_Loc = np.zeros((Reconstruction_Files.shape[1],Shape_Finding.shape[0],Shape_Finding.shape[1]),'<f4')
if Pipeline_Method_Selection.get() == 'Z Projection Value':
Z_Proj_Value = np.zeros((Reconstruction_Files.shape[1],Shape_Finding.shape[0],Shape_Finding.shape[1]),'<f4')
if Pipeline_Method_Selection.get() == 'Z Location Value':
Z_Proj_Loc = np.zeros((Reconstruction_Files.shape[1],Shape_Finding.shape[0],Shape_Finding.shape[1]),'<f4')
#Run for every time point
for time_point in range(Reconstruction_Files.shape[1]):
#Create empty input array
Reconstruction_Built = np.zeros((Reconstruction_Files.shape[0],Shape_Finding.shape[0],Shape_Finding.shape[1]),'<f4')
#Populate input array with holograms
for x in range(Reconstruction_Files.shape[0]):
Reconstruction_Built[x,:,:] = io.imread(Reconstruction_Files[x][time_point])
#Print progress every 5 z-slices
if x != 0 and x%5==0:
print(f'Finished loading {x} z-slices out of {Reconstruction_Files.shape[0]} ({np.round(timeit.default_timer()-start,3)}) seconds')
print(f'Finding Z Projection and Location for Time Point #{time_point+1} ({np.round(timeit.default_timer()-start,3)}) seconds')
#Minimum Projection used
if Output_Min_Format.get() == 1:
if Pipeline_Method_Selection.get() == 'Both':
Z_Proj_Value[time_point],Z_Proj_Loc[time_point] = np.min(Reconstruction_Built,axis=0),Z_Slice_Values[np.argmin(Reconstruction_Built,axis=0),1].astype('<f4')
if Pipeline_Method_Selection.get() == 'Z Projection Value':
Z_Proj_Value[time_point] = np.min(Reconstruction_Built,axis=0)
if Pipeline_Method_Selection.get() == 'Z Location Value':
Z_Proj_Loc[time_point] = Z_Slice_Values[np.argmin(Reconstruction_Built,axis=0),1].astype('<f4')
#Maximum Projection used
if Output_Max_Format.get() == 1:
if Pipeline_Method_Selection.get() == 'Both':
                    Z_Proj_Value[time_point],Z_Proj_Loc[time_point] = np.max(Reconstruction_Built,axis=0),Z_Slice_Values[np.argmax(Reconstruction_Built,axis=0),1].astype('<f4')
import numpy as np
import os
import pickle
#128x128
class Filter3x3:
n_filters = 0
PATH_NAME = '' # set this if you want to train it
filters = []
weights = []
biases = []
# generate weights and biases
# self.weights = random.randn(n_inputs, n_nodes) / n_inputs
# self.biases = zeros(n_nodes)
lastInShape = []
lastIn = []
lastTotals = 0
lastPoolIn = []
lastFilterIn = []
'''
Takes 2d matrix of image and transforms it with all the 3x3 filters
Outputs 3d array of transformed images
'''
def filter(self, imageMatrix): # input image 2d array/matrix
imageMatrix = np.subtract(np.divide(imageMatrix, 255), 0.5) # make values between -0.5 and 0.5 #
#imageMatrix = pad(imageMatrix, (1, 1), 'constant') # pad 0s around
self.lastFilterIn = imageMatrix
h, w = imageMatrix.shape
        transformedImage = np.zeros((self.n_filters, h-2, w-2)) # valid 3x3 convolution: output is 2 smaller than the input in each spatial dimension
for k in range(self.n_filters):
for i in range(h-2): # iterates all possible 3x3 regions in the image
for j in range(w-2):
temp3x3 = imageMatrix[i:(i+3), j:(j+3)] #selects 3x3 area using current indexes
transformedImage[k, i, j] = np.sum(temp3x3 * self.filters[k])
return transformedImage
'''
Backward prop for filter
'''
def bpFilter(self, lossGradient, learn_rate):
lossGradientFilters = np.zeros(self.filters.shape)
h, w = self.lastFilterIn.shape
for f in range(self.n_filters):
for i in range(h-2): # iterates all possible size x size regions in the image
for j in range(w-2):
tempSel = self.lastFilterIn[i:(i+3), j:(j+3)]
lossGradientFilters[f] += tempSel * lossGradient[f, i, j]
# Update filters
self.filters -= learn_rate * lossGradientFilters
#1st layer -> return nothing
return None
'''
Cuts down the size of image to get rid of redundant info
'''
def pool(self, imageMatrix): # pool by size of 2
x, h, w = imageMatrix.shape
h = h // 2
w = w // 2
self.lastPoolIn = imageMatrix
        transformedImage = np.zeros((self.n_filters, h, w))
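        # Hedged completion (assumption, not from the source): 2x2 max pooling over
        # each filter map, halving the spatial dimensions.
        for f in range(self.n_filters):
            for i in range(h):
                for j in range(w):
                    transformedImage[f, i, j] = np.amax(imageMatrix[f, 2 * i:2 * i + 2, 2 * j:2 * j + 2])
        return transformedImage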
import os
import numpy as np
np.random.seed(1969)
import tensorflow as tf
tf.set_random_seed(1969)
from scipy import signal
from glob import glob
import re
import pandas as pd
import gc
from scipy.io import wavfile
from keras import optimizers, losses, activations, models
from keras.layers import GRU, LSTM, Convolution2D, Dense, Input, Flatten, Dropout, MaxPooling2D, BatchNormalization, Conv3D, ConvLSTM2D
from keras.callbacks import TensorBoard
from keras.models import Sequential
from tqdm import tqdm
from sklearn.model_selection import GroupKFold
from python_speech_features import mfcc
from python_speech_features import delta
from python_speech_features import logfbank
L = 16000
legal_labels = 'yes no up down left right on off stop go silence unknown'.split()
root_path = r'../'
out_path = r'.'
model_path = r'.'
train_data_path = os.path.join(root_path, 'train', 'audio')
test_data_path = os.path.join(root_path, 'test', 'audio')
def list_wavs_fname(dirpath, ext='wav'):
print(dirpath)
fpaths = glob(os.path.join(dirpath, r'*/*' + ext))
pat = r'.+/(\w+)/\w+\.' + ext + '$'
labels = []
for fpath in fpaths:
r = re.match(pat, fpath)
if r:
labels.append(r.group(1))
pat = r'.+/(\w+\.' + ext + ')$'
fnames = []
for fpath in fpaths:
r = re.match(pat, fpath)
if r:
fnames.append(r.group(1))
return labels, fnames
def pad_audio(samples):
if len(samples) >= L: return samples
else: return np.pad(samples, pad_width=(L - len(samples), 0), mode='constant', constant_values=(0, 0))
def chop_audio(samples, L=16000, num=1000):
for i in range(num):
beg = np.random.randint(0, len(samples) - L)
yield samples[beg: beg + L]
def label_transform(labels):
nlabels = []
for label in labels:
if label == '_background_noise_':
nlabels.append('silence')
elif label not in legal_labels:
nlabels.append('unknown')
else:
nlabels.append(label)
return pd.get_dummies(pd.Series(nlabels))
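# Hedged example: label_transform(['yes', 'dog', '_background_noise_']) yields a
# one-hot DataFrame whose columns include 'yes', 'unknown' and 'silence'.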
labels, fnames = list_wavs_fname(train_data_path)
new_sample_rate=16000
y_train = []
x_train = np.zeros((64727,99,26),np.float32)
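# Hedged sketch (assumption, not from the source): fill x_train with log filterbank
# features. With python_speech_features defaults (25 ms window, 10 ms step, 26 filters),
# a padded/truncated 1-second 16 kHz clip yields a (99, 26) matrix, matching the
# preallocated shape above.
for i, (label, fname) in enumerate(zip(labels, fnames)):
    _, samples = wavfile.read(os.path.join(train_data_path, label, fname))
    x_train[i] = logfbank(pad_audio(samples)[:L], samplerate=new_sample_rate)
    y_train.append(label)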
# -*- coding: utf-8 -*-
"""
Created on Mon Oct 7 10:50:16 2019
@author: ams_user
"""
from ukf import UKF
import csv
import numpy as np
import math
import matplotlib.pyplot as plt
def iterate_x(x_in, timestep, inputs):
'''this function is based on the x_dot and can be nonlinear as needed'''
ret = np.zeros(len(x_in))
if x_in[4] == 0:
ret[0] = x_in[0] + x_in[2] * math.cos(x_in[3]) * timestep
ret[1] = x_in[1] + x_in[2] * math.sin(x_in[3]) * timestep
ret[2] = x_in[2]
ret[3] = x_in[3] + timestep * x_in[4]
ret[4] = x_in[4]
else:
ret[0] = x_in[0] + (x_in[2] / x_in[4]) * (math.sin(x_in[3] + x_in[4] * timestep) - math.sin(x_in[3]))
ret[1] = x_in[1] + (x_in[2] / x_in[4]) * (-math.cos(x_in[3] + x_in[4] * timestep) + math.cos(x_in[3]))
ret[2] = x_in[2]
ret[3] = x_in[3] + timestep * x_in[4]
ret[4] = x_in[4]
return ret
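# iterate_x above propagates the state [x, y, v, theta, omega] with a CTRV-style
# (constant turn rate and velocity) motion model; the omega == 0 branch uses the
# straight-line limit to avoid dividing by zero.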
def main():
np.set_printoptions(precision=3)
# Process Noise
q = np.eye(5)
q[0][0] = 0.001
q[1][1] = 0.001
q[2][2] = 0.004
q[3][3] = 0.025
q[4][4] = 0.025
# q[5][5] = 0.0025
realx = []
realy = []
realv = []
realtheta = []
realw = []
estimatex = []
estimatey =[]
estimatev = []
estimatetheta = []
estimatew = []
estimate2y=[]
estimate2x=[]
# create measurement noise covariance matrices
r_imu = np.zeros([1, 1])
r_imu[0][0] = 0.01
r_compass = np.zeros([1, 1])
r_compass[0][0] = 0.02
r_encoder = np.zeros([1, 1])
r_encoder[0][0] = 0.001
r_gpsx = np.zeros([1, 1])
r_gpsx[0][0] = 0.1
r_gpsy = np.zeros([1, 1])
r_gpsy[0][0] = 0.1
ini=np.array([0, 0, 0.3, 0, 0.3])
# pass all the parameters into the UKF!
# number of state variables, process noise, initial state, initial coariance, three tuning paramters, and the iterate function
state_estimator = UKF(5, q, ini, np.eye(5), 0.04, 0.0, 2.0, iterate_x, r_imu, r_compass, r_encoder, r_gpsx, r_gpsy)
xest = np.zeros((5, 1))
pest = np.eye(5)
jf=[]
with open('example.csv', 'r') as csvfile:
reader = csv.reader(csvfile)
headers = next(reader)
last_time = 0
# read data
for row in reader:
row = [float(x) for x in row]
cur_time = row[0]
d_time = cur_time - last_time
real_state = np.array([row[i] for i in [5, 6, 3, 4, 2]])
# create an array for the data from each sensor
compass_hdg = row[9]
compass_data = np.array([compass_hdg])
encoder_vel = row[10]
encoder_data = np.array([encoder_vel])
gps_x = row[11]
gpsx_data = np.array([gps_x])
gps_y = row[12]
gpsy_data = np.array([gps_y])
imu_yaw_rate = row[8]
            imu_data = np.array([imu_yaw_rate])
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
import sys
import os
import numpy as np
import pandas as pd
import math
import h5py
import click
import gc
from zfits import FactFits
from tqdm import tqdm
from fact.instrument.camera import non_standard_pixel_chids as non_standard_chids
import fact.plotting as factPlots
from astropy.io import fits
from matplotlib import gridspec
from matplotlib.backends.backend_pdf import PdfPages
import matplotlib.colors as colors
from matplotlib.cm import hot, seismic
import config as config
from constants import NRCHID, NRCELL, ROI, PEAFACTOR, DACfactor, ADCCOUNTSTOMILIVOLT
###############################################################################
# ############## Helper ############## #
###############################################################################
interval_color = ['k', 'royalblue', 'orange', 'forestgreen']
###############################################################################
font = {'family': 'serif',
'color': 'grey',
'weight': 'bold',
'size': 16,
'alpha': 0.5,
}
###############################################################################
def get_best_limits(value, scale=0.02):
min, max = np.amin(value), np.amax(value)
range = max-min
offset = range*scale
return [min-offset, max+offset]
###############################################################################
def linearerFit(x, m, b):
return (m*x+b)
###############################################################################
def get_not_useful_chids(interval_nr):
# default values
not_useful_chids = np.array([non_standard_chids['crazy'],
non_standard_chids['dead']]).flatten()
if(interval_nr == 2):
not_useful_chids = np.append(not_useful_chids, np.arange(720, 755+1, 1))
elif(interval_nr == 3):
not_useful_chids = np.append(not_useful_chids, np.arange(1296, 1299+1, 1))
not_useful_chids = np.append(not_useful_chids, np.arange(697, 701+1, 1))
not_useful_chids = np.append(not_useful_chids, np.arange(1080, 1439+1))
return np.unique(not_useful_chids)
###############################################################################
def get_useful_chids(interval_nr, bad_chids=[]):
chid_list = np.linspace(0, NRCHID-1, NRCHID, dtype='int')
not_useful_chids = get_not_useful_chids(interval_nr)
not_useful_chids = np.unique(np.append(not_useful_chids, bad_chids)).astype('int')
# print(np.sort(not_useful_chids))
useful_chids = chid_list[np.setdiff1d(chid_list, not_useful_chids)]
return useful_chids
###############################################################################
# See https://matplotlib.org/users/colormapnorms.html
###############################################################################
class MidpointNormalize(colors.Normalize):
def __init__(self, vmin=None, vmax=None, midpoint=None, clip=False):
self.midpoint = midpoint
colors.Normalize.__init__(self, vmin, vmax, clip)
def __call__(self, value, clip=None):
# I'm ignoring masked values and all kinds of edge cases to make a
# simple example...
x, y = [self.vmin, self.midpoint, self.vmax], [0, 0.5, 1]
return np.ma.masked_array(np.interp(value, x, y))
###############################################################################
# ############## Drs-Value Plots ############## #
###############################################################################
###############################################################################
@click.command()
@click.argument('data_collection_path',
default='/net/big-tank/POOL/projects/fact/drs4_calibration_data/' +
'calibration/calculation/version_1/dataCollection.h5',
type=click.Path(exists=True))
@click.argument('interval_file_path',
default='/net/big-tank/POOL/projects/fact/drs4_calibration_data/' +
'calibration/calculation/version_1/intervalIndices.h5',
type=click.Path(exists=True))
@click.argument('store_file_path',
default='/home/fschulz/plots/version_1/drsValues/gain/std_hist_gain.png',
type=click.Path(exists=False))
@click.argument('interval_array',
default=[3])
@click.option('--drs_value_type', '-type',
default='Gain',
type=click.Choice(['Baseline', 'Gain']))
@click.argument('x_lim',
default=[0.0, 4.2])
###############################################################################
def drs_value_std_hist(data_collection_path, interval_file_path,
store_file_path, interval_array,
drs_value_type, x_lim):
drs_value_std_hist_(data_collection_path, interval_file_path,
store_file_path, interval_array,
drs_value_type, x_lim)
###############################################################################
def drs_value_std_hist_(data_collection_path, interval_file_path,
store_file_path, interval_array,
drs_value_type, x_lim):
NRCELLSPERCHID = config.nrCellsPerChid[drs_value_type]
factor_str = ''
unit_str = r' $\mathrm{mV}$'
for interval_nr in interval_array:
groupname = 'Interval'+str(interval_nr)
title_str = 'Intervall '+str(interval_nr)
print(groupname)
with h5py.File(interval_file_path, 'r') as interval_source:
data = interval_source[groupname]
interval_indices = np.array(data['IntervalIndices'])
print('loading')
with h5py.File(data_collection_path, 'r') as store:
drs_value_std = store[drs_value_type+'Std'][interval_indices, :].astype('float32')
useful_chids = get_useful_chids(interval_nr)
drs_value_std = drs_value_std.reshape(-1, NRCHID, NRCELLSPERCHID)[:, useful_chids, :].flatten()
if(drs_value_type == 'Gain'):
drs_value_std /= pow(10, 3)
factor_str = r' $\cdot$'
unit_str = r' $10^{-3}$'
drs_value_std_mean = np.mean(drs_value_std)
drs_value_std_std = np.std(drs_value_std)
drs_value_std_max = max(drs_value_std)
drs_value_std_min = min(drs_value_std)
color = 'g'
weights = np.full(len(drs_value_std), 100/len(drs_value_std))
nr_bins = int(abs((drs_value_std_max-drs_value_std_min))/abs(x_lim[1]-x_lim[0])*100)
plt.hist(drs_value_std, bins=nr_bins, weights=weights,
histtype='step', label=title_str+':', color=color)
info_str = (r' $\overline{x}$: '+'({:0.2f}'.format(drs_value_std_mean) +
' $\pm$ '+'{:0.2f})'.format(drs_value_std_std)+factor_str+unit_str +
'\n'+r' $x_\mathrm{Max}$: '+'{:0.2f}'.format(drs_value_std_max)+factor_str+unit_str)
plt.plot([], [], label=info_str)
del drs_value_std
gc.collect()
plt.xlabel(r'Standardabweichung /'+unit_str)
plt.ylabel(r'Häufigkeit /$\mathrm{\%}$')
plt.xlim(x_lim)
plt.legend(loc='upper right')
plt.tight_layout()
if(store_file_path is not None):
plt.savefig(store_file_path, dpi=200)
plt.show()
plt.close()
###############################################################################
@click.command()
@click.argument('data_collection_path',
default='/net/big-tank/POOL/projects/fact/drs4_calibration_data/' +
'calibration/calculation/version_1/dataCollection.h5',
type=click.Path(exists=True))
@click.argument('interval_file_path',
default='/net/big-tank/POOL/projects/fact/drs4_calibration_data/' +
'calibration/calculation/version_1/intervalIndices.h5',
type=click.Path(exists=True))
@click.argument('store_file_path',
default='/home/fschulz/plots/version_1/drsValues/gain/std_hist_chid250_cell250_gain.png',
type=click.Path(exists=False))
@click.argument('interval_array',
default=[3])
@click.option('--drs_value_type', '-type',
default='Gain',
type=click.Choice(['Baseline', 'Gain']))
@click.argument('chid',
default=250)
@click.argument('cell',
default=250)
@click.argument('cut_off_error_factor',
default=2)
@click.argument('x_lim',
default=[0.0, 4.2])
###############################################################################
def drs_value_std_hist_per_chid_cell(data_collection_path, interval_file_path,
store_file_path, interval_array,
drs_value_type, chid,
cell, cut_off_error_factor,
x_lim):
drs_value_std_hist_per_chid_cell_(data_collection_path, interval_file_path,
store_file_path, interval_array,
drs_value_type, chid,
cell, cut_off_error_factor,
x_lim)
###############################################################################
def drs_value_std_hist_per_chid_cell_(data_collection_path, interval_file_path,
store_file_path, interval_array,
drs_value_type, chid,
cell, cut_off_error_factor,
x_lim):
NRCELLSPERCHID = config.nrCellsPerChid[drs_value_type]
if(cell > NRCELLSPERCHID):
print('ERROR: cell > '+str(NRCELLSPERCHID))
return
value_index = chid*NRCELLSPERCHID + cell
factor_str = ''
unit_str = r'$\mathrm{mV}$'
for interval_nr in interval_array:
groupname = 'Interval'+str(interval_nr)
title_str = 'Intervall '+str(interval_nr)
print(groupname)
with h5py.File(interval_file_path, 'r') as interval_source:
data = interval_source[groupname]
interval_indices = np.array(data['IntervalIndices'])
print('loading')
with h5py.File(data_collection_path, 'r') as store:
drs_value_std = store[drs_value_type+'Std'][interval_indices, value_index].astype('float32')
if(drs_value_type == 'Gain'):
drs_value_std /= DACfactor/pow(10, 3)
factor_str = r' $\cdot$'
unit_str = r' $10^{-3}$'
drs_value_std_mean = np.mean(drs_value_std)
drs_value_std_std = np.std(drs_value_std)
drs_value_std_max = max(drs_value_std)
drs_value_std_min = min(drs_value_std)
drs_value_std_limit = drs_value_std_mean*cut_off_error_factor
n = len(drs_value_std)
n_ = len(drs_value_std[drs_value_std > drs_value_std_limit])
print(n_/n*100, '%')
plot = plt.plot([], [])
color = plot[0].get_color()
label = (title_str+':' +
'\n'+r' $\overline{x}$: '+str(round(drs_value_std_mean, 2))+factor_str+unit_str +
'\n'+r' $\sigma_\mathrm{Hist}$: '+str(round(drs_value_std_std, 2))+factor_str+unit_str +
'\n'+r' $x_\mathrm{Max}$: '+str(round(drs_value_std_max, 2)))+factor_str+unit_str+'\n'
weights = np.full(len(drs_value_std), 100/len(drs_value_std))
nr_bins = int(abs((drs_value_std_max-drs_value_std_min))/abs(x_lim[1]-x_lim[0])*100)
plt.hist(drs_value_std, bins=nr_bins, weights=weights,
histtype='step', label=label, color=color)
plt.axvline(x=drs_value_std_limit, linewidth=2, ls='--', color=color)
plt.xlabel(r'Standardabweichung / '+unit_str)
plt.ylabel(r'Häufigkeit / $\mathrm{\%}$')
plt.xlim(x_lim)
plt.legend(loc='upper right')
if(store_file_path is not None):
plt.savefig(store_file_path, dpi=200)
plt.show()
plt.close()
###############################################################################
@click.command()
@click.argument('data_collection_path',
default='/net/big-tank/POOL/projects/fact/drs4_calibration_data/' +
'calibration/calculation/version_1/dataCollection.h5',
type=click.Path(exists=True))
@click.argument('interval_file_path',
default='/net/big-tank/POOL/projects/fact/drs4_calibration_data/' +
'calibration/calculation/version_1/intervalIndices.h5',
type=click.Path(exists=True))
@click.argument('fit_file_path_array',
default=['/net/big-tank/POOL/projects/fact/drs4_calibration_data/' +
'calibration/calculation/version_1/drsFitParameter_interval3.fits'],
)
@click.argument('store_file_path',
default='/home/fschulz/plots/version_1/drsValues/gain/chid1101_cell240_interval3.jpg',
type=click.Path(exists=False))
@click.argument('interval_array',
default=[3])
@click.option('--drs_value_type', '-type',
default='Gain',
type=click.Choice(['Baseline', 'Gain']))
@click.argument('chid',
default=1101)
@click.argument('cell',
default=240)
@click.argument('ylimits',
default=[])
###############################################################################
def drs_value_cell(data_collection_path, interval_file_path, fit_file_path_array,
store_file_path, interval_array, drs_value_type,
chid, cell, ylimits):
drs_value_cell_(data_collection_path, interval_file_path, fit_file_path_array,
store_file_path, interval_array, drs_value_type,
chid, cell, ylimits)
###############################################################################
def drs_value_cell_(data_collection_path, interval_file_path, fit_file_path_array,
store_file_path, interval_array, drs_value_type,
chid, cell, ylimits=[]):
sampleID = 200
NRCELLSPERCHID = config.nrCellsPerChid[drs_value_type]
if(cell > NRCELLSPERCHID):
print('ERROR: cell > '+str(NRCELLSPERCHID))
return
value_index = chid*NRCELLSPERCHID + cell
value_index_ = value_index
# loading source data
with h5py.File(data_collection_path, 'r') as store:
time = np.array(store['Time'+drs_value_type]).flatten()
ylabel_str = drs_value_type+r' / $\mathrm{mV}$'
slope_unit_str = r'$\,\frac{\mathrm{mV}}{°C}$'
offset_unit_str = r'$\,\mathrm{mV}$'
if(drs_value_type == 'Gain'):
ylabel_str = drs_value_type+r' / $10^{-3}$'
slope_unit_str = r'$\,\frac{10^{-6}}{°C}$'
offset_unit_str = r'$\cdot 10^{-3}$'
use_mask = True
if(drs_value_type == 'Baseline'):
value_index_ = chid*NRCELLSPERCHID*ROI + cell*ROI+sampleID
use_mask = False
mask_collection = []
time_collection = []
temp_collection = []
drs_value_collection = []
fit_value_collection = []
for interval_nr in interval_array:
groupname = 'Interval'+str(interval_nr)
with h5py.File(interval_file_path, 'r') as interval_source:
data = interval_source[groupname]
            interval_indices = np.array(data['IntervalIndices'])
''' CONFIDENTIAL
Copyright (c) 2021 <NAME>,
Department of Remote Sensing and Photogrammetry,
Finnish Geospatial Research Institute (FGI), National Land Survey of Finland (NLS)
PERMISSION IS HEREBY LIMITED TO FGI'S INTERNAL USE ONLY. THE CODE
MAY BE RE-LICENSED, SHARED, OR TAKEN INTO OTHER USE ONLY WITH
A WRITTEN CONSENT FROM THE HEAD OF THE DEPARTMENT.
The software is provided "as is", without warranty of any kind, express or
implied, including but not limited to the warranties of merchantability,
fitness for a particular purpose and noninfringement. In no event shall the
authors or copyright holders be liable for any claim, damages or other
liability, whether in an action of contract, tort or otherwise, arising from,
out of or in connection with the software or the use or other dealings in the
software.
'''
import numpy as np
import math
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from matplotlib.widgets import Slider, Button, RadioButtons, CheckButtons
try:
import pcl
from pyquaternion import Quaternion
except:
print('cannot import pcl -> change python version')
import matplotlib.cm as cmx
from scipy.spatial import distance_matrix
from scipy.optimize import leastsq
import matplotlib
import matplotlib.animation as animation
import open3d as o3d
import glob
import cv2
import cv2.aruco as aruco
import os
from mpl_toolkits.mplot3d.proj3d import proj_transform
from matplotlib.text import Annotation
import pickle
from matplotlib.lines import Line2D
import pandas as pd
import random
from scipy.spatial import ConvexHull
from math import sqrt
from math import atan2, cos, sin, pi
from collections import namedtuple
from matplotlib.patches import Circle
import mpl_toolkits.mplot3d.art3d as art3d
from pyquaternion import Quaternion
np.set_printoptions(suppress=True)
def eulerAnglesToRotationMatrix2(theta):
R_x = np.array([[1, 0, 0],
[0, math.cos(theta[0]), -math.sin(theta[0])],
[0, math.sin(theta[0]), math.cos(theta[0])]
])
R_y = np.array([[math.cos(theta[1]), 0, math.sin(theta[1])],
[0, 1, 0],
[-math.sin(theta[1]), 0, math.cos(theta[1])]
])
R_z = np.array([[math.cos(theta[2]), -math.sin(theta[2]), 0],
[math.sin(theta[2]), math.cos(theta[2]), 0],
[0, 0, 1]
])
R = np.dot(R_z, np.dot(R_y, R_x))
return R
Rot_matrix = eulerAnglesToRotationMatrix2([0, 0, np.deg2rad(-90)])
InitLidar = True
InitLidar = False
global globalTrigger
globalTrigger = True
stereoRectify = False# True
#stereoRectify = True
class Annotation3D(Annotation):
def __init__(self, s, xyz, *args, **kwargs):
Annotation.__init__(self, s, xy=(0, 0), *args, **kwargs)
self._verts3d = xyz
def draw(self, renderer):
xs3d, ys3d, zs3d = self._verts3d
xs, ys, zs = proj_transform(xs3d, ys3d, zs3d, renderer.M)
self.xy = (xs, ys)
Annotation.draw(self, renderer)
def save_obj(obj, name):
with open('/home/eugeniu/catkin_ws/src/testNode/CAMERA_CALIBRATION/data/' + name + '.pkl', 'wb') as f:
pickle.dump(obj, f, protocol=2)
print('{}.pkl Object saved'.format(name))
def load_obj(name):
with open('/home/eugeniu/Desktop/my_data/CameraCalibration/data/saved_files/' + name + '.pkl', 'rb') as f:
return pickle.load(f)
def showErros(_3DErros, IMageNames):
print('len(_3DErros)->{}'.format(np.shape(_3DErros)))
if len(_3DErros)>1:
_3DErros = np.array(_3DErros).squeeze()
# norm_total = np.array(_3DErros[:,0]).squeeze()
norm_axis = np.array(_3DErros).squeeze() * 1000
index, bar_width = np.arange(len(IMageNames)), 0.24
fig, ax = plt.subplots()
X = ax.bar(index, norm_axis[:, 0], bar_width, label="X")
Y = ax.bar(index + bar_width, norm_axis[:, 1], bar_width, label="Y")
Z = ax.bar(index + bar_width + bar_width, norm_axis[:, 2], bar_width, label="Z")
ax.set_xlabel('images')
ax.set_ylabel('errors in mm')
ax.set_title('3D error')
ax.set_xticks(index + bar_width / 3)
ax.set_xticklabels(IMageNames)
ax.legend()
plt.show()
def triangulation(kp1, kp2, T_1w, T_2w):
"""Triangulation to get 3D points
Args:
        kp1 (Nx2): keypoints in view 1 (normalized)
        kp2 (Nx2): keypoints in view 2 (normalized)
        T_1w (4x4): pose of view 1 w.r.t. world, i.e. T_1w (from w to 1)
T_2w (4x4): pose of view 2 w.r.t world, i.e. T_2w (from w to 2)
Returns:
X (3xN): 3D coordinates of the keypoints w.r.t world coordinate
X1 (3xN): 3D coordinates of the keypoints w.r.t view1 coordinate
X2 (3xN): 3D coordinates of the keypoints w.r.t view2 coordinate
"""
kp1_3D = np.ones((3, kp1.shape[0]))
kp2_3D = np.ones((3, kp2.shape[0]))
kp1_3D[0], kp1_3D[1] = kp1[:, 0].copy(), kp1[:, 1].copy()
kp2_3D[0], kp2_3D[1] = kp2[:, 0].copy(), kp2[:, 1].copy()
X = cv2.triangulatePoints(T_1w[:3], T_2w[:3], kp1_3D[:2], kp2_3D[:2])
X /= X[3]
X1 = T_1w[:3].dot(X)
X2 = T_2w[:3].dot(X)
return X[:3].T, X1.T, X2.T
def triangulate(R1,R2,t1,t2,K1,K2,D1,D2, pts1, pts2):
P1 = np.hstack([R1.T, -R1.T.dot(t1)])
P2 = np.hstack([R2.T, -R2.T.dot(t2)])
P1 = K1.dot(P1)
P2 = K2.dot(P2)
# Triangulate
_3d_points = []
for i,point in enumerate(pts1):
point3D = cv2.triangulatePoints(P1, P2, pts1[i], pts2[i]).T
point3D = point3D[:, :3] / point3D[:, 3:4]
_3d_points.append(point3D)
print('Triangulate _3d_points -> {}'.format(np.shape(_3d_points)))
return np.array(_3d_points).squeeze()
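# Hedged usage sketch (variable names are assumptions): with the left camera at the
# origin and (R, T) the left-to-right extrinsics, corresponding pixels from a stereo
# pair can be lifted to 3D with:
#   pts3d = triangulate(np.eye(3), R, np.zeros((3, 1)), T,
#                       K_left, K_right, D_left, D_right, pixels_left, pixels_right)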
def mai(R1,R2,t1,t2,imagePoint1,imagePoint2, K2=None,K1=None, D2=None,D1=None):
# Set up two cameras near each other
if K1 is None:
K = np.array([
[718.856, 0., 607.1928],
[0., 718.856, 185.2157],
[0., 0., 1.],
])
R1 = np.array([
[1., 0., 0.],
[0., 1., 0.],
[0., 0., 1.]
])
R2 = np.array([
[0.99999183, -0.00280829, -0.00290702],
[0.0028008, 0.99999276, -0.00257697],
[0.00291424, 0.00256881, 0.99999245]
])
t1 = np.array([[0.], [0.], [0.]])
t2 = np.array([[-0.02182627], [0.00733316], [0.99973488]])
# Corresponding image points
imagePoint1 = np.array([371.91915894, 221.53485107])
imagePoint2 = np.array([368.26071167, 224.86262512])
P1 = np.hstack([R1.T, -R1.T.dot(t1)])
P2 = np.hstack([R2.T, -R2.T.dot(t2)])
P1 = K1.dot(P1)
P2 = K2.dot(P2)
# Triangulate
point3D = cv2.triangulatePoints(P1, P2, imagePoint1, imagePoint2).T
point3D = point3D[:, :3] / point3D[:, 3:4]
print('Triangulate point3D -> {}'.format(point3D))
# Reproject back into the two cameras
rvec1, _ = cv2.Rodrigues(R1.T) # Change
rvec2, _ = cv2.Rodrigues(R2.T) # Change
p1, _ = cv2.projectPoints(point3D, rvec1, -t1, K1, distCoeffs=D1) # Change
p2, _ = cv2.projectPoints(point3D, rvec2, -t2, K2, distCoeffs=D2) # Change
# measure difference between original image point and reporjected image point
reprojection_error1 = np.linalg.norm(imagePoint1 - p1[0, :])
reprojection_error2 = np.linalg.norm(imagePoint2 - p2[0, :])
print('difference between original image point and reporjected image point')
print(reprojection_error1, reprojection_error2)
return p1,p2
class PointCloud_filter(object):
def __init__(self, file, img_file=None, img_file2=None, debug=True):
self.debug = debug
self.img_file = img_file
self.img_file2 = img_file2
self.name = os.path.basename(file).split('.')[0]
self.file = file
self.useVoxel, self.voxel_size = False, 0.15
self.lowerTemplate, self.showImage = False, True
self.showError = False
self.points_correspondences = None
self.OK = False
        self.useInitialPointCloud = False #use all points to fit or only the margins
self.chessBoard = False
self.applyICP_directly = False
self.s = .1 # scale
self.plotInit, self.axis_on, self.colour, self.Annotate = False, True, False, False
self.chess, self.corn, self.p1, self.p2, self.p3, self.ICP_finetune_plot = None, None, None, None, None, None
if self.showImage:
b = 1
self.pts = np.float32([[0, b, 0], [b, b, 0], [b, 0, 0], [-0.03, -0.03, 0]])
self.ImageNames = []
self._3DErros = []
self.criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 100, 0.0001)
self.axis = np.float32([[1, 0, 0], [0, 1, 0], [0, 0, -1]]).reshape(-1, 3)
self.objp = np.zeros((7 * 10, 3), np.float32)
self.objp[:, :2] = np.mgrid[0:10, 0:7].T.reshape(-1, 2) * self.s
self.fig = plt.figure(figsize=plt.figaspect(0.5))
self.fig.suptitle('Data collection', fontsize=16)
self.ax = self.fig.add_subplot(1, 2, 1, projection='3d')
#self.ax = self.fig.add_subplot(1, 2, 2, projection='3d')
self.readCameraIntrin()
self.QueryImg = cv2.imread(img_file)
self.ImageNames.append(os.path.basename(img_file))
if self.img_file2: # use stereo case
self.QueryImg2 = cv2.imread(img_file2)
if stereoRectify:
self.QueryImg = cv2.remap(src=self.QueryImg, map1=self.leftMapX, map2=self.leftMapY,
interpolation=cv2.INTER_LINEAR, dst=None, borderMode=cv2.BORDER_CONSTANT)
self.QueryImg2 = cv2.remap(src=self.QueryImg2, map1=self.rightMapX, map2=self.rightMapY,
interpolation=cv2.INTER_LINEAR, dst=None, borderMode=cv2.BORDER_CONSTANT)
gray_left = cv2.cvtColor(self.QueryImg, cv2.COLOR_BGR2GRAY)
ret_left, corners_left = cv2.findChessboardCorners(gray_left, (10, 7), None)
gray_right = cv2.cvtColor(self.QueryImg2, cv2.COLOR_BGR2GRAY)
ret_right, corners_right = cv2.findChessboardCorners(gray_right, (10, 7), None)
if ret_right and ret_left:
print('Found chessboard in both images')
self.chessBoard = True
corners2_left = cv2.cornerSubPix(gray_left, corners_left, (11, 11), (-1, -1), self.criteria)
self.corners2 = corners2_left
cv2.drawChessboardCorners(self.QueryImg, (10, 7), self.corners2, ret_left)
ret, self.rvecs, self.tvecs = cv2.solvePnP(self.objp, self.corners2, self.K_left, self.D_left)
imgpts, jac = cv2.projectPoints(self.axis, self.rvecs, self.tvecs, self.K_left, self.D_left)
self.QueryImg = self.draw(self.QueryImg, corners=corners2_left, imgpts=imgpts)
self.pixelsPoints = np.asarray(corners2_left).squeeze()
self.pixels_left = np.asarray(corners2_left).squeeze()
corners2_right = cv2.cornerSubPix(gray_right, corners_right, (11, 11), (-1, -1), self.criteria)
cv2.drawChessboardCorners(self.QueryImg2, (10, 7), corners2_right, ret_right)
self.pixels_right = np.asarray(corners2_right).squeeze()
self.T = np.array([-0.977, 0.004, 0.215])[:, np.newaxis]
angles = np.array([np.deg2rad(1.044), np.deg2rad(22.632), np.deg2rad(-.95)])
self.R = euler_matrix(angles)
#self.baseline =
self.T = np.array([-1.07, 0.004, 0.215])[:, np.newaxis]
self.baseline = abs(self.T[0])
print('baseline:{} m'.format(self.baseline))
self.focal_length, self.cx, self.cy = self.K[0, 0], self.K[0, 2], self.K[1, 2]
self.x_left, self.x_right = self.pixels_left, self.pixels_right
disparity = np.sum(np.sqrt((self.x_left - self.x_right) ** 2), axis=1)
# depth = baseline (meter) * focal length (pixel) / disparity-value (pixel) -> meter
self.depth = (self.baseline * self.focal_length / disparity)
print('depth:{}'.format(np.shape(self.depth)))
self.fxypxy = [self.K[0, 0], self.K[1, 1], self.cx, self.cy]
'''print('TRIANGULATE HERE==========================================')
P_1 = np.vstack((np.hstack((np.eye(3), np.zeros(3)[:, np.newaxis])), [0, 0, 0, 1])) # left camera
P_2 = np.vstack((np.hstack((self.R, self.T)), [0, 0, 0, 1])) # right camera
print('P1_{}, P_2{}, x_left:{}, x_right:{}'.format(np.shape(P_1), np.shape(P_2),
np.shape(self.x_left), np.shape(self.x_right)))
X_w, X1, X2 = triangulation(self.x_left,self.x_right,P_1,P_2)
print('X_w:{}, X1:{}, X2:{}, '.format(np.shape(X_w), np.shape(X1), np.shape(X2)))
print(X_w[0])
print(X1[0])
print(X2[0])'''
'''R1 = np.eye(3)
R2 = self.R
t1 = np.array([[0.], [0.], [0.]])
t2 = self.T
# Corresponding image points
imagePoint1 = np.array([371.91915894, 221.53485107])
imagePoint2 = np.array([368.26071167, 224.86262512])
imagePoint1 = self.x_left[0]
imagePoint2 = self.x_right[0]
print('imagePoint1:{}, imagePoint2:{}'.format(np.shape(imagePoint1), np.shape(imagePoint2)))
print('self.K_left ')
print(self.K_left)
print('self.K_right ')
print(self.K_right)
p1,p2 = test(R1,R2,t1,t2,imagePoint1,imagePoint2,K1=self.K_left,K2=self.K_right, D1=self.D_left,D2=self.D_right)
p1 = np.array(p1).squeeze().astype(int)
p2 = np.array(p2).squeeze().astype(int)
print('p1:{}, p2:{}'.format(np.shape(p1), np.shape(p2)))
#d2 = distance_matrix(X_w, X_w)
#print('d2:{}'.format(d2))
cv2.circle(self.QueryImg, (p1[0],p1[1]), 7, (255, 0, 0), 7)
cv2.circle(self.QueryImg2, (p2[0], p2[1]), 7, (255, 0, 0), 7)
cv2.imshow('QueryImg', cv2.resize(self.QueryImg,None,fx=.5,fy=.5))
cv2.imshow('QueryImg2', cv2.resize(self.QueryImg2, None, fx=.5, fy=.5))
cv2.waitKey(0)
cv2.destroyAllWindows()'''
else:
self.chessBoard = False
self.useVoxel = False
print('No chessboard found in both images, trying charuco markers')
corners2_left, ids_left, rejectedImgPoints = aruco.detectMarkers(gray_left, self.ARUCO_DICT)
corners2_left, ids_left, _, _ = aruco.refineDetectedMarkers(image=gray_left,
board=self.calibation_board,
detectedCorners=corners2_left,
detectedIds=ids_left,
rejectedCorners=rejectedImgPoints,
cameraMatrix=self.K_left,
distCoeffs=self.D_left)
corners2_right, ids_right, rejectedImgPoints = aruco.detectMarkers(gray_right, self.ARUCO_DICT)
corners2_right, ids_right, _, _ = aruco.refineDetectedMarkers(image=gray_right,
board=self.calibation_board,
detectedCorners=corners2_right,
detectedIds=ids_right,
rejectedCorners=rejectedImgPoints,
cameraMatrix=self.K_right,
distCoeffs=self.D_right)
if ids_left is not None and ids_right is not None:
print('Found charuco board in both images')
retval_left, self.rvecs, self.tvecs = aruco.estimatePoseBoard(corners2_left, ids_left,
self.calibation_board,
self.K_left, self.D_left, None,
None)
retval_right, self.rvecs_right, self.tvecs_right = aruco.estimatePoseBoard(corners2_right,
ids_right,
self.calibation_board,
self.K_right,
self.D_right, None,
None)
if retval_left and retval_right:
self.QueryImg = aruco.drawAxis(self.QueryImg, self.K_left, self.D_left, self.rvecs,
self.tvecs, 0.3)
self.QueryImg = aruco.drawDetectedMarkers(self.QueryImg, corners2_left, ids_left,
borderColor=(0, 0, 255))
b = 1
imgpts, _ = cv2.projectPoints(self.pts, self.rvecs_right, self.tvecs_right, self.K_right,
self.D_right)
self.corners2_right = np.append(imgpts, np.mean(imgpts, axis=0)).reshape(-1, 2)
self.dst, jacobian = cv2.Rodrigues(self.rvecs)
a, circle_tvec, b = .49, [], 1
circle_tvec.append(
np.asarray(self.tvecs).squeeze() + np.dot(self.dst, np.asarray([a, a, 0])))
circle_tvec = np.mean(circle_tvec, axis=0)
self.QueryImg = aruco.drawAxis(self.QueryImg, self.K_left, self.D_left, self.rvecs,
circle_tvec, 0.2)
imgpts, _ = cv2.projectPoints(self.pts, self.rvecs, self.tvecs, self.K_left, self.D_left)
self.corners2 = np.append(imgpts, np.mean(imgpts, axis=0)).reshape(-1, 2)
self.pt_dict = {}
for i in range(len(self.pts)):
self.pt_dict[tuple(self.pts[i])] = tuple(imgpts[i].ravel())
top_right = self.pt_dict[tuple(self.pts[0])]
bot_right = self.pt_dict[tuple(self.pts[1])]
bot_left = self.pt_dict[tuple(self.pts[2])]
top_left = self.pt_dict[tuple(self.pts[3])]
cv2.circle(self.QueryImg, top_right, 4, (0, 0, 255), 5)
cv2.circle(self.QueryImg, bot_right, 4, (0, 0, 255), 5)
cv2.circle(self.QueryImg, bot_left, 4, (0, 0, 255), 5)
cv2.circle(self.QueryImg, top_left, 4, (0, 0, 255), 5)
self.QueryImg = cv2.line(self.QueryImg, top_right, bot_right, (0, 255, 0), 4)
self.QueryImg = cv2.line(self.QueryImg, bot_right, bot_left, (0, 255, 0), 4)
self.QueryImg = cv2.line(self.QueryImg, bot_left, top_left, (0, 255, 0), 4)
self.QueryImg = cv2.line(self.QueryImg, top_left, top_right, (0, 255, 0), 4)
else:
print('Cannot estimate the charuco board pose in both images')
self.pixelsPoints = self.corners2.squeeze()
self.pixels_left = self.pixelsPoints
self.pixels_right = self.corners2_right.squeeze()
self.T = np.array([-0.977, 0.004, 0.215])[:, np.newaxis]
angles = np.array([np.deg2rad(1.044), np.deg2rad(22.632), np.deg2rad(-.95)])
self.R = euler_matrix(angles)
# self.baseline =
self.T = np.array([-1.07, 0.004, 0.215])[:, np.newaxis]
self.baseline = abs(self.T[0])
print('baseline:{} m'.format(self.baseline))
self.focal_length, self.cx, self.cy = self.K[0, 0], self.K[0, 2], self.K[1, 2]
self.x_left, self.x_right = self.pixels_left, self.pixels_right
disparity = np.sum(np.sqrt((self.x_left - self.x_right) ** 2), axis=1)
print('disparity:{}'.format(np.shape(disparity)))
# depth = baseline (meter) * focal length (pixel) / disparity-value (pixel) -> meter
self.depth = (self.baseline * self.focal_length / disparity)
print('depth:{}'.format(np.shape(self.depth)))
self.fxypxy = [self.K[0, 0], self.K[1, 1], self.cx, self.cy]
else:
print('No board found in either image!')
else:
# Undistortion
h, w = self.QueryImg.shape[:2]
newcameramtx, roi = cv2.getOptimalNewCameraMatrix(self.K, self.D, (w, h), 1, (w, h))
dst = cv2.undistort(self.QueryImg, self.K, self.D, None, newcameramtx)
x, y, w, h = roi
self.QueryImg = dst[y:y + h, x:x + w]
gray = cv2.cvtColor(self.QueryImg, cv2.COLOR_BGR2GRAY)
ret, corners = cv2.findChessboardCorners(gray, (10, 7), None)
if ret: # found chessboard
print('Found chessboard')
self.chessBoard = True
self.corners2 = cv2.cornerSubPix(gray, corners, (11, 11), (-1, -1), self.criteria)
cv2.drawChessboardCorners(self.QueryImg, (10, 7), corners, ret)
ret, self.rvecs, self.tvecs = cv2.solvePnP(self.objp, self.corners2, self.K, self.D)
# ret, self.rvecs, self.tvecs, inliers = cv2.solvePnPRansac(self.objp, self.corners2, self.K, self.D)
self.imgpts, jac = cv2.projectPoints(self.axis, self.rvecs, self.tvecs, self.K, self.D)
self.QueryImg = self.draw(self.QueryImg, self.corners2, self.imgpts)
self.pixelsPoints = np.asarray(self.corners2).squeeze()
else: # check for charuco
self.chessBoard = False
self.useVoxel = False
corners, ids, rejectedImgPoints = aruco.detectMarkers(gray, self.ARUCO_DICT)
corners, ids, rejectedImgPoints, recoveredIds = aruco.refineDetectedMarkers(
image=gray, board=self.calibation_board, detectedCorners=corners, detectedIds=ids,
rejectedCorners=rejectedImgPoints, cameraMatrix=self.K, distCoeffs=self.D)
if ids is not None:
print('found charuco board, ids:{}'.format(np.shape(ids)))
self.chessBoard = False
if len(ids) > 0:
retval, self.rvecs, self.tvecs = aruco.estimatePoseBoard(corners, ids,
self.calibation_board, self.K,
self.D, None, None)
if retval:
self.QueryImg = aruco.drawAxis(self.QueryImg, self.K, self.D, self.rvecs, self.tvecs,
0.3)
self.QueryImg = aruco.drawDetectedMarkers(self.QueryImg, corners, ids,
borderColor=(0, 0, 255))
self.dst, jacobian = cv2.Rodrigues(self.rvecs)
a, circle_tvec, b = .49, [], 1
circle_tvec.append(
np.asarray(self.tvecs).squeeze() + np.dot(self.dst, np.asarray([a, a, 0])))
circle_tvec = np.mean(circle_tvec, axis=0)
self.QueryImg = aruco.drawAxis(self.QueryImg, self.K, self.D, self.rvecs, circle_tvec,
0.2)
imgpts, _ = cv2.projectPoints(self.pts, self.rvecs, self.tvecs, self.K, self.D)
self.corners2 = np.append(imgpts, np.mean(imgpts, axis=0)).reshape(-1, 2)
self.pt_dict = {}
for i in range(len(self.pts)):
self.pt_dict[tuple(self.pts[i])] = tuple(imgpts[i].ravel())
top_right = self.pt_dict[tuple(self.pts[0])]
bot_right = self.pt_dict[tuple(self.pts[1])]
bot_left = self.pt_dict[tuple(self.pts[2])]
top_left = self.pt_dict[tuple(self.pts[3])]
cv2.circle(self.QueryImg, top_right, 4, (0, 0, 255), 5)
cv2.circle(self.QueryImg, bot_right, 4, (0, 0, 255), 5)
cv2.circle(self.QueryImg, bot_left, 4, (0, 0, 255), 5)
cv2.circle(self.QueryImg, top_left, 4, (0, 0, 255), 5)
self.QueryImg = cv2.line(self.QueryImg, top_right, bot_right, (0, 255, 0), 4)
self.QueryImg = cv2.line(self.QueryImg, bot_right, bot_left, (0, 255, 0), 4)
self.QueryImg = cv2.line(self.QueryImg, bot_left, top_left, (0, 255, 0), 4)
self.QueryImg = cv2.line(self.QueryImg, top_left, top_right, (0, 255, 0), 4)
else:
print('No board Found')
self.image_ax = self.fig.add_subplot(1, 2, 2)
#self.image_ax = self.fig.add_subplot(1, 2, 1)
self.image_ax.imshow(self.QueryImg)
self.image_ax.set_axis_off()
self.image_ax.set_xlabel('Y')
self.image_ax.set_ylabel('Z')
else:
self.fig = plt.figure()
self.ax = self.fig.add_subplot(111, projection="3d")
self.ax.set_xlabel('X', fontsize=10)
self.ax.set_ylabel('Y', fontsize=10)
self.ax.set_zlabel('Z', fontsize=10)
self.fig.tight_layout()
plt.subplots_adjust(left=.15, bottom=0.2)
#plt.subplots_adjust( bottom=0.2)
self.Rx, self.Ry, self.Rz = [np.deg2rad(-90), 0, np.deg2rad(-40)] if self.chessBoard else [0, 0, 0]
self.Tx, self.Ty, self.Tz = 0, 0, 0
self.board_origin = [self.Tx, self.Ty, self.Tz]
self.savePoints = Button(plt.axes([0.03, 0.45, 0.15, 0.04], ), 'filter points', color='white')
self.savePoints.on_clicked(self.getClosestPoints)
self.resetBtn = Button(plt.axes([0.03, 0.25, 0.15, 0.04], ), 'reset', color='white')
self.resetBtn.on_clicked(self.reset)
self.X_btn = Button(plt.axes([0.03, 0.9, 0.024, 0.04], ), 'X', color='red')
self.X_btn.on_clicked(self.Close)
self.OK_btn = Button(plt.axes([0.03, 0.83, 0.074, 0.04], ), 'OK', color='green')
self.OK_btn.on_clicked(self.OK_btnClick)
self.not_OK_btn = Button(plt.axes([0.105, 0.83, 0.074, 0.04], ), 'not OK', color='red')
self.not_OK_btn.on_clicked(self.not_OK_btnClick)
self.saveCorrespondences = Button(plt.axes([0.03, 0.76, 0.15, 0.04], ), 'Save points', color='white')
self.saveCorrespondences.on_clicked(self.savePointsCorrespondences)
self.fitChessboard = Button(plt.axes([0.03, 0.66, 0.15, 0.04], ), 'auto fit', color='white')
self.fitChessboard.on_clicked(self.auto_fitBoard)
# set up sliders
self.Rx_Slider = Slider(plt.axes([0.25, 0.15, 0.65, 0.03]), 'Rx', -180, 180.0, valinit=np.degrees(self.Rx))
self.Ry_Slider = Slider(plt.axes([0.25, 0.1, 0.65, 0.03]), 'Ry', -180, 180.0, valinit=np.degrees(self.Ry))
self.Rz_Slider = Slider(plt.axes([0.25, 0.05, 0.65, 0.03]), 'Rz', -180, 180.0, valinit=np.degrees(self.Rz))
self.Rx_Slider.on_changed(self.update_R)
self.Ry_Slider.on_changed(self.update_R)
self.Rz_Slider.on_changed(self.update_R)
self.check = CheckButtons(plt.axes([0.03, 0.3, 0.15, 0.12]), ('Axes', 'Black', 'Annotate'),
(self.axis_on, self.colour, self.Annotate))
self.check.on_clicked(self.func_CheckButtons)
# set up translation buttons
self.step = .1 # m
self.trigger = True
self.Tx_btn_plus = Button(plt.axes([0.05, 0.15, 0.04, 0.045]), '+Tx', color='white')
self.Tx_btn_plus.on_clicked(self.Tx_plus)
self.Tx_btn_minus = Button(plt.axes([0.12, 0.15, 0.04, 0.045]), '-Tx', color='white')
self.Tx_btn_minus.on_clicked(self.Tx_minus)
self.Ty_btn_plus = Button(plt.axes([0.05, 0.1, 0.04, 0.045]), '+Ty', color='white')
self.Ty_btn_plus.on_clicked(self.Ty_plus)
self.Ty_btn_minus = Button(plt.axes([0.12, 0.1, 0.04, 0.045]), '-Ty', color='white')
self.Ty_btn_minus.on_clicked(self.Ty_minus)
self.Tz_btn_plus = Button(plt.axes([0.05, 0.05, 0.04, 0.045]), '+Tz', color='white')
self.Tz_btn_plus.on_clicked(self.Tz_plus)
self.Tz_btn_minus = Button(plt.axes([0.12, 0.05, 0.04, 0.045]), '-Tz', color='white')
self.Tz_btn_minus.on_clicked(self.Tz_minus)
self.Tx_flip = Button(plt.axes([0.17, 0.15, 0.04, 0.045]), 'FlipX', color='white')
self.Tx_flip.on_clicked(self.flipX)
self.Ty_flip = Button(plt.axes([0.17, 0.1, 0.04, 0.045]), 'FlipY', color='white')
self.Ty_flip.on_clicked(self.flipY)
self.Tz_flip = Button(plt.axes([0.17, 0.05, 0.04, 0.045]), 'FlipZ', color='white')
self.Tz_flip.on_clicked(self.flipZ)
self.radio = RadioButtons(plt.axes([0.03, 0.5, 0.15, 0.15], ), ('Final', 'Init'), active=0)
self.radio.on_clicked(self.colorfunc)
self.tag = None
self.circle_center = None
self.errors = {0: "Improper input parameters were entered.",
1: "The solution converged.",
2: "The number of calls to function has "
"reached maxfev = %d.",
3: "xtol=%f is too small, no further improvement "
"in the approximate\n solution "
"is possible.",
4: "The iteration is not making good progress, as measured "
"by the \n improvement from the last five "
"Jacobian evaluations.",
5: "The iteration is not making good progress, "
"as measured by the \n improvement from the last "
"ten iterations.",
'unknown': "An error occurred."}
self.legend_elements = [
Line2D([0], [0], marker='o', color='w', label='Original pointcloud', markerfacecolor='g', markersize=4),
Line2D([0], [0], marker='o', color='w', label='Corners', markerfacecolor='k', markersize=4),
Line2D([0], [0], marker='o', color='w', label='Margins', markerfacecolor='r', markersize=4),
]
def setUp(self):
self.getPointCoud()
self.axisEqual3D(centers=np.mean(self.point_cloud, axis=0))
self.board()
self.ax.legend(handles=self.legend_elements, loc='best')
if self.showImage:
self.getDepth_Inside_Outside()
self.fitNewPlan()
def auto_fitBoard(self, args):
# estimate 3D-R and 3D-t between chess and PointCloud
# Inital guess of the transformation
x0 = np.array([np.degrees(self.Rx), np.degrees(self.Ry), np.degrees(self.Rz), self.Tx, self.Ty, self.Tz])
report = {"error": [], "template": []}
def f_min(x):
self.Rx, self.Ry, self.Rz = np.deg2rad(x[0]), np.deg2rad(x[1]), np.deg2rad(x[2])
self.Tx, self.Ty, self.Tz = x[3], x[4], x[5]
template = self.board(plot=False)
if self.useInitialPointCloud:
dist_mat = distance_matrix(template, self.point_cloud)
else:
dist_mat = distance_matrix(template, self.corners_)
err_func = dist_mat.sum(axis=1) # N x 1
# err_func = dist_mat.sum(axis=0) # N x 1
if self.debug:
print('errors = {}, dist_mat:{}, err_func:{}'.format(round(np.sum(err_func), 2), np.shape(dist_mat),
np.shape(err_func)))
report["error"].append(np.sum(err_func))
report["template"].append(template)
return err_func
maxIters = 700
sol, status = leastsq(f_min, x0, ftol=1.49012e-07, xtol=1.49012e-07, maxfev=maxIters)
print('sol:{}, status:{}'.format(sol, status))
print(self.errors[status])
if self.chess:
self.chess.remove()
if self.corn:
self.corn.remove()
if self.ICP_finetune_plot:
self.ICP_finetune_plot.remove()
self.lowerTemplate = False
self.board()
point_cloud = np.asarray(self.point_cloud, dtype=np.float32)
template = np.asarray(report["template"][0], dtype=np.float32) if self.applyICP_directly else np.asarray(
self.template_cloud, dtype=np.float32)
converged, self.transf, estimate, fitness = self.ICP_finetune(template, point_cloud)
# converged, self.transf, estimate, fitness = self.ICP_finetune(point_cloud,template)
self.estimate = np.array(estimate)
if self.chessBoard:
self.ICP_finetune_plot = self.ax.scatter(self.estimate[:, 0], self.estimate[:, 1], self.estimate[:, 2],
c='k', marker='o', alpha=0.8, s=4)
else:
idx = np.arange(start=0, stop=100, step=1)
idx = np.delete(idx, [44, 45, 54, 55])
cornersToPLot = self.estimate[idx, :]
self.ICP_finetune_plot = self.ax.scatter(cornersToPLot[:, 0], cornersToPLot[:, 1], cornersToPLot[:, 2],
c='k', marker='o', alpha=0.8, s=4)
self.trigger = False
# set values of sol to Sliders
self.Rx_Slider.set_val(np.rad2deg(self.Rx))
self.Ry_Slider.set_val(np.rad2deg(self.Ry))
self.Rz_Slider.set_val(np.rad2deg(self.Rz))
if self.chess:
self.chess.remove()
if self.corn:
self.corn.remove()
self.trigger = True
self.board()
self.AnnotateEdges()
self.fig.canvas.draw_idle()
if self.showError:
print('min error:{} , at index:{}'.format(np.min(report["error"]), np.argmin(report["error"])))
rep = plt.figure(figsize=(15, 8))
plt.xlim(0, len(report["error"]) + 1)
plt.xlabel('Iteration')
plt.ylabel('RMSE')
plt.yticks(color='w')
plt.plot(np.arange(len(report["error"])) + 1, report["error"])
print('Start animation gif')
def update_graph(num):
data = np.asarray(report["template"][num])
graph._offsets3d = (data[:, 0], data[:, 1], data[:, 2])
title.set_text('Iteration {}'.format(num))
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
title = ax.set_title('3D Test')
data = report["template"][0]
graph = ax.scatter(data[:, 0], data[:, 1], data[:, 2])
ax.scatter(self.point_cloud[:, 0], self.point_cloud[:, 1], self.point_cloud[:, 2])
ani = animation.FuncAnimation(fig, update_graph, 101, interval=2, blit=False, repeat=False)
ani.save('myAnimation.gif', writer='imagemagick', fps=30)
print('Animation done')
plt.show()
def flipX(self, event):
self.Rx_Slider.set_val(np.rad2deg(self.Rx + np.pi))
self.update_R(0)
def flipY(self, event):
self.Ry_Slider.set_val(np.rad2deg(self.Ry + np.pi))
self.update_R(0)
def flipZ(self, event):
self.Rz_Slider.set_val(np.rad2deg(self.Rz + np.pi))
self.update_R(0)
def update_R(self, val):
if self.trigger:
if self.chess:
self.chess.remove()
if self.corn:
self.corn.remove()
self.Rx = np.deg2rad(self.Rx_Slider.val)
self.Ry = np.deg2rad(self.Ry_Slider.val)
self.Rz = np.deg2rad(self.Rz_Slider.val)
self.board()
self.fig.canvas.draw_idle()
def board(self, plot=True, given_origin=None, angle=None):
self.board_origin = [self.Tx, self.Ty, self.Tz] if given_origin is None else given_origin
if self.chessBoard:
self.nCols, self.nRows, org = 7 + 2, 10 + 2, np.asarray(self.board_origin)
#org[0] -= self.nCols / 2
#org[1] -= self.nRows / 2
org[0] -= 4
org[1] -= 6
#org = np.zeros(3)
if self.lowerTemplate:
nrCols, nrRows = 2, 3
else:
nrCols, nrRows = self.nCols, self.nRows
#nrCols, nrRows = self.nCols+1, self.nRows+1 #remove later
print('org:{}, self.nCols - >{}, nrCols:{}'.format(org,self.nCols,nrCols))
X, Y = np.linspace(org[0], org[0] + self.nCols, num=nrCols), np.linspace(org[1], org[1] + self.nRows,num=nrRows)
X, Y = np.linspace(org[0], org[0] + self.nCols-1, num=nrCols), np.linspace(org[1], org[1] + self.nRows-1,
num=nrRows)
print('X:{}'.format(X))
X, Y = np.meshgrid(X, Y)
Z = np.full(np.shape(X), org[2])
colors, colortuple = np.empty(X.shape, dtype=str), ('k', 'w')
for y in range(nrCols):
for x in range(nrRows):
colors[x, y] = colortuple[(x + y) % len(colortuple)]
colors[0, 0] = 'r'
alpha = 0.65
else:
self.nCols, self.nRows, org = 10, 10, np.asarray(self.board_origin)
org[0] -= self.nCols / 2
org[1] -= self.nRows / 2
# nrCols, nrRows = 4,4z
nrCols, nrRows = self.nCols, self.nRows
# nrCols, nrRows = 20, 20
X, Y = np.linspace(org[0], org[0] + self.nCols, num=nrCols), np.linspace(org[1], org[1] + self.nRows,
num=nrRows)
X, Y = np.meshgrid(X, Y)
Z = np.full(np.shape(X), org[2])
alpha = 0.25
angles = np.array([self.Rx, self.Ry, self.Rz]) if angle is None else np.array(angle)
Rot_matrix = self.eulerAnglesToRotationMatrix(angles)
X, Y, Z = X * self.s, Y * self.s, Z * self.s
corners = np.transpose(np.array([X, Y, Z]), (1, 2, 0))
init = corners.reshape(-1, 3)
print('corners-----------------------------------------------------')
#print(init)
print('corners -> {}'.format(np.shape(init)))
dist_Lidar = distance_matrix(init, init)
print('dist_Lidar corners---------------------------------------------------------')
print(dist_Lidar[0, :11])
translation = np.mean(init, axis=0) # get the mean point
corners = np.subtract(corners, translation) # substract it from all the other points
X, Y, Z = np.transpose(np.add(np.dot(corners, Rot_matrix), translation), (2, 0, 1))
# corners = np.transpose(np.array([X, Y, Z]), (1, 2, 0)).reshape(-1, 3)
corners = np.transpose(np.array([X, Y, Z]), (2, 1, 0)).reshape(-1, 3)
if plot:
if self.chessBoard:
self.chess = self.ax.plot_surface(X, Y, Z, facecolors=colors, linewidth=0.2, cmap='gray', alpha=alpha)
else:
self.chess = self.ax.plot_surface(X, Y, Z, linewidth=0.2, cmap='gray', alpha=alpha)
idx = np.arange(start=0, stop=100, step=1)
idx = np.delete(idx, [44, 45, 54, 55])
cornersToPLot = corners[idx, :]
self.corn = self.ax.scatter(cornersToPLot[:, 0], cornersToPLot[:, 1], cornersToPLot[:, 2], c='tab:blue',
marker='o', s=5)
self.template_cloud = corners
return np.array(corners)
def getPointCoud(self, colorsMap='jet', skip=1, useRing = True):
# X, Y, Z, intensity, ring
if useRing:
originalCloud = np.array(np.load(self.file, mmap_mode='r'))[:,:5]
if InitLidar:
xyz = originalCloud[:, 0:3]
new_xyz = np.dot(xyz, Rot_matrix)
originalCloud[:, 0:3] = new_xyz
#mean_x = np.mean(originalCloud[:, 0])
#originalCloud[:, 0] = mean_x
df = pd.DataFrame(data=originalCloud, columns=["X", "Y", "Z","intens","ring"])
gp = df.groupby('ring')
keys = gp.groups.keys()
#groups = gp.groups
coolPoints, circlePoints = [],[]
for i in keys:
line = np.array(gp.get_group(i), dtype=float)
first, last = np.array(line[0], dtype=float)[:3], np.array(line[-1], dtype=float)[:3]
coolPoints.append(first)
coolPoints.append(last)
if self.chessBoard == False:
if len(line) > 50:
l = line[:,:3]
for i in range(2,len(l)-2,1):
d = np.linalg.norm(l[i]-l[i+1])
if d > 0.08: #half of the circle
circlePoints.append(l[i])
circlePoints.append(l[i+1])
self.coolPoints = np.array(coolPoints).squeeze()
self.ax.scatter(*self.coolPoints.T, color='r', marker='o', alpha=1, s=2)
print('coolPoints:{}, circlePoints:{}'.format(np.shape(self.coolPoints), np.shape(circlePoints)))
circlePoints = np.array(circlePoints)
if len(circlePoints)>0:
self.ax.scatter(*circlePoints.T, color='r', marker='o', alpha=1, s=5)
self.fitCircle(circlePoints)
#self.point_cloud = np.array(self.coolPoints, dtype=np.float32)
self.point_cloud = np.array(np.load(self.file, mmap_mode='r')[::skip, :3], dtype=np.float32)
if InitLidar:
xyz = self.point_cloud[:, 0:3]
new_xyz = np.dot(xyz, Rot_matrix)
self.point_cloud[:, 0:3] = new_xyz
# center the point_cloud
#mean_x = np.mean(self.point_cloud[:, 0])
#self.point_cloud[:, 0] = mean_x
self.point_cloud_mean = np.mean(self.point_cloud, axis=0)
self.Tx, self.Ty, self.Tz = self.point_cloud_mean
# self.point_cloud = self.point_cloud - self.point_cloud_mean
self.point_cloud_colors = np.array(np.load(self.file, mmap_mode='r'))[::skip, 3]
if self.plotInit:
cm = plt.get_cmap(colorsMap)
cNorm = matplotlib.colors.Normalize(vmin=min(self.point_cloud_colors), vmax=max(self.point_cloud_colors))
scalarMap = cmx.ScalarMappable(norm=cNorm, cmap=cm)
self.p1 = self.ax.scatter(self.point_cloud[:, 0], self.point_cloud[:, 1], self.point_cloud[:, 2],
color=scalarMap.to_rgba(self.point_cloud_colors), s=0.2)
else:
self.p = pcl.PointCloud(self.point_cloud)
inlier, outliner, coefficients = self.do_ransac_plane_segmentation(self.p, pcl.SACMODEL_PLANE,
pcl.SAC_RANSAC, 0.01)
#self.planeEquation(coef=np.array(coefficients).squeeze())
self.point_cloud_init = self.point_cloud.copy()
if self.useVoxel:
pcd = o3d.geometry.PointCloud()
pcd.points = o3d.utility.Vector3dVector(self.point_cloud)
self.point_cloud = np.array(pcd.voxel_down_sample(voxel_size=self.voxel_size).points)
# self.p1 = self.ax.scatter(outliner[:, 0], outliner[:, 1], outliner[:, 2], c='y', s=0.2)
self.p2 = self.ax.scatter(inlier[:, 0], inlier[:, 1], inlier[:, 2], c='g', s=0.2)
w, v = self.PCA(inlier)
point = np.mean(inlier, axis=0)
if self.chessBoard == False and self.circle_center:
#point[1:] = self.circle_center
point[[0,2]]= self.circle_center
w *= 2
if self.chessBoard==False and self.circle_center:
p = Circle(self.circle_center, self.circle_radius, alpha = .3, color='tab:blue')
self.ax.add_patch(p)
art3d.pathpatch_2d_to_3d(p, z=point[1], zdir="y")
self.p3 = self.ax.quiver([point[0]], [point[1]], [point[2]], [v[0, :] * np.sqrt(w[0])],
[v[1, :] * np.sqrt(w[0])],
[v[2, :] * np.sqrt(w[0])], linewidths=(1.8,))
def axisEqual3D(self, centers=None):
extents = np.array([getattr(self.ax, 'get_{}lim'.format(dim))() for dim in 'xyz'])
sz = extents[:, 1] - extents[:, 0]
# centers = np.mean(extents, axis=1) if centers is None
maxsize = max(abs(sz))
r = maxsize / 2
for ctr, dim in zip(centers, 'xyz'):
getattr(self.ax, 'set_{}lim'.format(dim))(ctr - r, ctr + r)
def planeEquation(self, coef):
a, b, c, d = coef
mean = np.mean(self.point_cloud, axis=0)
normal = [a, b, c]
d2 = -mean.dot(normal)
# print('d2:{}'.format(d2))
# print('mean:{}'.format(mean))
# print('The equation is {0}x + {1}y + {2}z = {3}'.format(a, b, c, d))
# plot the normal vector
startX, startY, startZ = mean[0], mean[1], mean[2]
startZ = (-normal[0] * startX - normal[1] * startY - d) * 1. / normal[2]
self.ax.quiver([startX], [startY], [startZ], [normal[0]], [normal[1]], [normal[2]], linewidths=(3,),edgecolor="red")
def PCA(self, data, correlation=False, sort=True):
# data = nx3
mean = np.mean(data, axis=0)
data_adjust = data - mean
#: the data is transposed due to np.cov/corrcoef syntax
if correlation:
matrix = np.corrcoef(data_adjust.T)
else:
matrix = np.cov(data_adjust.T)
eigenvalues, eigenvectors = np.linalg.eig(matrix)
if sort:
#: sort eigenvalues and eigenvectors
sort = eigenvalues.argsort()[::-1]
eigenvalues = eigenvalues[sort]
eigenvectors = eigenvectors[:, sort]
return eigenvalues, eigenvectors
def eulerAnglesToRotationMatrix(self, theta):
R_x = np.array([[1, 0, 0],
[0, math.cos(theta[0]), -math.sin(theta[0])],
[0, math.sin(theta[0]), math.cos(theta[0])]
])
R_y = np.array([[math.cos(theta[1]), 0, math.sin(theta[1])],
[0, 1, 0],
[-math.sin(theta[1]), 0, math.cos(theta[1])]
])
R_z = np.array([[math.cos(theta[2]), -math.sin(theta[2]), 0],
[math.sin(theta[2]), math.cos(theta[2]), 0],
[0, 0, 1]
])
R = np.dot(R_z, np.dot(R_y, R_x))
return R
def do_ransac_plane_segmentation(self, pcl_data, pcl_sac_model_plane, pcl_sac_ransac, max_distance):
"""
Create the segmentation object
:param pcl_data: point cloud data
:param pcl_sac_model_plane: model type used to fit a plane
:param pcl_sac_ransac: RANdom SAmple Consensus method
:param max_distance: max distance for a point to be considered as fitting the model
:return: segmentation object
"""
seg = pcl_data.make_segmenter()
seg.set_model_type(pcl_sac_model_plane)
seg.set_method_type(pcl_sac_ransac)
seg.set_distance_threshold(max_distance)
inliers, coefficients = seg.segment()
inlier_object = pcl_data.extract(inliers, negative=False)
outlier_object = pcl_data.extract(inliers, negative=True)
if len(inliers) <= 1:
outlier_object = [0, 0, 0]
inlier_object, outlier_object = np.array(inlier_object), np.array(outlier_object)
return inlier_object, outlier_object, coefficients
def func_CheckButtons(self, label):
if label == 'Axes':
if self.axis_on:
self.ax.set_axis_off()
self.axis_on = False
else:
self.ax.set_axis_on()
self.axis_on = True
elif label == 'Black':
if self.colour:
self.colour = False
self.ax.set_facecolor((1, 1, 1))
else:
self.colour = True
self.ax.set_facecolor((0, 0, 0))
elif label == 'Annotate':
self.Annotate = not self.Annotate
self.AnnotateEdges()
self.fig.canvas.draw_idle()
def ICP_finetune(self, points_in, points_out):
cloud_in = pcl.PointCloud()
cloud_out = pcl.PointCloud()
cloud_in.from_array(points_in)
cloud_out.from_array(points_out)
# icp = cloud_in.make_IterativeClosestPoint()
icp = cloud_out.make_IterativeClosestPoint()
converged, transf, estimate, fitness = icp.icp(cloud_in, cloud_out)
print('fitness:{}, converged:{}, transf:{}, estimate:{}'.format(fitness, converged, np.shape(transf),
np.shape(estimate)))
return converged, transf, estimate, fitness
def colorfunc(self, label):
if label == 'Init':
self.plotInit = True
else:
self.plotInit = False
self.reset(0)
def OK_btnClick(self, args):
self.OK = True
plt.close()
def not_OK_btnClick(self, args):
self.OK = False
plt.close()
def Close(self, args):
global globalTrigger
globalTrigger = False
plt.close()
def reset(self, args):
self.ax.cla()
self.getPointCoud()
self.axisEqual3D(centers=np.mean(self.point_cloud, axis=0))
self.Rx, self.Ry, self.Rz = 0, 0, 0
self.Tx, self.Ty, self.Tz = 0, 0, 0
self.board_origin = [self.Tx, self.Ty, self.Tz]
self.board()
self.fig.canvas.draw_idle()
def getClosestPoints(self, arg):
dist_mat = distance_matrix(self.template_cloud, self.point_cloud_init)
self.neighbours = np.argsort(dist_mat, axis=1)[:, 0]
self.finaPoints = np.asarray(self.point_cloud_init[self.neighbours, :]).squeeze()
if self.chess:
self.chess.remove()
if self.corn:
self.corn.remove()
if self.p3:
self.p3.remove()
if self.p2:
self.p2.remove()
if self.p1:
self.p1.remove()
self.scatter_finalPoints = self.ax.scatter(self.finaPoints[:, 0], self.finaPoints[:, 1], self.finaPoints[:, 2],
c='k', marker='x', s=1)
self.corn = self.ax.scatter(self.template_cloud[:, 0], self.template_cloud[:, 1], self.template_cloud[:, 2],
c='blue', marker='o', s=5)
self.fig.canvas.draw_idle()
def Tz_plus(self, event):
self.Tz += self.step
self.update_R(0)
def Tz_minus(self, event):
self.Tz -= self.step
self.update_R(0)
def Ty_plus(self, event):
self.Ty += self.step
self.update_R(0)
def Ty_minus(self, event):
self.Ty -= self.step
self.update_R(0)
def Tx_plus(self, event):
self.Tx += self.step
self.update_R(0)
def Tx_minus(self, event):
self.Tx -= self.step
self.update_R(0)
def readCameraIntrin(self):
name = 'inside'
name = 'outside'
self.camera_model = load_obj('{}_combined_camera_model'.format(name))
self.camera_model_rectify = load_obj('{}_combined_camera_model_rectify'.format(name))
self.K_left = self.camera_model['K_left']
self.K_right = self.camera_model['K_right']
self.D_left = self.camera_model['D_left']
self.D_right = self.camera_model['D_right']
# self.K_left = self.camera_model['K_right']
# self.K_right = self.camera_model['K_left']
# self.D_left = self.camera_model['D_right']
# self.D_right = self.camera_model['D_left']
# print('K_left')
# print(self.K_left)
# print('K_right')
# print(self.K_right)
self.R = self.camera_model['R']
self.T = self.camera_model['T']
self.T = np.array([-0.977, 0.004, 0.215])[:, np.newaxis]
angles = np.array([np.deg2rad(1.044), np.deg2rad(22.632), np.deg2rad(-.95)])
self.R = euler_matrix(angles)
#self.T = np.array([-0.98, 0., 0.12])[:, np.newaxis]
#self.T = np.array([-.75, 0., 0.])[:, np.newaxis]
#print('self T after {}'.format(np.shape(self.T)))
#angles = np.array([np.deg2rad(0.68), np.deg2rad(22.66), np.deg2rad(-1.05)])
#self.R = euler_matrix(angles)
#Q = self.camera_model_rectify['Q']
#roi_left, roi_right = self.camera_model_rectify['roi_left'], self.camera_model_rectify['roi_right']
self.leftMapX, self.leftMapY = self.camera_model_rectify['leftMapX'], self.camera_model_rectify['leftMapY']
self.rightMapX, self.rightMapY = self.camera_model_rectify['rightMapX'], self.camera_model_rectify['rightMapY']
img_shape = (1936, 1216)
print('img_shape:{}'.format(img_shape))
R1, R2, P1, P2, Q, roi_left, roi_right = cv2.stereoRectify(self.K_left, self.D_left, self.K_right, self.D_right,
imageSize=img_shape,
R=self.camera_model['R'], T=self.camera_model['T'],
flags=cv2.CALIB_ZERO_DISPARITY,
alpha=-1
#alpha=0
)
self.leftMapX, self.leftMapY = cv2.initUndistortRectifyMap(
self.K_left, self.D_left, R1,
P1, img_shape, cv2.CV_32FC1)
self.rightMapX, self.rightMapY = cv2.initUndistortRectifyMap(
self.K_right, self.D_right, R2,
P2, img_shape, cv2.CV_32FC1)
self.K = self.K_right
self.D = self.D_right
try:
N = 5
aruco_dict = aruco.custom_dictionary(0, N, 1)
aruco_dict.bytesList = np.empty(shape=(4, N - 1, N - 1), dtype=np.uint8)
A = np.array([[0, 0, 1, 0, 0], [0, 1, 0, 1, 0], [0, 1, 0, 1, 0], [0, 1, 1, 1, 0], [0, 1, 0, 1, 0]],
dtype=np.uint8)
aruco_dict.bytesList[0] = aruco.Dictionary_getByteListFromBits(A)
R = np.array([[1, 1, 1, 1, 0], [1, 0, 0, 1, 0], [1, 1, 1, 0, 0], [1, 0, 0, 1, 0], [1, 0, 0, 0, 1]],
dtype=np.uint8)
aruco_dict.bytesList[1] = aruco.Dictionary_getByteListFromBits(R)
V = np.array([[1, 0, 0, 0, 1], [1, 0, 0, 0, 1], [1, 0, 0, 0, 1], [0, 1, 0, 1, 0], [0, 0, 1, 0, 0]],
dtype=np.uint8)
O = np.array([[0, 1, 1, 1, 0], [1, 0, 0, 0, 1], [1, 0, 0, 0, 1], [1, 0, 0, 0, 1], [0, 1, 1, 1, 0]],
dtype=np.uint8)
aruco_dict.bytesList[2] = aruco.Dictionary_getByteListFromBits(O)
aruco_dict.bytesList[3] = aruco.Dictionary_getByteListFromBits(V)
self.ARUCO_DICT = aruco_dict
self.calibation_board = aruco.GridBoard_create(
markersX=2, markersY=2,
markerLength=0.126, markerSeparation=0.74,
dictionary=self.ARUCO_DICT)
except:
print('Install Aruco')
def draw(self, img, corners, imgpts):
corner = tuple(corners[0].ravel())
cv2.line(img, corner, tuple(imgpts[0].ravel()), (255, 0, 0), 5)
cv2.line(img, corner, tuple(imgpts[1].ravel()), (0, 255, 0), 5)
cv2.line(img, corner, tuple(imgpts[2].ravel()), (0, 0, 255), 5)
return img
def annotate3D(self, ax, s, *args, **kwargs):
self.tag = Annotation3D(s, *args, **kwargs)
ax.add_artist(self.tag)
def AnnotateEdges(self, giveAX=None, givenPoints=None):
if self.Annotate:
# add vertices annotation.
if giveAX is None:
if self.lowerTemplate or self.chessBoard == False:
if self.chessBoard == False:
pts = np.asarray(self.template_cloud.copy()).reshape(self.nCols, self.nRows, 3)
idx = np.array([44, 45, 54, 55])
center = np.mean(self.template_cloud[idx], axis=0)
self.templatePoints = [pts[0, -1, :], pts[-1, -1, :], pts[-1, 0, :], pts[0, 0, :], center]
self.templatePoints = np.array(self.templatePoints).reshape(-1, 3)
cornersToPLot = self.estimate[idx, :]
for j, xyz_ in enumerate(self.templatePoints):
self.annotate3D(self.ax, s=str(j), xyz=xyz_, fontsize=12, xytext=(-1, 1),
textcoords='offset points', ha='right', va='bottom')
else:
for j, xyz_ in enumerate(self.template_cloud):
self.annotate3D(self.ax, s=str(j), xyz=xyz_, fontsize=8, xytext=(-1, 1),
textcoords='offset points', ha='right', va='bottom')
else:
try:
templatePoints = np.asarray(self.template_cloud.copy()).reshape(self.nCols, self.nRows, 3)[
1:self.nCols - 1, 1:self.nRows - 1, :]
except:
templatePoints = np.asarray(self.template_cloud.copy()).reshape(self.nCols+1, self.nRows+1, 3)[
1:self.nCols - 1, 1:self.nRows - 1, :]
# templatePoints = np.asarray(self.template_cloud.copy()).reshape(self.nRows,self.nCols, 3)[1:self.nRows-1,1:self.nCols-1,:]
self.templatePoints = np.array(templatePoints).reshape(-1, 3)
for j, xyz_ in enumerate(self.templatePoints):
self.annotate3D(self.ax, s=str(j), xyz=xyz_, fontsize=8, xytext=(-3, 3),
textcoords='offset points', ha='right', va='bottom')
else:
for j, xyz_ in enumerate(givenPoints):
self.annotate3D(giveAX, s=str(j), xyz=xyz_, fontsize=10, xytext=(-3, 3),
textcoords='offset points', ha='right', va='bottom')
if self.showImage:
# annotate image
points = np.asarray(self.corners2).squeeze()
font, lineType = cv2.FONT_HERSHEY_SIMPLEX, 2 if self.chessBoard else 10
for i, point in enumerate(points):
point = tuple(point.ravel())
cv2.putText(self.QueryImg, '{}'.format(i), point, font, 1 if self.chessBoard else 3, (0, 0, 0)
if self.chessBoard else (255, 0, 0), lineType)
self.image_ax.imshow(self.QueryImg)
def getCamera_XYZ_Stereo(self):
#cam_rot, jac = cv2.Rodrigues(self.rvecs)
#mR = np.matrix(cam_rot)
#mT = np.matrix(self.tvecs)
#cam_trans = -mR * mT
_3DPoints = []
for i, pixel in enumerate(self.x_left):
u, v = pixel.ravel()
u, v = int(u), int(v)
distance = self.depth[i]
pt = np.array([u, v, distance])
pt[0] = pt[2] * (pt[0] - self.fxypxy[2]) / self.fxypxy[0]
pt[1] = pt[2] * (pt[1] - self.fxypxy[3]) / self.fxypxy[1]
# pt = pt.dot(cam_rot.T) + self.tvecs
_3DPoints.append(pt)
print('_3DPoints {}'.format(np.shape(_3DPoints)))
print('tvec : {}'.format(np.asarray(self.tvecs)))
return np.array(_3DPoints)  # return the reconstructed 3-D points
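# --- Illustrative sketch (not part of the original class) ---------------------------
# The comments above derive depth as baseline * focal_length / disparity and then
# back-project pixels with X = Z*(u-cx)/fx, Y = Z*(v-cy)/fy. The helper below restates
# that pinhole-model math as a standalone function; the numbers in the example are
# made up, only the 1.07 m baseline mirrors self.T used earlier.
import numpy as np

def backproject_pixel(u, v, disparity_px, baseline_m, fx, fy, cx, cy):
    """Back-project one pixel with a stereo disparity to a 3-D point (left-camera frame)."""
    z = baseline_m * fx / disparity_px   # depth in metres
    x = z * (u - cx) / fx
    y = z * (v - cy) / fy
    return np.array([x, y, z])

# example with made-up intrinsics: a 42 px disparity lands roughly 35 m away
# backproject_pixel(u=980.0, v=610.0, disparity_px=42.0, baseline_m=1.07,
#                   fx=1400.0, fy=1400.0, cx=968.0, cy=608.0)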
import numpy as np
import pandas as pd
# In this example correct classification requires labels of -1 and +1, which makes the sign function convenient
def sigmoid(inX):  # sigmoid function
return 1.0 / (1 + np.exp(-inX))
def stump_classify(data_matrix, dimen, thresh_val, thresh_ineq): # just classify the data
# Compare against the threshold: one side becomes -1, the other +1, done with an array comparison
ret_array = np.ones((np.shape(data_matrix)[0], 1))
# decide which side gets -1
if thresh_ineq == 'lt':
ret_array[data_matrix[:, dimen] <= thresh_val] = -1.0
else:
ret_array[data_matrix[:, dimen] > thresh_val] = -1.0
return ret_array
# This is a weak learner (a decision stump)
def build_stump(data_arr, class_labels, d):
# Iterate over all candidate splits via stump_classify and find the best decision stump
# "best" here is defined with respect to the weight vector d
data_matrix = np.mat(data_arr)
label_mat = np.mat(class_labels).T # make it a column vector
m, n = np.shape(data_matrix)
num_steps = 10.0 # number of steps across each feature's value range; stepping slightly outside it does not matter
best_stump = {} # this dict stores the best stump found for the given weight vector d
best_clas_est = np.mat(np.zeros((m, 1)))
min_error = np.inf # init error sum, to +infinity
for i in range(n): # outer loop: over all feature dimensions
range_min = data_matrix[:, i].min() # min value of the current feature column
range_max = data_matrix[:, i].max()
step_size = (range_max - range_min) / num_steps # set the step size
# middle loop: over all threshold positions for the current feature
for j in range(-1, int(num_steps) + 1): # loop over all range in current dimension
# toggle the inequality between less-than and greater-than
for inequal in ['lt', 'gt']: # go over less than and greater than
thresh_val = (range_min + float(j) * step_size) # slide the threshold along the range
# call the stump classifier defined above
predicted_vals = stump_classify(data_matrix, i, thresh_val,
inequal) # call stump classify with i, j, lessThan
err_arr = np.mat(np.ones((m, 1)))
err_arr[predicted_vals == label_mat] = 0 # correctly classified samples get error 0
weighted_error = d.T * err_arr # calc total error multiplied by D
# print("split: dim %d, thresh %.2f, thresh ineqal: %s, the weighted error is %.3f" % (
# i, thresh_val, inequal, weighted_error))
if weighted_error < min_error:
# if this is the best stump so far, record all of its settings
min_error = weighted_error
best_clas_est = predicted_vals.copy()
best_stump['dim'] = i
best_stump['thresh'] = thresh_val
best_stump['ineq'] = inequal
return best_stump, min_error, best_clas_est
def adaboost_trainDS(data_arr, class_labels, num_iter=40):
# inputs: dataset, class labels, number of iterations num_iter
# DS = decision stump, the most popular weak learner; in fact any classifier could serve as the weak learner
weak_class_arr = [] # list holding the weak decision stumps
m = np.shape(data_arr)[0]
d = np.mat(np.ones((m, 1)) / m) # initialise the sample weights uniformly so they sum to 1
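# --- Illustrative sketch (assumption, not the original code) ------------------------
# adaboost_trainDS is cut off above. A typical completion of this kind of AdaBoost
# trainer (alpha from the weighted error, exponential re-weighting of the samples,
# early stop once the aggregated vote is perfect) looks like the sketch below; it only
# relies on build_stump defined above.
def adaboost_train_sketch(data_arr, class_labels, num_iter=40):
    weak_class_arr = []
    m = np.shape(data_arr)[0]
    d = np.mat(np.ones((m, 1)) / m)            # uniform sample weights
    agg_class_est = np.mat(np.zeros((m, 1)))   # running weighted vote
    for _ in range(num_iter):
        best_stump, error, class_est = build_stump(data_arr, class_labels, d)
        # stump weight; the 1e-16 guards against division by zero
        alpha = float(0.5 * np.log((1.0 - error) / max(error, 1e-16)))
        best_stump['alpha'] = alpha
        weak_class_arr.append(best_stump)
        # increase the weight of misclassified samples, decrease the rest, renormalise
        expon = np.multiply(-1 * alpha * np.mat(class_labels).T, class_est)
        d = np.multiply(d, np.exp(expon))
        d = d / d.sum()
        # stop early once the aggregated classifier makes no training errors
        agg_class_est += alpha * class_est
        agg_errors = np.multiply(np.sign(agg_class_est) != np.mat(class_labels).T,
                                 np.ones((m, 1)))
        if agg_errors.mean() == 0.0:
            break
    return weak_class_arr, agg_class_est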
#PyPI modules
import matplotlib.pyplot as plt
import numpy as np
import gym
from gym import spaces
class two_zone_HVAC(gym.Env):
"""
Two-zone HVAC thermal model following the gym interface.
The action is a temperature set-point (T_set_max_min gives its physical range),
exposed as a continuous action space normalised to [-1, 1].
"""
metadata = {'render.modes': ['console']}
def __init__(self, d, A = None, COP =4, T_set_max_min = [23., 26.]):
super(two_zone_HVAC, self).__init__()
#parameters
if A is not None:
self.A = A
else:
self.A = np.array([[0.4670736444788445, 0, 0.473590433381762, 0.027560814480025012, 0.02482360723716469, 0, 0],
[0, 0.169849447097808, 1.2326345328482877, -1.2018861561221592, -1.4566448096944626, 0.004739745164037462, 0.002503902132835721]])
# For the default parameters the step size should be on the order of 1e-3
#the steady-state equilibrium of the system is
self.COP = COP
self.d=d
self.T_set_max_min = T_set_max_min
#The control action is
#self.action_space = spaces.Box(low=23, high=26, shape=(1,), dtype=np.float32)
self.action_space = spaces.Box(low=-1, high=1, shape=(1,), dtype=np.float32)
self.observation_space = spaces.Box(low=np.array([-np.inf, -np.inf]), high=np.array([+np.inf, +np.inf]), shape=None, dtype=np.float32)
self._get_state()
#lists to save the states and actions
self.state_trajectory = []
self.action_trajectory = []
self.count_steps = 0 # counts the number of steps taken
self.total_no_of_steps = np.shape(self.d)[0]
def _get_state(self):
#initializing the state vector near to the desired values
T = np.random.uniform(low = 22, high = 25)
Q = -np.random.uniform(low = 20, high = 60)
self.state = np.array([T, Q])
def _set_state(self, T, Q):
#using this function we can change the state variable
self.state = np.array([T, Q])
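# --- Usage sketch (illustrative) -----------------------------------------------------
# Only the part of the class shown above is relied on (constructor, state,
# action_space). The shape of the disturbance array d (one row per time step, columns
# matching the A matrix) is an assumption made for the demo.
if __name__ == '__main__':
    d = np.random.uniform(low=-1.0, high=1.0, size=(96, 7))   # hypothetical disturbances
    env = two_zone_HVAC(d=d)
    print('initial state [T, Q]:', env.state)
    print('sampled action:', env.action_space.sample())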
"""Information Retrieval metrics
Useful Resources:
http://www.cs.utexas.edu/~mooney/ir-course/slides/Evaluation.ppt
http://www.nii.ac.jp/TechReports/05-014E.pdf
http://www.stanford.edu/class/cs276/handouts/EvaluationNew-handout-6-per.pdf
http://hal.archives-ouvertes.fr/docs/00/72/67/60/PDF/07-busa-fekete.pdf
Learning to Rank for Information Retrieval (Tie-Yan Liu)
"""
import numpy as np
from functools import partial
from numpy.core.fromnumeric import mean
def get_rounded_percentage(float_number, n_floats=2):
return round(float_number * 100, n_floats)
def mean_reciprocal_rank(rs):
"""Score is reciprocal of the rank of the first relevant item
First element is 'rank 1'. Relevance is binary (nonzero is relevant).
Example from http://en.wikipedia.org/wiki/Mean_reciprocal_rank
>>> rs = [[0, 0, 1], [0, 1, 0], [1, 0, 0]]
>>> mean_reciprocal_rank(rs)
0.61111111111111105
>>> rs = np.array([[0, 0, 0], [0, 1, 0], [1, 0, 0]])
>>> mean_reciprocal_rank(rs)
0.5
>>> rs = [[0, 0, 0, 1], [1, 0, 0], [1, 0, 0]]
>>> mean_reciprocal_rank(rs)
0.75
Args:
rs: Iterator of relevance scores (list or numpy) in rank order
(first element is the first item)
Returns:
Mean reciprocal rank
"""
rs = (np.asarray(r).nonzero()[0] for r in rs)
return np.mean([1. / (r[0] + 1) if r.size else 0. for r in rs])
def r_precision(r):
"""Score is precision after all relevant documents have been retrieved
Relevance is binary (nonzero is relevant).
>>> r = [0, 0, 1]
>>> r_precision(r)
0.33333333333333331
>>> r = [0, 1, 0]
>>> r_precision(r)
0.5
>>> r = [1, 0, 0]
>>> r_precision(r)
1.0
Args:
r: Relevance scores (list or numpy) in rank order
(first element is the first item)
Returns:
R Precision
"""
r = np.asarray(r) != 0
z = r.nonzero()[0]
if not z.size:
return 0.
return np.mean(r[:z[-1] + 1])
def precision_at_k(r, k):
"""Score is precision @ k
Relevance is binary (nonzero is relevant).
>>> r = [0, 0, 1]
>>> precision_at_k(r, 1)
0.0
>>> precision_at_k(r, 2)
0.0
>>> precision_at_k(r, 3)
0.33333333333333331
>>> precision_at_k(r, 4)
Traceback (most recent call last):
File "<stdin>", line 1, in ?
ValueError: Relevance score length < k
Args:
r: Relevance scores (list or numpy) in rank order
(first element is the first item)
Returns:
Precision @ k
Raises:
ValueError: len(r) must be >= k
"""
assert k >= 1
r = np.asarray(r)[:k] != 0
if r.size != k:
raise ValueError('Relevance score length < k')
return np.mean(r)
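# --- Illustrative extension (not part of the original file) -------------------------
# The module is truncated here. The usual companions to precision_at_k in this style of
# metrics module are average precision and mean average precision; the sketch below
# follows the same conventions (binary relevance, rank order) and reuses precision_at_k.
def average_precision(r):
    """Average precision over the relevant positions of a single ranking."""
    r = np.asarray(r) != 0
    out = [precision_at_k(r, k + 1) for k in range(r.size) if r[k]]
    if not out:
        return 0.
    return np.mean(out)

def mean_average_precision(rs):
    """Mean average precision over several rankings."""
    return np.mean([average_precision(r) for r in rs])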
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import numpy as np
from scipy.linalg import block_diag, kron, solve, lstsq, norm
from scipy.sparse import csr_matrix
from scipy.sparse.linalg import lsmr as splsmr
from ..helper.plotting import Anim
from ..forcing import sineForce, toMDOF
from .hbcommon import fft_coeff, ifft_coeff, hb_signal, hb_components
from .stability import Hills
from .bifurcation import Fold, NS, BP
class HB():
def __init__(self, M0, C0, K0, nonlin,
NH=3, npow2=8, nu=1, scale_x=1, scale_t=1,
amp0=1e-3, tol_NR=1e-6, max_it_NR=15,
stability=True, rcm_permute=False, anim=True,
xstr='Hz',sca=1/(2*np.pi)):
"""Because frequency(in rad/s) and amplitude have different orders of
magnitude, time and displacement have to be rescaled to avoid
ill-conditioning.
Parameters
----------
Harmonic balance parameters:
NH: number of harmonics retained in the Fourier series
npow2: power of two giving the number of time samples in the Fourier transform,
i.e. 2^8 = 256 samples
nu: accounts for subharmonics of excitation freq w0
amp0: amplitude of first guess
"""
self.NH = NH
self.npow2 = npow2
self.nu = nu
self.scale_x = scale_x
self.scale_t = scale_t
self.amp0 = amp0
self.stability = stability
self.tol_NR = tol_NR * scale_x
self.max_it_NR = max_it_NR
self.rcm_permute = rcm_permute
self.nt = 2**npow2
self.M = M0 * scale_x / scale_t**2
self.C = C0 * scale_x / scale_t
self.K = K0 * scale_x
self.n = M0.shape[0]
self.nonlin = nonlin
self.anim = anim
self.xstr = xstr
self.sca = sca
# number of unknowns in Z-vector, eq (4)
self.nz = self.n * (2 * NH + 1)
self.z_vec = []
self.xamp_vec = []
self.omega_vec = []
self.step_vec = [0]
self.stab_vec = []
# Floquet exponents(λ). Estimated from Hills matrix. Related to
# multipliers(σ) by σ=e^(λ*T) where T is the period.
self.lamb_vec = []
# list of bifurcations that are searched for
self.bif = []
def periodic(self, f0, f_amp, fdofs):
"""Find periodic solution
NR iteration to solve:
# Solve h(z,ω)=A(ω)-b(z)=0 (ie find z that is root of this eq), eq. (21)
# NR solution: (h_z is the derivative of h wrt z)
# zⁱ⁺¹ = zⁱ - h(zⁱ,ω)/h_z(zⁱ,ω)
# h_z = A(ω) - b_z(z) = A - Γ⁺ ∂f/∂x Γ
# where the last exp. is in time domain. df/dx is thus available
# analytical. See eq (30)
Parameters
----------
f0: float
Forcing frequency in Hz
f_amp: float
Forcing amplitude
"""
self.f0 = f0
self.f_amp = f_amp
self.fdofs = fdofs
nu = self.nu
NH = self.NH
n = self.n
nz = self.nz
scale_x = self.scale_x
scale_t = self.scale_t
amp0 = self.amp0
stability = self.stability
tol_NR = self.tol_NR
max_it_NR = self.max_it_NR
w0 = f0 * 2*np.pi
omega = w0*scale_t
omega2 = omega / nu
t = self.assemblet(omega2)
nt = len(t)
u, _ = sineForce(f_amp, omega=omega, t=t)
force = toMDOF(u, n, fdofs)
# Assemble A, describing the linear dynamics. eq (20)
A = self.assembleA(omega2)
# Form Q(t), eq (8). Q is orthogonal trigonometric basis(harmonic
# terms) Then use Q to from the kron product Q(t) ⊗ Iₙ, eq (6)
mat_func_form = np.empty((n*nt, nz))
Q = np.empty((NH*2+1,1))
for i in range(nt):
Q[0] = 1
for ii in range(1,NH+1):
Q[ii*2-1] = np.sin(omega * t[i]*ii)
Q[ii*2] = np.cos(omega * t[i]*ii)
# Stack the kron prod, so each block row is for time(i)
mat_func_form[i*n:(i+1)*n,:] = kron(Q.T, np.eye(n))
self.mat_func_form_sparse = csr_matrix(mat_func_form)
if stability:
hills = Hills(self)
self.hills = hills
# Initial guess for x. Here calculated by broadcasting. np.outer could
# be used instead to calculate the outer product
amp = np.ones(n) * amp0
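# --- Illustrative sketch (assumption, not the original code) ------------------------
# periodic() is cut off above. Its docstring describes the correction step
# z_{i+1} = z_i - h_z(z_i, w)^{-1} h(z_i, w); the loop below sketches that
# Newton-Raphson update for generic residual/Jacobian callables. Assembling h and h_z
# from A, the Fourier basis and df/dx is assumed to happen elsewhere.
def newton_raphson_sketch(h, h_z, z0, tol=1e-6, max_it=15):
    z = z0.copy()
    for it in range(max_it):
        res = h(z)
        if norm(res) < tol:            # scipy.linalg.norm, imported at the top of this file
            return z, it, True
        z = z - solve(h_z(z), res)     # scipy.linalg.solve, imported at the top of this file
    return z, max_it, False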
#!/usr/bin/env python3
"""
Gaussian elimination over the rationals.
See also: elim.py
"""
import sys, os
from random import randint, seed
from fractions import Fraction
import numpy
from numpy import dot
from bruhat.smap import SMap
from bruhat.argv import argv
def write(s):
sys.stdout.write(str(s)+' ')
sys.stdout.flush()
def fstr(x):
x = Fraction(x)
a, b = x.numerator, x.denominator
if b==1:
return str(a)
if a==0:
return "."
return "%s/%s"%(a, b)
def shortstr(*items, **kw):
if len(items)>1:
return shortstrx(*items, **kw)
A = items[0]
if type(A) in [list, tuple]:
A = array(A)
shape = kw.get("shape")
if shape:
A = A.view()
A.shape = shape
if len(A.shape)==1:
A = A.view()
A.shape = (1, len(A))
m, n = A.shape
items = {}
dw = 3
for i in range(m):
for j in range(n):
x = A[i, j]
s = fstr(x)
dw = max(dw, len(s)+1)
items[i, j] = s
smap = SMap()
for i in range(m):
smap[i, 0] = "["
smap[i, n*dw+1] = "]"
for j in range(n):
s = items[i, j]
s = s.rjust(dw-1)
smap[i, j*dw+1] = s
s = str(smap)
s = s.replace(" 0 ", " . ")
return s
def shortstrx(*items, **kw):
smaps = [shortstr(item, **kw) for item in items]
smap = SMap()
col = 0
for A in items:
s = shortstr(A)
smap[0, col] = s
col += s.cols + 1
return smap
def zeros(m, n):
A = numpy.empty((m, n), dtype=object)
A[:] = 0
return A
def array(items):
return numpy.array(items, dtype=object)
def identity(m):
I = zeros(m, m)
for i in range(m):
I[i, i] = 1
return I
def eq(A, B):
r = numpy.abs(A - B)
return r.sum() == 0
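# --- Illustrative sketch (assumption, not the module's own solver) ------------------
# The elimination routine itself is not visible in this excerpt (the docstring points
# at elim.py). The function below is a minimal fraction-exact row reduction consistent
# with the helpers above (zeros/array/identity return object arrays of ints/Fractions).
def row_reduce_sketch(A):
    "Return the reduced row-echelon form of A, working exactly over the rationals."
    A = A.copy()
    m, n = A.shape
    row = 0
    for col in range(n):
        pivot = None
        for i in range(row, m):        # find a pivot at or below `row`
            if A[i, col] != 0:
                pivot = i
                break
        if pivot is None:
            continue
        A[[row, pivot]] = A[[pivot, row]]                    # swap the pivot row up
        pivot_val = A[row, col]
        A[row] = [Fraction(x) / pivot_val for x in A[row]]   # scale the pivot to 1
        for i in range(m):
            if i != row and A[i, col] != 0:
                A[i] = A[i] - A[i, col] * A[row]             # clear the rest of the column
        row += 1
        if row == m:
            break
    return A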
# -*- coding: utf-8 -*-
"""
Created on Sat Aug 04 22:18:20 2018
@author0: MIUK
@author1: FS
Generate datasets for ANN training testing.
TODO: Ensure compatibility with ANN scripts
TODO: Look at shape of Nuke's input file
"""
from ..utility import bipartite
import numpy as np
def gen_dataset(nb_train, nb_test, nb_m, e_min = 0, e_max = np.log(2),
subsyst = ['A', 'B'], check_e = False, states = False):
""" Generate data_set of measures based on random states of uniformly
distributed entropies
Arguments
nb_training: int
how many training examples do we want
nb_testing: int
how many testing examples do we want
nb_m: int
How many measurements are we doing
e_min: float
Min entropy
e_max: float
Max entropy
check_e: bool
Verify that entanglement required is the same as the one obtained
states: bool
should we return the underlying states used
info
Output
------
res: (train, test, info)
train/test: (X, Y, states(optional))
info: str
Provides information about the columns and parameters used to
generate the data
"""
info = []
info.append("Number of measuremets: {0} ".format(nb_m))
info.append("Ent min/max required {0}/{1}".format(e_min, e_max))
nb_total = nb_train + nb_test
ent = np.random.uniform(e_min, e_max, nb_total)
lambd = bipartite.ent_to_lambda(ent)
st = bipartite.rdm_states_from_lambda(lambd)
if check_e:
ent_final = bipartite.entangl_of_states(st)
assert np.allclose(ent_final, ent), "Entanglement produced don't match"
X = np.zeros((nb_total, 3 * len(subsyst)))
for i, ss in enumerate(subsyst):
info.append("X[{0}:{1}] X, Y, Z measurements on subsyst {2}".format(3*i, 3*i+2, ss))
X[:, (3 * i)] = bipartite.meas_one_sub(st, nb_m, [1,0,0], ss)
X[:, (3 * i + 1)] = bipartite.meas_one_sub(st, nb_m, [0,1,0], ss)
X[:, (3 * i + 2)] = bipartite.meas_one_sub(st, nb_m, [0,0,1], ss)
index = np.arange(nb_total)
np.random.shuffle(index)
index_train = index[:nb_train]
index_test = index[(nb_train+1):nb_total]
if states:
train = (X[index_train, :], ent[index_train], st[index_train, :])
test = (X[index_test, :], ent[index_test], st[index_test, :])
else:
train = (X[index_train, :], ent[index_train])
test = (X[index_test, :], ent[index_test])
return train, test, "\n".join(info)
def write_data_set(data_set, name_data, info = '', name_folder=''):
""" Write a data_set as a txt file. if data_set contains the underlying
states they will be written in a separate file
Input
-----
data_set: tuple (X, Y, states(optional))
X: 2d-array
Y: 1d-array
states:
name_data:
name of the file to write - without any extension
info: str
meta-info about teh data
folder_name: str
name of the folder where the files are going to be written
Output
------
Write one (two) file(s), with some comments.
main: Y is the first column, X the others
"""
X = data_set[0]
Y = data_set[1]
states = data_set[2] if len(data_set) == 3 else None
np.savetxt(fname=name_folder + name_data + '.txt', X=np.c_[Y, X],
header=info)
if states is not None:
np.savetxt(fname=name_folder + name_data + '_states.txt',
X=states, header=info)
def write_and_save_dataset(name_data, nb_train, nb_test, nb_m, e_min=0,
e_max=np.log(2), subsyst=['A', 'B'], check_e=False,
states=True, name_folder=''):
""" Generate AND save a data_set
Input
-----
name_data: str
nb_train : int
nb_test : int
nb_m: int
e_min: float
e_max: float
subsyst=['A', 'B'],
check_e=False,
states=True
name_folder=None
Output
------
csv file, with some comments
"""
train, test, info = gen_dataset(nb_train, nb_test, nb_m, e_min, e_max,
subsyst, check_e, states)
name_train = name_data + '_train'
name_test = name_data + '_test'
write_data_set(train, name_train, info , name_folder)
write_data_set(test, name_test, info, name_folder)
def load_data_set(name_data, name_folder='', print_info=True, states=False):
""" load a data_set with a given name and a given folder. Retrieve two files
train and test. Split it in X,Y and extra (optional)
Input
-----
name_data: str
Name of the data e.g. '10meas_perfect'
folder_name: str
location of the folder in which to retrieve name_data_train.txt and
name_data_test.txt
print_info: bool
print comments at the beginning of the txt file
states: bool
Output
------
data_set: (X_train, Y_train), (X_test, Y_test)
X_train: (nb_train, nb_features)
Y_train: 1D numpy-array (nb_train)
X_train: (nb_test, nb_features)
Y_train: 1D numpy-array (nb_test)
TODO: make it more flexible i.e meta-info in comments - parse it and use it
"""
X_train, Y_train= load_one_data_set(name_folder + name_data + '_train.txt')
X_test, Y_test = load_one_data_set(name_folder + name_data + '_test.txt')
if(states):
# deal with complex values
states_train = np.loadtxt(
name_folder + name_data + '_train_states.txt', dtype=complex,
converters={0: lambda s: complex(s.decode().replace('+-', '-'))})
states_test = np.loadtxt(name_folder + name_data + '_test_states.txt',
dtype=complex, converters={0: lambda s: complex(s.decode().replace('+-', '-'))})
return (X_train, Y_train, states_train), (X_test, Y_test, states_test)
else:
return (X_train, Y_train), (X_test, Y_test)
def load_one_data_set(name_file, print_info=True):
""" load one data set
#TODO: implement print_info
"""
data = np.loadtxt(name_file)
X, Y = data[:, 1:], data[:, 0]  # Y is stored in the first column, X in the others
return X, Y
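# --- Round-trip usage sketch (illustrative) ------------------------------------------
# The dataset name and folder below are hypothetical. This module is normally imported
# as part of the package (note the relative import of `bipartite` above), so the demo
# is wrapped in a helper instead of being executed on import.
def _demo_round_trip(name_folder=''):
    write_and_save_dataset(name_data='10meas_demo', nb_train=1000, nb_test=200,
                           nb_m=10, states=True, name_folder=name_folder)
    train, test = load_data_set('10meas_demo', name_folder=name_folder, states=True)
    (x_tr, y_tr, st_tr), (x_te, y_te, st_te) = train, test
    print(x_tr.shape, y_tr.shape, st_tr.shape, x_te.shape)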
"""
mathEX.py
@author: <NAME>
@email: <EMAIL>
Math-related functions, extensions.
"""
import numpy as np
from ml.utils.logger import get_logger
LOGGER = get_logger(__name__)
def change_to_multi_class(y):
"""
change the input prediction y to array-wise multi_class classifiers.
@param y:input prediction y, numpy arrays
@return: array-wise multi_class classifiers
"""
if not isinstance(y, np.ndarray) or y.ndim <= 1:
LOGGER.info('Input is not an np.ndarray, adding default value...')
y = np.array([[0]])
m = y.shape[1]
num_of_fields = int(y.max()) + 1
multi_class_y = np.zeros([num_of_fields, m])
for i in range(m):
label = y[0, i]
# print(type(label))
if isinstance(label, np.int64) and label >= 0:
multi_class_y[label, i] = 1
else:
multi_class_y[0, i] = 1
return multi_class_y
def compute_cost(al, y):
"""
compute costs between output results and actual results y. NEEDS TO BE MODIFIED.
@param al: output results, numpy arrays
@param y: actual result, numpy arrays
@return: cost, floats
"""
if isinstance(al, (float, int)):
al = np.array([[al]])
if isinstance(y, (float, int)):
y = np.array([[y]])
if not isinstance(al, np.ndarray) or not isinstance(y, np.ndarray) or not al.ndim > 1 or not y.ndim > 1:
LOGGER.info('Input is not an np.ndarray, adding default value...')
al = np.array([[0.5]])
y = np.array([[0.6]])
m = y.shape[1]
# Compute loss from aL and y.
cost = sum(sum((1. / m) * (-np.dot(y, np.log(al).T) - np.dot(1 - y, np.log(1 - al).T))))
cost = np.squeeze(cost)
return cost
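# --- Worked example (illustrative) ---------------------------------------------------
# change_to_multi_class one-hot encodes a row vector of integer labels into a
# (num_classes x num_samples) matrix. The function only accepts np.int64 labels, so the
# example assumes a platform where numpy's default integer dtype is int64.
if __name__ == '__main__':
    y = np.array([[0, 2, 1, 2]])
    print(change_to_multi_class(y))
    # expected:
    # [[1. 0. 0. 0.]
    #  [0. 0. 1. 0.]
    #  [0. 1. 0. 1.]]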
#!/usr/bin/env python
import os
import dill as pickle
import sys
import emcee
import matplotlib
import matplotlib.pyplot as plt
from astropy.table import Table
from emcee.mpi_pool import MPIPool
import src.globals as glo
import pandas as pd
import numpy as np
import corner
from src.objects import SpecCandidate, TargetDataFrame, load_training_data
from src.utils import fits_pandas, parallelize, Str
from pyspark.sql import SparkSession
def process(func, items, hpc=False, sc=None, multi=True, n_proc=2):
if multi:
if hpc:
sc.parallelize(items).foreach(lambda i: func(*i))
else:
parallelize(lambda i: func(*i), items, n_proc=n_proc)
else:
for c in items:
func(*c)
def norm(z, n3, n2, n1, n0):
return n3 * z**3 + n2 * z**2 + n1 * z + n0
# return n1 * z + n0
# return n1 * z + n0
def slope(z, s3, s2, s1, s0):
return s3 * z**3 + s2 * z**2 + s1 * z + s0
def model(mag, z, params):
slope_args, norm_args = params[:4], params[4:]
col = (slope(z, *slope_args) * mag) + norm(z, *norm_args)
return col
def log_likelihood(theta, x, y, z, xerr, yerr, zerr):
params, lnf = theta[:-1], theta[-1]
mdl = model(x, z, params)
inv_sigma2 = 1.0 / (xerr**2 + yerr**2 + zerr**2 + mdl**2 * np.exp(2 * lnf))
return -0.5 * (np.sum((y - mdl)**2 * inv_sigma2 - np.log(inv_sigma2)))
def log_prior(theta):
n = 100
if all([(-n < param < n) for param in theta]):
return 0.0
return -np.inf
def log_probability(theta, x, y, z, xerr, yerr, zerr):
lg_prior = log_prior(theta)
if not np.isfinite(lg_prior):
return -np.inf
return lg_prior + log_likelihood(theta, x, y, z, xerr, yerr, zerr)
def main(df=None, pkl_chains=True, burnin=10000, nsteps=100000):
with MPIPool() as pool:
if not pool.is_master():
pool.wait()
sys.exit(0)
# pool = None
# if True:
# print('here')
df = load_training_data(from_pkl=True)
for col in glo.col_options:
key_mag, key_col = 'MAG_AUTO_I', f'MAG_AUTO_{col:u}'
key_mag_err = 'MAGERR_DETMODEL_I'
key_mag_err_0 = f'MAGERR_DETMODEL_{col[0]:u}'
key_mag_err_1 = f'MAGERR_DETMODEL_{col[1]:u}'
magnitude = df[key_mag].values
magnitude_err = df[key_mag_err].values
colour = df[key_col].values
colour_err = np.hypot(df[key_mag_err_0], df[key_mag_err_1]).values
redshift = df['Z'].values
redshift_err = df['Z_ERR'].values # pos=pos
ndim, nwalkers = 9, 100
pos = [1e-4 * np.random.randn(ndim) for _ in range(nwalkers)]
args = magnitude, colour, redshift, magnitude_err, colour_err, redshift_err
settings = nwalkers, ndim, log_probability
sampler = emcee.EnsembleSampler(*settings, args=args, pool=pool)
sampler.run_mcmc(pos, nsteps)
if pkl_chains is True:
name = f'rs.model.{col:l}.chain.pkl'
data = os.path.join(glo.DIR_INTERIM, name)
with open(data, 'wb') as data_out:
pickle.dump(sampler.chain, data_out)
plt.clf()
fig, axes = plt.subplots(ndim, 1, sharex=True, figsize=(8, 9))
labels = [f's{i}' for i in np.arange(0, 4, 1)]
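# --- Post-processing sketch (illustrative) -------------------------------------------
# main() above pickles sampler.chain for each colour. A typical follow-up discards the
# burn-in, flattens the walkers and draws a corner plot. The plain lowercase file name
# and the label order (4 slope terms, 4 norm terms, ln f) are assumptions based on
# model()/log_likelihood() above.
def plot_chain_sketch(col='gr', burnin=10000, ndim=9):
    name = f'rs.model.{col}.chain.pkl'
    with open(os.path.join(glo.DIR_INTERIM, name), 'rb') as fh:
        chain = pickle.load(fh)                  # emcee 2.x: shape (nwalkers, nsteps, ndim)
    samples = chain[:, burnin:, :].reshape(-1, ndim)
    labels = [f's{i}' for i in range(4)] + [f'n{i}' for i in range(4)] + ['lnf']
    fig = corner.corner(samples, labels=labels)
    fig.savefig(os.path.join(glo.DIR_INTERIM, f'rs.model.{col}.corner.png'))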
# -*- coding: utf-8 -*-
import time,sys,os
from netCDF4 import Dataset
import numpy as np
from scipy.interpolate import griddata
import matplotlib.pyplot as plt
import matplotlib.cm as cm
def readlatlon(file_path):
arr = []
with open(file_path,'r') as f:
for Line in f:
arr.append(list(map(float, Line.split())))
return arr
def readASCIIfile(ASCIIfile):
arr = []
geoRefer = []
fh = iter(open(ASCIIfile))
skiprows = 6
for i in range(skiprows):
try:
this_line = next(fh)
geoRefer.append(float(this_line.split()[1]))
except StopIteration:break
while 1:
try:
this_line = next(fh)
if this_line:
arr.append(list(map(float, this_line.split())))
except StopIteration:break
fh.close()
return arr,geoRefer
def FYToArray(fyfile):
data = Dataset(fyfile)
namelist = ['SSI','DirSSI','DifSSI']
value = []
for j in namelist:
dataarr = data.variables[j][:1400]
dataarr[dataarr>2000]=np.nan
dataarr[dataarr==-9999]=np.nan
dataarr[dataarr==9999]=np.nan
value.append(dataarr)
return np.array(value)
def geoRefer2xy(geoRefer):
ncols,nrows,xll,yll,cellsize,NODATA_value = geoRefer
x = np.arange(xll,xll+ncols*cellsize,cellsize)
y = np.arange(yll,yll+nrows*cellsize,cellsize)
return x,y
def interpolat(points,values,x,y):
print('t01')
xv, yv = np.meshgrid(x, y)
print('t02',points.shape,values.shape,xv.shape,yv.shape)
grid_z2 = griddata(points, values, (xv, yv), method='linear') #'nearest''linear''cubic'
return grid_z2
def modiGHI(a,b,r):
c = a*(1+(r[0]*b/1000+r[1])*0.01)
return c
def lat2row(lat):
row = int(((lat - 9.995) / 0.01))
return row
def topoCorrection(radiaArray,deltHgt):
print(5)
ghi_ri=[]
rr = [[2.6036,0.0365],[2.6204,0.0365],[2.6553,0.0362],[2.6973,0.0356],[2.7459,0.0343]\
,[2.8012,0.0324],[2.8616,0.0299],[2.9236,0.0257],[2.9870,0.0204]]
if len(deltHgt) == len(radiaArray):
for i in range(len(deltHgt)):
if i>=lat2row(52.5):
ghi_ri.append(modiGHI(np.array(radiaArray[i]),np.array(deltHgt[i]),rr[8]))
if i>=lat2row(47.5) and i<lat2row(52.5):
ghi_ri.append(modiGHI(np.array(radiaArray[i]),np.array(deltHgt[i]),rr[7]))
if i>=lat2row(42.5) and i<lat2row(47.5):
ghi_ri.append(modiGHI(np.array(radiaArray[i]),np.array(deltHgt[i]),rr[6]))
if i>=lat2row(37.5) and i<lat2row(42.5):
ghi_ri.append(modiGHI(np.array(radiaArray[i]),np.array(deltHgt[i]),rr[5]))
if i>=lat2row(32.5) and i<lat2row(37.5):
ghi_ri.append(modiGHI(np.array(radiaArray[i]),np.array(deltHgt[i]),rr[4]))
if i>=lat2row(27.5) and i<lat2row(32.5):
ghi_ri.append(modiGHI(np.array(radiaArray[i]),np.array(deltHgt[i]),rr[3]))
if i>=lat2row(22.5) and i<lat2row(27.5):
ghi_ri.append(modiGHI(np.array(radiaArray[i]),np.array(deltHgt[i]),rr[2]))
if i>=lat2row(17.5) and i<lat2row(22.5):
                ghi_ri.append(modiGHI(np.array(radiaArray[i]), np.array(deltHgt[i]), rr[1]))
"""
Unit tests for reg_mama.py. This should be run via pytest.
"""
import os
import sys
main_directory = os.path.abspath(os.path.join(os.path.dirname(__file__), '../..'))
test_directory = os.path.abspath(os.path.join(main_directory, 'test'))
data_directory = os.path.abspath(os.path.join(test_directory, 'data'))
sys.path.append(main_directory)
import numpy as np
import pytest
import mama.reg_mama as reg_mama
###########################################
class TestFixedOptionHelper:
SIZE_LIST = [1, 2, 3, 4]
#########
@pytest.mark.parametrize("size", SIZE_LIST)
def test__all_free__return_expected(self, size):
result1 = reg_mama.fixed_option_helper(size, reg_mama.MAMA_REG_OPT_ALL_FREE)
result2 = reg_mama.fixed_option_helper(size)
nan_result1 = np.isnan(result1)
nan_result2 = np.isnan(result2)
assert np.all(nan_result1)
assert np.all(nan_result2)
#########
@pytest.mark.parametrize("size", SIZE_LIST)
def test__all_zero__return_expected(self, size):
result = reg_mama.fixed_option_helper(size, reg_mama.MAMA_REG_OPT_ALL_ZERO)
assert np.all(np.where(result == 0.0, True, False))
#########
@pytest.mark.parametrize("size", SIZE_LIST)
def test__offdiag_zero__return_expected(self, size):
result = reg_mama.fixed_option_helper(size, reg_mama.MAMA_REG_OPT_OFFDIAG_ZERO)
nan_result = np.isnan(result)
assert np.all(np.diag(nan_result))
assert np.all(nan_result.sum(axis=0) == 1)
assert np.all(nan_result.sum(axis=1) == 1)
#########
@pytest.mark.parametrize("size", SIZE_LIST)
def test__identity__return_expected(self, size):
result = reg_mama.fixed_option_helper(size, reg_mama.MAMA_REG_OPT_IDENT)
assert np.all(np.diag(result) == 1.0)
assert np.all(np.where(result == 1.0, True, False).sum(axis=0) == 1)
assert np.all(np.where(result == 1.0, True, False).sum(axis=1) == 1)
#########
@pytest.mark.parametrize("size", SIZE_LIST)
def test__valid_matrix_input__return_expected(self, size):
        M = np.random.rand(size, size)
import numpy as np
from napari.layers import Image
from vispy.color import Colormap
from napari.util.colormaps.colormaps import TransFire
def test_random_volume():
"""Test instantiating Image layer with random 3D data."""
shape = (10, 15, 20)
np.random.seed(0)
data = np.random.random(shape)
layer = Image(data)
layer.dims.ndisplay = 3
assert np.all(layer.data == data)
assert layer.ndim == len(shape)
assert layer.shape == shape
assert layer.dims.range == [(0, m, 1) for m in shape]
assert layer._data_view.shape == shape[-3:]
def test_switching_displayed_dimensions():
"""Test instantiating data then switching to displayed."""
shape = (10, 15, 20)
np.random.seed(0)
data = np.random.random(shape)
layer = Image(data)
assert np.all(layer.data == data)
assert layer.ndim == len(shape)
assert layer.shape == shape
assert layer.dims.range == [(0, m, 1) for m in shape]
# check displayed data is initially 2D
assert layer._data_view.shape == shape[-2:]
layer.dims.ndisplay = 3
# check displayed data is now 3D
assert layer._data_view.shape == shape[-3:]
layer.dims.ndisplay = 2
# check displayed data is now 2D
assert layer._data_view.shape == shape[-2:]
layer = Image(data)
layer.dims.ndisplay = 3
assert np.all(layer.data == data)
assert layer.ndim == len(shape)
assert layer.shape == shape
assert layer.dims.range == [(0, m, 1) for m in shape]
# check displayed data is initially 3D
assert layer._data_view.shape == shape[-3:]
layer.dims.ndisplay = 2
# check displayed data is now 2D
assert layer._data_view.shape == shape[-2:]
layer.dims.ndisplay = 3
# check displayed data is now 3D
assert layer._data_view.shape == shape[-3:]
def test_all_zeros_volume():
"""Test instantiating Image layer with all zeros data."""
shape = (10, 15, 20)
data = np.zeros(shape, dtype=float)
layer = Image(data)
layer.dims.ndisplay = 3
assert np.all(layer.data == data)
assert layer.ndim == len(shape)
assert layer.shape == shape
assert layer._data_view.shape == shape[-3:]
def test_integer_volume():
"""Test instantiating Image layer with integer data."""
shape = (10, 15, 20)
np.random.seed(0)
data = np.round(10 * np.random.random(shape)).astype(int)
layer = Image(data)
layer.dims.ndisplay = 3
    assert np.all(layer.data == data)
#!/usr/bin/env python
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
def read ( fname ):
df = pd.read_csv ( fname, names = 'x y theta phi time'.split () )
#df['x'] *= 0.06
#df['y'] *= 0.06
home = df.loc[0,'x'], df.loc[0,'y']
df['dist'] = np.sqrt((df['x'] - home[0])**2 + (df['y']-home[1])**2.)*1000.
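    # Euclidean distance of every sample from the first (home) position, scaled by 1000
    # (plotted later in microns).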
return df
def read_convergencedata ( fname ):
rawconv = pd.read_csv ( fname, header=None )
conv = rawconv[[33,13,17,18,1,16,34,7]]
conv.columns = 'J1 J2 J1_s J2_s iter dist J1_t J2_t'.split ()
dphi = conv['J2'][1:].values - conv['J2'][:-1]
dtheta = conv['J1'][1:].values - conv['J1'][:-1]
conv['J1_stepsize'] = dtheta / conv['J1_s']
conv['J2_stepsize'] = dphi / conv['J2_s']
conv['time'] = conv.index
return conv
def read_ctrlstep ( fname, stepseq = [100.,50.], verbose=False, motor_id=2, movesize=400 ):
if motor_id == 2:
aname = 'phi'
else:
aname = 'theta'
df = read(fname)
mm = df.apply ( lambda row: 'NAN' in str(row[aname]), axis=1 )
df = df.loc[~mm].astype(float)
#print(df['phi'].astype(float))
mdf = pd.DataFrame(index=df.index[1:], columns=['xpix', 'ypix', 'startangle','d'+aname,'movesize','stepsize'])
mdf['startangle'] = df[aname][:-1]
mdf['d' + aname ] = df[aname][1:].values - df[aname][:-1]
mdf['xpix'] = df['x']
mdf['ypix'] = df['y']
mdf['move_phys'] = np.sqrt((df['x'][1:].values - df['x'][:-1])**2 + (df['y'][1:].values - df['y'][:-1])**2)*90.
mdf['iter'] = 0
mdf['stepsize'] = 0.
stake = 0
if (movesize is None):
if verbose:
print('Attempting to infer move size...')
# Need to account for when motor 1 goes from 360 -> 0
namask = np.isfinite(mdf['d'+aname])
if motor_id == 2:
if stepseq[0] > 0:
homes = mdf.loc[namask].sort_values('d' + aname).iloc[:len(stepseq)].index.sort_values()
else:
homes = mdf.loc[namask].sort_values('d' + aname).iloc[-len(stepseq):].index.sort_values()
else:
if stepseq[0] > 0:
homes = mdf.loc[namask].sort_values('d' + aname).iloc[len(stepseq):2*len(stepseq)].index.sort_values()
else:
homes = mdf.loc[namask].sort_values('d' + aname).iloc[-len(stepseq)*2:-len(stepseq)].index.sort_values()
elif movesize == 0:
mdf['iter'] = 100
mdf['movesize'] = stepseq
mdf['stepsize'] = mdf['d'+aname]/mdf['movesize']
return mdf
else:
lng = np.arange(1,1+len(stepseq))
homes = np.ones_like(stepseq)*movesize*lng + lng
for idx,cstep in enumerate(stepseq):
#nstake = mdf.iloc[stake:].query('dphi<-10.').iloc[0].name
nstake = homes[idx]
mdf.loc[mdf.index[stake:nstake],'movesize'] = cstep
mdf.loc[mdf.index[stake:nstake],'iter'] = idx
mdf.loc[mdf.index[nstake-1],'stepsize'] = np.NaN
#print(mdf.loc[mdf.index[nstake],'stepsize'])
if verbose:
print('Break @ %i' % nstake)
stake = nstake
mdf['stepsize'] = mdf['d'+aname]/mdf['movesize']
mdf.loc[mdf.index[homes-1], 'stepsize'] = np.NaN
return mdf
def estimate_std ( mdf ):
angle_grid = np.arange(0,180.,6.)
assns = np.digitize ( mdf['startangle'], angle_grid )
stds = mdf.stepsize.groupby(assns).std()
stds = stds.replace ( np.NaN, 100.)
mdf['u_stepsize'] = stds.loc[assns].values
return mdf
def read_mdf ( fname ):
df = read(fname)
mdf = pd.DataFrame(index=df.index[1:], columns=['xpix','ypix','startangle','dphi','movesize','stepsize'])
mdf['startangle'] = df['phi'][:-1]
mdf['movesize'] = 0.
#mdf['movesize'] = 15.*(1.-mdf.index%2) -5.*(mdf.index%2)
mdf['xpix'] = df['x']
mdf['ypix'] = df['y']
mdf['dphi'] = df['phi'][1:].values - df['phi'][:-1]
mdf.loc[mdf['dphi']>0, 'movesize'] = 15.
mdf.loc[mdf['dphi']<0, 'movesize'] = -5.
#mdf.loc[mdf.index[::2],'movesize'] = 15.
#mdf.loc[mdf.index[1::2],'movesize'] = -5.
mdf['stepsize'] = mdf['dphi']/mdf['movesize']
mdf['is_fwd'] = np.sign(mdf['movesize']) > 0
return mdf
def plot ( df, axarr, cmap='PiYG', lbl=None ):
axarr[0].scatter ( df['x'], df['y'], s=9, c=df.index % 2,
cmap=cmap, alpha=0.8, label=lbl)
axarr[0].set_aspect('equal','datalim' )
axarr[1].scatter ( df.index, df['dist'], s=18, c=df.index % 2,
cmap=cmap, alpha=0.8 )
[ axarr[i].grid(alpha=0.4) for i in range(2) ]
axarr[0].set_xlabel ( 'x (mm)' )
axarr[0].set_ylabel ( 'y (mm)' )
axarr[1].set_xlabel ( 'time (step #)')
axarr[1].set_ylabel ( r'distance from start ($\mu$m)')
plt.tight_layout ()
return axarr
def run ( ):
dirnames = ['18_04_25_10_11_47_erin_test/',
'18_04_25_11_12_44_erin_test/',
'18_04_25_12_26_06_erin_test/']
cmaps=['PiYG','RdBu','PuOr_r']
labels=['Run 1','Run 2','Run 3']
for pid in range(1,57):
fig,axarr = plt.subplots(1,2,figsize=(10,4))
for i,cdir in enumerate(dirnames):
fname = './%s/Log/PhiSpecMove_mId_1_pId_%i.txt' % (cdir,pid)
df = read(fname)
plot ( df, axarr, cmaps[i] )
plt.savefig('./timevol_pid%i.png'%pid)
plt.close ()
def clean_map(ctrlstep):
ctrlstep = ctrlstep.convert_objects ()
#// filter ctrlstep based on Johannes' suggestions
lowthresh = 0.01
ctrlstep.loc[ctrlstep['stepsize'] < 0, 'stepsize'] = np.nan
ctrlstep.loc[ctrlstep['stepsize'] > 1., 'stepsize'] = np.nan
    bins = np.arange(0., 400., 10.)
import numpy as np
import argparse
from time import time, strftime
import go
import playmodel
import cProfile
parser = argparse.ArgumentParser()
parser.add_argument("--episode", "-e", default=10000, type=int)
parser.add_argument("--output-intv", "-o", dest="output_intv", default=1000, type=int)
parser.add_argument("--size", "-s", dest="size", default=19, type=int)
parser.add_argument("--playouts", "-p", dest="playouts", default=256, type=int)
args = parser.parse_args()
# training parameters
EPOCHS = args.episode
PRINT_INTV = args.output_intv
WHITE = go.WHITE
BLACK = go.BLACK
BOARD_SIZE = args.size
if BOARD_SIZE <= 9:
KOMI = 5.5
elif BOARD_SIZE <= 13:
KOMI = 6.5
else:
KOMI = 7.5
def train():
open("log.txt", "w")
record_times = []
b_win_count = 0
playouts = args.playouts
for episode in range(EPOCHS):
#temperature = 1.0
steps = 0
pass_count = 0
winner = None
reward = 0 # reward is viewed from BLACK
while True:
t = time()
temperature = max(0.1, (1 + 1 / BOARD_SIZE) ** (-steps / 2)) # more step, less temperature
playouts = max(args.playouts // 2, int(playouts * 0.9)) # more step, less playouts
prev_grid = board.grid.copy()
if playouts > 0:
x, y = model.decide_monte_carlo(board, playouts)
else:
x, y = model.decide(board, temperature)
if y == BOARD_SIZE: # pass is indexed #size_square --> y = pass//BOARD_SIZE = BOARD_SIZE
pass_count += 1
board.pass_move()
else:
added_stone = go.Stone(board, (x, y))
if added_stone.islegal:
pass_count = 0
else:
continue
if pass_count >= 2:
winner, score_diff = board.score()
reward = model.WIN_REWARD if winner == BLACK else model.LOSE_REWARD # reward is viewd from BLACK
board.log_endgame(winner, "by " + str(score_diff))
if winner == BLACK:
b_win_count += 1
model.push_step(prev_grid, x + y * BOARD_SIZE, board.grid.copy())
if winner is not None:
break
steps += 1
record_times.append(time()-t)
# end while game
#temperature = max(min_temperature, initial_temperature / (1 + temperature_decay * episode))
model.enqueue_new_record(reward)
model.learn(verbose = ((episode + 1) % PRINT_INTV == 0))
if (episode + 1) % PRINT_INTV == 0:
model.save(str(BOARD_SIZE)+"_tmp.h5")
print("episode: %d\t B win rate: %.3f"%(episode, b_win_count/(episode+1)))
board.write_log_file(open("log.txt", "a"))
print("decide + update time: total %.4f, mean %.4f" % (np.sum(record_times), | np.mean(record_times) | numpy.mean |
import os, shutil
import numpy as np
import cv2, json
from skimage import measure
def get_annos_from_mask(mask_path, image_id, cate_id, instance_id):
"""
    Extract the annotation for every object contained in a mask file.
Args:
mask_path:
image_id:
cate_id:
instance_id:
Returns:
"""
mask_arr = cv2.imdecode(np.fromfile(mask_path, dtype=np.uint8), flags=1)
    # Guard against this channel of the mask containing no objects
ground_truth_binary_mask = mask_arr[:, :, 1]
if ground_truth_binary_mask.max() < 1:
ground_truth_binary_mask = mask_arr[:, :, 2]
if ground_truth_binary_mask.max() < 1:
ground_truth_binary_mask = mask_arr[:, :, 0]
if ground_truth_binary_mask.max() < 1:
raise ("max mask value low than 1 %s %s" % (mask_dir, mask))
contours = measure.find_contours(ground_truth_binary_mask, 0.5)
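    # Each contour traced at the 0.5 iso-level is flattened into one polygon annotation below.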
annos_list = []
for idx in range(len(contours)):
contour = contours[idx]
contour = np.flip(contour, axis=1)
arr_seg = np.expand_dims(contour, axis=1)
        arr_seg = np.array(arr_seg, dtype=np.int)
"""Test file for float subgraph fusing"""
import random
from inspect import signature
import numpy
import pytest
from concrete.common.data_types.integers import Integer
from concrete.common.debugging.custom_assert import assert_not_reached
from concrete.common.optimization.topological import fuse_float_operations
from concrete.common.values import EncryptedScalar, EncryptedTensor
from concrete.numpy import tracing
from concrete.numpy.tracing import trace_numpy_function
def no_fuse(x):
"""No fuse"""
return x + 2
def no_fuse_unhandled(x, y):
"""No fuse unhandled"""
x_1 = x + 0.7
y_1 = y + 1.3
intermediate = x_1 + y_1
return intermediate.astype(numpy.int32)
def fusable_with_bigger_search(x, y):
"""fusable with bigger search"""
x = x + 1
x_1 = x.astype(numpy.int32)
x_1 = x_1 + 1.5
x_2 = x.astype(numpy.int32)
x_2 = x_2 + 3.4
add = x_1 + x_2
add_int = add.astype(numpy.int32)
return add_int + y
def fusable_with_bigger_search_needs_second_iteration(x, y):
"""fusable with bigger search and triggers a second iteration in the fusing"""
x = x + 1
x = x + 0.5
x = numpy.cos(x)
x_1 = x.astype(numpy.int32)
x_1 = x_1 + 1.5
x_p = x + 1
x_p2 = x_p + 1
x_2 = (x_p + x_p2).astype(numpy.int32)
x_2 = x_2 + 3.4
add = x_1 + x_2
add_int = add.astype(numpy.int32)
return add_int + y
def no_fuse_big_constant_3_10_10(x):
"""Pass an array x with size < 100 to trigger a no fuse condition."""
x = x.astype(numpy.float64)
return (x + numpy.ones((3, 10, 10))).astype(numpy.int32)
def no_fuse_dot(x):
"""No fuse dot"""
return numpy.dot(x, numpy.full((10,), 1.33, dtype=numpy.float64)).astype(numpy.int32)
def simple_create_fuse_opportunity(f, x):
"""No fuse because the function is explicitely marked as unfusable in our code."""
return f(x.astype(numpy.float64)).astype(numpy.int32)
def ravel_cases(x):
"""Simple ravel cases"""
return simple_create_fuse_opportunity(numpy.ravel, x)
def transpose_cases(x):
"""Simple transpose cases"""
return simple_create_fuse_opportunity(numpy.transpose, x)
def reshape_cases(x, newshape):
"""Simple reshape cases"""
return simple_create_fuse_opportunity(lambda x: numpy.reshape(x, newshape), x)
def simple_fuse_not_output(x):
"""Simple fuse not output"""
intermediate = x.astype(numpy.float64)
intermediate = intermediate.astype(numpy.int32)
return intermediate + 2
def simple_fuse_output(x):
"""Simple fuse output"""
return x.astype(numpy.float64).astype(numpy.int32)
def mix_x_and_y_intricately_and_call_f(function, x, y):
"""Mix x and y in an intricated way, that can't be simplified by
an optimizer eg, and then call function
"""
intermediate = x + y
intermediate = intermediate + 2
intermediate = intermediate.astype(numpy.float32)
intermediate = intermediate.astype(numpy.int32)
x_p_1 = intermediate + 1.5
x_p_2 = intermediate + 2.7
x_p_3 = function(x_p_1 + x_p_2)
return (
x_p_3.astype(numpy.int32),
x_p_2.astype(numpy.int32),
(x_p_2 + 3).astype(numpy.int32),
x_p_3.astype(numpy.int32) + 67,
y,
(y + 4.7).astype(numpy.int32) + 3,
)
def mix_x_and_y_and_call_f(function, x, y):
"""Mix x and y and then call function"""
x_p_1 = x + 0.1
x_p_2 = x + 0.2
x_p_3 = function(x_p_1 + x_p_2)
return (
x_p_3.astype(numpy.int32),
x_p_2.astype(numpy.int32),
(x_p_2 + 3).astype(numpy.int32),
x_p_3.astype(numpy.int32) + 67,
y,
(y + 4.7).astype(numpy.int32) + 3,
)
def mix_x_and_y_into_range_0_to_1_and_call_f(function, x, y):
"""Mix x and y and then call function, in such a way that the input to function is between
0 and 1"""
x_p_1 = x + 0.1
x_p_2 = x + 0.2
x_p_4 = 1 - numpy.abs(numpy.sin(x_p_1 + x_p_2 + 0.3))
x_p_3 = function(x_p_4)
return (
x_p_3.astype(numpy.int32),
x_p_2.astype(numpy.int32),
(x_p_2 + 3).astype(numpy.int32),
x_p_3.astype(numpy.int32) + 67,
y,
(y + 4.7).astype(numpy.int32) + 3,
)
def mix_x_and_y_into_integer_and_call_f(function, x, y):
"""Mix x and y but keep the entry to function as an integer"""
x_p_1 = x + 1
x_p_2 = x + 2
x_p_3 = function(x_p_1 + x_p_2)
return (
x_p_3.astype(numpy.int32),
x_p_2.astype(numpy.int32),
(x_p_2 + 3).astype(numpy.int32),
x_p_3.astype(numpy.int32) + 67,
y,
(y + 4.7).astype(numpy.int32) + 3,
)
def get_func_params_int32(func, scalar=True):
"""Returns a dict with parameters as scalar int32"""
return {
param_name: EncryptedScalar(Integer(32, True))
if scalar
else EncryptedTensor(Integer(32, True), (1,))
for param_name in signature(func).parameters.keys()
}
@pytest.mark.parametrize(
"function_to_trace,fused,params,warning_message",
[
pytest.param(no_fuse, False, get_func_params_int32(no_fuse), "", id="no_fuse"),
pytest.param(
no_fuse_unhandled,
False,
get_func_params_int32(no_fuse_unhandled),
"""
The following subgraph is not fusable:
%0 = x # EncryptedScalar<int32>
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ one of 2 variable inputs (can only have 1 for fusing)
%1 = 0.7 # ClearScalar<float64>
%2 = y # EncryptedScalar<int32>
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ one of 2 variable inputs (can only have 1 for fusing)
%3 = 1.3 # ClearScalar<float64>
%4 = add(%0, %1) # EncryptedScalar<float64>
%5 = add(%2, %3) # EncryptedScalar<float64>
%6 = add(%4, %5) # EncryptedScalar<float64>
%7 = astype(%6, dtype=int32) # EncryptedScalar<int32>
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ cannot fuse here as the subgraph has 2 variable inputs
return %7
""".strip(), # noqa: E501 # pylint: disable=line-too-long
id="no_fuse_unhandled",
),
pytest.param(
fusable_with_bigger_search,
True,
get_func_params_int32(fusable_with_bigger_search),
None,
id="fusable_with_bigger_search",
),
pytest.param(
fusable_with_bigger_search_needs_second_iteration,
True,
get_func_params_int32(fusable_with_bigger_search_needs_second_iteration),
None,
id="fusable_with_bigger_search",
),
pytest.param(
no_fuse_dot,
False,
{"x": EncryptedTensor(Integer(32, True), (10,))},
"""
The following subgraph is not fusable:
%0 = x # EncryptedTensor<int32, shape=(10,)>
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ input node with shape (10,)
%1 = [1.33 1.33 ... 1.33 1.33] # ClearTensor<float64, shape=(10,)>
%2 = dot(%0, %1) # EncryptedScalar<float64>
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ output shapes: #0, () are not the same as the subgraph's input: (10,)
%3 = astype(%2, dtype=int32) # EncryptedScalar<int32>
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ output shapes: #0, () are not the same as the subgraph's input: (10,)
return %3
""".strip(), # noqa: E501 # pylint: disable=line-too-long
id="no_fuse_dot",
),
pytest.param(
ravel_cases,
False,
{"x": EncryptedTensor(Integer(32, True), (10, 20))},
"""
The following subgraph is not fusable:
%0 = x # EncryptedTensor<int32, shape=(10, 20)>
%1 = astype(%0, dtype=float64) # EncryptedTensor<float64, shape=(10, 20)>
%2 = ravel(%1) # EncryptedTensor<float64, shape=(200,)>
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ this node is explicitely marked by the package as non-fusable
%3 = astype(%2, dtype=int32) # EncryptedTensor<int32, shape=(200,)>
return %3
""".strip(), # noqa: E501 # pylint: disable=line-too-long
id="no_fuse_explicitely_ravel",
),
pytest.param(
transpose_cases,
False,
{"x": EncryptedTensor(Integer(32, True), (10, 20))},
"""
The following subgraph is not fusable:
%0 = x # EncryptedTensor<int32, shape=(10, 20)>
%1 = astype(%0, dtype=float64) # EncryptedTensor<float64, shape=(10, 20)>
%2 = transpose(%1) # EncryptedTensor<float64, shape=(20, 10)>
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ this node is explicitely marked by the package as non-fusable
%3 = astype(%2, dtype=int32) # EncryptedTensor<int32, shape=(20, 10)>
return %3
""".strip(), # noqa: E501 # pylint: disable=line-too-long
id="no_fuse_explicitely_transpose",
),
pytest.param(
lambda x: reshape_cases(x, (20, 10)),
False,
{"x": EncryptedTensor(Integer(32, True), (10, 20))},
"""
The following subgraph is not fusable:
%0 = x # EncryptedTensor<int32, shape=(10, 20)>
%1 = astype(%0, dtype=float64) # EncryptedTensor<float64, shape=(10, 20)>
%2 = reshape(%1, newshape=(20, 10)) # EncryptedTensor<float64, shape=(20, 10)>
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ this node is explicitely marked by the package as non-fusable
%3 = astype(%2, dtype=int32) # EncryptedTensor<int32, shape=(20, 10)>
return %3
""".strip(), # noqa: E501 # pylint: disable=line-too-long
id="no_fuse_explicitely_reshape",
),
pytest.param(
no_fuse_big_constant_3_10_10,
False,
{"x": EncryptedTensor(Integer(32, True), (10, 10))},
"""
The following subgraph is not fusable:
%0 = [[[1. 1. 1 ... . 1. 1.]]] # ClearTensor<float64, shape=(3, 10, 10)>
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ this constant node has a bigger shape (3, 10, 10) than the subgraph's input: (10, 10)
%1 = x # EncryptedTensor<int32, shape=(10, 10)>
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ input node with shape (10, 10)
%2 = astype(%1, dtype=float64) # EncryptedTensor<float64, shape=(10, 10)>
%3 = add(%2, %0) # EncryptedTensor<float64, shape=(3, 10, 10)>
%4 = astype(%3, dtype=int32) # EncryptedTensor<int32, shape=(3, 10, 10)>
return %4
""".strip(), # noqa: E501 # pylint: disable=line-too-long
id="no_fuse_big_constant_3_10_10",
),
pytest.param(
simple_fuse_not_output,
True,
get_func_params_int32(simple_fuse_not_output),
None,
id="simple_fuse_not_output",
),
pytest.param(
simple_fuse_output,
True,
get_func_params_int32(simple_fuse_output),
None,
id="simple_fuse_output",
),
pytest.param(
lambda x, y: mix_x_and_y_intricately_and_call_f(numpy.rint, x, y),
True,
get_func_params_int32(lambda x, y: None),
None,
id="mix_x_and_y_intricately_and_call_f_with_rint",
),
pytest.param(
lambda x, y: mix_x_and_y_and_call_f(numpy.rint, x, y),
True,
get_func_params_int32(lambda x, y: None),
None,
id="mix_x_and_y_and_call_f_with_rint",
),
pytest.param(
transpose_cases,
True,
get_func_params_int32(transpose_cases),
None,
id="transpose_cases scalar",
),
pytest.param(
transpose_cases,
True,
{"x": EncryptedTensor(Integer(32, True), (10,))},
None,
id="transpose_cases ndim == 1",
),
pytest.param(
ravel_cases,
True,
{"x": EncryptedTensor(Integer(32, True), (10,))},
None,
id="ravel_cases ndim == 1",
),
pytest.param(
lambda x: reshape_cases(x, (10, 20)),
True,
{"x": EncryptedTensor(Integer(32, True), (10, 20))},
None,
id="reshape_cases same shape",
),
],
)
def test_fuse_float_operations(
function_to_trace,
fused,
params,
warning_message,
capfd,
remove_color_codes,
check_array_equality,
):
"""Test function for fuse_float_operations"""
op_graph = trace_numpy_function(
function_to_trace,
params,
)
orig_num_nodes = len(op_graph.graph)
fuse_float_operations(op_graph)
fused_num_nodes = len(op_graph.graph)
if fused:
assert fused_num_nodes < orig_num_nodes
else:
assert fused_num_nodes == orig_num_nodes
captured = capfd.readouterr()
assert warning_message in (output := remove_color_codes(captured.err)), output
for input_ in [0, 2, 42, 44]:
inputs = ()
for param_input_value in params.values():
if param_input_value.is_scalar:
input_ = numpy.int32(input_)
else:
input_ = numpy.full(param_input_value.shape, input_, dtype=numpy.int32)
inputs += (input_,)
check_array_equality(function_to_trace(*inputs), op_graph(*inputs))
def subtest_tensor_no_fuse(fun, tensor_shape):
"""Test case to verify float fusing is only applied on functions on scalars."""
if tensor_shape == ():
# We want tensors
return
if fun in LIST_OF_UFUNC_WHICH_HAVE_INTEGER_ONLY_SOURCES:
# We need at least one input of the bivariate function to be float
return
# Float fusing currently cannot work if the constant in a bivariate operator is bigger than the
# variable input.
# Make a broadcastable shape but with the constant being bigger
variable_tensor_shape = (1,) + tensor_shape
constant_bigger_shape = (random.randint(2, 10),) + tensor_shape
def tensor_no_fuse(x):
intermediate = x.astype(numpy.float64)
intermediate = fun(intermediate, numpy.ones(constant_bigger_shape))
return intermediate.astype(numpy.int32)
function_to_trace = tensor_no_fuse
params_names = signature(function_to_trace).parameters.keys()
op_graph = trace_numpy_function(
function_to_trace,
{
param_name: EncryptedTensor(Integer(32, True), shape=variable_tensor_shape)
for param_name in params_names
},
)
orig_num_nodes = len(op_graph.graph)
fuse_float_operations(op_graph)
fused_num_nodes = len(op_graph.graph)
assert orig_num_nodes == fused_num_nodes
def check_results_are_equal(function_result, op_graph_result):
"""Check the output of function execution and OPGraph evaluation are equal."""
if isinstance(function_result, tuple) and isinstance(op_graph_result, tuple):
assert len(function_result) == len(op_graph_result)
are_equal = (
function_output == op_graph_output
for function_output, op_graph_output in zip(function_result, op_graph_result)
)
elif not isinstance(function_result, tuple) and not isinstance(op_graph_result, tuple):
are_equal = (function_result == op_graph_result,)
else:
assert_not_reached(f"Incompatible outputs: {function_result}, {op_graph_result}")
return all(value.all() if isinstance(value, numpy.ndarray) else value for value in are_equal)
def subtest_fuse_float_unary_operations_correctness(fun, tensor_shape):
"""Test a unary function with fuse_float_operations."""
# Some manipulation to avoid issues with domain of definitions of functions
if fun == numpy.arccosh:
# 0 is not in the domain of definition
input_list = [1, 2, 42, 44]
super_fun_list = [mix_x_and_y_and_call_f]
elif fun in [numpy.arctanh, numpy.arccos, numpy.arcsin, numpy.arctan]:
# Needs values between 0 and 1 in the call function
input_list = [0, 2, 42, 44]
super_fun_list = [mix_x_and_y_into_range_0_to_1_and_call_f]
elif fun in [numpy.cosh, numpy.sinh, numpy.exp, numpy.exp2, numpy.expm1]:
# Not too large values to avoid overflows
input_list = [1, 2, 5, 11]
super_fun_list = [mix_x_and_y_and_call_f, mix_x_and_y_intricately_and_call_f]
else:
# Regular case
input_list = [0, 2, 42, 44]
super_fun_list = [mix_x_and_y_and_call_f, mix_x_and_y_intricately_and_call_f]
for super_fun in super_fun_list:
for input_ in input_list:
def get_function_to_trace():
return lambda x, y: super_fun(fun, x, y)
function_to_trace = get_function_to_trace()
params_names = signature(function_to_trace).parameters.keys()
op_graph = trace_numpy_function(
function_to_trace,
{
param_name: EncryptedTensor(Integer(32, True), tensor_shape)
for param_name in params_names
},
)
orig_num_nodes = len(op_graph.graph)
fuse_float_operations(op_graph)
fused_num_nodes = len(op_graph.graph)
assert fused_num_nodes < orig_num_nodes
# Check that the call to the function or to the op_graph evaluation give the same
# result
tensor_diversifier = (
# The following +1 in the range is to avoid to have 0's which is not in the
# domain definition of some of our functions
numpy.arange(1, numpy.product(tensor_shape) + 1, dtype=numpy.int32).reshape(
tensor_shape
)
if tensor_shape != ()
else 1
)
if fun in [numpy.arctanh, numpy.arccos, numpy.arcsin, numpy.arctan]:
# Domain of definition for these functions
tensor_diversifier = (
numpy.ones(tensor_shape, dtype=numpy.int32) if tensor_shape != () else 1
)
input_ = numpy.int32(input_ * tensor_diversifier)
num_params = len(params_names)
assert num_params == 2
# Create inputs which are either of the form [x, x] or [x, y]
for j in range(4):
if fun in [numpy.arctanh, numpy.arccos, numpy.arcsin, numpy.arctan] and j > 0:
# Domain of definition for these functions
break
input_a = input_
input_b = input_ + j
if tensor_shape != ():
numpy.random.shuffle(input_a)
numpy.random.shuffle(input_b)
inputs = (input_a, input_b) if random.randint(0, 1) == 0 else (input_b, input_a)
function_result = function_to_trace(*inputs)
op_graph_result = op_graph(*inputs)
assert check_results_are_equal(function_result, op_graph_result)
LIST_OF_UFUNC_WHICH_HAVE_INTEGER_ONLY_SOURCES = {
numpy.bitwise_and,
numpy.bitwise_or,
numpy.bitwise_xor,
numpy.gcd,
numpy.lcm,
numpy.ldexp,
numpy.left_shift,
numpy.logical_and,
numpy.logical_not,
numpy.logical_or,
numpy.logical_xor,
numpy.remainder,
numpy.right_shift,
}
def subtest_fuse_float_binary_operations_correctness(fun, tensor_shape):
"""Test a binary functions with fuse_float_operations, with a constant as a source."""
for i in range(4):
# Know if the function is defined for integer inputs
if fun in LIST_OF_UFUNC_WHICH_HAVE_INTEGER_ONLY_SOURCES:
if i not in [0, 2]:
continue
# The .astype(numpy.float64) that we have in cases 0 and 2 is here to force
# a float output even for functions which return an integer (eg, XOR), such
# that our frontend always try to fuse them
# The .astype(numpy.float64) that we have in cases 1 and 3 is here to force
# a float output even for functions which return a bool (eg, EQUAL), such
# that our frontend always try to fuse them
# For bivariate functions: fix one of the inputs
if i == 0:
# With an integer in first position
ones_0 = numpy.ones(tensor_shape, dtype=numpy.int32) if tensor_shape != () else 1
def get_function_to_trace():
return lambda x, y: fun(3 * ones_0, x + y).astype(numpy.float64).astype(numpy.int32)
elif i == 1:
# With a float in first position
            ones_1 = numpy.ones(tensor_shape, dtype=numpy.float64)
import sys, os
sys.path.append(os.path.dirname(os.path.abspath(__file__))+"/../src")
from blackscholes.utils.GBM import GBM
from blackscholes.mc.Euro import Euro
from blackscholes.mc.American import American
from utils.Experiment import MCEuroExperiment, MCEuroExperimentStd, MCAmerExperimentStd
import utils.Pickle as hdpPickle
import unittest
import numpy as np
class Test(unittest.TestCase):
def test_amer_std(self):
# although this is not a euro experiment...
T = 1
strike = 50
asset_num = 1
init_price_vec = 50*np.ones(asset_num)
vol_vec = 0.5*np.ones(asset_num)
ir = 0.05
dividend_vec = np.zeros(asset_num)
corr_mat = np.eye(asset_num)
nTime = 365
random_walk = GBM(T, nTime, init_price_vec, ir, vol_vec, dividend_vec, corr_mat)
def test_payoff(*l):
return max(strike - np.sum(l), 0)
opt = American(test_payoff, random_walk)
MCAmerExperimentStd(10, 16, 30, opt)
def test_amer(self):
# although this is not a euro experiment...
T = 1
strike = 50
asset_num = 1
init_price_vec = 50*np.ones(asset_num)
vol_vec = 0.5*np.ones(asset_num)
ir = 0.05
dividend_vec = np.zeros(asset_num)
corr_mat = np.eye(asset_num)
nTime = 365
random_walk = GBM(T, nTime, init_price_vec, ir, vol_vec, dividend_vec, corr_mat)
def test_payoff(*l):
return max(strike - np.sum(l), 0)
opt = American(test_payoff, random_walk)
analy = 8.723336355455928
np.random.seed(1)
result = MCEuroExperiment(analy, 10, 16, opt, "V1")
hdpPickle.dump(result, 'MCAmer_1d.pickle')
print(result)
def test_std_6d(self):
dim = 6
T = 1
strike = 40
init_price_vec = np.full(dim, 40)
vol = 0.2
ir = 0.06
dividend = 0.04
corr = 0.25
        vol_vec = np.full(dim, vol)
# coding: utf-8
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
if __name__ == "__main__":
# # Problem 1 - Gaussian Process Modelling
# ## Data I/O
X_train = np.genfromtxt('hw3-data/gaussian_process/X_train.csv', delimiter=',')
X_test = np.genfromtxt('hw3-data/gaussian_process/X_test.csv', delimiter=',')
y_train = np.genfromtxt('hw3-data/gaussian_process/y_train.csv', delimiter=',')
y_test = np.genfromtxt('hw3-data/gaussian_process/y_test.csv', delimiter=',')
# ## Helper Functions
def calculateRMSE(y_pred, y_test):
n = y_pred.shape[0]
return np.linalg.norm(y_pred - y_test)/(n**0.5)
# ## Gaussian Process Regression
class GaussianProcessRegression():
def __init__(self):
pass
def standardize(self, y):
mean = np.mean(y)
std = np.std(y)
y = (y - mean)/std
self.mean = mean
self.std = std
return y
def calcKernel(self):
X = self.X
(n, d) = X.shape
K = np.zeros((n, n))
for i in range(n):
for j in range(i, n):
xi, xj = X[i, :].flatten(), X[j, :].flatten()
k = self.calcRadialDistance(xi, xj)
K[i, j] = k
K[j, i] = k
self.K = K
def transformOutput(self, y):
y = y*self.std + self.mean
return y
def calcRadialDistance(self, x1, x2):
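            # Gaussian (RBF) kernel with bandwidth b: k(x1, x2) = exp(-||x1 - x2||^2 / b).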
return np.exp(-1*(np.linalg.norm(x1-x2)**2)/self.b)
def train(self, X, y):
self.X = X
self.y = self.standardize(y)
def setb(self, b):
self.b = b
self.calcKernel()
def predict(self, X_t, sig):
X = self.X
y = self.y
(n, d) = X.shape
(m, d) = X_t.shape
Kn = np.zeros((m, n))
for i in range(m):
for j in range(n):
Kn[i, j] = self.calcRadialDistance(X_t[i, :].flatten(), X[j, :].flatten())
Kn = Kn.reshape((m, n))
K = self.K
mu = Kn.dot(np.linalg.inv((sig)*np.identity(n) + K)).dot(y)
#cov = (sig**2) + 1 - Kn.dot(np.linalg.inv((sig**2)*np.identity(n) + K)).dot(Kn.T)
return self.transformOutput(mu)
GPR = GaussianProcessRegression()
GPR.train(X_train, y_train)
# ## RMSE vs. (b, $\sigma^2$)
b_tests = [5, 7, 9, 11, 13, 15]
sig_tests = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1]
results = np.zeros((len(b_tests), len(sig_tests)))
for i in range(len(b_tests)):
GPR.setb(b_tests[i])
for j in range(len(sig_tests)):
y_pred = GPR.predict(X_test, sig_tests[j])
results[i, j] = calculateRMSE(y_pred, y_test)
plt.figure(figsize=(20, 10))
sns.set_style('whitegrid')
sns.heatmap(results, annot=True, annot_kws={"size": 15}, fmt='.3f', xticklabels=sig_tests, yticklabels=b_tests)
plt.xlabel('sig_squared', fontsize=20)
plt.ylabel('b', fontsize=20)
plt.xticks(fontsize=20)
plt.yticks(fontsize=20)
plt.title('RMSE', fontsize=20)
plt.savefig('1b.png')
#plt.show()
# ## Prediction using only a single dimension - (car weight)
(n, d) = X_train.shape
X_test_f4 = X_train[:, 3].reshape(n, 1)
for i in range(d-1):
X_test_f4 = np.column_stack((X_test_f4, X_train[:, 3].reshape((n, 1))))
GPR.setb(5)
y_test_f4 = GPR.predict(X_test_f4, 2)
plt.figure(figsize=(20, 10))
plt.scatter(X_train[:, 3], y_test_f4, label="Predictions")
plt.scatter(X_train[:, 3], y_train, label="Training Data")
plt.xlabel("car_weight", fontsize=20)
plt.ylabel("Mileage", fontsize=20)
plt.legend(fontsize=20)
plt.xticks(fontsize=15)
plt.yticks(fontsize=15)
#plt.show()
GPR_x4 = GaussianProcessRegression()
GPR_x4.train(X_train[:, 3].reshape(X_train.shape[0], 1), y_train)
GPR_x4.setb(5)
y_train_f4 = GPR_x4.predict(X_train[:, 3].reshape(X_train.shape[0], 1), 2)
plt.figure(figsize=(20, 10))
plt.scatter(X_train[:, 3], y_train_f4, label="Predictions")
plt.scatter(X_train[:, 3], y_train, label="Training Data")
plt.xlabel("car_weight", fontsize=20)
plt.ylabel("Mileage", fontsize=20)
plt.legend(fontsize=20)
plt.xticks(fontsize=15)
plt.yticks(fontsize=15)
plt.savefig('1d_new.png')
#plt.show()
# # Problem 2 - Boosting
# ## Data I/O
X_train = np.genfromtxt('hw3-data/boosting/X_train.csv', delimiter=',')
X_test = np.genfromtxt('hw3-data/boosting/X_test.csv', delimiter=',')
y_train = np.genfromtxt('hw3-data/boosting/y_train.csv', delimiter=',')
y_test = np.genfromtxt('hw3-data/boosting/y_test.csv', delimiter=',')
X_train_w_ones = np.column_stack((X_train, np.ones(X_train.shape[0])))
X_test_w_ones = np.column_stack((X_test, np.ones(X_test.shape[0])))
# ## Helper Functions
def calculateAccuracy(predictions, ground_truth):
n = predictions.shape[0]
assert n == ground_truth.shape[0]
        return predictions[predictions == ground_truth].shape[0]/n
def calculateErrorRate(predictions, ground_truth):
n = predictions.shape[0]
assert n == ground_truth.shape[0]
        return np.sum(predictions != ground_truth)/n
# ## Least Square Classifier
class LeastSquareClassifier():
def __init__(self):
self.weights = None
def train(self, X, y):
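            # Ordinary least squares via the normal equations: w = (X^T X)^{-1} X^T y.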
(n, d) = X.shape
XTX = X.T.dot(X)
w = np.linalg.inv(XTX).dot(X.T).dot(y)
assert w.shape[0] == d
self.weights = w
def test(self, X):
w = self.weights
(n, d) = X.shape
y_int = X.dot(w)
return y_int/np.abs(y_int)
LSClassifier = LeastSquareClassifier()
LSClassifier.train(X_train_w_ones, y_train)
y_pred = LSClassifier.test(X_test_w_ones)
print("Basic Least Square Classifier Accuracy: {}".format(calculateAccuracy(y_pred, y_test)))
# ## Boosted Least Square Classifier
class BoostedLeastSquareClassifier():
def __init__(self, classifier):
self.classifier = classifier
self.classifiers = []
self.alphas = None
self.eps = None
self.training_errors = None
self.testing_errors = None
self.weights = None
self.sample_tracker = []
def train(self, X, y, num_of_classifiers):
np.random.seed(0)
self.training_errors = []
eps = np.zeros(num_of_classifiers)
self.alphas = np.zeros(num_of_classifiers)
(n, d) = X.shape
w = np.ones(n)/n
y_pred_int = np.zeros(n)
for i in range(num_of_classifiers):
c = self.classifier()
sample_indices = np.random.choice(n, size=n, replace=True, p=w)
self.sample_tracker.extend(list(sample_indices))
X_sample = X[sample_indices,:]
y_sample = y[sample_indices]
c.train(X_sample, y_sample)
y_pred = c.test(X)
e = float(np.dot(w[y_pred!=y], np.ones(n)[y_pred!=y]))
if(e>0.5):
w_int = c.weights
w_int = -1*w_int
c.weights = w_int
y_pred = c.test(X)
e = float(np.dot(w[y_pred!=y], np.ones(n)[y_pred!=y]))
eps[i] = e
a = 0.5*np.log(((1-e)/e))
self.alphas[i] = a
y_pred_int = y_pred_int + (self.alphas[i]*y_pred)
y_pred_final = y_pred_int/np.abs(y_pred_int)
self.training_errors.append(np.sum(y_pred_final != y)/ n)
w = w*(np.exp(-a*y*y_pred))
w = (w/np.sum(w))
self.classifiers.append(c)
self.eps = eps
def test(self, X, y, num_of_classifiers=None):
(n, d) = X.shape
if num_of_classifiers == None:
num_of_classifiers = self.alphas.shape[0]
self.testing_errors = []
y_pred_int = np.zeros(n)
for i in range(num_of_classifiers):
c = self.classifiers[i]
y_pred = c.test(X)
y_pred_int = y_pred_int + (self.alphas[i]*y_pred)
y_pred_final = y_pred_int/np.abs(y_pred_int)
self.testing_errors.append(np.sum(y_pred_final != y)/ n)
return y_pred_final
BLSClassifier = BoostedLeastSquareClassifier(LeastSquareClassifier)
BLSClassifier.train(X_train_w_ones, y_train, 1500)
y_pred = BLSClassifier.test(X_test_w_ones, y_test)
print("Boosted Least Square Classifier Accuracy: {}".format(calculateAccuracy(y_pred, y_test)))
# ## Plot of Training Error vs. Testing Error
plt.figure(figsize=(20, 10))
sns.set_style('whitegrid')
plt.plot(BLSClassifier.training_errors, label="Training Error", linewidth=3)
plt.plot(BLSClassifier.testing_errors, label="Testing Error", linewidth=3)
plt.legend(fontsize=20)
plt.xlabel("Boosting Interations", fontsize=20)
plt.ylabel("Error", fontsize=20)
plt.xticks(fontsize=15)
plt.yticks(fontsize=15)
plt.savefig('2a.png')
#plt.show()
# ## Plot of Training Error vs. Theoritical Upper Bound
def calculateUpperBound(errors):
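        # AdaBoost bound: the training error after T rounds is at most prod_t exp(-2 * (0.5 - eps_t)^2).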
prod = 1
t = errors.shape[0]
upper_bound = []
for i in range(t):
e = errors[i]
prod *= np.exp(-2*((0.5-e)**2))
upper_bound.append(prod)
        return np.array(upper_bound)
import os
import sys
import shutil
import numpy as np
import tensorflow as tf
sys.path.append('/home/janvijay.singh/ASR/asr_abhinav_subword_300_30_june')
from utils import objdict, checkpoint
from models import create_model
def getTranspose(input_fn, output_fn):
k1 = np.load(input_fn)
k1_T = np.ascontiguousarray(k1.T, dtype=np.float32)
# k1_T.flags['C_CONTIGUOUS']
    np.save(output_fn, k1_T)
import random
import numpy as np
import os
from PIL import Image
import cv2
Single_Num=2000
classes=10
Num=Single_Num*classes
SVHNArray = np.empty((Num, 32, 32, 3))
from __future__ import division, print_function
import sys
import numpy as np
import matplotlib.mlab as mlab
import tempfile
import unittest
class general_test(unittest.TestCase):
def test_colinear_pca(self):
a = mlab.PCA._get_colinear()
pca = mlab.PCA(a)
np.testing.assert_allclose(pca.fracs[2:], 0., atol=1e-8)
np.testing.assert_allclose(pca.Y[:, 2:], 0., atol=1e-8)
def test_prctile(self):
# test odd lengths
x = [1, 2, 3]
self.assertEqual(mlab.prctile(x, 50), np.median(x))
# test even lengths
x = [1, 2, 3, 4]
self.assertEqual(mlab.prctile(x, 50), np.median(x))
# derived from email sent by jason-sage to MPL-user on 20090914
ob1 = [1, 1, 2, 2, 1, 2, 4, 3, 2, 2, 2, 3,
4, 5, 6, 7, 8, 9, 7, 6, 4, 5, 5]
p = [0, 75, 100]
expected = [1, 5.5, 9]
# test vectorized
actual = mlab.prctile(ob1, p)
np.testing.assert_allclose(expected, actual)
# test scalar
for pi, expectedi in zip(p, expected):
actuali = mlab.prctile(ob1, pi)
np.testing.assert_allclose(expectedi, actuali)
class csv_testcase(unittest.TestCase):
def setUp(self):
if sys.version_info[0] == 2:
self.fd = tempfile.TemporaryFile(suffix='csv', mode="wb+")
else:
self.fd = tempfile.TemporaryFile(suffix='csv', mode="w+",
newline='')
def tearDown(self):
self.fd.close()
def test_recarray_csv_roundtrip(self):
expected = np.recarray((99,),
[('x', np.float),
('y', np.float),
('t', np.float)])
# initialising all values: uninitialised memory sometimes produces
# floats that do not round-trip to string and back.
expected['x'][:] = np.linspace(-1e9, -1, 99)
expected['y'][:] = np.linspace(1, 1e9, 99)
expected['t'][:] = np.linspace(0, 0.01, 99)
mlab.rec2csv(expected, self.fd)
self.fd.seek(0)
actual = mlab.csv2rec(self.fd)
np.testing.assert_allclose(expected['x'], actual['x'])
np.testing.assert_allclose(expected['y'], actual['y'])
np.testing.assert_allclose(expected['t'], actual['t'])
def test_rec2csv_bad_shape_ValueError(self):
bad = np.recarray((99, 4), [('x', np.float), ('y', np.float)])
# the bad recarray should trigger a ValueError for having ndim > 1.
self.assertRaises(ValueError, mlab.rec2csv, bad, self.fd)
class spectral_testcase(unittest.TestCase):
def setUp(self):
self.Fs = 100.
self.fstims = [self.Fs/4, self.Fs/5, self.Fs/10]
self.x = np.arange(0, 10000, 1/self.Fs)
self.NFFT = 1000*int(1/min(self.fstims) * self.Fs)
self.noverlap = int(self.NFFT/2)
self.pad_to = int(2**np.ceil(np.log2(self.NFFT)))
self.freqss = np.linspace(0, self.Fs/2, num=self.pad_to//2+1)
self.freqsd = np.linspace(-self.Fs/2, self.Fs/2, num=self.pad_to,
endpoint=False)
self.t = self.x[self.NFFT//2::self.NFFT-self.noverlap]
self.y = [np.zeros(self.x.size)]
for i, fstim in enumerate(self.fstims):
self.y.append(np.sin(fstim * self.x * np.pi * 2))
self.y.append(np.sum(self.y, axis=0))
# get the list of frequencies in each test
self.fstimsall = [[]] + [[f] for f in self.fstims] + [self.fstims]
def test_psd(self):
for y, fstims in zip(self.y, self.fstimsall):
Pxx1, freqs1 = mlab.psd(y, NFFT=self.NFFT,
Fs=self.Fs,
noverlap=self.noverlap,
pad_to=self.pad_to,
sides='default')
np.testing.assert_array_equal(freqs1, self.freqss)
for fstim in fstims:
i = np.abs(freqs1 - fstim).argmin()
self.assertTrue(Pxx1[i] > Pxx1[i+1])
self.assertTrue(Pxx1[i] > Pxx1[i-1])
Pxx2, freqs2 = mlab.psd(y, NFFT=self.NFFT,
Fs=self.Fs,
noverlap=self.noverlap,
pad_to=self.pad_to,
sides='onesided')
np.testing.assert_array_equal(freqs2, self.freqss)
for fstim in fstims:
i = np.abs(freqs2 - fstim).argmin()
self.assertTrue(Pxx2[i] > Pxx2[i+1])
self.assertTrue(Pxx2[i] > Pxx2[i-1])
Pxx3, freqs3 = mlab.psd(y, NFFT=self.NFFT,
Fs=self.Fs,
noverlap=self.noverlap,
pad_to=self.pad_to,
sides='twosided')
np.testing.assert_array_equal(freqs3, self.freqsd)
for fstim in fstims:
i = np.abs(freqs3 - fstim).argmin()
self.assertTrue(Pxx3[i] > Pxx3[i+1])
self.assertTrue(Pxx3[i] > Pxx3[i-1])
def test_specgram(self):
for y, fstims in zip(self.y, self.fstimsall):
Pxx1, freqs1, t1 = mlab.specgram(y, NFFT=self.NFFT,
Fs=self.Fs,
noverlap=self.noverlap,
pad_to=self.pad_to,
sides='default')
Pxx1m = np.mean(Pxx1, axis=1)
np.testing.assert_array_equal(freqs1, self.freqss)
np.testing.assert_array_equal(t1, self.t)
# since we are using a single freq, all time slices should be
# about the same
np.testing.assert_allclose(np.diff(Pxx1, axis=1).max(), 0,
atol=1e-08)
for fstim in fstims:
i = np.abs(freqs1 - fstim).argmin()
self.assertTrue(Pxx1m[i] > Pxx1m[i+1])
self.assertTrue(Pxx1m[i] > Pxx1m[i-1])
Pxx2, freqs2, t2 = mlab.specgram(y, NFFT=self.NFFT,
Fs=self.Fs,
noverlap=self.noverlap,
pad_to=self.pad_to,
sides='onesided')
Pxx2m = np.mean(Pxx2, axis=1)
np.testing.assert_array_equal(freqs2, self.freqss)
np.testing.assert_array_equal(t2, self.t)
np.testing.assert_allclose(np.diff(Pxx2, axis=1).max(), 0,
atol=1e-08)
for fstim in fstims:
i = np.abs(freqs2 - fstim).argmin()
self.assertTrue(Pxx2m[i] > Pxx2m[i+1])
self.assertTrue(Pxx2m[i] > Pxx2m[i-1])
Pxx3, freqs3, t3 = mlab.specgram(y, NFFT=self.NFFT,
Fs=self.Fs,
noverlap=self.noverlap,
pad_to=self.pad_to,
sides='twosided')
Pxx3m = np.mean(Pxx3, axis=1)
np.testing.assert_array_equal(freqs3, self.freqsd)
np.testing.assert_array_equal(t3, self.t)
            np.testing.assert_allclose(np.diff(Pxx3, axis=1).max(), 0, atol=1e-08)
import numpy as np
from numpy.linalg import inv, svd
from skcv.multiview.two_views.fundamental_matrix import *
from ._triangulate_kanatani_cython import _triangulate_kanatani
def _triangulate_hartley(x1, x2, f_matrix, P1, P2):
"""
    Triangulates points according to
    Hartley, R. and Zisserman, A. (2003). "Multiple View Geometry in Computer Vision."
"""
n_points = x1.shape[1]
#3D points
x_3d = np.zeros((4, n_points))
for i in range(n_points):
t = np.eye(3)
tp = np.eye(3)
# define transformation
t[0, 2] = -x1[0, i]
t[1, 2] = -x1[1, i]
tp[0, 2] = -x2[0, i]
tp[1, 2] = -x2[1, i]
# translate matrix F
f = np.dot(inv(tp).T, np.dot(f_matrix, inv(t)))
# find normalized epipoles
e = right_epipole(f)
ep = left_epipole(f)
e /= (e[0] ** 2 + e[1] ** 2)
ep /= (ep[0] ** 2 + ep[1] ** 2)
r = np.array(((e[0], e[1], 0), (-e[1], e[0], 0), (0, 0, 1)))
rp = np.array(((ep[0], ep[1], 0), (-ep[1], ep[0], 0), (0, 0, 1)))
f = np.dot(rp, np.dot(f, r.T))
f1 = e[2]
f2 = ep[2]
a = f[1, 1]
b = f[1, 2]
c = f[2, 1]
d = f[2, 2]
# build a degree 6 polynomial
coeffs = np.zeros(7)
coeffs[0] = -(2 * a ** 2 * c * d * f1 ** 4 - 2 * a * b * c ** 2 * f1 ** 4)
coeffs[1] = -(-2 * a ** 4 - 4 * a * 2 * c ** 2 * f2 ** 2 + 2 * a ** 2 * d ** 2 * f1 ** 4 -
2 * b ** 2 * c ** 2 * f1 ** 4 - 2 * c ** 4 * f2 ** 4)
coeffs[2] = - (-8 * a ** 3 * b + 4 * a ** 2 * c * d * f1 ** 2 -
8 * a ** 2 * c * d * f2 ** 2 - 4 * a * b * c ** 2 * f1 ** 2 -
8 * a * b * c ** 2 * f2 ** 2 + 2 * a * b * d ** 2 * f1 ** 4 -
2 * b ** 2 * c * d * f1 ** 4 - 8 * c ** 3 * d * f2 ** 4)
coeffs[3] = - (-12 * a ** 2 * b ** 2 + 4 * a ** 2 * d ** 2 * f1 ** 2 -
4 * a ** 2 * d ** 2 * f2 ** 2 - 16 * a * b * c * d * f2 ** 2 -
4 * b ** 2 * c ** 2 * f1 ** 2 - 4 * b ** 2 * c ** 2 * f2 ** 2 -
12 * c ** 2 * d ** 2 * f2 ** 4)
coeffs[4] = - (2 * a ** 2 * c * d - 8 * a * b ** 3 - 2 * a * b * c ** 2 +
4 * a * b * d ** 2 * f1 ** 2 - 8 * a * b * d ** 2 * f2 ** 2 -
4 * b ** 2 * c * d * f1 ** 2 - 8 * b ** 2 * c * d * f2 ** 2 -
8 * c * d ** 3 * f2 ** 4)
coeffs[5] = - (2 * a ** 2 * d ** 2 - 2 * b ** 4 - 2 * b ** 2 * c ** 2 -
4 * b ** 2 * d ** 2 * f2 ** 2 - 2 * d ** 4 * f2 ** 4)
coeffs[6] = -2 * a * b * d ** 2 + 2 * b ** 2 * c * d
roots = np.roots(coeffs)
# evaluate the polinomial at the roots and +-inf
vals = np.hstack((roots, [1e20]))
min_s = 1e200
min_v = 0
# check all the polynomial roots
for k in range(len(vals)):
            x = np.real(vals[k])
#basics
from typing import List, Dict, Sequence, Tuple, Union
import numpy as np
from scipy import stats
#deepsig
from deepsig import aso
#segnlp
from .array import ensure_numpy
def statistical_significance_test(a:np.ndarray, b:np.ndarray, ss_test="aso"):
"""
    Tests whether there is a significant difference between two score distributions (no normality assumption needed).
    Two tests are supported. We prefer 1) (see https://www.aclweb.org/anthology/P19-1266.pdf):
1) Almost Stochastic Order
Null-hypothesis:
H0 : aso-value >= 0.5
i.e. ASO is not a p-value and instead the threshold is different. We want our score to be
below 0.5, the lower it is the more sure we can be that A is better than B.
    2) Mann-Whitney U
Null-hypothesis:
H0: P is not significantly different from 0.5
HA: P is significantly different from 0.5
p-value >= .05
        1) is preferred
"""
if ss_test == "aso":
v = aso(a, b)
return v <= 0.5, v
elif ss_test == "mwu":
        v = stats.mannwhitneyu(a, b, alternative='two-sided').pvalue
        return v <= 0.05, v
else:
raise RuntimeError(f"'{ss_test}' is not a supported statistical significance test. Choose between ['aso', 'mwu']")
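# Example usage (hypothetical scores collected over random seeds, not from the original source):
#   a = np.random.normal(0.81, 0.02, size=10)  # approach A
#   b = np.random.normal(0.78, 0.02, size=10)  # approach B
#   is_significant, value = statistical_significance_test(a, b, ss_test="aso")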
def compare_dists(a:Sequence, b:Sequence, ss_test="aso"):
"""
    This function compares two approaches -- let's call these A and B -- by comparing their score
    distributions over n seeds.
    First we need to figure out the probability that A will produce a higher-scoring model than B; let's call this P.
    If P is higher than 0.5 we can say that A is better than B, BUT only if P is significantly different from 0.5.
To figure out if P is significantly different from 0.5 we apply a significance test.
https://www.aclweb.org/anthology/P19-1266.pdf
https://export.arxiv.org/pdf/1803.09578
"""
a = np.sort(ensure_numpy(a))[::-1]
b = np.sort(ensure_numpy(b))[::-1]
    if np.array_equal(a, b):
import numpy as np
from sfsimodels.models.abstract_models import PhysicalObject
from sfsimodels.models.systems import TwoDSystem
from sfsimodels.functions import interp_left, interp2d, interp3d
from .fns import remove_close_items, build_ele2_node_array
import hashlib
def sort_slopes(sds):
"""Sort slopes from bottom to top then right to left"""
sds = np.array(sds)
scores = sds[:, 0, 1] + sds[:, 1, 1] * 1e6
inds = np.argsort(scores)
return sds[inds]
def adjust_slope_points_for_removals(sds, x, removed_y, retained_y):
for sd in sds:
for i in range(2):
if sd[0][i] == x and sd[1][i] == removed_y:
sd[1][i] = retained_y
def adj_slope_by_layers(xm, ym, sgn=1):
"""
Given mesh coordinates, adjust the mesh to be match the slope by adjust each layer
bottom left and top right coords of mesh are the slope
Parameters
----------
xm
ym
x_slope - NOT needed
y_slope
Returns
-------
"""
# TODO use centroid formula - and use o3plot to get ele-coords
ym = sgn * np.array(ym)
xm = sgn * np.array(xm)
if sgn == -1:
xm = xm[::-1]
ym = ym[::-1]
nh = len(ym[0]) - 1
# dy = min([(ym[0][-1] - ym[0][0]) / nh, (ym[-1][-1] - ym[-1][0]) / nh, 0.2])
dy1 = min([(ym[-1][-1] - ym[-1][0]) / nh])
dy0 = 0.2
y0s = ym[0][0] + np.arange(nh + 1) * dy0
y1s = ym[-1][-1] - np.arange(nh + 1) * dy1
y1s = y1s[::-1]
for i in range(nh + 1):
ym[:, i] = np.interp(xm[:, i], [xm[0][0], xm[-1][-1]], [y0s[i], y1s[i]])
xm[:, i] = xm[:, 0]
y_centres_at_xns = (ym[1:] + ym[:-1]) / 2
y_centres = (y_centres_at_xns[:, 1:] + y_centres_at_xns[:, :-1]) / 2
# get x-coordinates of centres of relevant elements
included_ele = []
dy_inds = len(ym[0, :]) - 1
for i in range(0, dy_inds):
# account for shift before assessing position of centroid
xcens = (xm[1:, i] + xm[:-1, i]) / 2 + 0.375 * (xm[1:, -1] - xm[:-1, -1])
y_surf_at_x_cens = np.interp(xcens, [xm[0][0], xm[-1][-1]], [ym[0][0], ym[-1][-1]])
inds = np.where(y_centres[:, i] < y_surf_at_x_cens)
if len(inds[0]):
included_ele.append(inds[0][0])
else:
included_ele.append(len(y_surf_at_x_cens))
included_ele.append(len(y_surf_at_x_cens))
new_xm = xm
new_ym = ym
for j in range(1, nh + 1):
new_ym[included_ele[0], j] += dy1
for i in range(1, dy_inds + 1):
x_ind_adj = included_ele[i - 1]
x_ind_adj_next = included_ele[i]
if x_ind_adj == x_ind_adj_next:
continue
# shift by half of the ele
dx = (xm[x_ind_adj + 1, i] - xm[x_ind_adj, i]) * 0.5
dxs = np.interp(xm[x_ind_adj:x_ind_adj_next, i], [xm[x_ind_adj, i], xm[x_ind_adj_next, i]], [dx, 0])
new_xm[x_ind_adj:x_ind_adj_next, i] = xm[x_ind_adj:x_ind_adj_next, i] + dxs
for j in range(i + 1, nh + 1):
new_ym[x_ind_adj_next, j] += dy1
if sgn == -1:
new_xm = new_xm[::-1]
new_ym = new_ym[::-1]
return new_xm * sgn, new_ym * sgn
def calc_centroid(xs, ys):
import numpy as np
x0 = np.array(xs)
y0 = np.array(ys)
x1 = np.roll(xs, 1, axis=-1)
y1 = np.roll(ys, 1, axis=-1)
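    # Shoelace formula: the cross terms x0*y1 - x1*y0 sum to twice the signed polygon area;
    # the centroid components follow from the standard polygon-centroid identities (hence /(6*area)).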
a = x0 * y1 - x1 * y0
xc = np.sum((x0 + x1) * a, axis=-1)
yc = np.sum((y0 + y1) * a, axis=-1)
area = 0.5 * np.sum(a, axis=-1)
xc /= (6.0 * area)
yc /= (6.0 * area)
return xc, yc
def calc_mesh_centroids(fem):
x_inds = []
y_inds = []
if hasattr(fem.y_nodes[0], '__len__'): # can either have varying y-coordinates or single set
n_y = len(fem.y_nodes[0])
else:
n_y = 0
import numpy as np
for xx in range(len(fem.soil_grid)):
x_ele = [xx, xx + 1, xx + 1, xx]
x_inds += [x_ele for i in range(n_y - 1)]
for yy in range(len(fem.soil_grid[xx])):
y_ele = [yy, yy, yy + 1, yy + 1]
y_inds.append(y_ele)
n_eles = len(np.array(x_inds))
x_inds = np.array(x_inds).flatten()
y_inds = np.array(y_inds).flatten()
x0 = np.array(fem.x_nodes[x_inds, y_inds])
y0 = np.array(fem.y_nodes[x_inds, y_inds])
x0 = x0.reshape((n_eles, 4))
y0 = y0.reshape((n_eles, 4))
x1 = np.roll(x0, 1, axis=-1)
y1 = np.roll(y0, 1, axis=-1)
a = x0 * y1 - x1 * y0
xc = np.sum((x0 + x1) * a, axis=-1)
yc = np.sum((y0 + y1) * a, axis=-1)
area = 0.5 * np.sum(a, axis=-1)
xc /= (6.0 * area)
yc /= (6.0 * area)
return xc.reshape(len(fem.soil_grid), len(fem.soil_grid[0])), yc.reshape(len(fem.soil_grid), len(fem.soil_grid[0]))
class FiniteElementVary2DMeshConstructor(object): # maybe FiniteElementVertLine2DMesh
_soils = None
x_index_to_sp_index = None
_inactive_value = 1000000
def __init__(self, tds, dy_target, x_scale_pos=None, x_scale_vals=None, dp: int = None, fd_eles=0, auto_run=True,
use_3d_interp=False, smooth_surf=False, force_x2d=False, min_scale=0.5, max_scale=2.0,
allowable_slope=0.25, smooth_ratio=1.):
"""
Builds a finite element mesh of a two-dimension system
Parameters
----------
tds: TwoDSystem
A two dimensional system of models
dy_target: float
Target height of elements
x_scale_pos: array_like
x-positions used to provide scale factors for element widths
x_scale_vals: array_like
scale factors for element widths
dp: int
Number of decimal places
fd_eles: int
if =0 then elements corresponding to the foundation are removed, else provide element id
smooth_surf: bool
if true then changes in angle of the slope must be less than 90 degrees, builds VaryXY mesh
"""
self.min_scale = min_scale
self.max_scale = max_scale
self.allowable_slope = allowable_slope
self.smooth_ratio = smooth_ratio
assert isinstance(tds, TwoDSystem)
self.tds = tds
self.dy_target = dy_target
if x_scale_pos is None:
x_scale_pos = [0, tds.width]
if x_scale_vals is None:
x_scale_vals = [1., 1.]
self.x_scale_pos = np.array(x_scale_pos)
self.x_scale_vals = np.array(x_scale_vals)
self.dp = dp
self.xs = list(self.tds.x_sps)
self.smooth_surf = smooth_surf
self.xs.append(tds.width)
self.xs = np.array(self.xs)
inds = np.where(np.array(tds.x_surf) <= tds.width)
self.x_surf = np.array(tds.x_surf)[inds]
if tds.width not in self.x_surf:
self.x_surf = np.insert(self.x_surf, len(self.x_surf), tds.width)
self.y_surf = np.interp(self.x_surf, tds.x_surf, tds.y_surf)
self.y_surf_at_sps = np.interp(self.xs, tds.x_surf, tds.y_surf)
self._soils = []
self._soil_hashes = []
for i in range(len(self.tds.sps)):
for yy in range(1, self.tds.sps[i].n_layers + 1):
sl = self.tds.sps[i].layer(yy)
if sl.unique_hash not in self._soil_hashes:
self._soil_hashes.append(sl.unique_hash)
self._soils.append(sl)
self.y_surf_at_xcs = None
self.yd = None
self.xcs_sorted = None
self.sds = None
self.y_blocks = None
self.y_coords_at_xcs = None
self.x_nodes = None
self.y_nodes = None
self.x_nodes2d = None
self._femesh = None
if auto_run:
self.get_special_coords_and_slopes() # Step 1
self.set_init_y_blocks()
self.adjust_blocks_to_be_consistent_with_slopes()
self.trim_grid_to_target_dh()
self.build_req_y_node_positions()
self.set_x_nodes()
if use_3d_interp:
self.build_y_coords_grid_via_3d_interp()
else:
self.build_y_coords_grid_via_propagation()
if self.dp is not None:
self.set_to_decimal_places()
if smooth_surf:
self.adjust_for_smooth_surface()
self.set_soil_ids_to_vary_xy_grid()
elif force_x2d:
self.x_nodes2d = self.x_nodes[:, np.newaxis] * np.ones_like(self.y_nodes)
self.set_soil_ids_to_vary_xy_grid()
else:
self.set_soil_ids_to_vary_y_grid()
self.create_mesh()
if smooth_surf:
self.femesh.tidy_unused_mesh()
if not fd_eles:
self.exclude_fd_eles()
def get_special_coords_and_slopes(self):
"""Find the coordinates, layer boundaries and surface slopes that should be maintained in the FE mesh"""
fd_coords = []
x_off = 0.0
yd = {}
for i in range(len(self.x_surf)):
yd[self.x_surf[i]] = []
if self.tds.width not in yd:
yd[self.tds.width] = []
sds = [] # slope dict (stored left-to-right and bottom-to-top)
for i in range(len(self.tds.bds)):
x_bd = self.tds.x_bds[i]
bd = self.tds.bds[i]
fd_centre_x = x_bd + bd.x_fd
y_surf = np.interp(fd_centre_x, self.x_surf, self.y_surf)
import argparse
import os
import shutil
import multiprocessing
import numpy as np
from PIL import Image
from joblib import Parallel, delayed
from sklearn.metrics import average_precision_score
import torch
def restricted_float(x, inter):
x = float(x)
if x < inter[0] or x > inter[1]:
raise argparse.ArgumentTypeError("%r not in range [%r, %r]" % (x, inter[0], inter[1]))
return x
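# A hedged usage sketch (flag name and interval are illustrative): restricted_float is
# intended to be used as an argparse type, e.g.
#   parser.add_argument('--lr', type=lambda x: restricted_float(x, [1e-5, 1e-4]))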
def create_dict_texts(texts):
texts = sorted(list(set(texts)))
d = {l: i for i, l in enumerate(texts)}
return d
def numeric_classes(tags_classes, dict_tags):
num_classes = np.array([dict_tags.get(t) for t in tags_classes])
return num_classes
def prec(actual, predicted, k):
act_set = set(actual)
pred_set = set(predicted[:k])
if k is not None:
pr = len(act_set & pred_set) / min(k, len(pred_set))
else:
pr = len(act_set & pred_set) / max(len(pred_set), 1)
return pr
def rec(actual, predicted, k):
act_set = set(actual)
pred_set = set(predicted[:k])
re = len(act_set & pred_set) / max(len(act_set), 1)
return re
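def _prec_rec_example():
    # Hedged sketch with toy retrieval data (labels are illustrative, not from any dataset):
    actual = ['cat', 'dog']
    ranked = ['dog', 'bird', 'cat', 'fish']
    # one of the two relevant items appears in the top-2, so precision@2 = recall@2 = 0.5
    return prec(actual, ranked, 2), rec(actual, ranked, 2)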
def precak(sim, str_sim, k=None):
act_lists = [np.nonzero(s)[0] for s in str_sim]
pred_lists = np.argsort(-sim, axis=1)
import torch
from torch import nn
import numpy as np
from torch.utils.data.sampler import SubsetRandomSampler
from torch.utils.data import DataLoader
from torch.nn import MSELoss
from models.AED.simpleAED import Encoder, Decoder, RecurrentAutoencoder
from models.AED.simpleAED import EncoderFlex, DecoderFlex, RecurrentAEDFlex
# from models.AED.simpleAED import EncoderFlex
import copy
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
from torch import nn, optim
import pandas as pd
import torch.nn.functional as F
#%%
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
#load the data
per_unit = np.load('data/whole_data_ss.pkl', allow_pickle=True)
labels = np.load('data/new_aug_labels_806_824_836_846.npy')
bus_data = pd.read_excel('data/ss.xlsx')
network = pd.read_excel('data/edges.xlsx')
# normalize data
new_data = []
for f in per_unit:
if f == 806:
concat_data = per_unit[f]
elif f in [824, 836, 846]:
concat_data = np.concatenate((concat_data, per_unit[f]))
import numpy as np
import matplotlib.pyplot as plt
from PIL import Image
from PIL import ImageDraw
from PIL import ImageFont
import scipy
import scipy.stats
import cv2
import os
import emnist_helpers
def save_metadata(metadata, contour_path, batch_id):
# Converts metadata (list of lists) into an nparray, and then saves
metadata_path = os.path.join(contour_path, 'metadata')
if not os.path.exists(metadata_path):
os.makedirs(metadata_path)
metadata_fn = str(batch_id) + '.npy'
np.save(os.path.join(metadata_path,metadata_fn), metadata)
# Accumulate metadata
def accumulate_meta(array, im_subpath, seg_sub_path, im_filename, nimg,
image_category, letter_img_indices):
# NEW VERSION
array += [[im_subpath, seg_sub_path, im_filename, nimg, image_category] + letter_img_indices]
return array
# Accumulate metadata
def accumulate_meta_segment(array, im_subpath, seg_sub_path, im_filename, nimg,
letter_img_indices):
# NEW VERSION
array += [[im_subpath, seg_sub_path, im_filename, nimg] + letter_img_indices]
return array
def crop_center(img,cropx,cropy):
y,x = img.shape
startx = x//2-(cropx//2)
starty = y//2-(cropy//2)
return img[starty:starty+cropy,startx:startx+cropx]
def translate_coord(coord, orientation, dist, allow_float=False):
y_displacement = float(dist)*np.sin(orientation)
x_displacement = float(dist)*np.cos(orientation)
if allow_float is True:
new_coord = [coord[0]+y_displacement, coord[1]+x_displacement]
else:
new_coord = [int(np.ceil(coord[0] + y_displacement)), int(np.ceil(coord[1] + x_displacement))]
return new_coord
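# For example (illustrative values): translate_coord([0, 0], orientation=0.0, dist=5)
# moves 5 px along the x-axis and returns [0, 5]; with allow_float=True the displacement
# is kept as floats instead of being rounded up with np.ceil.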
def get_availability_notouch(im, com_on_im, radius, canvas_size,
existing_canvas=None, existing_deg=None,
min_separation_deg=45, min_separation_px=15):
if min_separation_deg > 180:
raise ValueError('min_separation_deg should be at most 180')
# Filter available positions to prevent overlap
if existing_canvas is not None:
print('placing second letter')
im_inverted = im[::-1,::-1]
com_on_im_inverted = [im.shape[0]-com_on_im[0]-1, im.shape[1]-com_on_im[1]-1]
h_offset = com_on_im[0]-com_on_im_inverted[0]
w_offset = com_on_im[1]-com_on_im_inverted[1]
pad_thickness = ((np.maximum(h_offset, 0) + min_separation_px, np.maximum(-h_offset, 0)
import numpy as np
a1 = np.ones((2, 3), int)
print(a1)
# [[1 1 1]
# [1 1 1]]
a2 = np.full((2, 3), 2)
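# (not in the original snippet) the analogous print/output pair for a2:
print(a2)
# [[2 2 2]
#  [2 2 2]]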
# -*- coding: utf-8 -*-
import numpy as np
import ot as pot
import scipy.stats
def transport_stable_learnGrowth(C, lambda1, lambda2, epsilon, scaling_iter, g, numInnerItermax=None, tau=None,
epsilon0=None, extra_iter=1000, growth_iters=3):
"""
Compute the optimal transport with stabilized numerics.
Args:
C: cost matrix to transport cell i to cell j
lambda1: regularization parameter for marginal constraint for p.
lambda2: regularization parameter for marginal constraint for q.
epsilon: entropy parameter
scaling_iter: number of scaling iterations
g: growth value for input cells
"""
for i in range(growth_iters):
if i == 0:
rowSums = g
else:
rowSums = Tmap.sum(axis=1) / Tmap.shape[1]
Tmap = transport_stablev2(C, lambda1, lambda2, epsilon,
scaling_iter, rowSums, numInnerItermax=numInnerItermax, tau=tau,
epsilon0=epsilon0)
return Tmap
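# A hedged usage sketch (sizes and parameter values are illustrative, not taken from any
# particular pipeline): learn growth-adjusted transport between 5 source and 4 target cells.
def _transport_example():
    rng = np.random.default_rng(0)
    C = rng.random((5, 4))      # cost of transporting source cell i to target cell j
    g = np.ones(C.shape[0])     # initial growth estimate per source cell
    return transport_stable_learnGrowth(C, lambda1=1.0, lambda2=50.0, epsilon=0.05,
                                        scaling_iter=250, g=g, tau=1e4, epsilon0=1.0)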
def transport_stablev2(C, lambda1, lambda2, epsilon, scaling_iter, g, numInnerItermax=None, tau=None,
epsilon0=None, extra_iter=1000):
"""
Compute the optimal transport with stabilized numerics.
Args:
C: cost matrix to transport cell i to cell j
lambda1: regularization parameter for marginal constraint for p.
lambda2: regularization parameter for marginal constraint for q.
epsilon: entropy parameter
scaling_iter: number of scaling iterations
g: growth value for input cells
"""
warm_start = tau is not None
epsilon_final = epsilon
def get_reg(n): # exponential decreasing
return (epsilon0 - epsilon_final) * np.exp(-n) + epsilon_final
epsilon_i = epsilon0 if warm_start else epsilon
dx = np.ones(C.shape[0]) / C.shape[0]
dy = np.ones(C.shape[1]) / C.shape[1]
p = g
q = np.ones(C.shape[1]) * np.average(g)
u = np.zeros(len(p))
v = np.zeros(len(q))
b = np.ones(len(q))
K = np.exp(-C / epsilon_i)
alpha1 = lambda1 / (lambda1 + epsilon_i)
alpha2 = lambda2 / (lambda2 + epsilon_i)
epsilon_index = 0
iterations_since_epsilon_adjusted = 0
for i in range(scaling_iter):
# scaling iteration
a = (p / (K.dot(np.multiply(b, dy)))) ** alpha1 * np.exp(-u / (lambda1 + epsilon_i))
b = (q / (K.T.dot(np.multiply(a, dx)))) ** alpha2 * np.exp(-v / (lambda2 + epsilon_i))
# stabilization
iterations_since_epsilon_adjusted += 1
if (max(max(abs(a)), max(abs(b))) > tau):
u = u + epsilon_i * np.log(a)
#================================================================================
# <NAME> [marion dot neumann at uni-bonn dot de]
# <NAME> [marthaler at ge dot com]
# <NAME> [shan dot huang at iais dot fraunhofer dot de]
# <NAME> [kristian dot kersting at cs dot tu-dortmund dot de]
#
# This file is part of pyGP_PR.
# The software package is released under the BSD 2-Clause (FreeBSD) License.
#
# Copyright (c) by
# <NAME>, <NAME>, <NAME> & <NAME>, 20/05/2013
#================================================================================
# likelihood functions are provided to be used by the gp.py function:
#
# likErf (Error function, classification, probit regression)
# likLogistic [NOT IMPLEMENTED!] (Logistic, classification, logit regression)
# likUni [NOT IMPLEMENTED!] (Uniform likelihood, classification)
#
# likGauss (Gaussian, regression)
# likLaplace [NOT IMPLEMENTED!] (Laplacian or double exponential, regression)
# likSech2 [NOT IMPLEMENTED!] (Sech-square, regression)
# likT [NOT IMPLEMENTED!] (Student's t, regression)
#
# likPoisson [NOT IMPLEMENTED!] (Poisson regression, count data)
#
# likMix [NOT IMPLEMENTED!] (Mixture of individual covariance functions)
#
# The likelihood functions have three possible modes, the mode being selected
# as follows (where "lik" stands for any likelihood function defined as lik*):
#
# 1) With one or no input arguments: [REPORT NUMBER OF HYPERPARAMETERS]
#
# s = lik OR s = lik(hyp)
#
# The likelihood function returns a string telling how many hyperparameters it
# expects, using the convention that "D" is the dimension of the input space.
# For example, calling "likLogistic" returns the string '0'.
#
#
# 2) With three or four input arguments: [PREDICTION MODE]
#
# lp = lik(hyp, y, mu) OR [lp, ymu, ys2] = lik(hyp, y, mu, s2)
#
# This allows to evaluate the predictive distribution. Let p(y_*|f_*) be the
# likelihood of a test point and N(f_*|mu,s2) an approximation to the posterior
# marginal p(f_*|x_*,x,y) as returned by an inference method. The predictive
# distribution p(y_*|x_*,x,y) is approximated by.
# q(y_*) = \int N(f_*|mu,s2) p(y_*|f_*) df_*
#
# lp = log( q(y) ) for a particular value of y, if s2 is [] or 0, this
# corresponds to log( p(y|mu) )
# ymu and ys2 the mean and variance of the predictive marginal q(y)
# note that these two numbers do not depend on a particular
# value of y
# All vectors have the same size.
#
#
# 3) With five or six input arguments, the fifth being a string [INFERENCE MODE]
#
# [varargout] = lik(hyp, y, mu, s2, inf) OR
# [varargout] = lik(hyp, y, mu, s2, inf, i)
#
# There are three cases for inf, namely a) infLaplace, b) infEP and c) infVB.
# The last input i, refers to derivatives w.r.t. the ith hyperparameter.
#
# a1) [lp,dlp,d2lp,d3lp] = lik(hyp, y, f, [], 'infLaplace')
# lp, dlp, d2lp and d3lp correspond to derivatives of the log likelihood
# log(p(y|f)) w.r.t. to the latent location f.
# lp = log( p(y|f) )
# dlp = d log( p(y|f) ) / df
# d2lp = d^2 log( p(y|f) ) / df^2
# d3lp = d^3 log( p(y|f) ) / df^3
#
# a2) [lp_dhyp,dlp_dhyp,d2lp_dhyp] = lik(hyp, y, f, [], 'infLaplace', i)
# returns derivatives w.r.t. to the ith hyperparameter
# lp_dhyp = d log( p(y|f) ) / ( dhyp_i)
# dlp_dhyp = d^2 log( p(y|f) ) / (df dhyp_i)
# d2lp_dhyp = d^3 log( p(y|f) ) / (df^2 dhyp_i)
#
#
# b1) [lZ,dlZ,d2lZ] = lik(hyp, y, mu, s2, 'infEP')
# let Z = \int p(y|f) N(f|mu,s2) df then
# lZ = log(Z)
# dlZ = d log(Z) / dmu
# d2lZ = d^2 log(Z) / dmu^2
#
# b2) [dlZhyp] = lik(hyp, y, mu, s2, 'infEP', i)
# returns derivatives w.r.t. to the ith hyperparameter
# dlZhyp = d log(Z) / dhyp_i
#
#
# c1) [h,b,dh,db,d2h,d2b] = lik(hyp, y, [], ga, 'infVB')
# ga is the variance of a Gaussian lower bound to the likelihood p(y|f).
# p(y|f) \ge exp( b*f - f.^2/(2*ga) - h(ga)/2 ) \propto N(f|b*ga,ga)
# The function returns the linear part b and the "scaling function" h(ga) and derivatives
# dh = d h/dga
# db = d b/dga
# d2h = d^2 h/dga
# d2b = d^2 b/dga
#
# c2) [dhhyp] = lik(hyp, y, [], ga, 'infVB', i)
# dhhyp = dh / dhyp_i, the derivative w.r.t. the ith hyperparameter
#
# Cumulative likelihoods are designed for binary classification. Therefore, they
# only look at the sign of the targets y; zero values are treated as +1.
#
# See the documentation for the individual likelihood for the computations specific
# to each likelihood function.
#
# @author: <NAME> (Fall 2012)
# Substantial updates by Shan Huang (Sep. 2013)
#
# This is a python implementation of gpml functionality (Copyright (c) by
# <NAME> and <NAME>, 2013-01-21).
#
# Copyright (c) by <NAME> and <NAME>, 20/05/2013
import numpy as np
from scipy.special import erf
import src.Tools.general
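# A sketch of the calling modes described in the header comment, using likGauss on toy
# values (the numbers are illustrative; likErf is called analogously with +/-1 targets).
def _lik_usage_sketch():
    hyp = np.log(0.1)                                    # hyp = log(sn)
    y = np.array([0.3, -0.2])
    mu = np.array([0.25, -0.1])
    s2 = np.array([0.0, 0.0])
    n_hyp = likGauss()                                   # mode 1: number of hyperparameters
    lp, ymu, ys2 = likGauss(hyp, y, mu, s2, nargout=3)   # mode 2: prediction mode
    return n_hyp, lp, ymu, ys2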
def likErf(hyp=None, y=None, mu=None, s2=None, inffunc=None, der=None, nargout=None):
''' likErf - Error function or cumulative Gaussian likelihood function for binary
classification or probit regression. The expression for the likelihood is
likErf(t) = (1+erf(t/sqrt(2)))/2 = normcdf(t).
Several modes are provided, for computing likelihoods, derivatives and moments
respectively, see lik.py for the details. In general, care is taken
to avoid numerical issues when the arguments are extreme.
'''
if mu is None:
return [0] # report number of hyperparameters
if y is not None:
y = np.sign(y)
y[y==0] = 1
else:
y = 1; # allow only +/- 1 values
if inffunc == None: # prediction mode if inf is not present
y = y*np.ones_like(mu) # make y a vector
s2zero = True;
if s2 is not None:
if np.linalg.norm(s2)>0:
s2zero = False # s2==0 ?
if s2zero: # log probability evaluation
[p,lp] = _cumGauss(y,mu,2)
else: # prediction
lp = src.Tools.general.feval(['likelihoods.likErf'],hyp, y, mu, s2, 'inferences.infEP',None,1)
p = np.exp(lp)
if nargout>1:
ymu = 2*p-1 # first y moment
if nargout>2:
ys2 = 4*p*(1-p) # second y moment
varargout = [lp,ymu,ys2]
else:
varargout = [lp,ymu]
else:
varargout = lp
else: # inference mode
if inffunc == 'inferences.infLaplace':
if der == None: # no derivative mode
f = mu; yf = y*f # product latents and labels
[p,lp] = _cumGauss(y,f,2)
if nargout>1: # derivative of log likelihood
n_p = _gauOverCumGauss(yf,p)
dlp = y*n_p # derivative of log likelihood
if nargout>2: # 2nd derivative of log likelihood
d2lp = -n_p**2 - yf*n_p
if nargout>3: # 3rd derivative of log likelihood
d3lp = 2*y*n_p**3 + 3*f*n_p**2 + y*(f**2-1)*n_p
varargout = [lp,dlp,d2lp,d3lp]
else:
varargout = [lp,dlp,d2lp]
else:
varargout = [lp,dlp]
else:
varargout = lp
else: # derivative mode
varargout = nargout*[] # derivative w.r.t. hypers
if inffunc == 'inferences.infEP':
if der == None: # no derivative mode
z = mu/np.sqrt(1+s2)
[junk,lZ] = _cumGauss(y,z,2) # log part function
if y is not None:
z = z*y
if nargout>1:
if y is None:
y = 1
n_p = _gauOverCumGauss(z,np.exp(lZ))
dlZ = y*n_p/np.sqrt(1.+s2) # 1st derivative wrt mean
if nargout>2:
d2lZ = -n_p*(z+n_p)/(1.+s2) # 2nd derivative wrt mean
varargout = [lZ,dlZ,d2lZ]
else:
varargout = [lZ,dlZ]
else:
varargout = lZ
else: # derivative mode
varargout = 0 # deriv. wrt hyp.lik
#end
if inffunc == 'inferences.infVB':
if der == None: # no derivative mode
# naive variational lower bound based on asymptotical properties of lik
# normcdf(t) -> -(t*A_hat^2-2dt+c)/2 for t->-np.inf (tight lower bound)
d = 0.158482605320942;
c = -1.785873318175113;
ga = s2; n = len(ga); b = d*y*np.ones((n,1)); db = np.zeros((n,1)); d2b = db
h = -2.*c*np.ones((n,1)); h[ga>1] = np.inf; dh = np.zeros((n,1)); d2h = dh
varargout = [h,b,dh,db,d2h,d2b]
else: # derivative mode
varargout = [] # deriv. wrt hyp.lik
return varargout
def _cumGauss(y=None,f=None,nargout=1):
''' Cumulative Gaussian (probit) likelihood: p = phi(y*f) = (1 + erf(y*f/sqrt(2)))/2.
If nargout > 1, the numerically safe log of p (computed via _logphi) is returned as well.
'''
if y is not None:
yf = y*f
else:
yf = f
# product of latents and labels
p = (1. + erf(yf/np.sqrt(2.)))/2. # likelihood
if nargout>1:
lp = _logphi(yf,p)
return p,lp
else:
return p
def _logphi(z,p):
lp = np.zeros_like(z) # initialize
zmin = -6.2; zmax = -5.5;
ok = z>zmax # safe evaluation for large values
bd = z<zmin # use asymptotics
nok = np.logical_not(ok)
ip = np.logical_and(nok,np.logical_not(bd)) # interpolate between both of them
lam = 1/(1.+np.exp( 25.*(0.5-(z[ip]-zmin)/(zmax-zmin)) )) # interp. weights
lp[ok] = np.log(p[ok])
# use lower and upper bound according to Abramowitz&Stegun 7.1.13 for z<0
# lower -log(pi)/2 -z.^2/2 -log( sqrt(z.^2/2+2 ) -z/sqrt(2) )
# upper -log(pi)/2 -z.^2/2 -log( sqrt(z.^2/2+4/pi) -z/sqrt(2) )
# the lower bound captures the asymptotics
lp[nok] = -np.log(np.pi)/2. -z[nok]**2/2. - np.log( np.sqrt(z[nok]**2/2.+2.) - z[nok]/np.sqrt(2.) )
lp[ip] = (1-lam)*lp[ip] + lam*np.log( p[ip] )
return lp
def _gauOverCumGauss(f,p):
n_p = np.zeros_like(f) # safely compute Gaussian over cumulative Gaussian
ok = f>-5 # naive evaluation for large values of f
n_p[ok] = (np.exp(-f[ok]**2/2)/np.sqrt(2*np.pi)) / p[ok]
bd = f<-6 # tight upper bound evaluation
n_p[bd] = np.sqrt(f[bd]**2/4+1)-f[bd]/2
interp = np.logical_and(np.logical_not(ok),np.logical_not(bd)) # linearly interpolate between both of them
tmp = f[interp]
lam = -5. - f[interp]
n_p[interp] = (1-lam)*(np.exp(-tmp**2/2)/np.sqrt(2*np.pi))/p[interp] + \
lam *(np.sqrt(tmp**2/4+1)-tmp/2);
return n_p
def likGauss(hyp=None, y=None, mu=None, s2=None, inffunc=None, der=None, nargout=1):
''' likGauss - Gaussian likelihood function for regression. The expression for the likelihood is
likGauss(t) = exp(-(t-y)^2/2*sn^2) / sqrt(2*pi*sn^2),
where y is the mean and sn is the standard deviation.
The hyperparameters are:
hyp = [ log(sn) ]
Several modes are provided, for computing likelihoods, derivatives and moments
respectively, see lik.py for the details. In general, care is taken
to avoid numerical issues when the arguments are extreme.
'''
if mu is None:
return [1] # report number of hyperparameters
sn2 = np.exp(2.*hyp)
if inffunc is None: # prediction mode if inffunc is not present
if y is None:
y = np.zeros_like(mu)
s2zero = True
if s2 is not None:
if np.linalg.norm(s2) > 0:
s2zero = False
if s2zero: # s2==0 ?
lp = -(y-mu)**2 /sn2/2 - np.log(2.*np.pi*sn2)/2. # log probability
s2 = 0.
else:
lp = src.Tools.general.feval(['likelihoods.likGauss'],hyp, y, mu, s2, 'inferences.infEP',None,1) # prediction
if nargout>1:
ymu = mu; # first y moment
if nargout>2:
ys2 = s2 + sn2; # second y moment
varargout = [lp,ymu,ys2]
else:
varargout = [lp,ymu]
else:
varargout = lp
else:
if inffunc == 'inferences.infLaplace':
if der == None: # no derivative mode
if y is None: y = 0
ymmu = y-mu
lp = -ymmu**2/(2*sn2) - np.log(2*np.pi*sn2)/2.
if nargout>1:
dlp = ymmu/sn2 # dlp, derivative of log likelihood
if nargout>2: # d2lp, 2nd derivative of log likelihood
d2lp = -np.ones_like(ymmu)/sn2
import numpy as np
import pytest
import tensorflow as tf
import tensorflow.keras.losses
from PrognosAIs.Model.Losses import DICE_loss
from PrognosAIs.Model.Losses import CoxLoss
from PrognosAIs.Model.Losses import MaskedCategoricalCrossentropy
# ===============================================================
# Masked Categorical Crossentropy
# ===============================================================
def test_maskedcategoricalcrossentropy_no_masks():
y_true = [[1, 0], [0, 1], [1, 0], [0, 1], [0, 1], [1, 0], [1, 0], [1, 0], [1, 0]]
y_pred = [
[0.8, 0.2],
[0.3, 0.7],
[0.1, 0.9],
[0.8, 0.2],
[0.3, 0.7],
[0.1, 0.9],
[0.5, 0.5],
[0.5, 0.5],
[0.5, 0.5],
]
true_loss = tensorflow.keras.losses.CategoricalCrossentropy().call(y_true, y_pred)
loss_function = MaskedCategoricalCrossentropy()
result = loss_function.call(y_true, y_pred)
assert isinstance(loss_function, MaskedCategoricalCrossentropy)
assert tf.rank(result) == 1
assert result.numpy() == pytest.approx(true_loss.numpy())
def test_maskedcategoricalcrossentropy_no_masks_total_loss():
y_true = [[1, 0], [0, 1], [1, 0], [0, 1], [0, 1], [1, 0], [1, 0], [1, 0], [1, 0]]
y_pred = [
[0.8, 0.2],
[0.3, 0.7],
[0.1, 0.9],
[0.8, 0.2],
[0.3, 0.7],
[0.1, 0.9],
[0.5, 0.5],
[0.5, 0.5],
[0.5, 0.5],
]
y_true = tf.convert_to_tensor(y_true)
y_pred = tf.convert_to_tensor(y_pred)
true_loss = tensorflow.keras.losses.CategoricalCrossentropy().__call__(y_true, y_pred)
loss_function = MaskedCategoricalCrossentropy()
result = loss_function.__call__(y_true, y_pred)
assert isinstance(loss_function, MaskedCategoricalCrossentropy)
assert tf.rank(result) == 0
assert result.numpy() == pytest.approx(true_loss.numpy())
def test_maskedcategoricalcrossentropy_no_masks_multi_dim():
y_true = [[[1, 0], [0, 1], [1, 0]], [[0, 1], [0, 1], [1, 0]], [[1, 0], [1, 0], [1, 0]]]
y_pred = [
[[0.8, 0.2], [0.3, 0.7], [0.1, 0.9]],
[[0.8, 0.2], [0.3, 0.7], [0.1, 0.9]],
[[0.5, 0.5], [0.5, 0.5], [0.5, 0.5]],
]
true_loss = tensorflow.keras.losses.CategoricalCrossentropy().call(y_true, y_pred)
loss_function = MaskedCategoricalCrossentropy()
result = loss_function.call(y_true, y_pred)
assert isinstance(loss_function, MaskedCategoricalCrossentropy)
assert tf.rank(result) == 2
assert result.numpy() == pytest.approx(true_loss.numpy())
def test_maskedcategoricalcrossentropy():
y_true = [[1, 0], [0, 1], [1, 0], [0, 0], [0, 1], [1, 0], [-1, -1], [-1, -1], [-1, -1]]
y_pred = [
[0.8, 0.2],
[0.3, 0.7],
[0.1, 0.9],
[0.8, 0.2],
[0.3, 0.7],
[0.1, 0.9],
[0.5, 0.5],
[0.5, 0.5],
[0.5, 0.5],
]
is_masked = [False, False, False, True, False, False, True, True, True]
y_true = np.asarray(y_true)
y_pred = np.asarray(y_pred)
is_masked = np.asarray(is_masked)
y_true_masked = y_true[np.logical_not(is_masked)]
y_pred_masked = y_pred[np.logical_not(is_masked)]
true_loss = tensorflow.keras.losses.CategoricalCrossentropy().call(y_true, y_pred)
true_masked_loss = tensorflow.keras.losses.CategoricalCrossentropy().call(
y_true_masked, y_pred_masked,
)
loss_function = MaskedCategoricalCrossentropy(mask_value=-1)
result = loss_function.call(y_true, y_pred)
assert tf.is_tensor(result)
assert tf.rank(result) == 1
assert tf.shape(result) == tf.shape(true_loss)
assert np.all(result.numpy() >= 0)
assert result.shape[0] == len(y_true)
assert np.all(result.numpy()[is_masked] == 0)
assert result.numpy()[np.logical_not(is_masked)] == pytest.approx(true_masked_loss.numpy())
def test_maskedcategoricalcrossentropy_total_loss():
y_true = [[1, 0], [0, 1], [1, 0], [-1, -1], [0, 1], [1, 0], [-1, -1], [-1, -1], [-1, -1]]
y_pred = [
[0.8, 0.2],
[0.3, 0.7],
[0.1, 0.9],
[0.8, 0.2],
[0.3, 0.7],
[0.1, 0.9],
[0.5, 0.5],
[0.5, 0.5],
[0.5, 0.5],
]
is_masked = [False, False, False, True, False, False, True, True, True]
y_true = np.asarray(y_true)
y_pred = np.asarray(y_pred)
is_masked = np.asarray(is_masked)
y_true_masked = y_true[np.logical_not(is_masked)]
y_pred_masked = y_pred[np.logical_not(is_masked)]
true_loss = tensorflow.keras.losses.CategoricalCrossentropy().__call__(
y_true_masked, y_pred_masked,
)
loss_function = MaskedCategoricalCrossentropy()
result = loss_function.__call__(y_true, y_pred)
assert tf.is_tensor(result)
assert tf.rank(result) == 0
assert np.all(result.numpy() >= 0)
assert result.numpy() == pytest.approx(true_loss.numpy())
def test_maskedcategoricalcrossentropy_class_weights():
y_true = [[1, 0], [0, 1], [1, 0], [-1, -1], [0, 1], [1, 0], [-1, -1], [-1, -1], [-1, -1]]
y_pred = [
[0.8, 0.2],
[0.3, 0.7],
[0.1, 0.9],
[0.8, 0.2],
[0.3, 0.7],
[0.1, 0.9],
[0.5, 0.5],
[0.5, 0.5],
[0.5, 0.5],
]
is_masked = [False, False, False, True, False, False, True, True, True]
class_weights = {1: 5, 0: 1}
sample_weights = np.asarray([1, 5, 1, 0, 5, 1, 0, 0, 0])
y_true = np.asarray(y_true)
y_pred = np.asarray(y_pred)
is_masked = np.asarray(is_masked)
y_true_masked = y_true[np.logical_not(is_masked)]
y_pred_masked = y_pred[np.logical_not(is_masked)]
sample_weights_masked = sample_weights[np.logical_not(is_masked)]
true_loss = tensorflow.keras.losses.CategoricalCrossentropy().call(
y_true_masked, y_pred_masked,
)
loss_function = MaskedCategoricalCrossentropy(class_weight=class_weights)
result = loss_function.call(y_true, y_pred)
assert tf.is_tensor(result)
assert tf.rank(result) == 1
assert tf.reduce_all(tf.math.greater_equal(result, 0))
assert result.numpy()[np.logical_not(is_masked)] == pytest.approx(
true_loss.numpy() * sample_weights_masked,
)
def test_maskedcategoricalcrossentropy_total_loss_class_weights():
y_true = [[1, 0], [0, 1], [1, 0], [-1, -1], [0, 1], [1, 0], [-1, -1], [-1, -1], [-1, -1]]
y_pred = [
[0.8, 0.2],
[0.3, 0.7],
[0.1, 0.9],
[0.8, 0.2],
[0.3, 0.7],
[0.1, 0.9],
[0.5, 0.5],
[0.5, 0.5],
[0.5, 0.5],
]
is_masked = [False, False, False, True, False, False, True, True, True]
class_weights = {1: 5, 0: 1}
sample_weights = np.asarray([1, 5, 1, 0, 5, 1, 0, 0, 0])
y_true = np.asarray(y_true)
y_pred = np.asarray(y_pred)
is_masked = np.asarray(is_masked)
y_true_masked = y_true[np.logical_not(is_masked)]
y_pred_masked = y_pred[np.logical_not(is_masked)]
sample_weights_masked = sample_weights[np.logical_not(is_masked)]
true_loss = tensorflow.keras.losses.CategoricalCrossentropy().__call__(
y_true_masked, y_pred_masked, sample_weight=sample_weights_masked,
)
loss_function = MaskedCategoricalCrossentropy(class_weight=class_weights)
result = loss_function.__call__(y_true, y_pred)
assert tf.is_tensor(result)
assert tf.rank(result) == 0
assert np.all(result.numpy() >= 0)
assert result.numpy() == pytest.approx(true_loss.numpy())
def test_maskedcategoricalcrossentropy_total_loss_sample_weights():
y_true = [[1, 0], [0, 1], [1, 0], [-1, -1], [0, 1], [1, 0], [-1, -1], [-1, -1], [-1, -1]]
y_pred = [
[0.8, 0.2],
[0.3, 0.7],
[0.1, 0.9],
[0.8, 0.2],
[0.3, 0.7],
[0.1, 0.9],
[0.5, 0.5],
[0.5, 0.5],
[0.5, 0.5],
]
is_masked = [False, False, False, True, False, False, True, True, True]
sample_weights = np.asarray([1, 5, 1, 0, 5, 1, 0, 0, 0])
y_true = np.asarray(y_true)
y_pred = np.asarray(y_pred)
is_masked = np.asarray(is_masked)
y_true_masked = y_true[np.logical_not(is_masked)]
y_pred_masked = y_pred[np.logical_not(is_masked)]
sample_weights_masked = sample_weights[np.logical_not(is_masked)]
true_loss = tensorflow.keras.losses.CategoricalCrossentropy().__call__(
y_true_masked, y_pred_masked, sample_weight=sample_weights_masked,
)
loss_function = MaskedCategoricalCrossentropy()
result = loss_function.__call__(y_true, y_pred, sample_weight=sample_weights)
assert tf.is_tensor(result)
assert tf.rank(result) == 0
assert np.all(result.numpy() >= 0)
assert result.numpy() == pytest.approx(true_loss.numpy())
def test_maskedcategoricalcrossentropy_serializable():
loss_function = MaskedCategoricalCrossentropy(mask_value=3, class_weight={0: 0.5, 1: 317})
result = tf.keras.losses.serialize(loss_function)
assert isinstance(result, dict)
assert result["config"]["mask_value"] == 3
assert result["config"]["class_weight"] == {"0": "0.5", "1": "317"}
def test_maskedcategoricalcrossentropy_deserializable():
loss_function = MaskedCategoricalCrossentropy
result = tf.keras.losses.deserialize(
"MaskedCategoricalCrossentropy",
custom_objects={"MaskedCategoricalCrossentropy": loss_function},
)
assert isinstance(result, MaskedCategoricalCrossentropy)
# ===============================================================
# DICE_loss score
# ===============================================================
def test_DICE():
loss_function = DICE_loss()
assert isinstance(loss_function, DICE_loss)
y_true = [
[[[1, 1, 1], [1, 1, 1]], [[1, 1, 1], [1, 1, 1]]],
# Sample 2
[[[1, 1, 1], [1, 1, 1]], [[1, 1, 1], [1, 1, 1]]],
# Sample 3
[[[1, 1, 1], [1, 1, 1]], [[1, 1, 1], [1, 1, 1]]],
]
y_pred = [
[[[0.0, 0, 0], [0, 0, 0]], [[1.0, 1, 1], [1, 1, 1]]],
# Sample 2
[[[1, 1, 1], [1, 1, 1]], [[1, 1, 1], [1, 1, 1]]],
# Sample 3
[[[0, 0, 0], [0, 0, 0]], [[0, 0, 0], [0, 0, 0]]],
]
# Add a dimension for the channels
y_true = np.expand_dims(y_true, -1)
y_pred = np.expand_dims(y_pred, -1)
true_loss = [1 / 3, 0, 1]
losses = loss_function.call(y_true, y_pred)
assert tf.is_tensor(losses)
assert tf.rank(losses) == 1
losses = losses.numpy()
assert losses.shape[0] == len(y_true)
assert losses == pytest.approx(np.asarray(true_loss))
total_loss = loss_function.__call__(y_true, y_pred)
assert tf.is_tensor(total_loss)
assert tf.rank(total_loss) == 0
total_loss = total_loss.numpy()
assert total_loss == pytest.approx(np.mean(losses))
def test_DICE_one_hot():
loss_function = DICE_loss()
assert isinstance(loss_function, DICE_loss)
y_true = [
[[[1, 1, 1], [1, 1, 1]], [[1, 1, 1], [1, 1, 1]]],
# Sample 2
[[[1, 1, 1], [1, 1, 1]], [[1, 1, 1], [1, 1, 1]]],
# Sample 3
[[[1, 1, 1], [1, 1, 1]], [[1, 1, 1], [1, 1, 1]]],
]
y_pred = [
[[[0, 0, 0], [0, 0, 0]], [[1, 1, 1], [1, 1, 1]]],
# Sample 2
[[[1, 1, 1], [1, 1, 1]], [[1, 1, 1], [1, 1, 1]]],
# Sample 3
[[[0, 0, 0], [0, 0, 0]], [[0, 0, 0], [0, 0, 0]]],
]
# Add a dimension for the channels
y_true = tf.cast(tf.one_hot(y_true, 2), tf.float32)
y_pred = tf.cast(tf.one_hot(y_pred, 2), tf.float32)
true_loss_foreground = [1 / 3, 0, 1]
true_loss_background = [0, 0, 0]
true_loss = (np.asarray(true_loss_foreground) + np.asarray(true_loss_background)) / 2.0
losses = loss_function.call(y_true, y_pred)
assert tf.is_tensor(losses)
assert tf.rank(losses) == 1
losses = losses.numpy()
assert losses.shape[0] == len(y_true)
assert losses == pytest.approx(np.asarray(true_loss))
total_loss = loss_function.__call__(y_true, y_pred)
assert tf.is_tensor(total_loss)
assert tf.rank(total_loss) == 0
total_loss = total_loss.numpy()
assert total_loss == pytest.approx(np.mean(losses))
def test_DICE_one_hot_multi_class():
loss_function = DICE_loss()
assert isinstance(loss_function, DICE_loss)
y_true = [
[[[1, 1, 1], [1, 1, 1]], [[1, 1, 1], [1, 1, 1]]],
# Sample 2
[[[1, 1, 1], [2, 2, 2]], [[1, 1, 1], [1, 1, 1]]],
# Sample 3
[[[2, 2, 2], [2, 2, 2]], [[2, 2, 2], [2, 2, 2]]],
]
y_pred = [
[[[0, 0, 0], [0, 0, 0]], [[1, 1, 1], [1, 1, 1]]],
# Sample 2
[[[1, 1, 1], [2, 2, 2]], [[1, 1, 1], [1, 1, 1]]],
# Sample 3
[[[0, 0, 0], [0, 0, 0]], [[0, 0, 0], [0, 0, 0]]],
]
# Add a dimension for the channels
# y_true = np.expand_dims(y_true, -1)
y_true = tf.cast(tf.one_hot(y_true, 3), tf.float32)
y_pred = tf.cast(tf.one_hot(y_pred, 3), tf.float32)
true_loss_class0 = [0, 0, 0]
true_loss_class1 = [1 / 3, 0, 0]
true_loss_class2 = [0, 0, 1]
true_loss = (
1
/ 3
* (
np.asarray(true_loss_class0)
+ np.asarray(true_loss_class1)
+ np.asarray(true_loss_class2)
)
)
losses = loss_function.call(y_true, y_pred)
assert tf.is_tensor(losses)
assert tf.rank(losses) == 1
losses = losses.numpy()
assert losses.shape[0] == len(y_true)
assert losses == pytest.approx(np.asarray(true_loss))
total_loss = loss_function.__call__(y_true, y_pred)
assert tf.is_tensor(total_loss)
assert tf.rank(total_loss) == 0
total_loss = total_loss.numpy()
assert total_loss == pytest.approx(np.mean(losses))
def test_DICE_zeros():
loss_function = DICE_loss()
assert isinstance(loss_function, DICE_loss)
y_true = np.zeros([3, 5, 5, 5, 1])
y_pred = np.zeros([3, 5, 5, 5, 1])
true_loss = [0, 0, 0]
losses = loss_function.call(y_true, y_pred)
assert tf.is_tensor(losses)
assert tf.rank(losses) == 1
losses = losses.numpy()
assert losses.shape[0] == len(y_true)
assert losses == pytest.approx(np.asarray(true_loss))
total_loss = loss_function.__call__(y_true, y_pred)
assert tf.is_tensor(total_loss)
assert tf.rank(total_loss) == 0
total_loss = total_loss.numpy()
assert total_loss == pytest.approx(np.mean(losses))
# Author: <NAME>
# Contributors: <NAME>, <NAME>
import numpy as np
import torch
from nose.tools import raises
from cgnet.feature.utils import (GaussianRBF, PolynomialCutoffRBF,
ShiftedSoftplus, _AbstractRBFLayer)
from cgnet.feature.statistics import GeometryStatistics
from cgnet.feature.feature import GeometryFeature, Geometry
# Define sizes for a pseudo-dataset
frames = np.random.randint(10, 30)
beads = np.random.randint(5, 10)
g = Geometry(method='torch')
@raises(NotImplementedError)
def test_radial_basis_function_len():
# Make sure that a NotImplementedError is raised if an RBF layer
# does not have a __len__() method
# Here, we use the _AbstractRBFLayer base class as our RBF
abstract_RBF = _AbstractRBFLayer()
# Next, we check to see if the NotImplementedError is raised
# This is done using the decorator above, because we cannot
# use nose.tools.assert_raises directly on special methods
len(abstract_RBF)
def test_radial_basis_function():
# Make sure radial basis functions are consistent with manual calculation
# Distances need to have shape (n_batch, n_beads, n_neighbors)
distances = torch.randn((frames, beads, beads - 1), dtype=torch.float64)
# Define random parameters for the RBF
variance = np.random.random() + 1
n_gaussians = np.random.randint(5, 10)
high_cutoff = np.random.uniform(5.0, 10.0)
low_cutoff = np.random.uniform(0.0, 4.0)
# Calculate Gaussian expansion using the implemented layer
rbf = GaussianRBF(high_cutoff=high_cutoff, low_cutoff=low_cutoff,
n_gaussians=n_gaussians, variance=variance)
gauss_layer = rbf.forward(distances)
# Manually calculate expansion with numpy
# according to the following formula:
# e_k (r_j - r_i) = exp(- \gamma (\left \| r_j - r_i \right \| - \mu_k)^2)
# with centers mu_k calculated on a uniform grid between
# zero and the distance cutoff and gamma as a scaling parameter.
centers = np.linspace(low_cutoff, high_cutoff,
n_gaussians).astype(np.float64)
gamma = -0.5 / variance
distances = np.expand_dims(distances, axis=3)
magnitude_squared = (distances - centers)**2
gauss_manual = np.exp(gamma * magnitude_squared)
# Shapes and values need to be the same
np.testing.assert_equal(centers.shape, rbf.centers.shape)
np.testing.assert_allclose(gauss_layer.numpy(), gauss_manual, rtol=1e-5)
def test_radial_basis_function_distance_masking():
# Makes sure that if a distance mask is used, the corresponding
# expanded distances returned by GaussianRBF are zero
# Distances need to have shape (n_batch, n_beads, n_neighbors)
distances = torch.randn((frames, beads, beads - 1), dtype=torch.float64)
# Define random parameters for the RBF
variance = np.random.random() + 1
high_cutoff = np.random.uniform(5.0, 10.0)
low_cutoff = np.random.uniform(0.0, 4.0)
n_gaussians = np.random.randint(5, 10)
neighbor_cutoff = np.abs(np.random.rand())
neighbors, neighbor_mask = g.get_neighbors(distances,
cutoff=neighbor_cutoff)
# Calculate Gaussian expansion using the implemented layer
rbf = GaussianRBF(high_cutoff=high_cutoff, low_cutoff=low_cutoff,
n_gaussians=n_gaussians, variance=variance)
gauss_layer = rbf.forward(distances, distance_mask=neighbor_mask)
# Lastly, we check to see that the application of the mask is correct
# against a manual calculation and masking
centers = np.linspace(low_cutoff, high_cutoff, n_gaussians)
gamma = -0.5 / variance
distances = np.expand_dims(distances, axis=3)
magnitude_squared = (distances - centers)**2
gauss_manual = torch.tensor(np.exp(gamma * magnitude_squared))
gauss_manual = gauss_manual * neighbor_mask[:, :, :, None].double()
np.testing.assert_array_almost_equal(gauss_layer.numpy(),
gauss_manual.numpy())
def test_radial_basis_function_normalize():
# Tests to make sure that the output of GaussianRBF is properly
# normalized if 'normalize_output' is specified as True
# Distances need to have shape (n_batch, n_beads, n_neighbors)
distances = torch.randn((frames, beads, beads - 1), dtype=torch.float64)
# Define random parameters for the RBF
variance = np.random.random() + 1
n_gaussians = np.random.randint(5, 10)
high_cutoff = np.random.uniform(5.0, 10.0)
low_cutoff = np.random.uniform(0.0, 4.0)
# Calculate Gaussian expansion using the implemented layer
rbf = GaussianRBF(high_cutoff=high_cutoff, low_cutoff=low_cutoff,
n_gaussians=n_gaussians, variance=variance,
normalize_output=True)
gauss_layer = rbf.forward(distances)
# Manually calculate expansion with numpy
# according to the following formula:
# e_k (r_j - r_i) = exp(- \gamma (\left \| r_j - r_i \right \| - \mu_k)^2)
# with centers mu_k calculated on a uniform grid between
# zero and the distance cutoff and gamma as a scaling parameter.
centers = np.linspace(low_cutoff, high_cutoff,
n_gaussians).astype(np.float64)
gamma = -0.5 / variance
distances = np.expand_dims(distances, axis=3)
magnitude_squared = (distances - centers)**2
gauss_manual = np.exp(gamma * magnitude_squared)
# -*- coding: utf-8 -*-
import numpy as np
from GAparsimony import GAparsimony, Population
import pytest
from .utilTest import autoargs, readJSONFile
# Generic class; it can be instantiated with arbitrary keyword attributes.
class GenericClass(object):
@autoargs()
def __init__(self,**kawargs):
pass
#################################################
#***************TEST POPULATION*****************#
#################################################
@pytest.mark.parametrize("population", [(readJSONFile('./test/outputs/population.json'))])
def test_GAParsimony_regression_boston_population(population):
pop = Population(population["params"], columns=population["features"])
model = GenericClass(popSize=population["popSize"], seed_ini=population["seed"], feat_thres=population["feat_thres"], population=pop)
pop.population = GAparsimony._population(model, type_ini_pop="improvedLHS")
assert (pop.population==np.array(population["population_resultado"])).all()
data = readJSONFile('./test/outputs/populationClass.json')
population = Population(data["params"], data["features"], np.array(data["population"]))
@pytest.mark.parametrize("population, slice, value, resultado",
[(population,(slice(2), slice(None)), np.arange(20), np.array(data["population_1"], dtype=object)),
(population,(slice(2), slice(None)), np.array([np.arange(20), np.arange(1, 21)]), np.array(data["population_2"], dtype=object)),
(population,(slice(2), slice(None)), 0, np.array(data["population_3"], dtype=object)),
(population,(1, slice(2)), 1, np.array(data["population_4"], dtype=object)
from typing import List
import csv
from tqdm import trange
import numpy as np
def normalize_spectra(spectra: List[List[float]], phase_features: List[List[float]] = None, phase_mask: List[List[float]] = None, batch_size: int = 50, excluded_sub_value: float = None, threshold: float = None) -> List[List[float]]:
"""
Takes in spectra and normalizes them so that each spectrum sums to 1. If phase mask information is provided, excluded spectrum regions are removed.
:param spectra: Input spectra with shape (num_spectra, spectrum_length).
:param phase_features: The collection phase of spectrum with shape (num_spectra, num_phases).
:param phase_mask: A mask array showing where in each phase feature to include in predictions and training with shape (num_phases, spectrum_length)
:param batch_size: The size of batches to carry out the normalization operation in.
:param excluded_sub_value: Excluded values are replaced with this object, usually None or nan.
:param threshold: Spectra values below threshold are replaced with threshold to remove negative or zero values.
:return: List form array of spectra with shape (num_spectra, spectrum_length) with excluded values converted to nan.
"""
normalized_spectra = []
phase_exclusion = phase_mask is not None and phase_features is not None
if phase_exclusion:
phase_mask = np.array(phase_mask)
num_iters, iter_step = len(spectra), batch_size
for i in trange(0, num_iters, iter_step):
# prepare batch
batch_spectra = spectra[i:i + iter_step]
batch_mask = np.array([[x is not None for x in b] for b in batch_spectra])
batch_spectra = np.array([[0 if x is None else x for x in b] for b in batch_spectra])
if phase_exclusion:
batch_phases = phase_features[i:i + iter_step]
batch_phases = np.array(batch_phases)
# exclude mask and apply threshold
if threshold is not None:
batch_spectra[batch_spectra < threshold] = threshold
if phase_exclusion:
batch_phase_mask = np.matmul(batch_phases, phase_mask).astype('bool')
batch_mask = ~(~batch_mask + ~batch_phase_mask) # mask shows True only if both components true
batch_spectra[~batch_mask] = 0
# normalize to sum to 1
sum_spectra = np.sum(batch_spectra, axis=1, keepdims=True)
batch_spectra = batch_spectra / sum_spectra
# Collect vectors and revert excluded values to None
batch_spectra = batch_spectra.astype('object')
batch_spectra[~batch_mask] = excluded_sub_value
batch_spectra = batch_spectra.tolist()
normalized_spectra.extend(batch_spectra)
return normalized_spectra
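def _normalize_spectra_example():
    # Hedged sketch with made-up values: the first spectrum has an excluded (None) bin and
    # a negative value that is clipped up to the threshold before normalization.
    raw = [[1.0, 3.0, None, -0.5],
           [2.0, 2.0, 4.0, 2.0]]
    return normalize_spectra(raw, threshold=1e-8)
    # -> approximately [[0.25, 0.75, None, ~0.0], [0.2, 0.2, 0.4, 0.2]]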
def roundrobin_sid(spectra: np.ndarray, threshold: float = None) -> List[float]:
"""
Takes a block of input spectra and makes a pairwise comparison between each of the input spectra for a given molecule,
returning a list of the spectral information divergences. To be used for evaluating the variation between an ensemble of model spectrum predictions.
:spectra: A 3D array containing each of the spectra to be compared. Shape of (num_spectra, spectrum_length, ensemble_size)
:threshold: SID calculation requires positive values in each position, this value is used to replace any zero or negative values.
:return: A list of average pairwise SIDs, of length num_spectra
"""
ensemble_size=spectra.shape[2]
spectrum_size=spectra.shape[1]
ensemble_sids=[]
for i in range(len(spectra)):
spectrum = spectra[i]
nan_mask = np.isnan(spectrum[:, 0])
import os
import time
import random
import h5py
import matplotlib.pyplot as plt
import numpy as np
import torch
import torch.optim as optim
from cnn_models import *
def to_categorical(train_labels):
ret = np.zeros((len(train_labels), train_labels.max() + 1))
ret[np.arange(len(ret)), train_labels] = 1
return ret
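# For example (illustrative values): to_categorical(np.array([0, 2, 1])) ->
# [[1. 0. 0.]
#  [0. 0. 1.]
#  [0. 1. 0.]]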
def clip_by_tensor(t, t_min, t_max):
result = (t >= t_min).float() * t + (t < t_min).float() * t_min
result = (result <= t_max).float() * result + (result >
t_max).float() * t_max
return result
def loss_mul(y_hat, label):
# Cross-entropy loss
y_hat = torch.softmax(y_hat, dim=1)
log_prob = torch.log(clip_by_tensor(y_hat, 1e-10, 1))
loss = -torch.sum(log_prob * label, dim=1)
return loss
def data_loader(sub_dir, dataset, error_rate):
print('Start loading the dataset...')
if dataset == 'imagewoof':
classes = [
'Shih-Tzu', 'Rhodesian ridgeback', 'Australian terrier', 'Samoyed',
'Dingo'
]
elif dataset == 'Flowers':
classes = ['daisy', 'dandelion', 'rose', 'sunflower', 'tulip']
train_data = h5py.File(
os.path.join(sub_dir, dataset, f'{dataset}_train.h5'), 'r')
test_data = h5py.File(os.path.join(sub_dir, dataset, f'{dataset}_test.h5'),
'r')
val_data = h5py.File(os.path.join(sub_dir, dataset, f'{dataset}_val.h5'),
'r')
X_train = train_data['x_train'][:]
Y_train = train_data['y_train'][:]
X_test = test_data['x_test'][:]
Y_test = test_data['y_test'][:]
X_val = val_data['x_val'][:]
Y_val = val_data['y_val'][:]
y_train_e = Y_train.copy()
x_train = np.swapaxes(np.swapaxes(X_train, 1, 3), 2, 3)
y_train = to_categorical(Y_train)
x_test = np.swapaxes(np.swapaxes(X_test, 1, 3), 2, 3)
import os
import re
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import ast
from body_brain_nca import BodyBrainNCA
from modular_light_chaser import ModularLightChaser
from modular_carrier import ModularCarrier
from PIL import Image
import matplotlib
import utils
# from matplotlib.ticker import PercentFormatter
matplotlib.rcParams['pdf.fonttype'] = 42
matplotlib.rcParams['ps.fonttype'] = 42
EPISODES = 4
def save_heatmap_elites(folder_path, features, loss, g=None):
unique_modules = list(np.unique([t[2] for t in features]))
unique_module_n = len(unique_modules)
loss = np.asarray(loss)
features_per_module_n = [[] for _ in range(unique_module_n)]
loss_per_module_n = [[] for _ in range(unique_module_n)]
for f,l in zip(features, loss):
features_per_module_n[unique_modules.index(f[2])].append((f[0], f[1]))
loss_per_module_n[unique_modules.index(f[2])].append(l)
max_loss = max(abs(loss))
min_loss = min(abs(loss))
sqrt_unique_module_n = np.sqrt(unique_module_n)
rows = int(np.round(sqrt_unique_module_n))
cols = int(np.ceil(sqrt_unique_module_n))
fig = matplotlib.pyplot.gcf()
fig.set_size_inches(10, 8)
for i in range(rows*cols):
plt.subplot(rows,cols,i+1)
valid_subplot = False
if i < unique_module_n:
plt.title("Modules: "+str(unique_modules[i]), fontsize=10)
if len(features_per_module_n[i]) > 0:
heatmap_size = max([max(t[0]+1, t[1]) for t in features_per_module_n[i]])
heatmap = np.full((heatmap_size-1, heatmap_size-1), np.nan)
for j,f in enumerate(features_per_module_n[i]):
heatmap[-1*f[1]+1, f[0]-1] = -loss_per_module_n[i][j]
plt.imshow(heatmap, extent=[0.5,heatmap_size-0.5,1.5,heatmap_size+0.5],
cmap="cividis", vmax=max_loss, vmin=min_loss)
plt.xticks(list(range(1,heatmap_size, (heatmap_size//8)+1)), fontsize=6)
plt.yticks(list(range(2,heatmap_size+1, (heatmap_size//8)+1)), fontsize=6)
plt.xlabel("Sensors", fontsize=8)
plt.ylabel("Actuators", fontsize=8)
valid_subplot = True
if not valid_subplot:
plt.axis('off')
fig.subplots_adjust(right=0.8, wspace=0.6, hspace=0.8)
cbar_ax = fig.add_axes([0.85, 0.15, 0.05, 0.7])
#plt.colorbar(cax=cbar_ax, ticks=np.linspace(min_loss, max_loss, 10))
plt.colorbar(cax=cbar_ax)
if g is None:
fig.savefig(os.path.join(folder_path, "load_save_elites.png"), format='png', dpi=300)
fig.savefig(os.path.join(folder_path, "load_save_elites.svg"), format='svg')
else:
fig.savefig(os.path.join(folder_path, "%06d_elites.png"%(g)), format='png', dpi=300)
plt.close("all")
# max_module = max(unique_modules)
# total_elites = 0
# for m in range(3,max_module):
# total_elites += sum([n for n in range(1,m-1)]) )
total_elites = 2024
len_elites = len(loss)
lines=[]
lines.append("Percentage elites: "+str(len_elites / total_elites) + "\n")
lines.append("QD-score: "+str(np.sum(-loss) / total_elites))
with open(os.path.join(folder_path, "elites_stats.txt"), "w") as fstats:
fstats.writelines(lines)
plt.hist(-loss, 200, facecolor='b', alpha=0.75, weights=np.ones(len(loss)) / len(loss))
# plt.gca().yaxis.set_major_formatter(PercentFormatter(1))
plt.xlim(0.25, 0.75)
plt.ylim(0, 0.15)
plt.xlabel('Fitness')
plt.ylabel('Probability')
plt.savefig(os.path.join(folder_path, "hist_elites.png"), format='png', dpi=300)
plt.close()
def generate_map_elites_video(args, flat_weights, video_filename):
# print(type(flat_weights))
env_simulation_steps = args.env_max_iter
nca = BodyBrainNCA(**args.nca_model)
nca.dmodel.summary()
weight_shape_list, weight_amount_list, _ = utils.get_weights_info(nca.weights)
print(weight_shape_list, weight_amount_list)
shaped_weight = utils.get_model_weights(flat_weights, weight_amount_list,
weight_shape_list)
nca.dmodel.set_weights(shaped_weight)
with utils.VideoWriter(video_filename, fps=10) as vid:
for episode in range(EPISODES):
x = np.zeros((1, args.ca_height, args.ca_width,nca.channel_n),dtype=np.float32)
x[:,args.ca_height//2,args.ca_width//2,:1] = 1.0
has_bodytype = False
if hasattr(args, 'bodytype'):
if args.bodytype is not None:
has_bodytype = True
if has_bodytype:
body_grid = utils.manual_body_grid(args.bodytype, 1)
x = nca.body_grid_2_fixed_body_nca(body_grid)
else:
body_grid, x = nca.build_body(x)
print("body_grid", body_grid)
if nca.body_sensor_n == 2:
env = ModularCarrier(body_grid[0], False, predefined_ball_idx=episode)
else:
env = ModularLightChaser(body_grid[0], False, predefined_light_idx=episode)
observations = env.reset()
for t in range(env_simulation_steps):
env_img = env.render(mode="rgb_array")
vid.add(env_img)
x, actions = nca.act(x, [observations])
observations, reward, done, info = env.step(actions[0])
env.close()
def load_save_elites(folder_path, save_body=False, save_video=False,
carrier_env=False):
csv_file_path = os.path.join(folder_path, "elites.csv")
elites = pd.read_csv(csv_file_path, delimiter=";")
df_map_elites_features = elites["map_elites_features"]
df_map_elites_loss = elites["map_elites_loss"]
df_map_elites_body = elites["map_elites_body"]
map_elites_features = [ast.literal_eval(t) for t in df_map_elites_features]
map_elites_body = [ast.literal_eval(b) for b in df_map_elites_body]
save_heatmap_elites(folder_path, map_elites_features, df_map_elites_loss)
if save_body:
body_folder = os.path.join(folder_path, "map_elites_body")
if not os.path.exists(body_folder):
os.makedirs(body_folder)
for i in range(len(map_elites_body)):
if carrier_env:
env = ModularCarrier(np.array(map_elites_body[i])
from __future__ import division
import csv, gzip, os, sys
import numpy as np
import pandas as pd
from time import time
from operator import itemgetter
class PileupAlignment:
def __init__(self, chrom, chrom_len, chrom_pileups, min_depth):
self.desc = '' #not used for now
# chrom has identifier like 'cluster123', which mark a division on the core-genome, which is a conserved region found cross all reference genomes
self.chrom = chrom
self.pileups = chrom_pileups
self.n_samples = len(chrom_pileups.keys())
self.sample_ids = sorted(chrom_pileups.keys())
self.ncols = chrom_len # number of sites
self.gentle_check()
"""
attributes below were described in function update()
"""
self.local_pos = np.arange(self.ncols)
self.counts_list = self.make_counts_list()
self.pooled_counts = self.make_pooled_counts()
self.pooled_depth = self.pooled_counts[0:4,:].sum(0)
self.freq_mat = []
self.ref_alleles = []
self.alt_alleles = []
self.third_alleles = []
self.forth_alleles = []
self.ref_prob_mat = []
self.alt_prob_mat = []
self.third_prob_mat = []
self.forth_prob_mat = []
self.sample_presence = []
self.ref_freqs = []
self.alt_freqs = []
self.third_freqs = []
self.forth_freqs = []
self.prevalence = []
self.aligned_pctg = []
self.update()
def gentle_check(self):
gentle_msg = "something might have gone horribly wrong"
if self.chrom is None:
sys.stderr.write("chrom name is None, {}".format(gentle_msg))
if self.n_samples <= 1:
sys.stderr.write("one or zero sequences for encapsulation, {}".format(gentle_msg))
if any(len(sample_id) == 0 for sample_id in self.sample_ids):
sys.stderr.write("weird sample ids were found, {}".format(gentle_msg))
if any([any(self.pileups[sample_id][:,1] > self.ncols) for sample_id in self.sample_ids]):
sys.stderr.write("sequences were aligned out of the scope of chrom, {}".format(gentle_msg))
def _make_pooled_counts(self):
templ = np.repeat(0, 5 * self.ncols).reshape((5, self.ncols))
for samp_id in self.sample_ids:
plps = self.pileups[samp_id]
plp_all_pos = np.array([plp.pos for plp in plps])
plp_all_counts = np.transpose(np.array([plp.allele_list() for plp in plps]))
# print plp_all_counts
# print templ
#
# print len(plp_all_pos)
# print plp_all_counts.shape
# print templ.shape
# print templ[:,plp_all_pos-1].shape
templ[:,plp_all_pos-1] += plp_all_counts
return templ
def make_allele_probs(self):
n = len(self.sample_ids)
def make_pooled_counts(self):
templ = np.repeat(np.int32(0), 5 * self.ncols).reshape((5, self.ncols))
for counts in self.counts_list:
templ += counts
return templ
def make_counts_list(self):
templs = []
for samp_id in self.sample_ids:
sub_templ = np.repeat(np.int32(0), 5 * self.ncols).reshape((5, self.ncols))
plps = self.pileups[samp_id]
plp_all_pos = plps[:,1].astype(int)
plp_all_counts = np.transpose(plps[:,4:]).astype(np.int32)
sub_templ[:,plp_all_pos-1] += plp_all_counts
templs.append(sub_templ)
return templs
def update(self, min_depth=1):
"""
char_template: complete set of chars for each site on the sequences, from which the ref allele and alt allele will be selected
"""
char_template = np.array([
np.repeat('A', self.pooled_counts.shape[1]),
np.repeat('T', self.pooled_counts.shape[1]),
np.repeat('G', self.pooled_counts.shape[1]),
np.repeat('C', self.pooled_counts.shape[1])
# np.repeat('N', self.pooled_counts.shape[1])
])
"""
sorting the counts of different chars at each site,
then select the indices of chars whose counts are in top 2,
then using the indices of chars to select the chars,
then the top 1 char (the char with highest count) at each site is considered as ref allele for now,
then the char with the second highest count at each site is considered as alt allele for now,
for those sites that have only one char, the counts of other chars are zeroes, so theoretically any of them could be selected by the program, but in reality '-' will be selected; no exception was found.
"""
count_inds_mat = self.pooled_counts[0:4,:].argsort(axis=0)
top2_inds = count_inds_mat[-4:,]
top2_char_mat = np.choose(top2_inds, char_template)
self.ref_alleles = top2_char_mat[3,:]
self.alt_alleles = top2_char_mat[2,:]
self.third_alleles = top2_char_mat[1,:]
self.forth_alleles = top2_char_mat[0,:]
# print self.ref_alleles
# print self.alt_alleles
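# Worked toy example (illustrative): for a single site with counts A=5, T=0, G=2, C=1 the
# ascending argsort over the four count rows gives the index column [1, 3, 2, 0]; np.choose
# with char_template then yields ['T', 'C', 'G', 'A'] from bottom to top, so the lines above
# assign ref='A', alt='G', third='C', forth='T'.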
"""
frequency matrix has the same shape as the char matrix
it is initialize to have None only,
in the end, it is used to store the presence/absence of ref/alt allele for each site cross all samples with the following rules:
- presence of ref allele: 1
- absence of ref allele: 0
- presence of N or -: None
"""
self.freq_mat = np.repeat(None, self.n_samples*self.ncols).reshape((self.n_samples, self.ncols))
self.ref_freq_mat = np.repeat(None, self.n_samples*self.ncols).reshape((self.n_samples, self.ncols))
self.alt_freq_mat = np.repeat(None, self.n_samples*self.ncols).reshape((self.n_samples, self.ncols))
self.third_freq_mat = np.repeat(None, self.n_samples*self.ncols).reshape((self.n_samples, self.ncols))
self.forth_freq_mat = np.repeat(None, self.n_samples*self.ncols).reshape((self.n_samples, self.ncols))
self.ref_prob_mat = np.repeat(np.float16(0), self.n_samples * self.ncols).reshape((self.n_samples, self.ncols))
self.alt_prob_mat = np.repeat(np.float16(0), self.n_samples * self.ncols).reshape((self.n_samples, self.ncols))
self.third_prob_mat = np.repeat(np.float16(0), self.n_samples * self.ncols).reshape((self.n_samples, self.ncols))
self.forth_prob_mat = np.repeat(np.float16(0), self.n_samples * self.ncols).reshape((self.n_samples, self.ncols))
# print self.counts_list
for i, count_mat in enumerate(self.counts_list):
top2_count_mat = np.choose(top2_inds, count_mat[0:4,:])
import numpy as np
import cv2
import warnings
warnings.filterwarnings('ignore')
import matplotlib
matplotlib.use('Qt5Agg')
import matplotlib.pyplot as plt
import os
import scipy
import imageio
from scipy.ndimage import gaussian_filter1d, gaussian_filter
from sklearn import linear_model
from sklearn.model_selection import train_test_split
from matplotlib.colors import ListedColormap
import statsmodels.api as sm
import pandas as pd
from statsmodels.stats.anova import AnovaRM
from sklearn import linear_model
from helper_code.registration_funcs import model_arena, get_arena_details
from helper_code.processing_funcs import speed_colors
from helper_code.analysis_funcs import *
from important_code.shuffle_test import permutation_test, permutation_correlation
plt.rcParams.update({'font.size': 30})
def plot_traversals(self):
''' plot all traversals across the arena '''
# initialize parameters
sides = ['back', 'front']
# sides = ['back']
types = ['spontaneous'] #, 'evoked']
fast_color = np.array([.5, 1, .5])
slow_color = np.array([1, .9, .9])
edge_vector_color = np.array([1, .95, .85])
homing_vector_color = np.array([.725, .725, .725])
edge_vector_color = np.array([.98, .9, .6])**4
homing_vector_color = np.array([0, 0, 0])
non_escape_color = np.array([0,0,0])
condition_colors = [[.5,.5,.5], [.3,.5,.8], [0,.7,1]]
time_thresh = 15 #20 for ev comparison
speed_thresh = 2
p = 0
HV_cutoff = .681 # .5 for exploratory analysis
# initialize figures
fig, fig2, fig3, ax, ax2, ax3 = initialize_figures_traversals(self) #, types = len(types)+1)
# initialize lists for stats
all_data = []
all_conditions = []
edge_vector_time_all = np.array([])
# loop over spontaneous vs evoked
for t, type in enumerate(types):
# loop over experiments and conditions
for c, (experiment, condition) in enumerate(zip(self.experiments, self.conditions)):
strategies = [0, 0, 0]
# extract experiments from nested list
sub_experiments, sub_conditions = extract_experiments(experiment, condition)
# initialize the arena
arena, arena_color, scaling_factor, obstacle = initialize_arena(self, sub_experiments, sub_conditions)
path_ax, path_fig = get_arena_plot(obstacle, sub_conditions, sub_experiments)
# initialize edginess
all_traversals_edgy = {}
all_traversals_homy = {}
proportion_edgy = {}
for s in sides:
all_traversals_edgy[s] = []
all_traversals_homy[s] = []
proportion_edgy[s] = []
m = 0
# loop over each experiment and condition
for e, (experiment, condition) in enumerate(zip(sub_experiments, sub_conditions)):
# loop over each mouse in the experiment
for i, mouse in enumerate(self.analysis[experiment][condition]['back traversal']):
mouse_data = []
print(mouse)
# loop over back and front sides
for s, start in enumerate(sides):
if start == 'front' and type == 'evoked': continue
# find all the paths across the arena
traversal = self.analysis[experiment][condition][start + ' traversal'][mouse]
# get the duration of those paths
# duration = traversal[t*5+3]
if traversal:
if traversal[t*5]:
x_end_loc = np.array([x_loc[-1] * scaling_factor for x_loc in np.array(traversal[t * 5 + 0])[:, 0]])
if traversal[4] < 10: continue
number_of_edge_vectors = np.sum((np.array(traversal[t*5+3]) < speed_thresh) * \
(np.array(traversal[t*5+2]) > HV_cutoff) * \
# (abs(x_end_loc - 50) < 30) * \
(np.array(traversal[t*5+1]) < time_thresh*30*60) ) / min(traversal[4], time_thresh) * time_thresh
# print(traversal[4])
number_of_homing_vectors = np.sum((np.array(traversal[t*5+3]) < speed_thresh) * \
(np.array(traversal[t*5+2]) < HV_cutoff) * \
# (abs(x_end_loc - 50) < 30) * \
(np.array(traversal[t*5+1]) < time_thresh*30*60) )/ min(traversal[4], time_thresh) * time_thresh
all_traversals_edgy[start].append( number_of_edge_vectors )
all_traversals_homy[start].append(number_of_homing_vectors)
# print(number_of_edge_vectors)
mouse_data.append(number_of_edge_vectors)
# get the time of edge vectors
if condition == 'obstacle' and 'wall' in experiment:
edge_vector_idx = ( (np.array(traversal[t * 5 + 3]) < speed_thresh) * (np.array(traversal[t * 5 + 2]) > HV_cutoff) )
edge_vector_time = np.array(traversal[t*5+1])[edge_vector_idx] / 30 / 60
edge_vector_time_all = np.concatenate((edge_vector_time_all, edge_vector_time))
# prop_edgy = np.sum((np.array(traversal[t*5 + 3]) < speed_thresh) * \
# (np.array(traversal[t*5 + 2]) > HV_cutoff) * \
# (np.array(traversal[t * 5 + 1]) < time_thresh * 30 * 60)) / \
# np.sum((np.array(traversal[t * 5 + 3]) < speed_thresh) * \
# (np.array(traversal[t * 5 + 1]) < time_thresh * 30 * 60))
else:
all_traversals_edgy[start].append(0)
all_traversals_homy[start].append(0)
# if np.isnan(prop_edgy): prop_edgy = .5
# prop_edgy = prop_edgy / .35738
# proportion_edgy[start].append(prop_edgy)
traversal_coords = np.array(traversal[t*5+0])
pre_traversal = np.array(traversal[10])
else:
# all_traversals_edginess[start].append(0)
continue
m += .5
# loop over all paths
show = False
if show and traversal:
for trial in range(traversal_coords.shape[0]):
# make sure it qualifies
if traversal[t * 5 + 3][trial] > speed_thresh: continue
if traversal[t*5+1][trial] > time_thresh*30*60: continue
if not len(pre_traversal[0][0]): continue
# if abs(traversal_coords[trial][0][-1]*scaling_factor - 50) > 30: continue
# downsample to get even coverage
# if c == 2 and np.random.random() > (59 / 234): continue
# if c == 1 and np.random.random() > (59 / 94): continue
if traversal[t*5+2][trial] > HV_cutoff: plot_color = edge_vector_color
else: plot_color = homing_vector_color
display_traversal(scaling_factor, traversal_coords, pre_traversal, trial, path_ax, plot_color)
if mouse_data:
# all_data.append(mouse_data)
all_conditions.append(c)
# save image
path_fig.savefig(os.path.join(self.summary_plots_folder, self.labels[c] + ' traversals.eps'), format='eps', bbox_inches='tight', pad_inches=0)
# plot the data
if type == 'spontaneous' and len(sides) > 1:
plot_number_edgy = np.array(all_traversals_edgy['front']).astype(float) + np.array(all_traversals_edgy['back']).astype(float)
plot_number_homy = np.array(all_traversals_homy['front']).astype(float) + np.array(all_traversals_homy['back']).astype(float)
print(np.sum(plot_number_edgy + plot_number_homy))
# plot_proportion_edgy = (np.array(proportion_edgy['front']).astype(float) + np.array(proportion_edgy['back']).astype(float)) / 2
plot_proportion_edgy = plot_number_edgy / (plot_number_edgy + plot_number_homy)
all_data.append(plot_number_edgy)
else:
plot_number_edgy = np.array(all_traversals_edgy[sides[0]]).astype(float)
plot_number_homy = np.array(all_traversals_homy[sides[0]]).astype(float)
plot_proportion_edgy = plot_number_edgy / (plot_number_edgy + plot_number_homy)
# plot_proportion_edgy = np.array(proportion_edgy[sides[0]]).astype(float)
for i, (plot_data, ax0) in enumerate(zip([plot_number_edgy, plot_number_homy], [ax, ax3])): #, plot_proportion_edgy , ax2
print(plot_data)
print(np.sum(plot_data))
# plot each trial
# scatter_axis = scatter_the_axis( (p*4/3+.5/3), plot_data)
ax0.scatter(np.ones_like(plot_data)* (p*4/3+.5/3)* 3 - .2, plot_data, color=[0,0,0, .4], edgecolors='none', s=25, zorder=99)
# do kde
# if i==0: bw = .5
# else: bw = .02
bw = .5
kde = fit_kde(plot_data, bw=bw)
plot_kde(ax0, kde, plot_data, z=4 * p + .8, vertical=True, normto=.3, color=[.5, .5, .5], violin=False, clip=True)
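# draw the interquartile range as a vertical bar and the median as a white horizontal tick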
ax0.plot([4 * p + -.2, 4 * p + -.2], [np.percentile(plot_data, 25), np.percentile(plot_data, 75)], color = [0,0,0])
ax0.plot([4 * p + -.4, 4 * p + -.0], [np.percentile(plot_data, 50), np.percentile(plot_data, 50)], color = [1,1,1], linewidth = 2)
# else:
# # kde = fit_kde(plot_data, bw=.03)
# # plot_kde(ax0, kde, plot_data, z=4 * p + .8, vertical=True, normto=1.2, color=[.5, .5, .5], violin=False, clip=True)
# bp = ax0.boxplot([plot_data, [0, 0]], positions=[4 * p + -.2, -10], showfliers=False, zorder=99)
# ax0.set_xlim([-1, 4 * len(self.experiments) - 1])
p+=1
# plot a stacked bar of strategies
# fig3 = plot_strategies(strategies, homing_vector_color, non_escape_color, edge_vector_color)
# fig3.savefig(os.path.join(self.summary_plots_folder, 'Traversal categories - ' + self.labels[c] + '.png'), format='png', bbox_inches = 'tight', pad_inches = 0)
# fig3.savefig(os.path.join(self.summary_plots_folder, 'Traversal categories - ' + self.labels[c] + '.eps'), format='eps', bbox_inches = 'tight', pad_inches = 0)
# make timing hist
plt.figure()
bins = np.arange(0,22.5,2.5)
plt.hist(edge_vector_time_all, bins = bins, color = [0,0,0], weights = np.ones_like(edge_vector_time_all) / 2.5 / m) #condition_colors[c])
plt.ylim([0,2.1])
plt.show()
# # save the plot
fig.savefig(os.path.join(self.summary_plots_folder, 'Traversal # EVS comparison.png'), format='png', bbox_inches='tight', pad_inches=0)
fig.savefig(os.path.join(self.summary_plots_folder, 'Traversal # EVS comparison.eps'), format='eps', bbox_inches='tight', pad_inches=0)
fig3.savefig(os.path.join(self.summary_plots_folder, 'Traversal # HVS comparison.png'), format='png', bbox_inches='tight', pad_inches=0)
fig3.savefig(os.path.join(self.summary_plots_folder, 'Traversal # HVS comparison.eps'), format='eps', bbox_inches='tight', pad_inches=0)
group_A = [[d] for d in all_data[0]]
group_B = [[d] for d in all_data[2]]
permutation_test(group_A, group_B, iterations = 100000, two_tailed = False)
group_A = [[d] for d in all_data[2]]
group_B = [[d] for d in all_data[1]]
permutation_test(group_A, group_B, iterations = 10000, two_tailed = True)
# fig2.savefig(os.path.join(self.summary_plots_folder, 'Traversal proportion edgy.png'), format='png', bbox_inches='tight', pad_inches=0)
# fig2.savefig(os.path.join(self.summary_plots_folder, 'Traversal proportion edgy.eps'), format='eps', bbox_inches='tight', pad_inches=0)
plt.show()
def plot_speed_traces(self, speed = 'absolute'):
''' plot the speed traces '''
max_speed = 60
# loop over experiments and conditions
for c, (experiment, condition) in enumerate(zip(self.experiments, self.conditions)):
# extract experiments from nested list
sub_experiments, sub_conditions = extract_experiments(experiment, condition)
# get the number of trials
number_of_trials = get_number_of_trials(sub_experiments, sub_conditions, self.analysis)
number_of_mice = get_number_of_mice(sub_experiments, sub_conditions, self.analysis)
RT, end_idx, scaling_factor, speed_traces, subgoal_speed_traces, time, time_axis, trial_num = \
initialize_variables(number_of_trials, self,sub_experiments)
# create custom colormap
colormap = speed_colormap(scaling_factor, max_speed, n_bins=256, v_min=0, v_max=max_speed)
# loop over each experiment and condition
for e, (experiment, condition) in enumerate(zip(sub_experiments, sub_conditions)):
# loop over each mouse
for i, mouse in enumerate(self.analysis[experiment][condition]['speed']):
# control analysis
if self.analysis_options['control'] and not mouse=='control': continue
if not self.analysis_options['control'] and mouse=='control': continue
# loop over each trial
for trial in range(len(self.analysis[experiment][condition]['speed'][mouse])):
if trial > 2: continue
trial_num = fill_in_trial_data(RT, condition, end_idx, experiment, mouse, scaling_factor, self,
speed_traces, subgoal_speed_traces, time, trial, trial_num)
# print some useful metrics
print_metrics(RT, end_idx, number_of_mice, number_of_trials)
# put the speed traces on the plot
fig = show_speed_traces(colormap, condition, end_idx, experiment, number_of_trials, speed, speed_traces, subgoal_speed_traces, time_axis, max_speed)
# save the plot
fig.savefig(os.path.join(self.summary_plots_folder,'Speed traces - ' + self.labels[c] + '.png'), format='png', bbox_inches = 'tight', pad_inches = 0)
fig.savefig(os.path.join(self.summary_plots_folder,'Speed traces - ' + self.labels[c] + '.eps'), format='eps', bbox_inches = 'tight', pad_inches = 0)
plt.show()
print('done')
def plot_escape_paths(self):
''' plot the escape paths '''
# initialize parameters
edge_vector_color = [np.array([1, .95, .85]), np.array([.98, .9, .6])**4]
homing_vector_color = [ np.array([.725, .725, .725]), np.array([0, 0, 0])]
non_escape_color = np.array([0,0,0])
fps = 30
escape_duration = 18 #6 #9 for food # 18 for U
min_distance_to_shelter = 30
HV_cutoff = 0.681 #.75 #.7
# initialize all data for stats
all_data = [[], [], [], []]
all_conditions = []
# loop over experiments and conditions
for c, (experiment, condition) in enumerate(zip(self.experiments, self.conditions)):
# extract experiments from nested list
sub_experiments, sub_conditions = extract_experiments(experiment, condition)
# initialize the arena
arena, arena_color, scaling_factor, obstacle = initialize_arena(self, sub_experiments, sub_conditions)
# more arena stuff for this analysis type
arena_reference = arena_color.copy()
arena_color[arena_reference == 245] = 255
get_arena_details(self, experiment=sub_experiments[0])
shelter_location = [s / scaling_factor / 10 for s in self.shelter_location]
# initialize strategy array
strategies = np.array([0,0,0])
path_ax, path_fig = get_arena_plot(obstacle, sub_conditions, sub_experiments)
# loop over each experiment and condition
for e, (experiment, condition) in enumerate(zip(sub_experiments, sub_conditions)):
if 'void' in experiment or 'dark' in experiment or ('off' in experiment and condition == 'no obstacle') or 'quick' in experiment:
escape_duration = 18
elif 'food' in experiment:
escape_duration = 9
else:
escape_duration = 12
# loop over each mouse
for i, mouse in enumerate(self.analysis[experiment][condition]['speed']):
print(mouse)
# control analysis
if self.analysis_options['control'] and not mouse=='control': continue
if not self.analysis_options['control'] and mouse=='control': continue
# color based on visual vs tactile obst avoidance
# if mouse == 'CA7190' or mouse == 'CA3210' or mouse == 'CA3155' or mouse == 'CA8100':
# edge_vector_color = [np.array([.6, .4, .99]),np.array([.6, .4, .99])]
# homing_vector_color = [np.array([.6, .4, .99]),np.array([.6, .4, .99])]
# else:
# edge_vector_color = [np.array([.8, .95, 0]),np.array([.8, .95, 0])]
# homing_vector_color = [np.array([.8, .95, 0]),np.array([.8, .95, 0])]
# show escape paths
show_escape_paths(HV_cutoff, arena, arena_color, arena_reference, c, condition, edge_vector_color, escape_duration, experiment, fps,
homing_vector_color, min_distance_to_shelter, mouse, non_escape_color, scaling_factor, self, shelter_location, strategies, path_ax,
determine_strategy = False) #('dark' in experiment and condition=='obstacle'))
# save image
# scipy.misc.imsave(os.path.join(self.summary_plots_folder, 'Escape paths - ' + self.labels[c] + '.png'), arena_color[:,:,::-1])
imageio.imwrite(os.path.join(self.summary_plots_folder, 'Escape paths - ' + self.labels[c] + '.png'), arena_color[:,:,::-1])
path_fig.savefig(os.path.join(self.summary_plots_folder, 'Escape plot - ' + self.labels[c] + '.png'), format='png', bbox_inches='tight', pad_inches=0)
path_fig.savefig(os.path.join(self.summary_plots_folder, 'Escape plot - ' + self.labels[c] + '.eps'), format='eps', bbox_inches='tight', pad_inches=0)
# plot a stacked bar of strategies
fig = plot_strategies(strategies, homing_vector_color, non_escape_color, edge_vector_color)
fig.savefig(os.path.join(self.summary_plots_folder, 'Escape categories - ' + self.labels[c] + '.png'), format='png', bbox_inches = 'tight', pad_inches = 0)
fig.savefig(os.path.join(self.summary_plots_folder, 'Escape categories - ' + self.labels[c] + '.eps'), format='eps', bbox_inches = 'tight', pad_inches = 0)
plt.show()
print('escape')
# strategies = np.array([4,5,0])
# fig = plot_strategies(strategies, homing_vector_color, non_escape_color, edge_vector_color)
# plt.show()
# fig.savefig(os.path.join(self.summary_plots_folder, 'Trajectory by previous edge-vectors 2.png'), format='png', bbox_inches='tight', pad_inches=0)
# fig.savefig(os.path.join(self.summary_plots_folder, 'Trajectory by previous edge-vectors 2.eps'), format='eps', bbox_inches='tight', pad_inches=0)
# group_A = [[0],[1],[0,0,0],[0,0],[0,1],[1,0],[0,0,0]]
# group_B = [[1,0,0],[0,0,0,0],[0,0,0],[1,0,0],[0,0,0]]
# permutation_test(group_B, group_A, iterations = 10000, two_tailed = False)
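# scratch data for permutation tests: per-mouse outcome sequences (assumed to encode edge-vector (1) vs homing-vector (0) escapes)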
obstacle = [[0],[1],[0,0,0],[0,0],[0,1],[1],[0,0,0], [1]]
# obstacle_exp = [[0,1],[0,0,0,0,1],[0,1],[0]]
open_field = [[1,0,0,0,0],[0,0,0,0,0],[0,0,0,0],[1,0,0,0,0,0],[0,0,0,0,0,0],[0,0,0,0,0,0,0,0]]
# U_shaped = [[0,1],[1,1], [1,1], [0,0,1], [0,0,0], [0], [1], [0], [0,1], [0,1,0,0], [0,0,0]]
# permutation_test(open_field, obstacle, iterations = 10000, two_tailed = False)
# do same edgy homing then stop to both
obstacle = [[0],[1],[0,0,0],[0,0],[0,1],[1],[0,0,0], [1], [1], [0,0,0]]
open_field = [[1],[0,0,0],[0,0,0],[1,0,0],[0,0,0],[0,0,1]] #stop at 3 trials
# do same edgy homing then stop to both --> exclude non escapes
obstacle = [[0],[1],[0,0,0],[0],[0,1],[1],[0,0,0], [1], [1], [0,0,0]]
open_field = [[1],[0,0],[0,0,0],[1,0,0],[0,0,0],[0,1]] #stop at 3 trials
def plot_edginess(self):
# initialize parameters
fps = 30
escape_duration = 12 #9 #6
HV_cutoff = .681 #.681
ETD = 10 #10
traj_loc = 40
edge_vector_color = np.array([.98, .9, .6])**5
edge_vector_color = np.array([.99, .94, .6]) ** 3
# edge_vector_color = np.array([.99, .95, .6]) ** 5
homing_vector_color = np.array([0, 0, 0])
# homing_vector_color = np.array([.85, .65, .8])
# edge_vector_color = np.array([.65, .85, .7])
# colors for diff conditions
colors = [np.array([.7, 0, .3]), np.array([0, .8, .5])]
colors = [np.array([.3,.3,.3]), np.array([1, .2, 0]), np.array([0, .8, .4]), np.array([0, .7, .9])]
colors = [np.array([.3, .3, .3]), np.array([1, .2, 0]), np.array([.7, 0, .7]), np.array([0, .7, .9]), np.array([0,1,0])]
# colors = [np.array([0, 0, 0]), np.array([0, 0, 0]),np.array([0, 0, 0]), np.array([0, 0, 0])]
offset = [0,.2, .2, 0]
# initialize figures
fig, fig2, fig3, fig4, _, ax, ax2, ax3 = initialize_figures(self)
# initialize all data for stats
all_data = [[],[],[],[]]
all_conditions = []
mouse_ID = []; m = 1
dist_data_EV_other_all = []
delta_ICs, delta_x_end = [], []
time_to_shelter, was_escape = [], []
repetitions = 1
for rand_select in range(repetitions):
m = -1
# loop over experiments and conditions
for c, (experiment, condition) in enumerate(zip(self.experiments, self.conditions)):
num_trials_total = 0
num_trials_escape = 0
# extract experiments from nested list
sub_experiments, sub_conditions = extract_experiments(experiment, condition)
# get the number of trials
number_of_trials = get_number_of_trials(sub_experiments, sub_conditions, self.analysis)
number_of_mice = get_number_of_mice(sub_experiments, sub_conditions, self.analysis)
t_total = 0
# initialize array to fill in with each trial's data
edginess, end_idx, time_since_down, time_to_shelter, time_to_shelter_all, prev_edginess, scaling_factor, time_in_center, trial_num, _, _, dist_to_SH, dist_to_other_SH = \
initialize_variable_edginess(number_of_trials, self, sub_experiments)
mouse_ID_trial = edginess.copy()
# loop over each experiment and condition
for e, (experiment, condition) in enumerate(zip(sub_experiments, sub_conditions)):
if 'void' in experiment or 'dark' in experiment or ('off' in experiment and condition == 'no obstacle') or 'quick' in experiment:
escape_duration = 18
elif 'food' in experiment:
escape_duration = 12
else: escape_duration = 12
# elif 'up' in experiment and 'probe' in condition:
# escape_duration = 12
# loop over each mouse
for i, mouse in enumerate(self.analysis[experiment][condition]['start time']):
m+=1
# initialize mouse data for stats
mouse_data = [[],[],[],[]]
print(mouse)
skip_mouse = False
if self.analysis_options['control'] and not mouse=='control': continue
if not self.analysis_options['control'] and mouse=='control': continue
# loop over each trial
prev_homings = []
x_edges_used = []
t = 0
for trial in range(len(self.analysis[experiment][condition]['end time'][mouse])):
trial_num += 1
# impose conditions
if 'food' in experiment:
if t > 12: continue
if condition == 'no obstacle' and self.analysis[experiment][condition]['start time'][mouse][trial] < 20: continue
num_trials_total += 1
elif 'void' in experiment:
if t > 5: continue
else:
if t>2: continue
# if trial > 2: continue
num_trials_total += 1
# if trial!=2: continue
# if 'off' in experiment and trial: continue
# if trial < 3 and 'wall down' in experiment: continue
# if condition == 'obstacle' and not 'non' in experiment and \
# self.analysis[experiment][condition]['start time'][mouse][trial] < 20: continue
# if c == 0 and not (trial > 0): continue
# if c == 1 and not (trial): continue
# if c == 2 and not (trial == 0): continue
# if trial and ('lights on off' in experiment and not 'baseline' in experiment): continue
if 'Square' in experiment:
HV_cutoff = .56
HV_cutoff = 0
y_idx = self.analysis[experiment][condition]['path'][mouse][trial][1]
if y_idx[0] * scaling_factor > 50: continue
else:
# skip certain trials
y_start = self.analysis[experiment][condition]['path'][mouse][trial][1][0] * scaling_factor
x_start = self.analysis[experiment][condition]['path'][mouse][trial][0][0] * scaling_factor
# print(y_start)
# print(x_start)
if y_start > 25: continue
if abs(x_start-50) > 30: continue
end_idx[trial_num] = self.analysis[experiment][condition]['end time'][mouse][trial]
RT = self.analysis[experiment][condition]['RT'][mouse][trial]
if np.isnan(end_idx[trial_num]) or (end_idx[trial_num] > escape_duration * fps):
# if not ('up' in experiment and 'probe' in condition and not np.isnan(RT)):
# mouse_data[3].append(0)
continue
''' check for previous edgy homings '''
# if 'dark' in experiment or True:
# num_prev_edge_vectors, x_edge = get_num_edge_vectors(self, experiment, condition, mouse, trial)
# # print(num_prev_edge_vectors)
# if num_prev_edge_vectors and c: continue
# if not num_prev_edge_vectors and not c: continue
# if num_prev_edge_vectors < 3 and (c==0): continue
# if num_prev_edge_vectors > 0 and c < 4: continue
# if t>1 and c == 2: continue
# if num_prev_edge_vectors >= 2: print('prev edgy homing'); continue
# if x_edge in x_edges_used: print('prev edgy escape'); continue
#
# print('-----------' + mouse + '--------------')
#
# if self.analysis[experiment][condition]['edginess'][mouse][trial] <= HV_cutoff:
# print(' HV ')
# else:
# print(' EDGY ')
# # edgy trial has occurred
# print('EDGY TRIAL ' + str(trial))
# x_edges_used.append(x_edge)
#
# # select only *with* prev homings
# if not num_prev_edge_vectors:
# if not x_edge in x_edges_used:
# if self.analysis[experiment][condition]['edginess'][mouse][trial] > HV_cutoff:
# x_edges_used.append(x_edge)
# continue
# print(t)
num_trials_escape += 1
# add data
edginess[trial_num] = self.analysis[experiment][condition]['edginess'][mouse][trial]
time_since_down[trial_num] = np.sqrt((x_start - 50)**2 + (y_start - 50)**2 )# self.analysis[experiment][condition]['start angle'][mouse][trial]
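# note: time_since_down is repurposed here to store the start point's distance from the arena centre (50, 50)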
print(edginess[trial_num])
if 'Square' in experiment:
if edginess[trial_num] <=-.3: # and False: #.15
edginess[trial_num] = np.nan
continue
# edginess to current edge as opposed to specific edge
if (('moves left' in experiment and condition == 'no obstacle') \
or ('moves right' in experiment and condition== 'obstacle')): # and False:
if edginess[trial_num] <= -0: # and False:
edginess[trial_num] = np.nan
continue
edginess[trial_num] = edginess[trial_num] - 1
# shelter edginess
if False:
y_pos = self.analysis[experiment][condition]['path'][mouse][trial][1][:int(end_idx[trial_num])] * scaling_factor
x_pos = self.analysis[experiment][condition]['path'][mouse][trial][0][:int(end_idx[trial_num])] * scaling_factor
# get the latter phase traj
y_pos_1 = 55
y_pos_2 = 65
x_pos_1 = x_pos[np.argmin(abs(y_pos - y_pos_1))]
x_pos_2 = x_pos[np.argmin(abs(y_pos - y_pos_2))]
#where does it end up
slope = (y_pos_2 - y_pos_1) / (x_pos_2 - x_pos_1)
intercept = y_pos_1 - x_pos_1 * slope
x_pos_proj = (80 - intercept) / slope
# compared to
x_pos_shelter_R = 40 #40.5 # defined as mean of null dist
# if 'long' in self.labels[c]:
# x_pos_shelter_R += 18
# compute the metric
shelter_edginess = (x_pos_proj - x_pos_shelter_R) / 18
edginess[trial_num] = -shelter_edginess
# if condition == 'obstacle' and 'left' in experiment:edginess[trial_num] = -edginess[trial_num] # for putting conditions together
# get previous edginess #TEMPORARY COMMENT
# if not t:
# SH_data = self.analysis[experiment][condition]['prev homings'][mouse][-1]
# time_to_shelter.append(np.array(SH_data[2]))
# was_escape.append(np.array(SH_data[4]))
if False: # or True:
time_to_shelter, SR = get_prev_edginess(ETD, condition, experiment, mouse, prev_edginess, dist_to_SH, dist_to_other_SH,
scaling_factor, self, traj_loc, trial, trial_num, edginess, delta_ICs, delta_x_end)
print(prev_edginess[trial_num])
print(trial + 1)
print('')
# get time in center
# time_in_center[trial_num] = self.analysis[experiment][condition]['time exploring obstacle'][mouse][trial]
# time_in_center[trial_num] = num_PORHVs
# if num_PORHVs <= 1:
# edginess[trial_num] = np.nan
# continue
# if (prev_edginess[trial_num] < HV_cutoff and not t) or skip_mouse:
# edginess[trial_num] = np.nan
# skip_mouse = True
# continue
''' qualify by prev homings '''
# if prev_edginess[trial_num] < .4: # and c:
# edginess[trial_num] = np.nan
# prev_edginess[trial_num] = np.nan
# continue
num_prev_edge_vectors, x_edge = get_num_edge_vectors(self, experiment, condition, mouse, trial, ETD = 10)
# print(str(num_prev_edge_vectors) + ' EVs')
#
# if not num_prev_edge_vectors >= 1 and c ==0:
# edginess[trial_num] = np.nan
# t+=1
# continue
# if not num_prev_edge_vectors < 1 and c ==1:
# edginess[trial_num] = np.nan
# t+=1
# continue
# print(num_prev_edge_vectors)
# if num_prev_edge_vectors !=0 and c==3:
# edginess[trial_num] = np.nan
# t+=1
# continue
# if num_prev_edge_vectors != 1 and c == 2:
# edginess[trial_num] = np.nan
# t += 1
# continue
# if num_prev_edge_vectors != 2 and num_prev_edge_vectors != 3 and c ==1:
# edginess[trial_num] = np.nan
# t += 1
# continue
#
# if num_prev_edge_vectors < 4 and c ==0:
# edginess[trial_num] = np.nan
# t += 1
# continue
#
# print(trial + 1)
# print(prev_edginess[trial_num])
# print(edginess[trial_num])
# print('')
# print(t)
# get time since obstacle removal?
# time_since_down[trial_num] = self.analysis[experiment][condition]['start time'][mouse][trial] - self.analysis[experiment]['probe']['start time'][mouse][0]
# add data for stats
mouse_data[0].append(int(edginess[trial_num] > HV_cutoff))
mouse_data[1].append(edginess[trial_num])
mouse_data[2].append(prev_edginess[trial_num])
mouse_data[3].append(self.analysis[experiment][condition]['start time'][mouse][trial] - self.analysis[experiment][condition]['start time'][mouse][0])
mouse_ID_trial[trial_num] = m
t += 1
t_total += 1
#append data for stats
if mouse_data[0]:
all_data[0].append(mouse_data[0])
all_data[1].append(mouse_data[1])
all_data[2].append(mouse_data[2])
all_data[3].append(mouse_data[3])
all_conditions.append(c)
mouse_ID.append(m); m+= 1
else:
print(mouse)
print('0 trials')
# get prev homings
time_to_shelter_all.append(time_to_shelter)
dist_data_EV_other_all = np.append(dist_data_EV_other_all, dist_to_other_SH[edginess > HV_cutoff])
# print(t_total)
''' plot edginess by condition '''
# get the data
# data = abs(edginess)
data = edginess
plot_data = data[~np.isnan(data)]
# print(np.percentile(plot_data, 25))
# print(np.percentile(plot_data, 50))
# print(np.percentile(plot_data, 75))
# print(np.mean(plot_data > HV_cutoff))
# plot each trial
scatter_axis = scatter_the_axis(c, plot_data)
ax.scatter(scatter_axis[plot_data>HV_cutoff], plot_data[plot_data>HV_cutoff], color=edge_vector_color[::-1], s=15, zorder = 99)
ax.scatter(scatter_axis[plot_data<=HV_cutoff], plot_data[plot_data<=HV_cutoff], color=homing_vector_color[::-1], s=15, zorder = 99)
bp = ax.boxplot([plot_data, [0,0]], positions = [3 * c - .2, -10], showfliers=False, zorder=99)
plt.setp(bp['boxes'], color=[.5,.5,.5], linewidth = 2)
plt.setp(bp['whiskers'], color=[.5,.5,.5], linewidth = 2)
plt.setp(bp['medians'], linewidth=2)
ax.set_xlim([-1, 3 * len(self.experiments) - 1])
# ax.set_ylim([-.1, 1.15])
ax.set_ylim([-.1, 1.3])
#do kde
try:
if 'Square' in experiment:
kde = fit_kde(plot_data, bw=.06)
plot_kde(ax, kde, plot_data, z=3*c + .3, vertical=True, normto=.8, color=[.5,.5,.5], violin=False, clip=False, cutoff = HV_cutoff+0.0000001, cutoff_colors = [homing_vector_color[::-1], edge_vector_color[::-1]])
ax.set_ylim([-1.5, 1.5])
else:
kde = fit_kde(plot_data, bw=.04)
plot_kde(ax, kde, plot_data, z=3*c + .3, vertical=True, normto=1.3, color=[.5,.5,.5], violin=False, clip=True, cutoff = HV_cutoff, cutoff_colors = [homing_vector_color[::-1], edge_vector_color[::-1]])
except: pass
# plot the polar plot or initial trajectories
# plt.figure(fig4.number)
fig4 = plt.figure(figsize=( 5, 5))
# ax4 = plt.subplot(1,len(self.experiments),len(self.experiments) - c, polar=True)
ax4 = plt.subplot(1, 1, 1, polar=True)
plt.axis('off')
ax.margins(0, 0)
ax.xaxis.set_major_locator(plt.NullLocator())
ax.yaxis.set_major_locator(plt.NullLocator())
ax4.set_xlim([-np.pi / 2 - .1, 0])
# ax4.set_xlim([-np.pi - .1, 0])
mean_value_color = max(0, min(1, np.mean(plot_data)))
mean_value_color = np.sum(plot_data > HV_cutoff) / len(plot_data)
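# colour the summary arrow by the fraction of trials above HV_cutoff, blending the edge-vector and homing-vector colours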
mean_value = np.mean(plot_data)
value_color = mean_value_color * edge_vector_color[::-1] + (1 - mean_value_color) * homing_vector_color[::-1]
ax4.arrow(mean_value + 3 * np.pi / 2, 0, 0, 1.9, color=[abs(v)**1 for v in value_color], alpha=1, width = 0.05, linewidth=2)
ax4.plot([0, 0 + 3 * np.pi / 2], [0, 2.25], color=[.5,.5,.5], alpha=1, linewidth=1, linestyle = '--')
ax4.plot([0, 1 + 3 * np.pi / 2], [0, 2.25], color=[.5,.5,.5], alpha=1, linewidth=1, linestyle = '--')
# ax4.plot([0, -1 + 3 * np.pi / 2], [0, 2.25], color=[.5, .5, .5], alpha=1, linewidth=1, linestyle='--')
scatter_axis_EV = scatter_the_axis_polar(plot_data[plot_data > HV_cutoff], 2.25, 0) #0.05
scatter_axis_HV = scatter_the_axis_polar(plot_data[plot_data <= HV_cutoff], 2.25, 0)
ax4.scatter(plot_data[plot_data > HV_cutoff] + 3 * np.pi/2, scatter_axis_EV, s = 30, color=edge_vector_color[::-1], alpha = .8, edgecolors = None)
ax4.scatter(plot_data[plot_data <= HV_cutoff] + 3 * np.pi/2, scatter_axis_HV, s = 30, color=homing_vector_color[::-1], alpha=.8, edgecolors = None)
fig4.savefig(os.path.join(self.summary_plots_folder, 'Angle comparison - ' + self.labels[c] + '.png'), format='png', transparent=True, bbox_inches='tight', pad_inches=0)
fig4.savefig(os.path.join(self.summary_plots_folder, 'Angle comparison - ' + self.labels[c] + '.eps'), format='eps', transparent=True, bbox_inches='tight', pad_inches=0)
# print(len(plot_data))
if len(plot_data) > 1 and False: # or True:
''' plot the correlation '''
# do both prev homings and time in center # np.array(time_since_down) # 'Time since removal'
for plot_data_corr, fig_corr, ax_corr, data_label in zip([prev_edginess, time_in_center], [fig2, fig3], [ax2, ax3], ['Prior homings','Exploration']): #
plot_data_corr = plot_data_corr[~np.isnan(data)]
# plot data
ax_corr.scatter(plot_data_corr, plot_data, color=colors[c], s=60, alpha=1, edgecolors=colors[c]/2, linewidth=1) #color=[.5, .5, .5] #edgecolors=[.2, .2, .2]
# do correlation
r, p = scipy.stats.pearsonr(plot_data_corr, plot_data)
print(r, p)
# do linear regression
plot_data_corr, prediction = do_linear_regression(plot_data, plot_data_corr)
# plot linear regression
ax_corr.plot(plot_data_corr, prediction['Pred'].values, color=colors[c], linewidth=1, linestyle='--', alpha=.7) #color=[.0, .0, .0]
ax_corr.fill_between(plot_data_corr, prediction['lower'].values, prediction['upper'].values, color=colors[c], alpha=.075) #color=[.2, .2, .2]
fig_corr.savefig(os.path.join(self.summary_plots_folder, 'Edginess by ' + data_label + ' - ' + self.labels[c] + '.png'), format='png')
fig_corr.savefig(os.path.join(self.summary_plots_folder, 'Edginess by ' + data_label + ' - ' + self.labels[c] + '.eps'), format='eps')
# test correlation and stats thru permutation test
# data_x = list(np.array(all_data[2])[np.array(all_conditions) == c])
# data_y = list(np.array(all_data[1])[np.array(all_conditions) == c])
# permutation_correlation(data_x, data_y, iterations=10000, two_tailed=False, pool_all = True)
print(num_trials_escape)
print(num_trials_total)
print(num_trials_escape / num_trials_total)
# save the plot
fig.savefig(os.path.join(self.summary_plots_folder, 'Edginess comparison.png'), format='png', bbox_inches='tight', pad_inches=0)
fig.savefig(os.path.join(self.summary_plots_folder, 'Edginess comparison.eps'), format='eps', bbox_inches='tight', pad_inches=0)
# fig5.savefig(os.path.join(self.summary_plots_folder, 'Angle dist comparison.png'), format='png', bbox_inches='tight', pad_inches=0)
# fig5.savefig(os.path.join(self.summary_plots_folder, 'Angle dist comparison.eps'), format='eps', bbox_inches='tight', pad_inches=0)
plt.show()
time_to_shelter_all = np.concatenate(list(flatten(time_to_shelter_all))).astype(float)
np.percentile(time_to_shelter_all, 25)
np.percentile(time_to_shelter_all, 75)
group_A = list(np.array(all_data[0])[np.array(all_conditions) == 2])
group_B = list(np.array(all_data[0])[np.array(all_conditions) == 3])
permutation_test(group_A, group_B, iterations = 10000, two_tailed = False)
group_A = list(np.array(all_data[1])[(np.array(all_conditions) == 1) + (np.array(all_conditions) == 2)])
group_B = list(np.array(all_data[1])[np.array(all_conditions) == 3])
permutation_test(group_A, group_B, iterations = 10000, two_tailed = False)
import pandas
df = pandas.DataFrame(data={"mouse_id": mouse_ID, "condition": all_conditions, "x-data": all_data[2], "y-data": all_data[1]})
df.to_csv("./Foraging Path Types.csv", sep=',', index=False)
group_B = list(flatten(np.array(all_data[0])[np.array(all_conditions) == 1]))
np.sum(group_B) / len(group_B)
np.percentile(abs(time_since_down[edginess < HV_cutoff]), 50)
np.percentile(abs(time_since_down[edginess < HV_cutoff]), 25)
np.percentile(abs(time_since_down[edginess < HV_cutoff]), 75)
np.percentile(abs(time_since_down[edginess > HV_cutoff]), 50)
np.percentile(abs(time_since_down[edginess > HV_cutoff]), 25)
np.percentile(abs(time_since_down[edginess > HV_cutoff]), 75)
group_A = [[d] for d in abs(time_since_down[edginess > HV_cutoff])]
group_B = [[d] for d in abs(time_since_down[edginess < HV_cutoff])]
permutation_test(group_A, group_B, iterations=10000, two_tailed=True)
WE = np.concatenate(was_escape)
TTS_spont = np.concatenate(time_to_shelter)[~WE]
TTS_escape = np.concatenate(time_to_shelter)[WE]
trials = np.array(list(flatten(all_data[3])))
edgy = np.array(list(flatten(all_data[0])))
np.mean(edgy[trials == 0])
np.mean(edgy[trials == 1])
np.mean(edgy[trials == 2])
np.mean(edgy[trials == 3])
np.mean(edgy[trials == 4])
np.mean(edgy[trials == 5])
np.mean(edgy[trials == 6])
np.mean(edgy[trials == 7])
np.mean(edgy[trials == 8])
np.mean(edgy[trials == 9])
np.mean(edgy[trials == 10])
np.mean(edgy[trials == 11])
np.mean(edgy[trials == 12])
np.mean(edgy[trials == 13])
'''
TRADITIONAL METRICS
'''
def plot_metrics_by_strategy(self):
''' plot the escape paths '''
# initialize parameters
edge_vector_color = np.array([1, .95, .85])
homing_vector_color = np.array([.725, .725, .725])
non_escape_color = np.array([0,0,0])
ETD = 10#0
traj_loc = 40
fps = 30
# escape_duration = 12 #12 #9 #12 9 for food 12 for dark
HV_cutoff = .681 #.65
edgy_cutoff = .681
# loop over experiments and conditions
for c, (experiment, condition) in enumerate(zip(self.experiments, self.conditions)):
# extract experiments from nested list
sub_experiments, sub_conditions = extract_experiments(experiment, condition)
# get the number of trials
number_of_trials = get_number_of_trials(sub_experiments, sub_conditions, self.analysis)
number_of_mice = get_number_of_mice(sub_experiments, sub_conditions, self.analysis)
# initialize array to fill in with each trial's data
efficiency, efficiency_RT, end_idx, num_prev_homings_EV, duration_RT, duration, prev_edginess, edginess, _, _, _, _, \
_, _, _, _, _, scaling_factor, time, trial_num, trials, edginess, avg_speed, avg_speed_RT, peak_speed, RT, escape_speed, strategy = \
initialize_variables_efficiency(number_of_trials, self, sub_experiments)
mouse_id = efficiency.copy()
m = 0
# loop over each experiment and condition
for e, (experiment, condition) in enumerate(zip(sub_experiments, sub_conditions)):
# loop over each mouse
for i, mouse in enumerate(self.analysis[experiment][condition]['speed']):
print(mouse)
# control analysis
if self.analysis_options['control'] and not mouse=='control': continue
if not self.analysis_options['control'] and mouse=='control': continue
# loop across all trials
t = 0
for trial in range(len(self.analysis[experiment][condition]['end time'][mouse])):
if 'food' in experiment: escape_duration = 9
else: escape_duration = 12
trial_num += 1
# impose conditions - escape duration
end_time = self.analysis[experiment][condition]['end time'][mouse][trial]
if np.isnan(end_time) or (end_time > (escape_duration * fps)): continue
# skip certain trials
y_start = self.analysis[experiment][condition]['path'][mouse][trial][1][0] * scaling_factor
x_start = self.analysis[experiment][condition]['path'][mouse][trial][0][0] * scaling_factor
# needs to start at top
if y_start > 25: continue
if abs(x_start - 50) > 30: continue
# get the strategy used
# edgy_escape = self.analysis[experiment][condition]['edginess'][mouse][trial] > edgy_cutoff
# is it a homing vector
# strategy_code = 0
# TEMPORARY COMMENTING
# if not edgy_escape:
# if self.analysis[experiment][condition]['edginess'][mouse][trial] < HV_cutoff: strategy_code = 0 # homing vector
# else: continue
# else:
# get the strategy used -- NUMBER OF PREVIOUS EDGE VECTOR HOMINGS
time_to_shelter, SR = get_prev_edginess(ETD, condition, experiment, mouse, prev_edginess, [], [],
scaling_factor, self, traj_loc, trial, trial_num, edginess, [], [])
if t > 2: continue
# if c == 0 and trial: continue
# if c == 1 and trial != 2: continue
t+=1
# if prev_edginess[trial_num] >= HV_cutoff: strategy_code = 1 # path learning
# elif prev_edginess[trial_num] < HV_cutoff: strategy_code = 2 # map-based
# else: continue
# how many prev homings to that edge: if 0, then map-based, if >1, then PL
if len(self.analysis[experiment]['probe']['start time'][mouse]):
edge_time = self.analysis[experiment]['probe']['start time'][mouse][0] - 1
else: edge_time = 19
edge_time = np.min((edge_time, self.analysis[experiment][condition]['start time'][mouse][trial]))
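# count prior homings only up to edge_time: just before the probe trial (or 19 min if there is none), and no later than the current trial's start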
# print(edge_time)
num_edge_vectors, _ = get_num_edge_vectors(self, experiment, condition, mouse, trial, ETD=ETD, time_threshold=edge_time, other_side = False)
num_edge_vectors = get_num_homing_vectors(self, experiment, condition, mouse, trial, spontaneous = False, time_threshold = edge_time)  # note: overwrites the edge-vector count from the line above with the homing-vector count
print(num_edge_vectors)
# if 'wall up' in experiment and 'no' in condition: num_edge_vectors = 0
# print(num_edge_vectors)
if False or True:
if num_edge_vectors == 1:
strategy_code = 1
# print('EV -- ' + mouse + ' - trial ' + str(trial))
elif num_edge_vectors == 0:
strategy_code = 0
# print('NO EV -- ' + mouse + ' - trial ' + str(trial))
else: continue
else:
strategy_code = 0
strategy[trial_num] = strategy_code
# add data for each metric
RT[trial_num] = self.analysis[experiment][condition]['RT'][mouse][trial]
avg_speed[trial_num] = np.mean(self.analysis[experiment][condition]['speed'][mouse][trial][10*fps : 10*fps+int(end_time)]) * scaling_factor * 30
avg_speed_RT[trial_num] = np.mean(self.analysis[experiment][condition]['speed'][mouse][trial][10*fps + int(RT[trial_num]*30) : 10*fps+int(end_time)]) * scaling_factor * 30
peak_speed[trial_num] = np.max(self.analysis[experiment][condition]['speed'][mouse][trial][10*fps : 10*fps+int(end_time)])*fps*scaling_factor
escape_speed[trial_num] = self.analysis[experiment][condition]['optimal path length'][mouse][trial] * scaling_factor / (end_time/30)
efficiency[trial_num] = np.min((1, self.analysis[experiment][condition]['optimal path length'][mouse][trial] / \
self.analysis[experiment][condition]['full path length'][mouse][trial]))
efficiency_RT[trial_num] = np.min((1, self.analysis[experiment][condition]['optimal RT path length'][mouse][trial] / \
self.analysis[experiment][condition]['RT path length'][mouse][trial]))
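# escape duration normalized by the optimal path length (time per 100 distance units), computed from reaction time onward and from stimulus onset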
duration_RT[trial_num] = (end_time / fps - RT[trial_num]) / self.analysis[experiment][condition]['optimal RT path length'][mouse][trial] / scaling_factor * 100
duration[trial_num] = end_time / fps / self.analysis[experiment][condition]['optimal path length'][mouse][trial] / scaling_factor * 100
# duration[trial_num] = trial
# duration_RT[trial_num] = self.analysis[experiment][condition]['start time'][mouse][trial]
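# note: avg_speed is overwritten here with the pre-stimulus time spent exploring the far side, in minutes (assuming the stored value is in seconds)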
avg_speed[trial_num] = self.analysis[experiment][condition]['time exploring far (pre)'][mouse][trial] / 60
# add data for stats
mouse_id[trial_num] = m
m+=1
# for metric, data in zip(['Reaction time', 'Peak speed', 'Avg speed', 'Path efficiency - RT','Duration - RT', 'Duration'],\
# [RT, peak_speed, avg_speed_RT, efficiency_RT, duration_RT, duration]):
# for metric, data in zip(['Reaction time', 'Avg speed', 'Path efficiency - RT'], #,'Peak speed', 'Duration - RT', 'Duration'], \
# [RT, avg_speed_RT, efficiency_RT]): #peak_speed, , duration_RT, duration
for metric, data in zip(['Path efficiency - RT'], [efficiency_RT]):
# for metric, data in zip([ 'Duration - RT'],
# [ duration_RT]):
# for metric, data in zip(['trial', 'time', 'time exploring back'],
# [duration, duration_RT, avg_speed]):
# format data
x_data = strategy[~np.isnan(data)]
y_data = data[~np.isnan(data)]
if not c: OF_data = y_data
# make figure
fig, ax = plt.subplots(figsize=(11, 9))
plt.axis('off')
# ax.margins(0, 0)
ax.xaxis.set_major_locator(plt.NullLocator())
ax.yaxis.set_major_locator(plt.NullLocator())
# ax.set_title(metric)
if 'Reaction time' in metric:
ax.plot([-.75, 3], [0, 0], linestyle='--', color=[.5, .5, .5, .5])
ax.plot([-.75, 3], [1, 1], linestyle='--', color=[.5, .5, .5, .5])
ax.plot([-.75, 3], [2, 2], linestyle='--', color=[.5, .5, .5, .5])
ax.plot([-.75, 3], [3, 3], linestyle='--', color=[.5, .5, .5, .5])
ax.plot([-.75, 3], [4, 4], linestyle='--', color=[.5, .5, .5, .5])
elif 'Peak speed' in metric:
ax.plot([-.75, 3], [40, 40], linestyle='--', color=[.5, .5, .5, .5])
ax.plot([-.75, 3], [80, 80], linestyle='--', color=[.5, .5, .5, .5])
ax.plot([-.75, 3], [120, 120], linestyle='--', color=[.5, .5, .5, .5])
elif 'Avg speed' in metric:
ax.plot([-.75, 3], [25, 25], linestyle='--', color=[.5, .5, .5, .5])
ax.plot([-.75, 3], [50, 50], linestyle='--', color=[.5, .5, .5, .5])
ax.plot([-.75, 3], [75, 75], linestyle='--', color=[.5, .5, .5, .5])
ax.plot([-.75, 3], [0, 0], linestyle='--', color=[.5, .5, .5, .5])
elif 'Path efficiency' in metric:
ax.plot([-.75, 3], [.5,.5], linestyle='--', color=[.5, .5, .5, .5])
ax.plot([-.75, 3], [.75, .75], linestyle='--', color=[.5, .5, .5, .5])
ax.plot([-.75, 3], [1, 1], linestyle='--', color=[.5, .5, .5, .5])
elif 'Duration' in metric:
ax.plot([-.75, 3], [0, 0], linestyle='--', color=[.5, .5, .5, .5])
ax.plot([-.75, 3], [10, 10], linestyle='--', color=[.5, .5, .5, .5])
ax.plot([-.75, 3], [5, 5], linestyle='--', color=[.5, .5, .5, .5])
elif 'time' == metric:
ax.plot([-.75, 3], [0, 0], linestyle='--', color=[.5, .5, .5, .5])
ax.plot([-.75, 3], [10, 10], linestyle='--', color=[.5, .5, .5, .5])
ax.plot([-.75, 3], [20, 20], linestyle='--', color=[.5, .5, .5, .5])
ax.plot([-.75, 3], [30, 30], linestyle='--', color=[.5, .5, .5, .5])
ax.plot([-.75, 3], [40, 40], linestyle='--', color=[.5, .5, .5, .5])
elif 'exploring' in metric:
ax.plot([-.75, 3], [2.5, 2.5], linestyle='--', color=[.5, .5, .5, .5])
ax.plot([-.75, 3], [5.0, 5.0], linestyle='--', color=[.5, .5, .5, .5])
ax.plot([-.75, 3], [7.5, 7.5], linestyle='--', color=[.5, .5, .5, .5])
ax.plot([-.75, 3], [0, 0], linestyle='--', color=[.5, .5, .5, .5])
#initialize stats array
stats_data = [[], [], []]
# go thru each strategy
for s in [0,1,2]:
# format data
if not np.sum(x_data==s): continue
plot_data = y_data[x_data==s]
median = np.percentile(plot_data, 50);
third_quartile = np.percentile(plot_data, 75);
first_quartile = np.percentile(plot_data, 25)
# print(first_quartile)
# print(median)
# print(third_quartile)
# if 'Reaction' in metric: print(str(first_quartile), str(median), str(third_quartile))
IQR = third_quartile - first_quartile
# remove outliers
if not metric == 'trial':
outliers = abs(plot_data - median) > 2*IQR
# plot_data = plot_data[~outliers]
# plot all data
ax.scatter(np.ones_like(plot_data)*s, plot_data, color=[0,0,0], s=30, zorder = 99)
# plot kde
if 'efficiency' in metric: bw_factor = .02
elif 'speed' in metric or 'efficiency' in metric or metric == 'time': bw_factor = .04
elif 'exploring' in metric: bw_factor = .06
elif 'Duration' in metric: bw_factor = .07
else: bw_factor = .09
kde = fit_kde(plot_data, bw=np.median(y_data)*bw_factor)
plot_kde(ax, kde, plot_data, z= s + .1, vertical=True, normto=.4, color=[.75, .75, .75], violin=False, clip=True)
# plot errorbar
ax.errorbar(s - .15, median, yerr=np.array([[median - first_quartile], [third_quartile - median]]), color=[0, 0, 0], capsize=10, capthick=3, alpha=1, linewidth=3)
ax.scatter(s - .15, median, color=[0, 0, 0], s=175, alpha=1)
# print(len(plot_data))
# get mouse ids for stats
mouse_id_stats = mouse_id[~np.isnan(data)]
mouse_id_stats = mouse_id_stats[x_data==s]
if not metric == 'trial': mouse_id_stats = mouse_id_stats[~outliers]
# for m in np.unique(mouse_id_stats):
# stats_data[s].append( list(plot_data[mouse_id_stats==m]) )
print(metric)
# for ss in [[0,1]]: #, [0,2], [1,2]]:
# group_A = stats_data[ss[0]]
# group_B = stats_data[ss[1]]
# permutation_test(group_A, group_B, iterations=10000, two_tailed=True)
# save figure
fig.savefig(os.path.join(self.summary_plots_folder, metric + ' - ' + self.labels[c] + '.png'), format='png', bbox_inches='tight', pad_inches=0)
fig.savefig(os.path.join(self.summary_plots_folder, metric + ' - ' + self.labels[c] + '.eps'), format='eps', bbox_inches='tight', pad_inches=0)
plt.show()
plt.close('all')
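# ad hoc stats scratch: tr1_eff, tr3_eff and OF_eff are assumed to be path-efficiency arrays collected from separate runs; they are not defined in this function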
group_A = [[e] for e in tr1_eff]
group_B = [[e] for e in tr3_eff]
group_C = [[e] for e in OF_eff]
permutation_test(group_A, group_B, iterations=10000, two_tailed=True)
permutation_test(group_A, group_C, iterations=10000, two_tailed=True)
permutation_test(group_B, group_C, iterations=10000, two_tailed=True)
'''
DIST OF TURN ANGLES
'''
# def plot_metrics_by_strategy(self):
# ''' plot the escape paths '''
#
# ETD = 10
# traj_loc = 40
#
# fps = 30
# escape_duration = 12
#
# colors = [[.3,.3,.3,.5], [.5,.5,.8, .5]]
#
# # make figure
# fig, ax = plt.subplots(figsize=(11, 9))
# fig2, ax2 = plt.subplots(figsize=(11, 9))
# # plt.axis('off')
# # ax.margins(0, 0)
# # ax.xaxis.set_major_locator(plt.NullLocator())
# # ax.yaxis.set_major_locator(plt.NullLocator())
# all_angles_pre = []
# all_angles_escape = []
#
#
# # loop over experiments and conditions
# for c, (experiment, condition) in enumerate(zip(self.experiments, self.conditions)):
# # extract experiments from nested list
# sub_experiments, sub_conditions = extract_experiments(experiment, condition)
# # get the number of trials
# number_of_trials = get_number_of_trials(sub_experiments, sub_conditions, self.analysis)
# number_of_mice = get_number_of_mice(sub_experiments, sub_conditions, self.analysis)
# # initialize array to fill in with each trial's data
# shape = self.analysis[sub_experiments[0]]['obstacle']['shape']
# scaling_factor = 100 / shape[0]
# turn_angles_pre = []
# turn_angles_escape = []
#
# # loop over each experiment and condition
# for e, (experiment, condition) in enumerate(zip(sub_experiments, sub_conditions)):
# # loop over each mouse
# for i, mouse in enumerate(self.analysis[experiment][condition]['speed']):
# print(mouse)
# # control analysis
# if self.analysis_options['control'] and not mouse=='control': continue
# if not self.analysis_options['control'] and mouse=='control': continue
# # loop across all trials
# t = 0
# for trial in range(len(self.analysis[experiment][condition]['end time'][mouse])):
# # impose conditions - escape duration
# end_time = self.analysis[experiment][condition]['end time'][mouse][trial]
# if np.isnan(end_time) or (end_time > (escape_duration * fps)): continue
#
#
# ## COMMENT ONE OR THE OTHER IF TESTING PRE OR ESCAPE
# #pre
# # if trial < 2: continue
# # if t: continue
#
# # escape
# if t > 2: continue
#
# # skip certain trials
# y_start = self.analysis[experiment][condition]['path'][mouse][trial][1][0] * scaling_factor
# x_start = self.analysis[experiment][condition]['path'][mouse][trial][0][0] * scaling_factor
# # needs to start at top
# if y_start > 25: continue
# if abs(x_start - 50) > 30: continue
#
# turn_angles_pre.append(list(abs(np.array(self.analysis[experiment][condition]['prev movements'][mouse][trial][3])))) # >145
# turn_angles_escape.append(abs(self.analysis[experiment][condition]['movement'][mouse][trial][2])) # >145
# #
# # turn_angles_pre.append(list(np.array(self.analysis[experiment][condition]['prev movements'][mouse][trial][3])))
# # turn_angles_escape.append(self.analysis[experiment][condition]['movement'][mouse][trial][2])
#
# t += 1
#
#
#
# # format data
# hist_data_pre = np.array(list(flatten(turn_angles_pre)))
# hist_data_escape = np.array(list(flatten(turn_angles_escape)))
#
# # for permutation test
# # all_angles_pre.append(turn_angles_pre)
# # all_angles_escape.append([[tae] for tae in turn_angles_escape])
#
# ax.set_title('Prior movement angles')
# ax2.set_title('Escape movement angles')
# ax.plot([0, 0], [0, .4], linestyle='--', color=[.5, .5, .5, .5])
# ax.plot([90, 90],[0, .4], linestyle='--', color=[.5, .5, .5, .5])
# ax.plot([180, 180],[0, .4], linestyle='--', color=[.5, .5, .5, .5])
# ax2.plot([0, 0], [0, .4], linestyle='--', color=[.5, .5, .5, .5])
# ax2.plot([90, 90],[0, .4], linestyle='--', color=[.5, .5, .5, .5])
# ax2.plot([180, 180],[0, .4], linestyle='--', color=[.5, .5, .5, .5])
#
# # format data
# bin_width = 30
# hist_pre, n, _ = ax.hist(hist_data_pre, bins=np.arange(-0, 180+bin_width, bin_width), color=colors[c], weights = np.ones_like(hist_data_pre) * 1/ len(hist_data_pre))
# hist_escape, n, _ = ax2.hist(hist_data_escape, bins=np.arange(-0, 180+bin_width, bin_width), color=colors[c], weights = np.ones_like(hist_data_escape) * 1/ len(hist_data_escape))
#
# count_pre, n = np.histogram(hist_data_pre, bins=np.arange(-0, 180+bin_width, bin_width))
# count_escape, n = np.histogram(hist_data_escape, bins=np.arange(-0, 180+bin_width, bin_width))
#
# # for chi squared
# all_angles_pre.append(count_pre)
# all_angles_escape.append(count_escape)
#
#
# # save figure
# fig.savefig(os.path.join(self.summary_plots_folder, 'Prior Angle dist.png'), format='png', bbox_inches='tight', pad_inches=0)
# fig.savefig(os.path.join(self.summary_plots_folder, 'Prior Angle dist.eps'), format='eps', bbox_inches='tight', pad_inches=0)
# # save figure
# fig2.savefig(os.path.join(self.summary_plots_folder, 'Escape Angle dist.png'), format='png', bbox_inches='tight', pad_inches=0)
# fig2.savefig(os.path.join(self.summary_plots_folder, 'Escape Angle dist.eps'), format='eps', bbox_inches='tight', pad_inches=0)
#
# plt.show()
#
#
# scipy.stats.chi2_contingency(all_angles_pre)
# scipy.stats.chi2_contingency(all_angles_escape)
#
#
# group_A = all_angles_pre[0]
# group_B = all_angles_pre[1]
# permutation_test(group_A, group_B, iterations = 10000, two_tailed = True)
#
# group_A = all_angles_escape[0]
# group_B = all_angles_escape[1]
# permutation_test(group_A, group_B, iterations = 10000, two_tailed = True)
#
# plt.close('all')
#
# '''
# DIST OF EDGE VECTORS
# '''
# def plot_metrics_by_strategy(self):
# ''' plot the escape paths '''
#
# ETD = 10
# traj_loc = 40
#
# fps = 30
# escape_duration = 12
#
# dist_thresh = 5
# time_thresh = 20
#
# colors = [[.3,.3,.3,.5], [.5,.5,.8, .5]]
#
# # make figure
# fig1, ax1 = plt.subplots(figsize=(11, 9))
# fig2, ax2 = plt.subplots(figsize=(11, 9))
# # plt.axis('off')
# # ax.margins(0, 0)
# # ax.xaxis.set_major_locator(plt.NullLocator())
# # ax.yaxis.set_major_locator(plt.NullLocator())
# all_EVs = []
# all_HVs = []
#
#
# # loop over experiments and conditions
# for c, (experiment, condition) in enumerate(zip(self.experiments, self.conditions)):
# # extract experiments from nested list
# sub_experiments, sub_conditions = extract_experiments(experiment, condition)
# # get the number of trials
# number_of_trials = get_number_of_trials(sub_experiments, sub_conditions, self.analysis)
# number_of_mice = get_number_of_mice(sub_experiments, sub_conditions, self.analysis)
# # initialize array to fill in with each trial's data
# shape = self.analysis[sub_experiments[0]]['obstacle']['shape']
# scaling_factor = 100 / shape[0]
# EVs = []
# HVs = []
# edge_vector_time_exp = []
#
# # loop over each experiment and condition
# for e, (experiment, condition) in enumerate(zip(sub_experiments, sub_conditions)):
# # loop over each mouse
# for i, mouse in enumerate(self.analysis[experiment][condition]['speed']):
# print(mouse)
# # control analysis
# if self.analysis_options['control'] and not mouse=='control': continue
# if not self.analysis_options['control'] and mouse=='control': continue
# # just take the last trial
# trial = len(self.analysis[experiment][condition]['start time'][mouse])-1
# if trial < 0:
# if condition == 'obstacle':
# condition_use = 'no obstacle'
# trial = 0
# elif condition == 'no obstacle':
# condition_use = 'obstacle'
# trial = len(self.analysis[experiment][condition]['start time'][mouse])-1
# if mouse == 'CA7220': trial = 1 #compensate for extra vid
# else: condition_use = condition
#
# # get the prev homings
# SH_data = self.analysis[experiment][condition_use]['prev homings'][mouse][trial]
#
# # get their start time
# homing_time = np.array(SH_data[3])
# edge_vector_time_exp.append(list(homing_time))
#
# # get their x value
# SH_x = np.array(SH_data[0])
#
# # only use spontaneous
# stim_evoked = np.array(SH_data[4])
# SH_x = SH_x[~stim_evoked]
# homing_time = homing_time[~stim_evoked]
#
# # normalize to 20 min
# SH_x = SH_x[homing_time < time_thresh] / np.min((time_thresh, self.analysis[experiment][condition_use]['start time'][mouse][trial])) * 20
#
# # get number of edge vectors
# num_edge_vectors = np.sum(abs(SH_x - 25) < dist_thresh) + np.sum(abs(SH_x - 75) < dist_thresh)
# num_homing_vectors = np.sum(abs(SH_x - 50) < dist_thresh)
# print(num_edge_vectors)
#
#
# # get the prev anti homings
# anti_SH_data = self.analysis[experiment][condition_use]['prev anti-homings'][mouse][trial]
#
# # get their start time
# homing_time = np.array(anti_SH_data[3])
# edge_vector_time_exp.append(list(homing_time))
#
# # get their x value
# anti_SH_x = np.array(anti_SH_data[0])
#
# # limit to 20 min
# anti_SH_x = anti_SH_x[homing_time < time_thresh] / np.min((time_thresh, self.analysis[experiment][condition_use]['start time'][mouse][trial])) * 20
#
# # get number of edge vectors
# num_anti_edge_vectors = np.sum(abs(anti_SH_x - 25) < dist_thresh) + np.sum(abs(anti_SH_x - 75) < dist_thresh)
# num_anti_homing_vectors = np.sum(abs(anti_SH_x - 50) < dist_thresh)
# print(num_anti_edge_vectors)
#
# # append to list
# EVs.append(num_edge_vectors + num_anti_edge_vectors )
# HVs.append(num_edge_vectors + num_anti_edge_vectors - (num_homing_vectors + num_anti_homing_vectors))
# print(EVs)
# all_EVs.append(EVs)
# all_HVs.append(HVs)
#
# # make timing hist
# plt.figure()
# plt.hist(list(flatten(edge_vector_time_exp)), bins=np.arange(0, 22.5, 2.5)) #, color=condition_colors[c])
#
# # plot EVs and HVs
# for plot_data, ax, fig in zip([EVs, HVs], [ax1, ax2], [fig1, fig2]):
#
# scatter_axis = scatter_the_axis(c * 4 / 3 + .5 / 3, plot_data)
# ax.scatter(scatter_axis, plot_data, color=[0, 0, 0], s=25, zorder=99)
# # do kde
# kde = fit_kde(plot_data, bw=.5)
# plot_kde(ax, kde, plot_data, z=4 * c + .8, vertical=True, normto=1.2, color=[.5, .5, .5], violin=False, clip=False) # True)
#
# # save figure
# fig.savefig(os.path.join(self.summary_plots_folder, 'EV dist - ' + self.labels[c] + '.png'), format='png', bbox_inches='tight', pad_inches=0)
# fig.savefig(os.path.join(self.summary_plots_folder, 'EV dist - ' + self.labels[c] + '.eps'), format='eps', bbox_inches='tight', pad_inches=0)
#
#
# plt.show()
#
#
# group_A = all_EVs[1]
# group_B = all_EVs[2]
# permutation_test(group_A, group_B, iterations = 10000, two_tailed = True)
#
# group_A = all_HVs[0]
# group_B = all_HVs[1]
# permutation_test(group_A, group_B, iterations = 10000, two_tailed = True)
#
# plt.close('all')
'''
PREDICTION PLOTS, BY TURN ANGLE OR EXPLORATION/EDGINESS
|
|
v
'''
def plot_prediction(self):
by_angle_not_edginess = False
if by_angle_not_edginess:
# initialize parameters
fps = 30
escape_duration = 12
ETD = 10 #4
traj_loc = 40
# initialize figures
fig1, ax1, fig2, ax2, fig3, ax3 = initialize_figures_prediction(self)
plt.close(fig2); plt.close(fig3)
# loop over experiments and conditions
for c, (experiment, condition) in enumerate(zip(self.experiments, self.conditions)):
# extract experiments from nested list
sub_experiments, sub_conditions = extract_experiments(experiment, condition)
# get the number of trials
number_of_trials = get_number_of_trials(sub_experiments, sub_conditions, self.analysis)
number_of_mice = get_number_of_mice(sub_experiments, sub_conditions, self.analysis)
mouse_trial_list = []
IC_x_all, IC_y_all, IC_angle_all, IC_time_all, turn_angles_all = [], [], [], [], []
# initialize array to fill in with each trial's data
efficiency, efficiency_RT, end_idx, x_pred, y_pred, angle_pred, time_pred, mean_pred, initial_body_angle, initial_x, initial_y, x_edge, _, \
_, _, _, _, scaling_factor, time, trial_num, trials, edginess, prev_edginess, dist_to_SH, dist_to_other_SH, RT_all, avg_speed, _ = \
initialize_variables_efficiency(number_of_trials, self, sub_experiments)
# initialize array to fill in with each trial's data
edginess, end_idx, angle_turned, _, _, prev_edginess, scaling_factor, _, trial_num, _, _, dist_to_SH, dist_to_other_SH = \
initialize_variable_edginess(number_of_trials, self, sub_experiments)
for shuffle_time in [False, True]:
angle_turned_all, x_pred_all, y_pred_all, angle_pred_all, time_pred_all, mean_pred_all = [], [], [], [], [], []
num_repeats = shuffle_time * 499 + 1 #* 19
num_repeats = shuffle_time * 19 + 1 # * 19
prediction_scores_all = []
for r in range(num_repeats):
trial_num = -1
# loop over each experiment and condition
for e, (experiment_real, condition_real) in enumerate(zip(sub_experiments, sub_conditions)):
# loop over each mouse
for i, mouse_real in enumerate(self.analysis[experiment_real][condition_real]['start time']):
if self.analysis_options['control'] and not mouse_real=='control': continue
if not self.analysis_options['control'] and mouse_real=='control': continue
# loop over each trial
prev_homings = []
t = 0
for trial_real in range(len(self.analysis[experiment_real][condition_real]['end time'][mouse_real])):
trial_num += 1
# impose conditions
if t > 2: continue
end_idx[trial_num] = self.analysis[experiment_real][condition_real]['end time'][mouse_real][trial_real]
if np.isnan(end_idx[trial_num]): continue
if (end_idx[trial_num] > escape_duration * fps): continue
# skip certain trials
y_start = self.analysis[experiment_real][condition_real]['path'][mouse_real][trial_real][1][0] * scaling_factor
x_start = self.analysis[experiment_real][condition_real]['path'][mouse_real][trial_real][0][0] * scaling_factor
if y_start > 25: continue
if abs(x_start-50) > 30: continue
# use different data if shuffle:
# if shuffle_time:
# experiment, condition, mouse, trial = mouse_trial_list[np.random.randint(len(mouse_trial_list))]
# else:
# experiment, condition, mouse, trial = experiment_real, condition_real, mouse_real, trial_real
''' just use real mouse '''
experiment, condition, mouse, trial = experiment_real, condition_real, mouse_real, trial_real
''' control ICs, real escape '''
# # get the angle turned during the escape
angle_turned[trial_num] = self.analysis[experiment_real][condition_real]['movement'][mouse_real][trial_real][2]
# angle_turned[trial_num] = abs(self.analysis[experiment_real][condition_real]['edginess'][mouse_real][trial_real])
# get the angle turned, delta x, delta y, and delta phi of previous homings
bout_start_angle = self.analysis[experiment_real][condition_real]['movement'][mouse_real][trial_real][1]
bout_start_position = self.analysis[experiment_real][condition_real]['movement'][mouse_real][trial_real][0]
start_time = self.analysis[experiment_real][condition_real]['start time'][mouse_real][trial_real]
# get initial conditions and endpoint quantities
IC_x = np.array(self.analysis[experiment][condition]['prev movements'][mouse][trial][0][-ETD:])
IC_y = np.array(self.analysis[experiment][condition]['prev movements'][mouse][trial][1][-ETD:])
IC_angle = np.array(self.analysis[experiment][condition]['prev movements'][mouse][trial][2][-ETD:])
IC_time = np.array(self.analysis[experiment][condition]['prev homings'][mouse][trial][3][-ETD:])
turn_angles = np.array(self.analysis[experiment][condition]['prev movements'][mouse][trial][3][-ETD:])
# MOE = 10
# x_edge_trial = self.analysis[experiment][condition]['x edge'][mouse][trial]
# SH_x = np.array(self.analysis[experiment][condition]['prev homings'][mouse][trial][0][-ETD:])
# if x_edge_trial > 50 and np.sum(SH_x > 25 + MOE):
# IC_x = IC_x[SH_x > 25 + MOE]
# IC_y = IC_y[SH_x > 25 + MOE]
# IC_angle = IC_angle[SH_x > 25 + MOE]
# IC_time = IC_time[SH_x > 25 + MOE]
# turn_angles = turn_angles[SH_x > 25 + MOE]
# elif np.sum(SH_x > 75 - MOE):
# IC_x = IC_x[SH_x > 75 - MOE]
# IC_y = IC_y[SH_x > 75 - MOE]
# IC_angle = IC_angle[SH_x > 75 - MOE]
# IC_time = IC_time[SH_x > 75 - MOE]
# turn_angles = turn_angles[SH_x > 75 - MOE]
if not shuffle_time: # gather previous movements
IC_x_all = np.concatenate((IC_x_all, IC_x))
IC_y_all = np.concatenate((IC_y_all, IC_y))
IC_angle_all = np.concatenate((IC_angle_all, IC_angle))
IC_time_all = np.concatenate((IC_time_all, IC_time))
turn_angles_all = np.concatenate((turn_angles_all, turn_angles))
else:
# sample randomly from these movements
random_idx = np.random.choice(len(IC_x_all), len(IC_x_all), replace = False)
IC_x = IC_x_all[random_idx]
IC_y = IC_y_all[random_idx]
IC_angle = IC_angle_all[random_idx]
IC_time = IC_time_all[random_idx]
turn_angles = turn_angles_all[random_idx]
# calculate difference in ICs
delta_x = abs( np.array(IC_x - bout_start_position[0]) )
delta_y = abs( np.array(IC_y - bout_start_position[1]) )
delta_angle = abs( np.array(IC_angle - bout_start_angle) )
delta_angle[delta_angle > 180] = 360 - delta_angle[delta_angle > 180]
delta_time = start_time - np.array(IC_time)
import gym
from gym import wrappers
import numpy as np
import tensorflow as tf
# init Gym
env = gym.make('Reacher-v1')
env.seed(0)
env.reset()
env.render()
# init Variables
max_episodes = 100000
batch_size = 1000
outdir = './log/'
num_observation = env.observation_space.shape[0]
num_action = env.action_space.shape[0]
# start Monitor
env = wrappers.Monitor(env, directory=outdir, force=True)
# TensorFlow
# https://www.tensorflow.org/get_started/mnist/pros
#https://github.com/hunkim/ReinforcementZeroToAll/blob/master/08_2_softmax_pg_cartpole.py
hidden_layer = 1000
learning_rate = 1e-3
gamma = .995
X = tf.placeholder(tf.float32, [None, num_observation], name="input_x")
W1 = tf.get_variable("W1", shape=[num_observation, hidden_layer],
initializer=tf.contrib.layers.xavier_initializer())
layer1 = tf.nn.relu(tf.matmul(X, W1))
W2 = tf.get_variable("W2", shape=[hidden_layer, num_action],
initializer=tf.contrib.layers.xavier_initializer())
action_pred = tf.nn.softmax(tf.matmul(layer1, W2))
Y = tf.placeholder(tf.float32, [None, num_action], name="input_y")
advantages = tf.placeholder(tf.float32, name="reward_signal")
log_lik = -Y * tf.log(action_pred)
log_lik_adv = log_lik * advantages
loss = tf.reduce_mean(tf.reduce_sum(log_lik_adv, axis=1))
train = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(loss)
# discount reward function
def discount_rewards(r, gamma=0.99):
"""Takes 1d float array of rewards and computes discounted reward
e.g. f([1, 1, 1], 0.99) -> [1, 0.99, 0.9801] -> [1.22 -0.004 -1.22]
"""
d_rewards = np.array([val * (gamma ** i) for i, val in enumerate(r)])
# Normalize/standardize rewards
d_rewards -= d_rewards.mean()
d_rewards /= d_rewards.std()
return d_rewards
#return r
# run TensorFlow and TensorBoard
sess = tf.Session()
sess.run(tf.global_variables_initializer())
# run Gym
ary_state = np.empty(0).reshape(0, num_observation)
ary_action = np.empty(0).reshape(0, num_action)
ary_reward = np.empty(0).reshape(0, 1)
for episode in range(max_episodes):
done = False
cnt_step = 0
ob = env.reset()
'''
for t in range(100):
env.render()
action = env.action_space.sample()
ob, reward, done, info = env.step(action)
if done:
print("Episode finished after {} timesteps".format(t+1))
break
'''
while not done:
if episode % (batch_size/4) == 0:
env.render()
x = np.reshape(ob, [1, num_observation])
ary_state = np.vstack([ary_state, x])
action_prob = sess.run(action_pred, feed_dict={X: x})
action_prob = np.squeeze(action_prob)
random_noise = np.random.uniform(-1, 1, num_action)
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed May 25 16:02:58 2022
@author: erri
"""
import os
import numpy as np
import math
from morph_quantities_func_v2 import morph_quantities
import matplotlib.pyplot as plt
# SINGLE RUN NAME
run = 'q07_1'
DoD_name = 'DoD_s1-s0_filt_nozero_rst.txt'
# Step between surveys
DoD_delta = 1
# Base length in terms of columns. If the window dimensions are multiples of the
# channel width, windows_length_base is 12 columns
windows_length_base = 12
window_mode = 1
'''
windows_mode:
0 = fixed windows (all the channel)
1 = expanding window
2 = floating fixed windows (WxW, Wx2W, Wx3W, ...) without overlapping
3 = floating fixed windows (WxW, Wx2W, Wx3W, ...) with overlapping
'''
plot_mode = 2
'''
plot_mode:
1 = only summary plot
2 = all single DoD plot
'''
# Parameters
# Survey pixel dimension
px_x = 50 # [mm]
px_y = 5 # [mm]
W = 0.6 # Width [m]
d50 = 0.001
NaN = -999
# setup working directory and DEM's name
home_dir = os.getcwd()
# Source DoDs folder
DoDs_folder = os.path.join(home_dir, 'DoDs', 'DoD_'+run)
DoDs_name_array = [] # List of the DoD file names with time step DoD_delta
for f in sorted(os.listdir(DoDs_folder)):
if f.endswith('_filt_nozero_rst.txt') and f.startswith('DoD_'):
delta = int(f[5]) - int(f[8])
if delta == DoD_delta:
DoDs_name_array = np.append(DoDs_name_array, f)
else:
pass
# Initialize overall arrays
dep_vol_w_array_all = []
sco_vol_w_array_all = []
# Loop over the DoDs with step of delta_step
for f in DoDs_name_array:
DoD_name = f
print(f)
DoD_path = os.path.join(DoDs_folder,DoD_name)
DoD_filt_nozero = np.loadtxt(DoD_path, delimiter='\t')
# DoD length
DoD_length = DoD_filt_nozero.shape[1]*px_x/1000 # DoD length [m]
dim_x = DoD_filt_nozero.shape[1]
# Initialize array
# Define total volume matrix, Deposition matrix and Scour matrix
DoD_vol = np.where(np.isnan(DoD_filt_nozero), 0, DoD_filt_nozero) # Total volume matrix
DoD_vol = np.where(DoD_vol==NaN, 0, DoD_vol)
dep_DoD = (DoD_vol>0)*DoD_vol # DoD of only deposition data
sco_DoD = (DoD_vol<0)*DoD_vol # DoD of only scour data
# Active pixel matrix:
act_px_matrix = np.where(DoD_vol!=0, 1, 0) # Active pixel matrix, both scour and deposition
act_px_matrix_dep = np.where(dep_DoD != 0, 1, 0) # Active deposition matrix
act_px_matrix_sco = np.where(sco_DoD != 0, 1, 0) # Active scour matrix
# Initialize array for each window dimension
###################################################################
# MOVING WINDOWS ANALYSIS
###################################################################
array = DoD_filt_nozero
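# Note: from here on W is reused as the moving-window base length in columns
# (it previously held the channel width of 0.6 m defined above).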
W=windows_length_base
mean_array_tot = []
std_array_tot= []
window_boundary = np.array([0,0])
x_data_tot=[]
tot_vol_array=[] # Tot volume
tot_vol_mean_array=[]
tot_vol_std_array=[]
sum_vol_array=[] # Sum of scour and deposition volume
dep_vol_array=[] # Deposition volume
sco_vol_array=[] # Scour volume
morph_act_area_array=[] # Total active area array
morph_act_area_dep_array=[] # Deposition active area array
morph_act_area_sco_array=[] # Active active area array
act_width_mean_array=[] # Total active width mean array
act_width_mean_dep_array=[] # Deposition active width mean array
act_width_mean_sco_array=[] # Scour active width mean array
if window_mode == 1:
# With overlapping
for w in range(1, int(math.floor(array.shape[1]/W))+1): # W*w is the dimension of every possible window
# Initialize arrays that stock data for each window position
x_data=[]
tot_vol_w_array = []
sum_vol_w_array = []
dep_vol_w_array = []
sco_vol_w_array =[]
morph_act_area_w_array = []
morph_act_area_dep_w_array = []
morph_act_area_sco_w_array = []
act_width_mean_w_array = []
act_width_mean_dep_w_array = []
act_width_mean_sco_w_array = []
act_thickness_w_array = []
act_thickness_dep_w_array = []
act_thickness_sco_w_array = []
for i in range(0, array.shape[1]+1):
if i+w*W <= array.shape[1]:
window = array[:, i:W*w+i]
boundary = np.array([i,W*w+i])
window_boundary = np.vstack((window_boundary, boundary))
x_data=np.append(x_data, w)
# Calculate morphological quantities
tot_vol, sum_vol, dep_vol, sco_vol, morph_act_area, morph_act_area_dep, morph_act_area_sco, act_width_mean, act_width_mean_dep, act_width_mean_sco, act_thickness, act_thickness_dep, act_thickness_sco = morph_quantities(window)
# Append single data to array
# For each window position the calculated parameters will be appended to _array
tot_vol_w_array=np.append(tot_vol_w_array, tot_vol)
sum_vol_w_array=np.append(sum_vol_w_array, sum_vol)
dep_vol_w_array=np.append(dep_vol_w_array, dep_vol)
sco_vol_w_array=np.append(sco_vol_w_array, sco_vol)
morph_act_area_w_array=np.append(morph_act_area_w_array, morph_act_area)
morph_act_area_dep_w_array=np.append(morph_act_area_dep_w_array, morph_act_area_dep)
morph_act_area_sco_w_array=np.append(morph_act_area_sco_w_array, morph_act_area_sco)
act_width_mean_w_array=np.append(act_width_mean_w_array, act_width_mean)
act_width_mean_dep_w_array=np.append(act_width_mean_dep_w_array, act_width_mean_dep)
act_width_mean_sco_w_array=np.append(act_width_mean_sco_w_array, act_width_mean_sco)
act_thickness_w_array=np.append(act_thickness_w_array, act_thickness)
act_thickness_dep_w_array=np.append(act_thickness_dep_w_array, act_thickness_dep)
act_thickness_sco_w_array=np.append(act_thickness_sco_w_array, act_thickness_sco)
# For each window dimension w*W, append the aggregated statistics
x_data_tot=np.append(x_data_tot, np.nanmean(x_data)) # Append one value of x_data
tot_vol_mean_array=np.append(tot_vol_mean_array, np.nanmean(tot_vol_w_array)) # Append the tot_vol_array mean
tot_vol_std_array=np.append(tot_vol_std_array, np.nanstd(tot_vol_w_array)) # Append the tot_vol_array standard deviation
# sum_vol_array=
# dep_vol_array=
# sco_vol_array=
# morph_act_area_array=
# morph_act_area_dep_array=
# morph_act_area_sco_array=
# act_width_mean_array=
# act_width_mean_dep_array=
# act_width_mean_sco_array=
# Slice window boundaries array to delete the [0,0] initialization row
window_boundary = window_boundary[1:,:]
if window_mode == 2:
# Without overlapping
for w in range(1, int(math.floor(array.shape[1]/W))+1): # W*w is the dimension of every possible window
mean_array = []
std_array= []
x_data=[]
for i in range(0, array.shape[1]+1):
if W*w*(i+1) <= array.shape[1]:
window = array[:, W*w*i:W*w*(i+1)]
boundary = np.array([W*w*i,W*w*(i+1)])
window_boundary = np.vstack((window_boundary, boundary))
mean = np.nanmean(window)
std = np.nanstd(window)
mean_array = np.append(mean_array, mean)
std_array = np.append(std_array, std)
x_data=np.append(x_data, w)
mean_array_tot = np.append(mean_array_tot, np.nanmean(mean_array))
std_array_tot = np.append(std_array_tot, np.nanstd(std_array))
import os
from numpy import arctan, array, cos, exp, log, sin
from lmfit import Parameters
thisdir, thisfile = os.path.split(__file__)
NIST_DIR = os.path.join(thisdir, '..', 'NIST_STRD')
def read_params(params):
if isinstance(params, Parameters):
return [par.value for par in params.values()]
else:
return params
def Bennet5(b, x, y=0):
b = read_params(b)
return y - b[0] * (b[1]+x)**(-1/b[2])
def BoxBOD(b, x, y=0):
b = read_params(b)
return y - b[0]*(1-exp(-b[1]*x))
def Chwirut(b, x, y=0):
b = read_params(b)
return y - exp(-b[0]*x)/(b[1]+b[2]*x)
def DanWood(b, x, y=0):
b = read_params(b)
return y - b[0]*x**b[1]
def ENSO(b, x, y=0):
b = read_params(b)
pi = 3.141592653589793238462643383279
return y - b[0] + (b[1]*cos(2*pi*x/12) + b[2]*sin(2*pi*x/12))
from pyitab.io.loader import DataLoader
from pyitab.analysis.linear_model import LinearModel
from pyitab.preprocessing.pipelines import PreprocessingPipeline
from pyitab.preprocessing.normalizers import FeatureZNormalizer
from pyitab.preprocessing.functions import SampleAttributeTransformer, TargetTransformer
from pyitab.preprocessing.slicers import SampleSlicer
from pyitab.plot.connectivity import plot_connectivity_matrix
from scipy.stats import zscore
import numpy as np
import matplotlib.pyplot as pl  # assumption: 'pl' is used below for colormaps (pl.cm.viridis)
import warnings
warnings.filterwarnings("ignore")
data_path = '/media/robbis/DATA/meg/viviana-hcp/'
conf_file = "/media/robbis/DATA/meg/viviana-hcp/bids.conf"
loader = DataLoader(configuration_file=conf_file,
data_path=data_path,
subjects="/media/robbis/DATA/meg/viviana-hcp/participants.tsv",
loader='bids-meg',
task='blp',
bids_atlas="complete",
bids_correction="corr",
bids_derivatives='True',
load_fx='hcp-blp')
ds = loader.fetch()
nodes = ds.fa.nodes_1
matrix = np.zeros_like(ds.samples[0])
nanmask = np.logical_not(np.isnan(ds.samples).sum(0))
ds = ds[:, nanmask]
def plot_stats(stat, matrix=matrix, nanmask=nanmask, nodes=nodes, title='stat'):
matrix[nanmask] = stat
_, a = plot_connectivity_matrix(matrix,
networks=nodes,
cmap=pl.cm.viridis,
vmin=0.)
a.set_title(title)
# Transform dataset to have mean 0 and std 1
prepro = [
SampleSlicer(task=['rest', 'task1', 'task2', 'task4', 'task5']),
FeatureZNormalizer(),
SampleAttributeTransformer(attr='dexterity1', fx=('zscore', zscore)),
SampleAttributeTransformer(attr='dexterity2', fx=('zscore', zscore)),
]
ds = PreprocessingPipeline(nodes=prepro).transform(ds)
##################
# 1. Full model (band + task + subject + dexterity)
n_bands = len(np.unique(ds.sa.mainband))
n_tasks = len(np.unique(ds.sa.maintask))
n_subjects = len(np.unique(ds.sa.subject))
dexterity = 1
band_contrast = np.zeros((n_bands-1, n_bands+n_tasks+n_subjects+1))
band_contrast[0, 1:4] = [1, -1, 0]
band_contrast[1, 1:4] = [0, 1, -1]
band_contrast[:, 4:7] = 1./n_tasks
band_contrast[:, 7:] = 1./n_subjects
task_contrast = np.zeros((n_tasks-1, n_bands+n_tasks+n_subjects+1))
task_contrast[0, 4:6] = [1, -1]  # two-element contrast needs a two-column slice
task_contrast[1, 5:7] = [1, -1]
index = 7
subject_contrast = np.zeros((n_subjects-1, n_bands+n_tasks+n_subjects+1))
for i in range(n_subjects-1):
subject_contrast[i, index:index+2] = [1, -1]
index += 1
contrasts = {
't+dexterity': np.hstack([1, np.zeros(n_bands), np.zeros(n_tasks), np.zeros(n_subjects)]),
'f+band': band_contrast,
'f+task': task_contrast,
'f+subject': subject_contrast,
}
lm = LinearModel(attr=['mainband', 'maintask', 'subject', 'dexterity1'])
lm.fit(ds, full_model=True)
lm._contrast(contrast=contrasts)
plot_stats(lm.scores.r_square, title='r2')
# stats
stats = lm.scores.stats_contrasts
for contrast, stats in lm.scores.stats_contrasts.items():
if contrast[0] == 'f':
test = 'F'
else:
test = 't'
s = stats[test]
p = stats['p_values']
t = 0.001 / s.shape[0]
s[p > t] = 0
plot_stats(s, title=contrast)
#######################################################
# 2. Modulation of tasks within bands.
contrasts = {
#'t+restvstask': [1, -1/4, -1/4, -1/4, -1/4, 0],
'f+restvstask': [1, -1/4, -1/4, -1/4, -1/4, 0],
'f+task': [[1,-1, 0, 0, 0, 0],
[0, 1,-1, 0, 0, 0],
[0, 0, 1,-1, 0, 0],
[0, 0, 0, 1,-1, 0]],
't+rest': [1, 0, 0, 0, 0, 0],
#'t+1task': [0, 1, 0, 0, 0, 0],
#'t+2task': [0, 0, 1, 0, 0, 0],
#'t+4task': [0, 0, 0, 1, 0, 0],
#'t+5task': [0, 0, 0, 0, 1, 0],
#'t+handvsfoot': [0, 1/2, -1/2, 1/2, -1/2, 0],
#'f+handvsfoot': [0, 1/2, -1/2, 1/2, -1/2, 0],
#'t+movement': [0, 1/4, 1/4, 1/4, 1/4, 0],
#'f+movement': [0, 1/4, 1/4, 1/4, 1/4, 0],
't+dexterity': [0, 0, 0, 0, 0, 1],
#'t+taskvsdext': [1/5, 1/5, 1/5, 1/5, 1/5, -1],
#'f+taskvsdext': [1/5, 1/5, 1/5, 1/5, 1/5, -1],
}
import seaborn as sns
color2 = "#F21A00"
color1 = "#3B9AB2"
tpalette = sns.blend_palette([color1, "#EEEEEE", color2], n_colors=100, as_cmap=True)
fpalette = sns.blend_palette(["#EEEEEE", color2], n_colors=100, as_cmap=True)
for band in ['alpha', 'beta', 'gamma']:
ds_ = SampleSlicer(mainband=[band]).transform(ds)
ds_ = PreprocessingPipeline(nodes=prepro).transform(ds_)
lm = LinearModel(attr=['task', 'dexterity1'])
lm.fit(ds_, formula='task + dexterity1 - 1')
lm._contrast(contrast=contrasts)
title = band
"""
r2 = lm.scores.r_square
matrix[nanmask] = r2
_, a = plot_connectivity_matrix(matrix,
networks=nodes,
cmap=pl.cm.viridis,
vmin=0.)
a.set_title(band+" | r2")
"""
# stats
stats = lm.scores.stats_contrasts
for contrast, stats in lm.scores.stats_contrasts.items():
if contrast[0] == 'f':
test = 'F'
cmap = fpalette
vmin = 0
else:
test = 't'
cmap = tpalette
s = stats[test]
p = stats['p_values']
t = 0.05 / s.shape[0]
if contrast[0] == 't':
vmin = -1*np.max(np.abs(s))
# implementation of biological basics like thermal time
def GDD(df, Tlo=10., Thi=44., method='I', **kwargs):
"""
Wrapper for daily thermal time (GDD) calculations
Method I: takes sub-daily resolved data (eg from Mark)
Method II: takes daily max/min data (eg from ERA, GFS)
Method III: takes daily avg + range data (eg from NMME)
kwargs is used to rename column name for Tmax, Tmin, Tair, Tavg
"""
if method=='I':
# Note, takes sub-daily and returns daily df
df = GDD_I(df, Tlo, Thi, **kwargs)
elif method=='II':
df = GDD_II(df, Tlo, Thi, **kwargs)
elif method=='III':
df = GDD_III(df, Tlo, Thi, **kwargs)
elif method=='IV':
df = GDD_IV(df, Tlo, **kwargs)
else:
# error: must use a method
return df
return df.V.tolist()
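# Minimal usage sketch (hypothetical dataframe names, not defined in this module):
# gdd_subdaily = GDD(df_hourly, Tlo=10., Thi=44., method='I', Tcol='Tair')
# gdd_daily = GDD(df_daily, Tlo=10., Thi=44., method='II', Tmaxcol='Tmx', Tmincol='Tmn')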
def GDD_I(df, Tlo=10., Thi=44., **kwargs):
"""
Method I: Takes T and aggregates by day
expects T to be in a dataframe that has a time index and the name of T as 'Tair'
takes kwargs Tcol
# Note, takes sub-daily and returns daily df
"""
import pandas as pd
from numpy import maximum, minimum
if 'Tcol' in kwargs:
Tval = kwargs['Tcol']
else:
Tval = 'Tair'
df['V'] = maximum(minimum(df[Tval], Thi), Tlo)  # element-wise clamp to [Tlo, Thi]
# handles data hourly, 3 hourly, 6 hourly or irregular
df = df.resample('60T').mean()
df = df.groupby(pd.Grouper(freq='D')).mean()
return df
def GDD_II(df, Tlo=10., Thi=44., **kwargs):
"""
Method II: Takes one Tmin, Tmax per day
Compute degree days using single sin method and horizontal cutoff
Takes account of 6 cases:
1. Above both thresholds.
2. Below both thresholds.
3. Between both thresholds.
4. Intercepted by the lower threshold.
5. Intercepted by the upper threshold.
6. Intercepted by both thresholds.
cases 4-6 assume a form that includes a
LEFT half (positive sin from 0 to pi added to Tavg-Tlo)
RIGHT half (negative sin from pi to 2pi)
for the purposes of this calculation, a day is 2*pi long
takes kwargs Tmaxcol and Tmincol
Example invocation with kwargs to rename columns:
df = GDD_II(df, Tlo=10, Tmaxcol='Tmx', Tmincol='Tmn')
"""
import numpy as np
import pandas as pd
from numpy import cos, arcsin, pi
import warnings
warnings.simplefilter("ignore")
if 'Tmincol' in kwargs:
Tmin = kwargs['Tmincol']
else:
Tmin = 'Tmin'
if 'Tmaxcol' in kwargs:
Tmax = kwargs['Tmaxcol']
else:
Tmax = 'Tmax'
twopi = 2*pi
case = np.zeros(len(df))
case[(df[Tmax] < Thi) & (df[Tmin] >= Tlo)] = 3
case[(df[Tmax] < Thi) & (df[Tmin] < Tlo)] = 4
case[(df[Tmax] >= Thi) & (df[Tmin] >= Tlo)] = 5
case[(df[Tmax] >= Thi) & (df[Tmin] < Tlo)] = 6
case[df[Tmin] >= Thi] = 1
case[df[Tmax] < Tlo] = 2
df['debug'] = case
Tavg = (df[Tmax]+df[Tmin])/2.
a = (df[Tmax]-df[Tmin])/2. # half amplitude of sin
b = (Tavg-Tlo) # offset of sin from Tlo
c = (Thi - Tavg)
df['V'] = 0.
case3_temp = np.where(case==3, Tavg-Tlo, 0)
df.loc[df.debug == 3, 'V'] = case3_temp[case3_temp != 0]
# df.V.loc[case == 1] = 0.
# df.V.loc[case == 2] = 0.
# df.V.loc[case == 3] = Tavg - Tlo
# if out-of-case, the arcsin arguments are guaranteed to cause a warning
warnings.simplefilter("ignore")
# Case 4
LEFT = (pi*b + 2*a)/twopi
tao = pi+arcsin(b/a)
RIGHT = 2*((tao-pi)*b + a*(cos(pi) - cos(tao)))/twopi
left_right4 = np.where(case==4, LEFT + RIGHT, 0)
df.loc[left_right4 != 0, 'V'] = left_right4[left_right4 != 0]
# df.V.loc[case == 4] = LEFT + RIGHT
# Case 5
tao2 = arcsin(c/a)
LEFT = ((pi-2*tao2)*c + pi*b + 2*a*(1-cos(tao2)))/twopi
RIGHT = (pi*b - 2*a)/twopi
left_right5 = np.where(case==5, LEFT + RIGHT, 0)
df.loc[df.debug == 5, 'V'] = left_right5[left_right5 != 0]
# df.V[case == 5] = LEFT + RIGHT
# Case 6
tao2 = arcsin(c/a)
LEFT = ((pi-2*tao2)*c + pi*b + 2*a*(1-cos(tao2)))/twopi
tao = pi + arcsin(b/a)
import numpy as np
import time
#from scipy.linalg import sqrtm
ABSERR = 10E-10
def compute_psd_factorization(X,r,nIterates=100,method='multiplicative',Init = None,silent=False):
n1,n2 = X.shape
if Init is None:
A = gen_psdlinmap(n1,r)
B = gen_psdlinmap(n2,r)
else:
A,B = Init
Errs = np.zeros((nIterates,))
start = time.process_time()
if not(silent):
print(' It. # | Error | Time Taken')
for ii in range(nIterates):
t_start = time.time()
if method == 'multiplicative':
try:
B = update_multiplicative(A, B, X)
except:
print('d')
B = update_multiplicative_damped(A, B, X)
try:
A = update_multiplicative(B, A, X.T)
except:
print('d')
A = update_multiplicative_damped(B, A, X.T)
if method == 'multiplicativeaccelerated':
B = update_multiplicativeaccelerated(A, B, X)
A = update_multiplicativeaccelerated(B, A, X.T)
if method == 'fpgm':
B = update_fpgm(A, B, X, 10)
B = np.real(B)
A = update_fpgm(B, A, X.T, 10)
AB = linmap_dot(A, B)
Errs[ii,] = np.linalg.norm(AB-X)/np.linalg.norm(X)
elapsed_time = time.time() - t_start
np.save('A.npy',A)
np.save('B.npy',B)
np.save('Errs.npy', Errs)
import cv2
import numpy as np
import sizes
class DocumentSignatureDetector:
_DPI = 300
def __init__(self, j_doc):
self.j_doc = j_doc
def find_signatures(self):
doc = cv2.imread(self.j_doc["pdf_page"])
scan = cv2.imread(self.j_doc["scan_page"])
self._DPI = sizes.calc_page_dpi(scan.shape[:2])
matched = self._match_scanned_page(doc, scan)
#self._display_image(matched)
difference_contours = self._diff_pages(doc, matched)
color_contours = self._test_color(matched)
signature_rectangles = self._create_signatures_rectangles(self.j_doc)
signatures_mask = self._create_rect_mask(doc.shape, signature_rectangles)
#self._display_image(signatures_mask)
color_mask = self._create_contours_mask(doc.shape, color_contours)
difference_mask = self._create_contours_mask(doc.shape, difference_contours)
# self._display_image(cv2.bitwise_and(signatures_mask, color_mask))
#self._display_image(cv2.bitwise_and(signatures_mask, difference_mask))
signatures_checking = []
# Fill in based on the detected colored elements.
img = cv2.bitwise_and(signatures_mask, color_mask)
for s in signature_rectangles:
x, y, w, h = s
crop_img = img[y:y + h, x:x + w]
if np.sum(crop_img) != 0:
signatures_checking.append({"detected": True})
else:
signatures_checking.append({"detected": False})
# Supplement the results using the image difference.
img = cv2.bitwise_and(signatures_mask, difference_mask)
for i, s in enumerate(signature_rectangles):
x, y, w, h = s
crop_img = img[y:y + h, x:x + w]
if np.sum(crop_img) != 0:
signatures_checking[i] = {"detected": True}
return {"signatures": signatures_checking}
def _display_image(self, img):
cv2.namedWindow("img", cv2.WINDOW_NORMAL)
cv2.resizeWindow("img", 600, 800);
cv2.imshow("img", img)
cv2.waitKey()
def _match_scanned_page(self, document, scanned):
obj = document
img = scanned
img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
cv2.adaptiveThreshold(img, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 5, 15, img)
obj = cv2.GaussianBlur(obj, (15, 15), 0)
img = cv2.GaussianBlur(img, (15, 15), 0)
OBJ_RESIZE_RATIO = 1.5
IMG_RESIZE_RATIO = 1.3
h, w, _ = obj.shape
if w > 800:
OBJ_RESIZE_RATIO = w / 800
h, w = img.shape
if w > 800:
IMG_RESIZE_RATIO = w / 800
RESIZE_RATIO = OBJ_RESIZE_RATIO if OBJ_RESIZE_RATIO < IMG_RESIZE_RATIO else IMG_RESIZE_RATIO
h, w, _ = obj.shape
obj = cv2.resize(obj, (int(w / RESIZE_RATIO), int(h / RESIZE_RATIO)))
h, w = img.shape
img = cv2.resize(img, (int(w / RESIZE_RATIO), int(h / RESIZE_RATIO)))
try:
hom = self._get_homography(obj, img)
except:
return np.zeros(document.shape, np.uint8)
s = np.identity(3, dtype=float)
s[0, 0] = RESIZE_RATIO
s[1, 1] = RESIZE_RATIO
h2 = np.dot(np.dot(s, hom), np.linalg.inv(s))
h, w, _ = document.shape
img1_warp = cv2.warpPerspective(scanned, h2, (w, h), flags=cv2.INTER_LINEAR | cv2.WARP_INVERSE_MAP)
return img1_warp
def _diff_pages(self, doc, matched):
ITERATIONS = 6
m1 = doc
m2 = matched
m1 = cv2.cvtColor(m1, cv2.COLOR_BGR2GRAY)
m2 = cv2.cvtColor(m2, cv2.COLOR_BGR2GRAY)
cv2.adaptiveThreshold(m1, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY_INV, 5, 15, m1)
cv2.adaptiveThreshold(m2, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY_INV, 5, 15, m2)
kernel = np.ones((3, 3), np.uint8)
m1 = cv2.dilate(m1, kernel, iterations=ITERATIONS)
m2 = cv2.dilate(m2, kernel, iterations=ITERATIONS)
combined = cv2.bitwise_xor(m1, m2)
combined = cv2.erode(combined, kernel, iterations=ITERATIONS)
if cv2.getVersionMajor() == 3:
# OpenCV 3.4
_, contours, _ = cv2.findContours(combined, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
else:
# OpenCV 4.1.0
contours, _ = cv2.findContours(combined, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
combined = cv2.cvtColor(combined, cv2.COLOR_GRAY2BGR)
good_contours = []
for i, k in enumerate(contours):
_, _, w, h = cv2.boundingRect(k)
if w > sizes.mmToPix(3, self._DPI) and h > sizes.mmToPix(4, self._DPI):
combined = cv2.drawContours(combined, contours, i, (0, 0, 255), cv2.FILLED)
good_contours.append(k)
return good_contours
def _test_color(self, img):
b = cv2.bitwise_not(cv2.extractChannel(img, 0))
g = cv2.bitwise_not(cv2.extractChannel(img, 1))
r = cv2.bitwise_not(cv2.extractChannel(img, 2))
cv2.threshold(b, 64, 255, cv2.THRESH_BINARY, dst=b)
cv2.threshold(g, 64, 255, cv2.THRESH_BINARY, dst=g)
cv2.threshold(r, 64, 255, cv2.THRESH_BINARY, dst=r)
sign = cv2.bitwise_or(cv2.bitwise_or(cv2.bitwise_xor(b, r), cv2.bitwise_xor(b, g)), cv2.bitwise_xor(r, g))
sign = cv2.GaussianBlur(sign, (11, 11), 0.0);
cv2.threshold(sign, 16, 255, cv2.THRESH_BINARY, dst=sign)
if cv2.getVersionMajor() == 3:
# OpenCV 3.4.0
_, contours, _ = cv2.findContours(sign, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
else:
# OpenCV 4.1.0
contours, _ = cv2.findContours(sign, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
combined = cv2.cvtColor(sign, cv2.COLOR_GRAY2BGR)
good_contours = []
for i, k in enumerate(contours):
x, y, w, h = cv2.boundingRect(k)
if w > sizes.mmToPix(3, self._DPI) and h > sizes.mmToPix(5, self._DPI):
combined = cv2.drawContours(combined, contours, i, (0, 0, 255), thickness=cv2.FILLED)
good_contours.append(k)
return good_contours
def _create_signatures_rectangles(self, j_doc):
signatures = []
dpi = self._DPI
for s in j_doc["signatures"]:
left = sizes.mmToPix(s["left"], dpi)
top = sizes.mmToPix(s["top"], dpi)
right = sizes.mmToPix(s["right"], dpi)
bottom = sizes.mmToPix(s["bottom"], dpi)
rectangle = (int(left), int(top), int(right-left), int(bottom-top))
signatures.append(rectangle)
return signatures
def _create_rect_mask(self, size, rectangles):
zero = np.zeros(size, np.uint8)
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# (C) British Crown Copyright 2017-2021 Met Office.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Tests for the improver.metadata.probabilistic module"""
import unittest
import iris
import numpy as np
from iris.exceptions import CoordinateNotFoundError
from iris.tests import IrisTest
from improver.metadata.probabilistic import (
find_percentile_coordinate,
find_threshold_coordinate,
format_cell_methods_for_diagnostic,
format_cell_methods_for_probability,
get_diagnostic_cube_name_from_probability_name,
get_threshold_coord_name_from_probability_name,
in_vicinity_name_format,
is_probability,
probability_is_above_or_below,
)
from improver.synthetic_data.set_up_test_cubes import (
set_up_percentile_cube,
set_up_probability_cube,
set_up_variable_cube,
)
class Test_probability_is_above_or_below(unittest.TestCase):
"""Test that the probability_is_above_or_below function correctly
identifies whether the spp__relative_to_threshold attribute is above
or below with the respect to the threshold."""
def setUp(self):
"""Set up data and thresholds for the cubes."""
self.data = np.ones((3, 3, 3), dtype=np.float32)
self.threshold_points = np.array([276, 277, 278], dtype=np.float32)
def test_above(self):
""" Tests the case where spp__relative_threshold is above"""
cube = set_up_probability_cube(
self.data, self.threshold_points, spp__relative_to_threshold="above"
)
result = probability_is_above_or_below(cube)
self.assertEqual(result, "above")
def test_below(self):
""" Tests the case where spp__relative_threshold is below"""
cube = set_up_probability_cube(
self.data, self.threshold_points, spp__relative_to_threshold="below"
)
result = probability_is_above_or_below(cube)
self.assertEqual(result, "below")
def test_greater_than(self):
""" Tests the case where spp__relative_threshold is greater_than"""
cube = set_up_probability_cube(
self.data, self.threshold_points, spp__relative_to_threshold="greater_than"
)
result = probability_is_above_or_below(cube)
self.assertEqual(result, "above")
def test_greater_than_or_equal_to(self):
""" Tests the case where spp__relative_threshold is
greater_than_or_equal_to"""
cube = set_up_probability_cube(
self.data,
self.threshold_points,
spp__relative_to_threshold="greater_than_or_equal_to",
)
result = probability_is_above_or_below(cube)
self.assertEqual(result, "above")
def test_less_than(self):
""" Tests the case where spp__relative_threshold is less_than"""
cube = set_up_probability_cube(
self.data, self.threshold_points, spp__relative_to_threshold="less_than"
)
result = probability_is_above_or_below(cube)
self.assertEqual(result, "below")
def test_less_than_or_equal_to(self):
""" Tests the case where spp__relative_threshold is
less_than_or_equal_to"""
cube = set_up_probability_cube(
self.data,
self.threshold_points,
spp__relative_to_threshold="less_than_or_equal_to",
)
result = probability_is_above_or_below(cube)
self.assertEqual(result, "below")
def test_no_spp__relative_to_threshold(self):
"""Tests it returns None if there is no spp__relative_to_threshold
attribute."""
cube = set_up_probability_cube(self.data, self.threshold_points,)
cube.coord("air_temperature").attributes = {
"relative_to_threshold": "greater_than"
}
result = probability_is_above_or_below(cube)
self.assertEqual(result, None)
def test_incorrect_attribute(self):
"""Tests it returns None if the spp__relative_to_threshold
attribute has an invalid value."""
cube = set_up_probability_cube(self.data, self.threshold_points,)
cube.coord("air_temperature").attributes = {
"spp__relative_to_threshold": "higher"
}
result = probability_is_above_or_below(cube)
self.assertEqual(result, None)
class Test_in_vicinity_name_format(unittest.TestCase):
"""Test that the 'in_vicinity' above/below threshold probability
cube naming function produces the correctly formatted names."""
def setUp(self):
"""Set up test cube"""
data = np.ones((3, 3, 3), dtype=np.float32)
threshold_points = np.array([276, 277, 278], dtype=np.float32)
self.cube = set_up_probability_cube(data, threshold_points)
self.cube.rename("probability_of_X_above_threshold")
def test_in_vicinity_name_format(self):
"""Test that 'in_vicinity' is added correctly to the name for both
above and below threshold cases"""
correct_name_above = "probability_of_X_in_vicinity_above_threshold"
new_name_above = in_vicinity_name_format(self.cube.name())
self.cube.rename("probability_of_X_below_threshold")
correct_name_below = "probability_of_X_in_vicinity_below_threshold"
new_name_below = in_vicinity_name_format(self.cube.name())
self.assertEqual(new_name_above, correct_name_above)
self.assertEqual(new_name_below, correct_name_below)
def test_between_thresholds(self):
"""Test for "between_thresholds" suffix"""
self.cube.rename("probability_of_visibility_between_thresholds")
correct_name = "probability_of_visibility_in_vicinity_between_thresholds"
new_name = in_vicinity_name_format(self.cube.name())
self.assertEqual(new_name, correct_name)
def test_no_above_below_threshold(self):
"""Test the case of name without above/below_threshold is handled
correctly"""
self.cube.rename("probability_of_X")
correct_name_no_threshold = "probability_of_X_in_vicinity"
new_name_no_threshold = in_vicinity_name_format(self.cube.name())
self.assertEqual(new_name_no_threshold, correct_name_no_threshold)
def test_in_vicinity_already_exists(self):
"""Test the case of 'in_vicinity' already existing in the cube name"""
self.cube.rename("probability_of_X_in_vicinity")
result = in_vicinity_name_format(self.cube.name())
self.assertEqual(result, "probability_of_X_in_vicinity")
class Test_get_threshold_coord_name_from_probability_name(unittest.TestCase):
"""Test utility to derive threshold coordinate name from probability cube name"""
def test_above_threshold(self):
"""Test correct name is returned from a standard (above threshold)
probability field"""
result = get_threshold_coord_name_from_probability_name(
"probability_of_air_temperature_above_threshold"
)
self.assertEqual(result, "air_temperature")
def test_below_threshold(self):
"""Test correct name is returned from a probability below threshold"""
result = get_threshold_coord_name_from_probability_name(
"probability_of_air_temperature_below_threshold"
)
self.assertEqual(result, "air_temperature")
def test_between_thresholds(self):
"""Test correct name is returned from a probability between thresholds
"""
result = get_threshold_coord_name_from_probability_name(
"probability_of_visibility_in_air_between_thresholds"
)
self.assertEqual(result, "visibility_in_air")
def test_in_vicinity(self):
"""Test correct name is returned from an "in vicinity" probability.
Name "cloud_height" is used in this test to illustrate why suffix
cannot be removed with "rstrip"."""
diagnostic = "cloud_height"
result = get_threshold_coord_name_from_probability_name(
f"probability_of_{diagnostic}_in_vicinity_above_threshold"
)
self.assertEqual(result, diagnostic)
def test_error_not_probability(self):
"""Test exception if input is not a probability cube name"""
with self.assertRaises(ValueError):
get_threshold_coord_name_from_probability_name("lwe_precipitation_rate")
class Test_get_diagnostic_cube_name_from_probability_name(unittest.TestCase):
"""Test utility to derive diagnostic cube name from probability cube name"""
def test_basic(self):
"""Test correct name is returned from a point probability field"""
diagnostic = "air_temperature"
result = get_diagnostic_cube_name_from_probability_name(
f"probability_of_{diagnostic}_above_threshold"
)
self.assertEqual(result, diagnostic)
def test_in_vicinity(self):
"""Test the full vicinity name is returned from a vicinity probability
field"""
diagnostic = "precipitation_rate"
result = get_diagnostic_cube_name_from_probability_name(
f"probability_of_{diagnostic}_in_vicinity_above_threshold"
)
self.assertEqual(result, f"{diagnostic}_in_vicinity")
def test_error_not_probability(self):
"""Test exception if input is not a probability cube name"""
with self.assertRaises(ValueError):
get_diagnostic_cube_name_from_probability_name("lwe_precipitation_rate")
class Test_is_probability(IrisTest):
"""Test the is_probability function"""
def setUp(self):
"""Set up test data"""
self.data = np.ones((3, 3, 3), dtype=np.float32)
self.threshold_points = np.array([276, 277, 278], dtype=np.float32)
self.prob_cube = set_up_probability_cube(self.data, self.threshold_points)
def test_true(self):
"""Test a probability cube evaluates as true"""
result = is_probability(self.prob_cube)
self.assertTrue(result)
def test_scalar_threshold_coord(self):
"""Test a probability cube with a single threshold evaluates as true"""
cube = iris.util.squeeze(self.prob_cube[0])
result = is_probability(cube)
self.assertTrue(result)
def test_false(self):
"""Test cube that does not contain thresholded probabilities
evaluates as false"""
cube = set_up_variable_cube(
self.data, name="probability_of_rain_at_surface", units="1"
)
result = is_probability(cube)
self.assertFalse(result)
class Test_find_threshold_coordinate(IrisTest):
"""Test the find_threshold_coordinate function"""
def setUp(self):
"""Set up test probability cubes with old and new threshold coordinate
naming conventions"""
data = np.ones((3, 3, 3), dtype=np.float32)
self.threshold_points = np.array([276, 277, 278], dtype=np.float32)
cube = set_up_probability_cube(data, self.threshold_points)
self.cube_new = cube.copy()
self.cube_old = cube.copy()
self.cube_old.coord("air_temperature").rename("threshold")
def test_basic(self):
"""Test function returns an iris.coords.Coord"""
threshold_coord = find_threshold_coordinate(self.cube_new)
self.assertIsInstance(threshold_coord, iris.coords.Coord)
def test_old_convention(self):
"""Test function recognises threshold coordinate with name "threshold"
"""
threshold_coord = find_threshold_coordinate(self.cube_old)
self.assertEqual(threshold_coord.name(), "threshold")
self.assertArrayAlmostEqual(threshold_coord.points, self.threshold_points)
def test_new_convention(self):
"""Test function recognises threshold coordinate with standard
diagnostic name and "threshold" as var_name"""
threshold_coord = find_threshold_coordinate(self.cube_new)
self.assertEqual(threshold_coord.name(), "air_temperature")
self.assertEqual(threshold_coord.var_name, "threshold")
self.assertArrayAlmostEqual(threshold_coord.points, self.threshold_points)
def test_fails_if_not_cube(self):
"""Test error if given a non-cube argument"""
msg = "Expecting data to be an instance of iris.cube.Cube"
with self.assertRaisesRegex(TypeError, msg):
find_threshold_coordinate([self.cube_new])
def test_fails_if_no_threshold_coord(self):
"""Test error if no threshold coordinate is present"""
self.cube_new.coord("air_temperature").var_name = None
msg = "No threshold coord found"
with self.assertRaisesRegex(CoordinateNotFoundError, msg):
find_threshold_coordinate(self.cube_new)
class Test_find_percentile_coordinate(IrisTest):
"""Test whether the cube has a percentile coordinate."""
def setUp(self):
"""Create a wind-speed and wind-gust cube with percentile coord."""
data = np.zeros((2, 3, 3), dtype=np.float32)
percentiles = np.array([50.0, 90.0], dtype=np.float32)
#!/usr/bin/env python
# coding:utf8
# -*- coding: utf-8 -*-
"""
Main Program: Run MODIS AGGREGATION IN PARALLEL
Created on 2019
@author: <NAME>
"""
import os
import sys
import h5py
import timeit
import random
import numpy as np
from mpi4py import MPI
from netCDF4 import Dataset
def read_filelist(loc_dir,prefix,yr,day,fileformat):
# Read the filelist in the specific directory
str = os.popen("ls "+ loc_dir + prefix + yr + day + "*."+fileformat).read()
fname = np.array(str.split("\n"))
fname = np.delete(fname,len(fname)-1)
return fname
def read_MODIS(fname1,fname2,verbose=False): # READ THE HDF FILE
# Read the cloud mask from MYD06_L2 product
ncfile=Dataset(fname1,'r')
CM1km = np.array(ncfile.variables['Cloud_Mask_1km'])
CM = (np.array(CM1km[:,:,0],dtype='byte'))
import typing
import numpy as np
from typing import List
class Solution:
def splitPainting(
self,
segments: List[List[int]],
) -> List[List[int]]:
n = 1 << 17
s = np.zeros(
n,
dtype=np.int64,
)
l, r, c = np.array(
segments,
).T
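# Difference-array technique: add each segment's color at its left endpoint and
# subtract it at its right endpoint; a cumulative sum over s then recovers the
# summed color value covering every unit interval.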
np.add.at(s, l, c)
np.add.at(s, r, -c)
import numpy as np
import torch
import rlkit.torch.pytorch_util as ptu
class StackedReplayBuffer:
def __init__(self, max_replay_buffer_size, time_steps, observation_dim, action_dim, task_indicator_dim, data_usage_reconstruction, data_usage_sac, num_last_samples, permute_samples, encoding_mode):
self._observation_dim = observation_dim
self._action_dim = action_dim
self._task_indicator_dim = task_indicator_dim
self._max_replay_buffer_size = max_replay_buffer_size
self._observations = np.zeros((max_replay_buffer_size, observation_dim), dtype=np.float32)
self._next_obs = np.zeros((max_replay_buffer_size, observation_dim), dtype=np.float32)
self._actions = np.zeros((max_replay_buffer_size, action_dim), dtype=np.float32)
self._rewards = np.zeros((max_replay_buffer_size, 1), dtype=np.float32)
# task indicator computed through encoder
self._base_task_indicators = np.zeros(max_replay_buffer_size, dtype=np.float32)
self._task_indicators = np.zeros((max_replay_buffer_size, task_indicator_dim), dtype=np.float32)
self._next_task_indicators = np.zeros((max_replay_buffer_size, task_indicator_dim), dtype=np.float32)
self._true_task = np.zeros((max_replay_buffer_size, 1), dtype=object) # filled with dicts with keys 'base', 'specification'
self._sparse_rewards = np.zeros((max_replay_buffer_size, 1), dtype=np.float32)
# self._terminals[i] = a terminal was received at time i
self._terminals = np.zeros((max_replay_buffer_size, 1), dtype='uint8')
self.time_steps = time_steps
self._top = 0
self._size = 0
self._episode_starts = []
# allowed points specify locations in the buffer, that, alone or together with the <self.time_step> last entries
# can be sampled
self._allowed_points = []
self._train_indices = []
self._val_indices = []
self.stats_dict = None
self.data_usage_reconstruction = data_usage_reconstruction
self.data_usage_sac = data_usage_sac
self.num_last_samples = num_last_samples
self.permute_samples = permute_samples
self.encoding_mode = encoding_mode
self.add_zero_elements()
self._cur_episode_start = self._top
def add_zero_elements(self):
# TODO: the buffer is already initialized with zeros, so writing zeros is redundant; advancing the pointer would suffice
for t in range(self.time_steps):
self.add_sample(
np.zeros(self._observation_dim),
np.zeros(self._action_dim),
np.zeros(1),
np.zeros(1, dtype='uint8'),
np.zeros(self._observation_dim),
np.zeros(self._task_indicator_dim),
np.zeros(self._task_indicator_dim),
np.zeros(1)
#env_info=dict(sparse_reward=0)
)
def add_episode(self, episode):
# Assume all array are same length (as they come from same rollout)
length = episode['observations'].shape[0]
# check, if whole episode fits into buffer
if length >= self._max_replay_buffer_size:
error_string =\
"-------------------------------------------------------------------------------------------\n\n" \
"ATTENTION:\n" \
"The current episode was longer than the replay buffer and could not be fitted in.\n" \
"Please consider decreasing the maximum episode length or increasing the task buffer size.\n\n" \
"-------------------------------------------------------------------------------------------"
print(error_string)
return
if self._size + length >= self._max_replay_buffer_size:
# A bit of space is left unused, but with a large buffer this matters little
# TODO: additional 0 samples must be added
self._top = 0
low = self._top
high = self._top + length
self._observations[low:high] = episode['observations']
self._next_obs[low:high] = episode['next_observations']
self._actions[low:high] = episode['actions']
self._rewards[low:high] = episode['rewards']
self._task_indicators[low:high] = episode['task_indicators']
self._next_task_indicators[low:high] = episode['next_task_indicators']
self._terminals[low:high] = episode['terminals']
self._true_task[low:high] = episode['true_tasks']
self._advance_multi(length)
self.terminate_episode()
def add_sample(self, observation, action, reward, terminal,
next_observation, task_indicator, next_task_indicator, true_task, **kwargs):
self._observations[self._top] = observation
self._next_obs[self._top] = next_observation
self._actions[self._top] = action
self._rewards[self._top] = reward
self._task_indicators[self._top] = task_indicator
self._next_task_indicators[self._top] = next_task_indicator
self._terminals[self._top] = terminal
self._true_task[self._top] = true_task
self._advance()
def terminate_episode(self):
# store the episode beginning once the episode is over
# n.b. allows last episode to loop but whatever
self._episode_starts.append(self._cur_episode_start)
# TODO: allowed points must be "reset" at buffer overflow
self._allowed_points += list(range(self._cur_episode_start, self._top))
self.add_zero_elements()
self._cur_episode_start = self._top
def size(self):
return self._size
def get_allowed_points(self):
return self._allowed_points
def _advance(self):
self._top = (self._top + 1) % self._max_replay_buffer_size
if self._size < self._max_replay_buffer_size:
self._size += 1
def _advance_multi(self, length):
self._top = (self._top + length) % self._max_replay_buffer_size
if self._size + length <= self._max_replay_buffer_size:
self._size += length
else:
self._size = self._max_replay_buffer_size
def sample_data(self, indices):
return dict(
observations=self._observations[indices],
next_observations=self._next_obs[indices],
actions=self._actions[indices],
rewards=self._rewards[indices],
task_indicators=self._task_indicators[indices],
next_task_indicators=self._next_task_indicators[indices],
sparse_rewards=self._sparse_rewards[indices],
terminals=self._terminals[indices],
true_tasks=self._true_task[indices]
)
def get_indices(self, points, batch_size, prio=None):
if prio == 'linear':
# prioritized version: later samples get more weight
weights = np.linspace(0.1, 0.9, points.shape[0])
weights = weights / np.sum(weights)
indices = np.random.choice(points, batch_size, replace=True if batch_size > points.shape[0] else False, p=weights)
elif prio == 'cut':
indices = np.random.choice(points[-self.num_last_samples:], batch_size, replace=True if batch_size > points[-self.num_last_samples:].shape[0] else False)
elif prio == 'tree_sampling':
# instead of using 'np.random.choice' directly on the whole 'points' array, which is O(n)
# and highly inefficient for big replay buffers, we subdivide 'points' in buckets, which we apply
# 'np.random.choice' to.
# 'points' needs to be shuffled already, to ensure i.i.d assumption
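# Worked example: with ~10,000 allowed points, root ~= 100, so one of ~100 buckets of
# ~100 consecutive (pre-shuffled) points is chosen and the batch is drawn from that
# bucket rather than from the full array.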
root = int(np.sqrt(points.shape[0]))
if root < batch_size:
indices = np.random.choice(points, batch_size, replace=True if batch_size > points.shape[0] else False)
else:
partition = int(points.shape[0] / root)
division = np.random.randint(0, root)
"""
timer_sim.py
simulates the timer hardware for debugging of the pinewood applications.
Copyright [2019] [<NAME>]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import sys
import socket
import numpy as np
import select
import time
import tkinter as tk
from queue import Queue
from threading import Thread, Lock
from typing import Iterable
infile = "lane_hosts.csv"
ready_msg = "<Ready to Race.>".encode('utf-8')
go_msg = "<GO!>".encode('utf-8')
reset_msg = "<Reset recieved.>".encode('utf-8')
time_prefix = "<Track count:".encode('utf-8')
time_suffix = ">".encode('utf-8')
stringlen = 64
race_ready = False
running_race = True
mutex = Lock()
class Lane:
def __init__(self, idx):
self.number = idx + 1
self.index = idx
self.reporting = None
self.connection = None
self.address = None
self.queue = Queue(maxsize=2)
self.host = ''
self.port = ''
self._socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.check_button = None
self.drop_button = None
def add_lane_to_window(self, parent: tk.Widget):
self.reporting = tk.BooleanVar()
self.reporting.set(True)
frame = tk.Frame(parent)
frame.pack()
self.check_button = tk.Checkbutton(frame, text="Lane {}".format(self.number)
, variable=self.reporting)
self.check_button.pack(side=tk.LEFT)
self.drop_button = tk.Button(frame, text="Drop", command=self.drop_connection)
self.drop_button.pack(side=tk.RIGHT)
def drop_connection(self):
if self.drop_button['text'] == 'Drop':
try:
self._socket.shutdown(socket.SHUT_RDWR)
except OSError:
pass
self._socket.close()
self.drop_button['text'] = 'Connect'
else:
self.start_socket()
def start_socket(self):
Thread(target=self._await_connection, daemon=True).start()
def _await_connection(self):
print("Setting up connection to {}:{}".format(self.host, self.port))
while True:
try:
self._socket.bind((self.host, self.port))
except OSError:
print(f"Unable to connect to {self.host}:{self.port}. It is probably already in use. Retry in 5 seconds.")
time.sleep(5.0)
else:
break
self._socket.listen(2)
print("Awaiting connection on {}:{}".format(self.host, self.port))
new_conn, new_addr = self._socket.accept()
mutex.acquire()
self.queue.put(new_conn)
self.queue.put(new_addr)
print("Connection from {} established.".format(self.host))
self.queue.task_done()
mutex.release()
def close_socket(self):
try:
self.connection.close()
except AttributeError:
pass
def get_connections(self):
if self.queue.full():
self.connection = self.queue.get()
self.address = self.queue.get()
return self.connection, self.address
def shutdown_connection(self):
try:
self.connection.shutdown(socket.SHUT_RDWR)
except AttributeError:
pass
def close_connection(self):
try:
self.connection.close()
except AttributeError:
pass
class MainWindow:
def __init__(self, lanes: Iterable[Lane]):
self.window = tk.Tk()
for lane in lanes:
lane.add_lane_to_window(self.window)
lane.start_socket()
self.reset_button = tk.Button(self.window, text="Reset", command=not_ready)
self.reset_button.pack()
self.racing_button = tk.Button(self.window, text="Racing", command=self.toggle_racing)
self.racing_button.pack()
self.window.protocol("WM_DELETE_WINDOW", close_manager)
self.window.update()
def toggle_racing(self):
global running_race
if self.racing_button.config('text')[-1] == "Racing":
self.racing_button.config(text="On Hold")
running_race = False
else:
self.racing_button.config(text="Racing")
running_race = True
def update(self):
self.window.update()
self.window.update_idletasks()
def activate_reset_button(self):
self.reset_button.configure(command=race_reset)
self.update()
def deactivate_reset_button(self):
self.reset_button.configure(command=not_ready)
self.update()
def make_str(race_number):
new_times = 12.0 + np.random.randn(4) / 10.0
time_str = ["{:5.3f}".format(x) for x in new_times]
time_str.insert(0, "{:5}".format(race_number))
return ','.join(time_str), race_number + 1
def set_host_and_port(lanes: Iterable[Lane], infile: str):
with open(infile) as fp:
for line in fp:
laneNumber, hostAddress, hostPort = line.split(',')
li = int(laneNumber) - 1
lanes[li].host = hostAddress
lanes[li].port = int(hostPort)
return lanes
def race_reset():
global race_ready, the_lanes
for lane in the_lanes:
lane.connection.sendall(reset_msg)
lane.connection.sendall(ready_msg)
race_ready = True
def time_msg():
"""Normally distributed random numbers around 4 seconds"""
racer_time = np.random.randn()
# Practice sites
#https://www.machinelearningplus.com/python/101-numpy-exercises-python/
#http://www.cs.umd.edu/~nayeem/courses/MSML605/files/04_Lec4_List_Numpy.pdf
#https://www.gormanalysis.com/blog/python-numpy-for-your-grandma/
#https://nickmccullum.com/advanced-python/numpy-indexing-assignment/
##? 1. Import numpy as np and see the version
# Difficulty Level: L1
# Q. Import numpy as np and print the version number.
import numpy as np
print(np.__version__)
##? 2. How to create a 1D array?
# Difficulty Level: L1
# Q. Create a 1D array of numbers from 0 to 9
arr = np.arange(10)
arr
##? 3. How to create a boolean array?
# Difficulty Level: L1
# Q. Create a 3×3 numpy array of all True’s
arr = np.full((3,3), True, dtype=bool)
arr
##? 4. How to extract items that satisfy a given condition from 1D array?
# Difficulty Level: L1
# Q. Extract all odd numbers from arr
arr = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
arr[arr % 2 == 1]
##? 5. How to replace items that satisfy a condition with another value in numpy array?
# Difficulty Level: L1
# Q. Replace all odd numbers in arr with -1
arr = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
arr[arr % 2 == 1] = -1
arr
##? 6. How to replace items that satisfy a condition without affecting the original array?
# Difficulty Level: L2
# Q. Replace all odd numbers in arr with -1 without changing arr
arr = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
#1 np.where
out = np.where(arr % 2 == 1, -1, arr)
out
#2 list comp
out = np.array([-1 if x % 2 == 1 else x for x in arr])
out
##? 7. How to reshape an array?
# Difficulty Level: L1
# Q. Convert a 1D array to a 2D array with 2 rows
arr = np.arange(10)
arr.reshape(2, -1)
# Setting y to -1 automatically decides number of columns.
# Could do the same with
arr.reshape(2, 5)
##? 8. How to stack two arrays vertically?
# Difficulty Level: L2
# Q. Stack arrays a and b vertically
a = np.arange(10).reshape(2, -1)
b = np.repeat(1, 10).reshape(2, -1)
#1
np.vstack([a, b])
#2
np.concatenate([a, b], axis=0)
#3
np.r_[a, b]
##? 9. How to stack two arrays horizontally?
# Difficulty Level: L2
# Q. Stack the arrays a and b horizontally.
a = np.arange(10).reshape(2, -1)
b = np.repeat(1, 10).reshape(2, -1)
#1
np.hstack([a, b])
#2
np.concatenate([a, b], axis=1)
#3
np.c_[a, b]
##? 10. How to generate custom sequences in numpy without hardcoding?
# Difficulty Level: L2
# Q. Create the following pattern without hardcoding.
# Use only numpy functions and the below input array a.
a = np.array([1,2,3])
np.r_[np.repeat(a,3), np.tile(a, 3)]
##? 11. How to get the common items between two python numpy arrays?
# Difficulty Level: L2
# Q. Get the common items between a and b
a = np.array([1,2,3,2,3,4,3,4,5,6])
b = np.array([7,2,10,2,7,4,9,4,9,8])
np.intersect1d(a, b)
##? 12. How to remove from one array those items that exist in another?
# Difficulty Level: L2
# Q. From array a remove all items present in array b
a = np.array([1,2,3,4,5])
b = np.array([5,6,7,8,9])
# From 'a' remove all of 'b'
np.setdiff1d(a,b)
##? 13. How to get the positions where elements of two arrays match?
# Difficulty Level: L2
# Q. Get the positions where elements of a and b match
a = np.array([1,2,3,2,3,4,3,4,5,6])
b = np.array([7,2,10,2,7,4,9,4,9,8])
np.where(a==b)
##? 14. How to extract all numbers between a given range from a numpy array?
# Difficulty Level: L2
# Q. Get all items between 5 and 10 from a.
a = np.array([2, 6, 1, 9, 10, 3, 27])
#1
idx = np.where((a>=5) & (a<=10))
a[idx]
#2
idx = np.where(np.logical_and(a >= 5, a <= 10))
a[idx]
#3
a[(a >= 5) & (a <= 10)]
##? 15. How to make a python function that handles scalars to work on numpy arrays?
# Difficulty Level: L2
# Q. Convert the function maxx that works on two scalars, to work on two arrays.
def maxx(x:np.array, y:np.array):
"""Get the maximum of two items"""
if x >= y:
return x
else:
return y
a = np.array([5, 7, 9, 8, 6, 4, 5])
b = np.array([6, 3, 4, 8, 9, 7, 1])
pair_max = np.vectorize(maxx, otypes=[float])
pair_max(a, b)
##? 16. How to swap two columns in a 2d numpy array?
# Difficulty Level: L2
# Q. Swap columns 1 and 2 in the array arr.
arr = np.arange(9).reshape(3,3)
arr
arr[:, [1, 0, 2]]
# By passing a list of indices in the column position, you can reorder the columns.
##? 17. How to swap two rows in a 2d numpy array?
# Difficulty Level: L2
# Q. Swap rows 1 and 2 in the array arr:
arr = np.arange(9).reshape(3,3)
arr
arr[[0, 2, 1], :]
#same goes here for the rows
##? 18. How to reverse the rows of a 2D array?
# Difficulty Level: L2
# Q. Reverse the rows of a 2D array arr.
# Input
arr = np.arange(9).reshape(3,3)
arr
arr[::-1, :]
#or
arr[::-1]
##? 19. How to reverse the columns of a 2D array?
# Difficulty Level: L2
# Q. Reverse the columns of a 2D array arr.
# Input
arr = np.arange(9).reshape(3,3)
arr
arr[:,::-1]
##? 20. How to create a 2D array containing random floats between 5 and 10?
# Difficulty Level: L2
# Q. Create a 2D array of shape 5x3 to contain random decimal numbers between 5 and 10.
arr = np.arange(9).reshape(3,3)
#1
rand_arr = np.random.randint(low=5, high=10, size=(5,3)) + np.random.random((5,3))
rand_arr
#2
rand_arr = np.random.uniform(5, 10, size=(5,3))
rand_arr
##? 21. How to print only 3 decimal places in python numpy array?
# Difficulty Level: L1
# Q. Print or show only 3 decimal places of the numpy array rand_arr.
rand_arr = np.random.random((5,3))
rand_arr
rand_arr = np.random.random([5,3])
np.set_printoptions(precision=3)
rand_arr[:4]
##? 22. How to pretty print a numpy array by suppressing the scientific notation (like 1e10)?
# Difficulty Level: L1
# Q. Pretty print rand_arr by suppressing the scientific notation (like 1e10)
#Reset printoptions
np.set_printoptions(suppress=False)
# Create the random array
np.random.seed(100)
rand_arr = np.random.random([3,3])/1e3
rand_arr
#Set precision and suppress e notation
np.set_printoptions(suppress=True, precision=6)
rand_arr
##? 23. How to limit the number of items printed in output of numpy array?
# Difficulty Level: L1
# Q. Limit the number of items printed in python numpy array a to a maximum of 6 elements.
a = np.arange(15)
#set the maximum number of elements to print via the threshold option
np.set_printoptions(threshold=6)
a
# reset the threshold to default
np.set_printoptions(threshold=1000)
##? 24. How to print the full numpy array without truncating
# Difficulty Level: L1
# Q. Print the full numpy array a without truncating.
a = np.arange(15)
# reset the threshold to default
np.set_printoptions(threshold=1000)
a
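# Aside: threshold=1000 is just the default and still truncates arrays with more
# than 1000 elements. To always print everything, the numpy docs suggest
# sys.maxsize (np.inf also works on recent numpy versions).
import sys
np.set_printoptions(threshold=sys.maxsize)
a
# restore the default afterwards
np.set_printoptions(threshold=1000)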
##? 25. How to import a dataset with numbers and texts keeping the text intact in python numpy?
# Difficulty Level: L2
# Q. Import the iris dataset keeping the text intact.
url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data'
iris = np.genfromtxt(url, delimiter=',', dtype="object")
names = ('sepallength', 'sepalwidth', 'petallength', 'petalwidth', 'species')
iris[:3]
##? 26. How to extract a particular column from 1D array of tuples?
# Difficulty Level: L2
# Q. Extract the text column species from the 1D iris imported in previous question.
url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data'
iris_1d = np.genfromtxt(url, delimiter=',', dtype=None, encoding = "UTF-8")
species = np.array([col[4] for col in iris_1d])
species[:5]
##? 27. How to convert a 1d array of tuples to a 2d numpy array?
# Difficulty Level: L2
# Q. Convert the 1D iris to 2D array iris_2d by omitting the species text field.
url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data'
iris_1d = np.genfromtxt(url, delimiter=',', dtype=None, encoding = "UTF-8")
#1
no_species_2d = np.array([row.tolist()[:4] for row in iris_1d])
no_species_2d[:3]
#2
# Can directly specify the columns to keep with the 'usecols' parameter
url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data'
no_species_2d = np.genfromtxt(url, delimiter=',', dtype=None, encoding = "UTF-8", usecols=[0,1,2,3])
no_species_2d[:3]
##? 28. How to compute the mean, median, standard deviation of a numpy array?
# Difficulty: L1
# Q. Find the mean, median, standard deviation of iris's sepallength (1st column)
url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data'
iris_1d = np.genfromtxt(url, delimiter=',', dtype=None, encoding="utf-8")
sepal = np.genfromtxt(url, delimiter=',', dtype=float, usecols=[0])
# or
sepal = np.array([col[0] for col in iris_1d])
# or
sepal = np.array([col.tolist()[0] for col in iris_1d])
mu, med, sd = np.mean(sepal), np.median(sepal), np.std(sepal)
np.set_printoptions(precision=2)
print(f'The mean is {mu} \nThe median is {med} \nThe standard deviation is {sd}')
##? 29. How to normalize an array so the values range exactly between 0 and 1?
# Difficulty: L2
# Q. Create a normalized form of iris's sepallength whose values range exactly between 0 and 1 so that the minimum has value 0 and maximum has value 1.
url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data'
iris_1d = np.genfromtxt(url, delimiter=',', dtype=None, encoding="utf-8")
sepal = np.genfromtxt(url, delimiter=',', dtype=float, usecols=[0])
#1
smax, smin = np.max(sepal), np.min(sepal)
S = (sepal-smin)/(smax-smin)
S
#2
S = (sepal-smin)/sepal.ptp()
S
##? 30. How to compute the softmax score?
# Difficulty Level: L3
# Q. Compute the softmax score of sepallength.
url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data'
sepal = np.genfromtxt(url, delimiter=',', dtype=float, usecols=[0], encoding="utf-8")
#or
sepal = np.genfromtxt(url, delimiter=',', dtype='object')
sepal = np.array([float(row[0]) for row in sepal])
# https://stackoverflow.com/questions/34968722/how-to-implement-the-softmax-function-in-python"""
#1
def softmax(x):
e_x = np.exp(x - np.max(x))
return e_x/ e_x.sum(axis=0)
softmax(sepal)
##? 31. How to find the percentile scores of a numpy array?
# Difficulty Level: L1
# Q. Find the 5th and 95th percentile of iris's sepallength
url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data'
sepal = np.genfromtxt(url, delimiter=',', dtype='float', usecols=[0])
np.percentile(sepal, q=[5, 95])
##? 32. How to insert values at random positions in an array?
# Difficulty Level: L2
# Q. Insert np.nan values at 20 random positions in iris_2d dataset
url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data'
iris_2d = np.genfromtxt(url, delimiter=',', dtype='float', encoding="utf-8")
#dtype='float' is used here; dtype='object' also works if you want to keep the text column
#1
i, j = np.where(iris_2d)
# i, j contain the row and column indices of the non-zero entries of iris_2d
np.random.seed(100)
iris_2d[np.random.choice(i, 20), np.random.choice((j), 20)] = np.nan
#Checking nans in 2nd column
np.isnan(iris_2d[:, 1]).sum()
#Looking over all rows/columns
np.isnan(iris_2d[:, :]).sum()
#2
np.random.seed(100)
iris_2d[np.random.randint(150, size=20), np.random.randint(4, size=20)]=np.nan
#Looking over all rows/columns
np.isnan(iris_2d[:, :]).sum()
##? 33. How to find the position of missing values in numpy array?
# Difficulty Level: L2
# Q. Find the number and position of missing values in iris_2d's sepallength (1st column)
# Reusing the iris_2d array with nans inserted by method 2 in question 32
np.isnan(iris_2d[:, 0]).sum()
#Indexes of which can be found with
np.where(np.isnan(iris_2d[:, 0]))
##? 34. How to filter a numpy array based on two or more conditions?
# Difficulty Level: L3
# Q. Filter the rows of iris_2d that has petallength (3rd column) > 1.5
# and sepallength (1st column) < 5.0
url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data'
iris_2d = np.genfromtxt(url, delimiter=',', dtype='float', usecols=[0,1,2,3])
filt_cond = (iris_2d[:,0] < 5.0) & (iris_2d[:, 2] > 1.5)
iris_2d[filt_cond]
##? 35. How to drop rows that contain a missing value from a numpy array?
# Difficulty Level: L3:
# Q. Select the rows of iris_2d that do not have any nan value.
url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data'
iris_2d = np.genfromtxt(url, delimiter=',', dtype='float', usecols=[0,1,2,3])
iris_2d[np.random.randint(150, size=20), np.random.randint(4, size=20)] = np.nan
#1
#No direct numpy implementation
iris_drop = np.array([~np.any(np.isnan(row)) for row in iris_2d])
#Look at first 5 rows of drop
iris_2d[iris_drop][:5]
#2
iris_2d[np.sum(np.isnan(iris_2d), axis=1)==0][:5]
##? 36. How to find the correlation between two columns of a numpy array?
# Difficulty Level: L2
# Q. Find the correlation between SepalLength(1st column) and PetalLength(3rd column) in iris_2d
url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data'
iris_2d = np.genfromtxt(url, delimiter=',', dtype='float', usecols=[0,1,2,3])
#1
np.corrcoef(iris_2d[:, 0], iris_2d[:, 2])[0, 1]
#2
from scipy.stats.stats import pearsonr
corr, p_val = pearsonr(iris_2d[:, 0], iris_2d[:, 2])
print(corr)
# Correlation coef indicates the degree of linear relationship between two numeric variables.
# It can range between -1 to +1.
# The p-value roughly indicates the probability of an uncorrelated system producing
# datasets that have a correlation at least as extreme as the one computed.
# The lower the p-value (< 0.01), the greater the significance of the relationship.
# It is not an indicator of the strength.
#> 0.871754157305
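# Quick sanity check (my addition, not in the original): Pearson r is just the
# covariance divided by the product of the standard deviations, so computing it
# by hand should reproduce the value above.
x, y = iris_2d[:, 0], iris_2d[:, 2]
((x - x.mean()) * (y - y.mean())).mean() / (x.std() * y.std())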
##? 37. How to find if a given array has any null values?
# Difficulty Level: L2
# Q. Find out if iris_2d has any missing values.
url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data'
iris_2d = np.genfromtxt(url, delimiter=',', dtype='float', usecols=[0,1,2,3])
np.isnan(iris_2d[:, :]).any()
##? 38. How to replace all missing values with 0 in a numpy array?
# Difficulty Level: L2
# Q. Replace all occurrences of nan with 0 in numpy array
url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data'
iris_2d = np.genfromtxt(url, delimiter=',', dtype='float', usecols=[0,1,2,3])
iris_2d[np.random.randint(150, size=20), np.random.randint(4, size=20)] = np.nan
#Check for nans
np.any(np.isnan(iris_2d))
#Set the indexes of the nans to 0
iris_2d[np.isnan(iris_2d)] = 0
#Check the same indexes
np.where(iris_2d==0)
#Check first 10 rows
iris_2d[:10]
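# Aside (not in the original answer): np.nan_to_num does the same replacement and
# returns a new array, leaving the input untouched. Re-create the array with nans
# first, since the copy above was already modified in place. The nan= keyword
# needs numpy >= 1.17.
iris_2d = np.genfromtxt(url, delimiter=',', dtype='float', usecols=[0,1,2,3])
iris_2d[np.random.randint(150, size=20), np.random.randint(4, size=20)] = np.nan
np.nan_to_num(iris_2d, nan=0.0)[:10]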
##? 39. How to find the count of unique values in a numpy array?
# Difficulty Level: L2
# Q. Find the unique values and the count of unique values in iris's species
# Input
url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data'
iris = np.genfromtxt(url, delimiter=',', dtype='object', encoding="utf-8")
names = ('sepallength', 'sepalwidth', 'petallength', 'petalwidth', 'species')
#1
species = np.array([row.tolist()[4] for row in iris])
np.unique(species, return_counts=True)
#2
np.unique(iris[:, 4], return_counts=True)
##? 40. How to convert a numeric to a categorical (text) array?
# Difficulty Level: L2
# Q. Bin the petal length (3rd) column of iris_2d to form a text array, such that if petal length is:
# Less than 3 --> 'small'
# 3-5 --> 'medium'
# >=5 --> 'large'
# Input
url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data'
iris = np.genfromtxt(url, delimiter=',', dtype='object')
names = ('sepallength', 'sepalwidth', 'petallength', 'petalwidth', 'species')
#1
#Bin the petal length
petal_length_bin = np.digitize(iris[:, 2].astype('float'), [0, 3, 5, 10])
#Map it to respective category.
label_map = {1: 'small', 2: 'medium', 3: 'large', 4: np.nan}
petal_length_cat = [label_map[x] for x in petal_length_bin]
petal_length_cat[:4]
#or
petal_length_cat = np.array(list(map(lambda x: label_map[x], petal_length_bin)))
petal_length_cat[:4]
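# Alternative sketch (not in the original answer): np.select expresses the same
# binning as explicit conditions, which can be easier to audit than digitize bins.
petal = iris[:, 2].astype('float')
np.select([petal < 3, petal < 5], ['small', 'medium'], default='large')[:4]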
##? 41. How to create a new column from existing columns of a numpy array?
# Difficulty Level: L2
# Q. Create a new column for volume in iris_2d,
# where volume is (pi x petallength x sepal_length^2)/3
# Input
url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data'
iris_2d = np.genfromtxt(url, delimiter=',', dtype='object')
# Compute volume
sepallength = iris_2d[:, 0].astype('float')
petallength = iris_2d[:, 2].astype('float')
volume = (np.pi * petallength*sepallength**2)/3
# Introduce new dimension to match iris_2d's
volume = volume[:, np.newaxis]
# Add the new column
out = np.hstack([iris_2d, volume])
out[:4]
##? 42. How to do probabilistic sampling in numpy?
# Difficulty Level: L3
# Q. Randomly sample iris's species such that setosa
# is twice the number of versicolor and virginica
# Import iris keeping the text column intact
url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data'
iris = np.genfromtxt(url, delimiter=',', dtype='object')
#Get species column
species = iris[:, 4]
#1 Generate Probablistically.
np.random.seed(100)
a = np.array(['Iris-setosa', 'Iris-versicolor', 'Iris-virginica'])
out = np.random.choice(a, 150, p=[0.5, 0.25, 0.25])
#Checking counts
np.unique(out[:], return_counts=True)
#2 Probablistic Sampling #preferred
np.random.seed(100)
probs = np.r_[np.linspace(0, 0.500, num=50), np.linspace(0.501, .750, num=50), np.linspace(.751, 1.0, num=50)]
index = np.searchsorted(probs, np.random.random(150))
species_out = species[index]
print(np.unique(species_out, return_counts=True))
# Approach 2 is preferred because it creates an index variable that can be
# used to sample 2d tabular data.
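# Sketch of that point (my illustration, not part of the original): the same index
# can pull whole rows out of the 2d iris table, so the sampled species keep their
# measurements attached.
sampled_rows = iris[index]
sampled_rows[:3]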
##? 43. How to get the second largest value of an array when grouped by another array?
# Difficulty Level: L2
# Q. What is the value of second longest petallength of species setosa
# Input
url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data'
iris = np.genfromtxt(url, delimiter=',', dtype='object')
names = ('sepallength', 'sepalwidth', 'petallength', 'petalwidth', 'species')
petal_setosa = iris[iris[:, 4]==b'Iris-setosa', [2]].astype('float')
#1
#Note. Option 1 returns 1.7, the second largest unique value, because np.unique() removes repeats.
np.unique(np.sort(petal_setosa))[-2]
#Note. Options 2 and 3 return 1.9 because that is the second largest value when repeats are counted.
#2
petal_setosa[np.argpartition(petal_setosa, -2)[-2]]
#3
petal_setosa[petal_setosa.argsort()[-2]]
#4
unq = np.unique(petal_setosa)
unq[np.argpartition(unq, -2)[-2]]
#Note: argpartition on the raw values still gives back 1.9, the 2nd largest counting repeats.
#Filtering for unique values first and then doing the argpart on the unq array (as in #4) returns 1.7.
##? 44. How to sort a 2D array by a column
# Difficulty Level: L2
# Q. Sort the iris dataset based on sepallength column.
url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data'
# dtype = [('sepallength', float), ('sepalwidth', float), ('petallength', float), ('petalwidth', float),('species', 'S10')]
iris = np.genfromtxt(url, delimiter=',', dtype="object")
names = ('sepallength', 'sepalwidth', 'petallength', 'petalwidth', 'species')
#1
print(iris[iris[:,0].argsort()][:20])
#2
#! This only sorts the values of the first column; the rows of iris are not reordered
np.sort(iris[:, 0], axis=0)
#3
sorted(iris, key=lambda x: x[0])
##? 45. How to find the most frequent value in a numpy array?
# Difficulty Level: L1
# Q. Find the most frequent value of petal length (3rd column) in iris dataset.
url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data'
iris = np.genfromtxt(url, delimiter=',', dtype='object')
names = ('sepallength', 'sepalwidth', 'petallength', 'petalwidth', 'species')
vals, counts = np.unique(iris[:, 2], return_counts=True)
print(vals[np.argmax(counts)])
##? 46. How to find the position of the first occurrence of a value greater than a given value?
# Difficulty Level: L2
# Q. Find the position of the first occurrence of a value greater than 1.0 in petalwidth 4th column of iris dataset.
url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data'
iris = np.genfromtxt(url, delimiter=',', dtype='object')
#1
np.argwhere(iris[:, 3].astype(float) > 1.0)[0]
##? 47. How to replace all values greater than a given value to a given cutoff?
# Difficulty Level: L2
# Q. From the array a, replace all values greater than 30 to 30 and less than 10 to 10.
np.set_printoptions(precision=2)
np.random.seed(100)
a = np.random.uniform(1,50, 20)
#1
np.clip(a, a_min=10, a_max=30)
#2
np.where(a < 10, 10, np.where(a > 30, 30, a))
#Tangent - Filtering condition
#Say we only want the values that would be clipped, i.e. below 10 or above 30. The or operator | helps there.
filt_cond = (a < 10) | (a > 30)
a[filt_cond]
##? 48. How to get the positions of top n values from a numpy array?
# Difficulty Level: L2
# Q. Get the positions of top 5 maximum values in a given array a.
np.random.seed(100)
a = np.random.uniform(1,50, 20)
#1
a.argsort()[-5:]
#2
np.argpartition(-a, 5)[:5]
# or (order is reversed though)
np.argpartition(a, -5)[-5:]
#To get the values.
#1
a[a.argsort()][-5:]
#2
np.sort(a)[-5:]
#3
np.partition(a, kth=-5)[-5:]
#4
a[np.argpartition(-a, 5)][:5]
#or
a[np.argpartition(a, -5)][-5:]
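# Aside: to get the positions and the values together, ordered from largest to
# smallest, reverse the argsort output before slicing.
top5 = np.argsort(a)[::-1][:5]
top5, a[top5]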
##? 49. How to compute the row wise counts of all possible values in an array?
# Difficulty Level: L4
# Q. Compute the counts of unique values row-wise.
np.random.seed(100)
arr = np.random.randint(1,11,size=(6, 10))
#Add a column with the count of unique values in each row
#Tangent fun
counts = np.array([np.unique(row).size for row in arr])
counts = counts.reshape(arr.shape[0], 1)
arr = np.hstack([arr, counts])
arr
#1
def row_counts(arr2d):
count_arr = [np.unique(row, return_counts=True) for row in arr2d]
return [[int(b[a==i]) if i in a else 0 for i in np.unique(arr2d)] for a, b in count_arr]
print(np.arange(1, 11))
row_counts(arr)
#2
arr = np.array([np.array(list('<NAME>')), np.array(list('narendramodi')), np.array(list('jjayalalitha'))])
print(np.unique(arr))
row_counts(arr)
##? 50. How to convert an array of arrays into a flat 1d array?
# Difficulty Level: 2
# Q. Convert array_of_arrays into a flat linear 1d array.
# Input:
arr1 = np.arange(3)
arr2 = np.arange(3,7)
arr3 = np.arange(7,10)
array_of_arrays = np.array([arr1, arr2, arr3])
array_of_arrays
#1 - List comp
arr_2d = [a for arr in array_of_arrays for a in arr]
arr_2d
#2 - concatenate
arr_2d = np.concatenate([arr1, arr2, arr3])
arr_2d
#3 - hstack
arr_2d = np.hstack([arr1, arr2, arr3])
arr_2d
#4 - ravel
arr_2d = np.concatenate(array_of_arrays).ravel() #ravel flattens the array
arr_2d
##? 51. How to generate one-hot encodings for an array in numpy?
# Difficulty Level L4
# Q. Compute the one-hot encodings (dummy binary variables for each unique value in the array)
# Input
np.random.seed(101)
arr = np.random.randint(1,11, size=20)
arr
#1
def one_hot_encode(arr):
uniqs = np.unique(arr)
out = np.zeros((arr.shape[0], uniqs.shape[0]))
for i, k in enumerate(arr):
out[i, k-1] = 1
return out
print("\t",np.arange(1, 11))
one_hot_encode(arr)
#2
(arr[:, None] == np.unique(arr)).view(np.int8)
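# Another sketch (not in the original answer): indexing an identity matrix with
# the zero-based position of each value gives the same one-hot matrix as #2.
uniqs = np.unique(arr)
np.eye(uniqs.size, dtype=np.int8)[np.searchsorted(uniqs, arr)]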
##? 52. How to create row numbers grouped by a categorical variable?
# Difficulty Level: L3
# Q. Create row numbers grouped by a categorical variable.
# Use the following sample from iris species as input.
#Input
url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data'
species = np.genfromtxt(url, delimiter=',', dtype='str', usecols=4)
#choose 20 species randomly
species_small = np.sort(np.random.choice(species, size=20))
species_small
#1
print([i for val in np.unique(species_small) for i, grp in enumerate(species_small[species_small==val])])
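# Alternative sketch (not in the original answer): because species_small is sorted,
# the per-group row numbers can be built from the group sizes without a double loop.
_, grp_counts = np.unique(species_small, return_counts=True)
np.concatenate([np.arange(c) for c in grp_counts])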
##? 53. How to create group ids based on a given categorical variable?
# Difficulty Level: L4
# Q. Create group ids based on a given categorical variable.
# Use the following sample from iris species as input.
url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data'
species = np.genfromtxt(url, delimiter=',', dtype='str', usecols=4)
species_small = np.sort(np.random.choice(species, size=20))
species_small
#1
[np.argwhere(np.unique(species_small) == s).tolist()[0][0] for val in np.unique(species_small) for s in species_small[species_small==val]]
#2
# Solution: For Loop version
output = []
uniqs = np.unique(species_small)
for val in uniqs: # uniq values in group
for s in species_small[species_small==val]: # each element in group
groupid = np.argwhere(uniqs == s).tolist()[0][0] # groupid
output.append(groupid)
print(output)
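# Aside (not in the original answer): np.unique with return_inverse gives the same
# group ids in a single call; since species_small is sorted, the order matches the
# loop version element for element.
np.unique(species_small, return_inverse=True)[1]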
##? 54. How to rank items in an array using numpy?
# Difficulty Level: L2
# Q. Create the ranks for the given numeric array a.
#Input
np.random.seed(10)
a = np.random.randint(20, size=10)
print(a)
a.argsort().argsort()
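# Aside: argsort-of-argsort assigns ranks 0..n-1 and breaks ties arbitrarily.
# scipy.stats.rankdata (an extra dependency, not used in the original answer)
# offers explicit tie handling such as 'min' or 'average'.
from scipy.stats import rankdata
rankdata(a, method='min') - 1  # zero-based ranks; tied values share the lowest rank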
##? 55. How to rank items in a multidimensional array using numpy?
# Difficulty Level: L3
# Q. Create a rank array of the same shape as a given numeric array a.
#Input
np.random.seed(10)
a = np.random.randint(20, size=[5,5])
print(a)
#1
print(a.ravel().argsort().argsort().reshape(a.shape))
#2
#Ranking the rows
tmp = a.argsort()[::-1]
np.arange(len(a))[tmp]+1
#2b
#Alternate ranking of rows (8x faster)
sidx = np.argsort(a, axis=1)
# Store shape info
m,n = a.shape
# Initialize output array
out = np.empty((m,n),dtype=int)
# Use sidx as column indices, while a range array for the row indices
# to select one element per row. Since sidx is a 2D array of indices
# we need to use a 2D extended range array for the row indices
out[np.arange(m)[:,None], sidx] = np.arange(n)
#3
#Ranking the columns
sidx = np.argsort(a, axis=0)
out[sidx, np.arange(n)] = np.arange(m)[:,None]
#4
#Ranking all the columns
tmp = a.argsort(axis=0).argsort(axis=0)[::-1]
np.arange(len(a))[tmp]+1
#4b Ranks for first column
tmp[:,0]
#4c Ranks for second column
tmp[:,1]
##? 56. How to find the maximum value in each row of a numpy array 2d?
# DifficultyLevel: L2
# Q. Compute the maximum for each row in the given array.
#Input
np.random.seed(100)
a = np.random.randint(1,10, [5,3])
a
#1
[np.max(row) for row in a]
#2
np.amax(a, axis=1)
#3
np.apply_along_axis(np.max, arr=a, axis=1)
##? 57. How to compute the min-by-max for each row for a numpy array 2d?
# DifficultyLevel: L3
# Q. Compute the min-by-max for each row for given 2d numpy array.
#Input
np.random.seed(100)
a = np.random.randint(1,10, [5,3])
a
#1
[np.min(row)/np.max(row) for row in a]
#2
np.apply_along_axis(lambda x: np.min(x)/np.max(x), arr=a, axis=1)
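# Aside: the row-wise reductions can also be done directly on the 2d array, which
# avoids the per-row Python call entirely.
a.min(axis=1) / a.max(axis=1)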
# Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Vec3Array Class."""
from __future__ import annotations
import dataclasses
from typing import Union
from alphafold.model.geometry import struct_of_array
from alphafold.model.geometry import utils
import jax
import jax.numpy as jnp
import numpy as np
Float = Union[float, jnp.ndarray]
VERSION = '0.1'
@struct_of_array.StructOfArray(same_dtype=True)
class Vec3Array:
"""Vec3Array in 3 dimensional Space implemented as struct of arrays.
This is done in order to improve performance and precision.
On TPU, small matrix multiplications are very suboptimal and waste large
compute resources. Furthermore, any matrix multiplication on TPU happens in
mixed bfloat16/float32 precision, which is often undesirable when handling
physical coordinates.
In most cases this will also be faster on cpu's/gpu's since it allows for
easier use of vector instructions.
"""
x: jnp.ndarray = dataclasses.field(metadata={'dtype': jnp.float32})
y: jnp.ndarray
z: jnp.ndarray
def __post_init__(self):
if hasattr(self.x, 'dtype'):
assert self.x.dtype == self.y.dtype
assert self.x.dtype == self.z.dtype
assert all([x == y for x, y in zip(self.x.shape, self.y.shape)])
assert all([x == z for x, z in zip(self.x.shape, self.z.shape)])
def __add__(self, other: Vec3Array) -> Vec3Array:
return jax.tree_multimap(lambda x, y: x + y, self, other)
def __sub__(self, other: Vec3Array) -> Vec3Array:
return jax.tree_multimap(lambda x, y: x - y, self, other)
def __mul__(self, other: Float) -> Vec3Array:
return jax.tree_map(lambda x: x * other, self)
def __rmul__(self, other: Float) -> Vec3Array:
return self * other
def __truediv__(self, other: Float) -> Vec3Array:
return jax.tree_map(lambda x: x / other, self)
def __neg__(self) -> Vec3Array:
return jax.tree_map(lambda x: -x, self)
def __pos__(self) -> Vec3Array:
return jax.tree_map(lambda x: x, self)
def cross(self, other: Vec3Array) -> Vec3Array:
"""Compute cross product between 'self' and 'other'."""
new_x = self.y * other.z - self.z * other.y
new_y = self.z * other.x - self.x * other.z
new_z = self.x * other.y - self.y * other.x
return Vec3Array(new_x, new_y, new_z)
def dot(self, other: Vec3Array) -> Float:
"""Compute dot product between 'self' and 'other'."""
return self.x * other.x + self.y * other.y + self.z * other.z
def norm(self, epsilon: float = 1e-6) -> Float:
"""Compute Norm of Vec3Array, clipped to epsilon."""
# To avoid NaN on the backward pass, we must use maximum before the sqrt
norm2 = self.dot(self)
if epsilon:
norm2 = jnp.maximum(norm2, epsilon**2)
return jnp.sqrt(norm2)
def norm2(self):
return self.dot(self)
def normalized(self, epsilon: float = 1e-6) -> Vec3Array:
"""Return unit vector with optional clipping."""
return self / self.norm(epsilon)
@classmethod
def zeros(cls, shape, dtype=jnp.float32):
"""Return Vec3Array corresponding to zeros of given shape."""
return cls(
jnp.zeros(shape, dtype), jnp.zeros(shape, dtype),
jnp.zeros(shape, dtype))
def to_array(self) -> jnp.ndarray:
return jnp.stack([self.x, self.y, self.z], axis=-1)
@classmethod
def from_array(cls, array):
return cls(*utils.unstack(array))
def __getstate__(self):
return (VERSION,
[np.asarray(self.x),
np.asarray(self.y),
np.asarray(self.z)])
# -*- mode: python; coding: utf-8 -*-
# Copyright (c) 2018 rasg-affiliates
# Licensed under the 3-clause BSD License
"""Test Delay Spectrum calculations."""
from __future__ import print_function
import os
import numpy as np
import copy
import pytest
import unittest
from itertools import chain
from pyuvdata import UVBeam, UVData
import pyuvdata.tests as uvtest
from astropy import units
from astropy.cosmology import Planck15, WMAP9
from astropy.cosmology.units import littleh
from scipy.signal import windows
from simpleDS import DelaySpectrum
from simpleDS import utils
from simpleDS.data import DATA_PATH
from pyuvdata.data import DATA_PATH as UVDATA_PATH
@pytest.fixture()
def ds_from_uvfits():
"""Fixture to initialize a DS object."""
testfile = os.path.join(UVDATA_PATH, "test_redundant_array.uvfits")
test_uvb_file = os.path.join(DATA_PATH, "test_redundant_array.beamfits")
uvd = UVData()
uvd.read(testfile)
ds = DelaySpectrum(uv=[uvd])
ds.select_spectral_windows([(0, 10), (10, 20)])
uvb = UVBeam()
uvb.read_beamfits(test_uvb_file)
ds.add_uvbeam(uvb=uvb)
yield ds
del ds, uvd, uvb
@pytest.fixture()
def ds_uvfits_and_uvb():
"""Fixture to also return the UVBeam object."""
testfile = os.path.join(UVDATA_PATH, "test_redundant_array.uvfits")
test_uvb_file = os.path.join(DATA_PATH, "test_redundant_array.beamfits")
uvd = UVData()
uvd.read(testfile)
ds = DelaySpectrum(uv=[uvd])
uvb = UVBeam()
uvb.read_beamfits(test_uvb_file)
ds.add_uvbeam(uvb=uvb)
yield ds, uvd, uvb
del ds, uvd, uvb
@pytest.fixture()
def ds_from_mwa():
"""Fixture to initialize a DS object."""
testfile = os.path.join(DATA_PATH, "mwa_full_poll.uvh5")
uvd = UVData()
uvd.read(testfile)
uvd.x_orientation = "east"
ds = DelaySpectrum(uv=[uvd])
yield ds
del ds, uvd
@pytest.fixture()
def ds_with_two_uvd():
"""Fixture to return DS object."""
testfile = os.path.join(UVDATA_PATH, "test_redundant_array.uvfits")
test_uvb_file = os.path.join(DATA_PATH, "test_redundant_array.beamfits")
uvd = UVData()
uvd.read(testfile)
uvd.x_orientation = "east"
ds = DelaySpectrum(uv=[uvd])
uvd.data_array += 1e3
ds.add_uvdata(uvd)
uvb = UVBeam()
uvb.read_beamfits(test_uvb_file)
ds.add_uvbeam(uvb=uvb)
yield ds
del ds, uvd, uvb
class DummyClass(object):
"""A Dummy object for comparison."""
def __init__(self):
"""Do Nothing."""
pass
class TestDelaySpectrumInit(unittest.TestCase):
"""A test class to check DelaySpectrum objects."""
def setUp(self):
"""Initialize basic parameter, property and iterator tests."""
self.required_parameters = [
"_Ntimes",
"_Nbls",
"_Nfreqs",
"_Npols",
"_vis_units",
"_Ndelays",
"_freq_array",
"_delay_array",
"_data_array",
"_nsample_array",
"_flag_array",
"_lst_array",
"_ant_1_array",
"_ant_2_array",
"_baseline_array",
"_polarization_array",
"_uvw",
"_trcvr",
"_redshift",
"_k_perpendicular",
"_k_parallel",
"_beam_area",
"_beam_sq_area",
"_taper",
"_Nants_telescope",
"_Nants_data",
]
self.required_properties = [
"Ntimes",
"Nbls",
"Nfreqs",
"Npols",
"vis_units",
"Ndelays",
"freq_array",
"delay_array",
"data_array",
"nsample_array",
"flag_array",
"lst_array",
"ant_1_array",
"ant_2_array",
"baseline_array",
"polarization_array",
"uvw",
"trcvr",
"redshift",
"k_perpendicular",
"k_parallel",
"beam_area",
"beam_sq_area",
"taper",
"Nants_telescope",
"Nants_data",
]
self.extra_parameters = ["_power_array"]
self.extra_properties = ["power_array"]
self.dspec_object = DelaySpectrum()
def teardown(self):
"""Test teardown: delete object."""
del self.dspec_object
def test_required_parameter_iter_metadata_only(self):
"""Test expected required parameters."""
required = []
for prop in self.dspec_object.required():
required.append(prop)
for a in self.required_parameters:
if a.lstrip("_") not in chain(
self.dspec_object._visdata_params,
self.dspec_object._power_params,
self.dspec_object._thermal_params,
):
assert a in required, (
"expected attribute " + a + " not returned in required iterator"
)
def test_required_parameter(self):
"""Test expected required parameters with data."""
testfile = os.path.join(UVDATA_PATH, "test_redundant_array.uvfits")
uvd = UVData()
uvd.read(testfile)
self.dspec_object = DelaySpectrum(uv=[uvd])
required = []
for prop in self.dspec_object.required():
required.append(prop)
for a in self.required_parameters:
assert a in required, (
"expected attribute " + a + " not returned in required iterator"
)
def test_properties(self):
"""Test that properties can be get and set properly."""
prop_dict = dict(list(zip(self.required_properties, self.required_parameters)))
for k, v in prop_dict.items():
rand_num = np.random.rand()
setattr(self.dspec_object, k, rand_num)
this_param = getattr(self.dspec_object, v)
try:
assert rand_num == this_param.value
except (AssertionError):
print(
"setting {prop_name} to a random number failed".format(prop_name=k)
)
raise (AssertionError)
def test_errors_when_taper_not_function():
"""Test that init errors if taper not a function."""
pytest.raises(ValueError, DelaySpectrum, taper="test")
def test_error_for_multiple_baselines():
"""Test an error is raised if there are more than one unique baseline in input UVData."""
# testfile = os.path.join(UVDATA_PATH, 'hera19_8hrs_uncomp_10MHz_000_05.003111-05.033750.uvfits')
uvd = UVData()
uvd.baseline_array = np.array([1, 2])
uvd.uvw_array = np.array([[0, 1, 0], [1, 0, 0]])
# uvd.read(testfile)
# uvd.unphase_to_drift(use_ant_pos=True)
pytest.raises(ValueError, DelaySpectrum, uv=uvd)
def test_error_if_uv_not_uvdata():
"""Test error is raised when input uv is not a UVData object."""
bad_input = DummyClass()
pytest.raises(ValueError, DelaySpectrum, uv=bad_input)
def test_custom_taper():
"""Test setting custom taper."""
test_win = windows.blackman
dspec = DelaySpectrum(taper=test_win)
assert test_win == dspec.taper
def test_no_taper():
"""Test default setting of set_taper."""
dspec = DelaySpectrum()
dspec.set_taper()
assert dspec.taper == windows.blackmanharris
class TestBasicFunctions(unittest.TestCase):
"""Test basic equality functions."""
def setUp(self):
"""Initialize tests of basic methods."""
self.uvdata_object = UVData()
self.testfile = os.path.join(UVDATA_PATH, "test_redundant_array.uvfits")
self.uvdata_object.read(self.testfile)
self.dspec_object = DelaySpectrum(uv=self.uvdata_object)
self.dspec_object2 = copy.deepcopy(self.dspec_object)
def teardown(self):
"""Test teardown: delete objects."""
del self.dspec_object
del self.dspec_object2
del self.uvdata_object
def test_equality(self):
"""Basic equality test."""
print(np.allclose(self.dspec_object.flag_array, self.dspec_object2.flag_array))
assert self.dspec_object == self.dspec_object2
def test_check(self):
"""Test that check function operates as expected."""
assert self.dspec_object.check()
# test that it fails if we change values
self.dspec_object.Ntimes += 1
pytest.raises(ValueError, self.dspec_object.check)
self.dspec_object.Ntimes -= 1
self.dspec_object.Nbls += 1
pytest.raises(ValueError, self.dspec_object.check)
self.dspec_object.Nbls -= 1
self.dspec_object.Nfreqs += 1
pytest.raises(ValueError, self.dspec_object.check)
self.dspec_object.Nfreqs -= 1
self.dspec_object.Npols += 1
pytest.raises(ValueError, self.dspec_object.check)
self.dspec_object.Npols -= 1
self.dspec_object.Ndelays += 1
pytest.raises(ValueError, self.dspec_object.check)
self.dspec_object.Ndelays -= 1
self.dspec_object.Ndelays = np.float64(self.dspec_object.Ndelays)
pytest.raises(ValueError, self.dspec_object.check)
self.dspec_object.Ndelays = np.int32(self.dspec_object.Ndelays)
self.dspec_object.polarization_array = (
self.dspec_object.polarization_array.astype(np.float32)
)
pytest.raises(ValueError, self.dspec_object.check)
self.dspec_object.polarization_array = (
self.dspec_object.polarization_array.astype(np.int64)
)
Nfreqs = copy.deepcopy(self.dspec_object.Nfreqs)
self.dspec_object.Nfreqs = None
pytest.raises(ValueError, self.dspec_object.check)
self.dspec_object.Nfreqs = Nfreqs
self.dspec_object.vis_units = 2
pytest.raises(ValueError, self.dspec_object.check)
self.dspec_object.vis_units = "Jy"
self.dspec_object.Nfreqs = (2, 1, 2)
pytest.raises(ValueError, self.dspec_object.check)
self.dspec_object.Nfreqs = Nfreqs
self.dspec_object.Nfreqs = np.complex64(2)
pytest.raises(ValueError, self.dspec_object.check)
self.dspec_object.Nfreqs = Nfreqs
freq_back = copy.deepcopy(self.dspec_object.freq_array)
self.dspec_object.freq_array = (
np.arange(self.dspec_object.Nfreqs).reshape(1, Nfreqs).tolist()
)
pytest.raises(ValueError, self.dspec_object.check)
self.dspec_object.freq_array = freq_back
freq_back = copy.deepcopy(self.dspec_object.freq_array)
self.dspec_object.freq_array = freq_back.value.copy() * units.m
pytest.raises(units.UnitConversionError, self.dspec_object.check)
self.dspec_object.freq_array = freq_back
self.dspec_object.freq_array = (
freq_back.value.astype(np.complex64) * freq_back.unit
)
pytest.raises(ValueError, self.dspec_object.check)
self.dspec_object.freq_array = freq_back
integration_time_back = copy.deepcopy(self.dspec_object.integration_time)
self.dspec_object.integration_time = integration_time_back.astype(complex)
pytest.raises(ValueError, self.dspec_object.check)
self.dspec_object.integration_time = integration_time_back
Nuv = self.dspec_object.Nuv
self.dspec_object.Nuv = 10
pytest.raises(ValueError, self.dspec_object.check)
self.dspec_object.Nuv = Nuv
self.dspec_object.data_type = "delay"
pytest.raises(ValueError, self.dspec_object.check)
self.dspec_object.data_type = "frequency"
self.dspec_object.data_array = self.dspec_object.data_array * units.Hz
pytest.raises(ValueError, self.dspec_object.check)
self.dspec_object.data_type = "delay"
assert self.dspec_object.check()
self.dspec_object.data_type = "frequency"
self.dspec_object.data_array = self.dspec_object.data_array.value * units.Jy
assert self.dspec_object.check()
def test_add_wrong_units(self):
"""Test error is raised when adding a uvdata_object with the wrong units."""
uvd = UVData()
uvd.read(self.testfile)
uvd.vis_units = "K str"
pytest.raises(units.UnitConversionError, self.dspec_object.add_uvdata, uvd)
uvd.vis_units = "uncalib"
warn_message = [
"Data is uncalibrated. Unable to covert "
"noise array to unicalibrated units."
]
with pytest.raises(
units.UnitConversionError,
match="Input data object is in units incompatible",
):
with uvtest.check_warnings(UserWarning, warn_message):
self.dspec_object.add_uvdata(uvd)
def test_add_too_many_UVData(self):
"""Test error is raised when adding too many UVData objects."""
uvd = UVData()
uvd.read(self.testfile)
self.dspec_object.Nuv = 2
pytest.raises(ValueError, self.dspec_object.add_uvdata, uvd)
def test_incompatible_parameters(self):
"""Test UVData objects with incompatible paramters are rejected."""
uvd = UVData()
uvd.read(self.testfile)
uvd.select(freq_chans=np.arange(12))
pytest.raises(ValueError, self.dspec_object.add_uvdata, uvd)
def test_adding_spectral_windows_different_tuple_shape(self):
"""Test error is raised if spectral windows have different shape input."""
pytest.raises(
ValueError,
self.dspec_object.select_spectral_windows,
spectral_windows=((2, 3), (1, 2, 4)),
)
def test_adding_spectral_windows_different_lengths(self):
"""Test error is raised if spectral windows have different shape input."""
pytest.raises(
ValueError,
self.dspec_object.select_spectral_windows,
spectral_windows=((2, 3), (2, 6)),
)
def test_adding_multiple_spectral_windows(self):
"""Test multiple spectral windows are added correctly."""
self.dspec_object.select_spectral_windows([(3, 5), (7, 9)])
expected_shape = (
2,
1,
self.dspec_object.Npols,
self.dspec_object.Nbls,
self.dspec_object.Ntimes,
3,
)
assert expected_shape == self.dspec_object.data_array.shape
assert self.dspec_object.check()
def test_add_second_uvdata_object(self):
"""Test a second UVdata object can be added correctly."""
uvd = UVData()
uvd.read(self.testfile)
# multiply by a scalar here to track if it gets set in the correct slot
uvd.data_array *= np.sqrt(2)
self.dspec_object.add_uvdata(uvd)
assert self.dspec_object.Nuv == 2
assert np.allclose(
self.dspec_object.data_array[:, 0].value,
self.dspec_object.data_array[:, 1].value / np.sqrt(2),
)
def test_adding_spectral_window_one_tuple():
"""Test spectral window can be added when only one tuple given."""
testfile = os.path.join(UVDATA_PATH, "test_redundant_array.uvfits")
uvd = UVData()
uvd.read(testfile)
dspec_object = DelaySpectrum(uv=uvd)
dspec_object.select_spectral_windows(spectral_windows=(3, 12))
assert dspec_object.Nfreqs == 10
assert dspec_object.Ndelays == 10
assert np.allclose(dspec_object.freq_array.to("Hz").value, uvd.freq_array[:, 3:13])
def test_adding_spectral_window_between_uvdata():
"""Test that adding a spectral window between uvdata objects is handled."""
testfile = os.path.join(UVDATA_PATH, "test_redundant_array.uvfits")
uvd = UVData()
uvd.read(testfile)
dspec_object = DelaySpectrum(uv=uvd)
dspec_object.select_spectral_windows(spectral_windows=[(3, 12)])
uvd1 = copy.deepcopy(uvd)
dspec_object.add_uvdata(uvd1)
assert dspec_object.check()
def test_adding_new_uvdata_with_different_freqs():
"""Test error is raised when trying to add a uvdata object with different freqs."""
testfile = os.path.join(UVDATA_PATH, "test_redundant_array.uvfits")
uvd = UVData()
uvd.read(testfile)
dspec_object = DelaySpectrum(uv=uvd)
dspec_object.select_spectral_windows(spectral_windows=[(3, 12)])
uvd1 = copy.deepcopy(uvd)
uvd1.freq_array *= 11.1
pytest.raises(ValueError, dspec_object.add_uvdata, uvd1)
def test_adding_new_uvdata_with_different_lsts():
"""Test error is raised when trying to add a uvdata object with different LSTS."""
testfile = os.path.join(UVDATA_PATH, "test_redundant_array.uvfits")
uvd = UVData()
uvd.read(testfile)
dspec_object = DelaySpectrum(uv=uvd)
dspec_object.select_spectral_windows(spectral_windows=[(3, 12)])
uvd1 = copy.deepcopy(uvd)
uvd1.lst_array += (3 * units.min * np.pi / (12 * units.h).to("min")).value
# the actual output of this warning depends on the time difference of the
# arrays so we'll cheat on the check.
warn_message = ["Input LST arrays differ on average by"]
with uvtest.check_warnings(UserWarning, warn_message):
dspec_object.add_uvdata(uvd1)
def test_select_spectral_window_not_inplace():
"""Test it is possible to return a different object from select spectral window."""
testfile = os.path.join(UVDATA_PATH, "test_redundant_array.uvfits")
uvd = UVData()
uvd.read(testfile)
dspec_object = DelaySpectrum(uv=uvd)
new_dspec = dspec_object.select_spectral_windows(
spectral_windows=[(3, 12)], inplace=False
)
assert dspec_object != new_dspec
def test_loading_different_arrays():
"""Test error is raised trying to combine different arrays."""
testfile = os.path.join(UVDATA_PATH, "test_redundant_array.uvfits")
uvd = UVData()
uvd.read(testfile)
dspec_object = DelaySpectrum(uv=uvd)
bls = np.unique(uvd.baseline_array)[:-1]
ants = [uvd.baseline_to_antnums(bl) for bl in bls]
ants = [(a1, a2) for a1, a2 in ants]
uvd.select(bls=ants)
pytest.raises(ValueError, dspec_object.add_uvdata, uvd)
def test_loading_uvb_object():
"""Test a uvb object can have the beam_area and beam_sq_area read."""
testfile = os.path.join(UVDATA_PATH, "test_redundant_array.uvfits")
test_uvb_file = os.path.join(DATA_PATH, "test_redundant_array.beamfits")
uvd = UVData()
uvd.read(testfile)
dspec_object = DelaySpectrum(uv=uvd)
uvb = UVBeam()
uvb.read_beamfits(test_uvb_file)
dspec_object.add_uvbeam(uvb=uvb, use_exact=True)
uvb.select(frequencies=uvd.freq_array[0])
assert np.allclose(
uvb.get_beam_area(pol="pI"), dspec_object.beam_area.to("sr").value
)
def test_add_uvb_interp_areas():
"""Test that the returned interped uvb areas match the exact ones."""
testfile = os.path.join(UVDATA_PATH, "test_redundant_array.uvfits")
test_uvb_file = os.path.join(DATA_PATH, "test_redundant_array.beamfits")
uvd = UVData()
uvd.read(testfile)
dspec_object = DelaySpectrum(uv=uvd)
dspec_object2 = copy.deepcopy(dspec_object)
uvb = UVBeam()
uvb.read_beamfits(test_uvb_file)
dspec_object.add_uvbeam(uvb=uvb, use_exact=True)
uvb.freq_array += 1e6 # Add 1 MHz to force interpolation
assert uvb.freq_array != dspec_object.freq_array
dspec_object2.add_uvbeam(uvb=uvb)
assert np.allclose(
dspec_object.beam_area.to_value("sr"), dspec_object2.beam_area.to_value("sr")
)
assert np.allclose(
dspec_object.beam_sq_area.to_value("sr"),
dspec_object2.beam_sq_area.to_value("sr"),
)
assert np.allclose(
dspec_object.trcvr.to_value("K"), dspec_object2.trcvr.to_value("K")
)
def test_add_uvb_interp_missing_freqs():
"""Test that the built in UVBeam interps match the interped beam areas."""
pytest.importorskip("astropy_healpix")
testfile = os.path.join(UVDATA_PATH, "test_redundant_array.uvfits")
test_uvb_file = os.path.join(DATA_PATH, "test_redundant_array.beamfits")
uvd = UVData()
uvd.read(testfile)
dspec_object = DelaySpectrum(uv=uvd)
dspec_object2 = copy.deepcopy(dspec_object)
uvb = UVBeam()
uvb.read_beamfits(test_uvb_file)
dspec_object2.add_uvbeam(uvb=uvb)
uvb.select(frequencies=uvb.freq_array.squeeze()[::2])
uvb.interpolation_function = "healpix_simple"
dspec_object.add_uvbeam(uvb=uvb, use_exact=True)
assert np.allclose(
dspec_object.beam_area.to_value("sr"), dspec_object2.beam_area.to_value("sr")
)
assert np.allclose(
dspec_object.beam_sq_area.to_value("sr"),
dspec_object2.beam_sq_area.to_value("sr"),
)
assert np.allclose(
dspec_object.trcvr.to_value("K"), dspec_object2.trcvr.to_value("K")
)
def test_add_uvdata_uvbeam_uvdata():
"""Test that a uvb can be added in between two uvdata objects."""
testfile = os.path.join(UVDATA_PATH, "test_redundant_array.uvfits")
test_uvb_file = os.path.join(DATA_PATH, "test_redundant_array.beamfits")
uvd = UVData()
uvd.read(testfile)
dspec_object = DelaySpectrum(uv=uvd)
uvb = UVBeam()
uvb.read_beamfits(test_uvb_file)
dspec_object.add_uvbeam(uvb=uvb, use_exact=True)
dspec_object.add_uvdata(uvd)
assert dspec_object.check()
def test_loading_uvb_object_no_data():
"""Test error is raised if adding a UVBeam object but no data."""
test_uvb_file = os.path.join(DATA_PATH, "test_redundant_array.beamfits")
uvb = UVBeam()
uvb.read_beamfits(test_uvb_file)
pytest.raises(ValueError, DelaySpectrum, uvb=uvb)
def test_loading_uvb_object_with_data():
"""Test uvbeam can be added in init."""
testfile = os.path.join(UVDATA_PATH, "test_redundant_array.uvfits")
test_uvb_file = os.path.join(DATA_PATH, "test_redundant_array.beamfits")
uv = UVData()
uv.read(testfile)
uvb = UVBeam()
uvb.read_beamfits(test_uvb_file)
dspec_object = DelaySpectrum(uv=uv, uvb=uvb)
assert dspec_object.check()
assert np.allclose(
uvb.get_beam_area(pol="pI"), dspec_object.beam_area.to("sr").value
)
def test_loading_uvb_object_with_trcvr():
"""Test a uvb object with trcvr gets added properly."""
testfile = os.path.join(UVDATA_PATH, "test_redundant_array.uvfits")
test_uvb_file = os.path.join(DATA_PATH, "test_redundant_array.beamfits")
uvd = UVData()
uvd.read(testfile)
dspec_object = DelaySpectrum(uv=uvd)
uvb = UVBeam()
uvb.read_beamfits(test_uvb_file)
uvb.receiver_temperature_array = np.ones((1, uvb.Nfreqs)) * 144
dspec_object.add_uvbeam(uvb=uvb, use_exact=True)
uvb.select(frequencies=uvd.freq_array[0])
assert np.allclose(
uvb.receiver_temperature_array[0], dspec_object.trcvr.to("K")[0].value
)
def test_loading_uvb_object_with_trcvr_interp():
"""Test a uvb object with trcvr gets added properly."""
testfile = os.path.join(UVDATA_PATH, "test_redundant_array.uvfits")
test_uvb_file = os.path.join(DATA_PATH, "test_redundant_array.beamfits")
uvd = UVData()
uvd.read(testfile)
dspec_object = DelaySpectrum(uv=uvd)
uvb = UVBeam()
uvb.read_beamfits(test_uvb_file)
uvb.receiver_temperature_array = np.ones((1, uvb.Nfreqs)) * 144
uvb.select(frequencies=uvb.freq_array.squeeze()[::2])
dspec_object.add_uvbeam(uvb=uvb, use_exact=False)
assert np.allclose(144, dspec_object.trcvr.to("K")[0].value)
def test_add_trcvr_scalar():
"""Test a scalar trcvr quantity is broadcast to the correct shape."""
testfile = os.path.join(UVDATA_PATH, "test_redundant_array.uvfits")
uvd = UVData()
uvd.read(testfile)
dspec_object = DelaySpectrum(uv=uvd)
dspec_object.add_trcvr(9 * units.K)
expected_shape = (dspec_object.Nspws, dspec_object.Nfreqs)
assert expected_shape == dspec_object.trcvr.shape
def test_add_trcvr_bad_number_of_spectral_windows():
"""Test error is raised if the number of spectral windows do not match with input trcvr."""
testfile = os.path.join(UVDATA_PATH, "test_redundant_array.uvfits")
uvd = UVData()
uvd.read(testfile)
dspec_object = DelaySpectrum(uv=uvd)
bad_temp = np.ones((4, 21)) * units.K
pytest.raises(ValueError, dspec_object.add_trcvr, bad_temp)
def test_add_trcvr_bad_number_of_freqs():
"""Test error is raised if number of frequencies does not match input trcvr."""
testfile = os.path.join(UVDATA_PATH, "test_redundant_array.uvfits")
uvd = UVData()
uvd.read(testfile)
dspec_object = DelaySpectrum(uv=uvd)
bad_temp = np.ones((1, 51)) * units.K
pytest.raises(ValueError, dspec_object.add_trcvr, bad_temp)
def test_add_trcvr_vector():
"""Test an arry of trcvr quantity."""
testfile = os.path.join(UVDATA_PATH, "test_redundant_array.uvfits")
uvd = UVData()
uvd.read(testfile)
dspec_object = DelaySpectrum(uv=uvd)
good_temp = np.ones((1, 21)) * 9 * units.K
dspec_object.add_trcvr(good_temp)
expected_shape = (dspec_object.Nspws, dspec_object.Nfreqs)
assert expected_shape == dspec_object.trcvr.shape
def test_add_trcvr_init():
"""Test a scalar trcvr quantity is broadcast to the correct shape during init."""
testfile = os.path.join(UVDATA_PATH, "test_redundant_array.uvfits")
uvd = UVData()
uvd.read(testfile)
dspec_object = DelaySpectrum(uv=uvd, trcvr=9 * units.K)
expected_shape = (dspec_object.Nspws, dspec_object.Nfreqs)
assert expected_shape == dspec_object.trcvr.shape
def test_add_trcvr_init_error():
"""Test error is raised if trcvr is the only input to init."""
pytest.raises(ValueError, DelaySpectrum, trcvr=9 * units.K)
def test_spectrum_on_no_data():
"""Test error is raised if spectrum attempted to be taken with no data."""
dspec_object = DelaySpectrum()
pytest.raises(ValueError, dspec_object.calculate_delay_spectrum)
def test_noise_shape():
"""Test the generate noise and calculate_noise_power produce correct shape."""
testfile = os.path.join(UVDATA_PATH, "test_redundant_array.uvfits")
uvd = UVData()
uvd.read(testfile)
dspec_object = DelaySpectrum(uv=uvd)
dspec_object.trcvr = np.zeros_like(dspec_object.trcvr)
dspec_object.beam_area = np.ones_like(dspec_object.beam_area)
dspec_object.generate_noise()
assert (
dspec_object._noise_array.expected_shape(dspec_object)
== dspec_object.noise_array.shape
)
def test_noise_unit():
"""Test the generate noise and calculate_noise_power produce correct units."""
testfile = os.path.join(UVDATA_PATH, "test_redundant_array.uvfits")
uvd = UVData()
uvd.read(testfile)
dspec_object = DelaySpectrum(uv=uvd)
dspec_object.trcvr = np.zeros_like(dspec_object.trcvr)
dspec_object.beam_area = np.ones_like(dspec_object.beam_area)
dspec_object.generate_noise()
assert dspec_object.noise_array.unit == units.Jy
def test_noise_amplitude():
"""Test noise amplitude with a fixed seed."""
np.random.seed(0)
testfile = os.path.join(UVDATA_PATH, "test_redundant_array.uvfits")
uvd = UVData()
uvd.read(testfile)
dspec_object = DelaySpectrum(uv=uvd)
dspec_object.trcvr = np.zeros_like(dspec_object.trcvr)
dspec_object.beam_area = np.ones_like(dspec_object.beam_area)
dspec_object.nsample_array = np.ones_like(dspec_object.nsample_array)
dspec_object.integration_time = np.ones_like(dspec_object.integration_time)
dspec_object.polarization_array = np.array([-5])
dspec_object.generate_noise()
var = np.var(dspec_object.noise_array, axis=(0, 1, 2, 3)).mean(0)
test_amplitude = (
180
* units.K
* np.power((dspec_object.freq_array.to("GHz") / (0.18 * units.GHz)), -2.55)
/ np.sqrt(np.diff(dspec_object.freq_array[0])[0].value)
).reshape(1, 1, dspec_object.Nfreqs)
test_amplitude *= dspec_object.beam_area / utils.jy_to_mk(dspec_object.freq_array)
test_var = test_amplitude.to("Jy") ** 2
# this was from running this test by hand
ratio = np.array(
[
[
1.07735447,
1.07082788,
1.07919504,
1.04992591,
1.02254714,
0.99884931,
0.94861011,
1.01908474,
1.03877442,
1.00549461,
1.09642801,
1.01100747,
1.0201933,
1.05762868,
0.95156612,
1.00190002,
1.00046522,
1.02796162,
1.04277506,
0.98373618,
1.01235802,
]
]
)
assert np.allclose(ratio, (test_var / var).value)
def test_delay_transform_units():
"""Test units after calling delay_transform are correct."""
testfile = os.path.join(UVDATA_PATH, "test_redundant_array.uvfits")
uvd = UVData()
uvd.read(testfile)
dspec_object = DelaySpectrum(uv=uvd)
dspec_object.delay_transform()
assert dspec_object.data_array.unit.is_equivalent(units.Jy * units.Hz)
dspec_object.delay_transform()
assert dspec_object.data_array.unit.is_equivalent(units.Jy)
def test_warning_from_uncalibrated_data():
"""Test scaling warning is raised when delay transforming uncalibrated data."""
testfile = os.path.join(UVDATA_PATH, "test_redundant_array.uvfits")
uvd = UVData()
uvd.read(testfile)
uvd.vis_units = "uncalib"
warn_message = [
"Data is uncalibrated. Unable to covert noise array to unicalibrated units."
]
with uvtest.check_warnings(UserWarning, warn_message):
dspec_object = DelaySpectrum(uvd)
warn_message = [
"Fourier Transforming uncalibrated data. Units will "
"not have physical meaning. "
"Data will be arbitrarily scaled."
]
with uvtest.check_warnings(UserWarning, warn_message):
dspec_object.delay_transform()
def test_delay_transform_bad_data_type():
"""Test error is raised in delay_transform if data_type is bad."""
testfile = os.path.join(UVDATA_PATH, "test_redundant_array.uvfits")
uvd = UVData()
uvd.read(testfile)
dspec_object = DelaySpectrum(uvd)
dspec_object.data_type = "test"
pytest.raises(ValueError, dspec_object.delay_transform)
def test_delay_spectrum_power_units():
"""Test the units on the output power are correct."""
testfile = os.path.join(UVDATA_PATH, "test_redundant_array.uvfits")
test_uvb_file = os.path.join(DATA_PATH, "test_redundant_array.beamfits")
uvd = UVData()
uvd.read(testfile)
dspec_object = DelaySpectrum(uv=uvd)
uvb = UVBeam()
uvb.read_beamfits(test_uvb_file)
dspec_object.add_uvbeam(uvb=uvb, use_exact=True)
dspec_object.calculate_delay_spectrum()
assert (units.mK**2 * units.Mpc**3).is_equivalent(dspec_object.power_array.unit)
def test_delay_spectrum_power_shape():
"""Test the shape of the output power is correct."""
testfile = os.path.join(UVDATA_PATH, "test_redundant_array.uvfits")
test_uvb_file = os.path.join(DATA_PATH, "test_redundant_array.beamfits")
uvd = UVData()
uvd.read(testfile)
dspec_object = DelaySpectrum(uv=uvd)
uvb = UVBeam()
uvb.read_beamfits(test_uvb_file)
dspec_object.add_uvbeam(uvb=uvb)
dspec_object.calculate_delay_spectrum()
power_shape = (
dspec_object.Nspws,
dspec_object.Npols,
dspec_object.Nbls,
dspec_object.Nbls,
dspec_object.Ntimes,
dspec_object.Ndelays,
)
assert power_shape == dspec_object.power_array.shape
def test_delay_spectrum_power_shape_two_uvdata_objects_read():
"""Test the shape of the output power is correct when two uvdata objects read."""
testfile = os.path.join(UVDATA_PATH, "test_redundant_array.uvfits")
test_uvb_file = os.path.join(DATA_PATH, "test_redundant_array.beamfits")
uvd = UVData()
uvd.read(testfile)
dspec_object = DelaySpectrum(uv=[uvd] * 2)
uvb = UVBeam()
uvb.read_beamfits(test_uvb_file)
dspec_object.add_uvbeam(uvb=uvb)
dspec_object.calculate_delay_spectrum()
power_shape = (
dspec_object.Nspws,
dspec_object.Npols,
dspec_object.Nbls,
dspec_object.Nbls,
dspec_object.Ntimes,
dspec_object.Ndelays,
)
assert power_shape == dspec_object.power_array.shape
def test_delay_spectrum_power_shape_two_spectral_windows():
"""Test the shape of the output power when multiple spectral windows given."""
testfile = os.path.join(UVDATA_PATH, "test_redundant_array.uvfits")
test_uvb_file = os.path.join(DATA_PATH, "test_redundant_array.beamfits")
uvd = UVData()
uvd.read(testfile)
dspec_object = DelaySpectrum(uv=[uvd])
dspec_object.select_spectral_windows([(1, 3), (4, 6)])
uvb = UVBeam()
uvb.read_beamfits(test_uvb_file)
dspec_object.add_uvbeam(uvb=uvb)
dspec_object.calculate_delay_spectrum()
power_shape = (
dspec_object.Nspws,
dspec_object.Npols,
dspec_object.Nbls,
dspec_object.Nbls,
dspec_object.Ntimes,
dspec_object.Ndelays,
)
assert power_shape == dspec_object.power_array.shape
def test_cosmological_units():
"""Test the units on cosmological parameters."""
testfile = os.path.join(UVDATA_PATH, "test_redundant_array.uvfits")
test_uvb_file = os.path.join(DATA_PATH, "test_redundant_array.beamfits")
uvd = UVData()
uvd.read(testfile)
dspec_object = DelaySpectrum(uv=[uvd])
dspec_object.select_spectral_windows([(1, 3), (4, 6)])
uvb = UVBeam()
uvb.read_beamfits(test_uvb_file)
dspec_object.add_uvbeam(uvb=uvb)
assert dspec_object.k_perpendicular.unit.is_equivalent(1.0 / units.Mpc)
assert dspec_object.k_parallel.unit.is_equivalent(1.0 / units.Mpc)
def test_delay_spectrum_power_units_input_kelvin_str():
"""Test the units on the output power are correct when input kelvin*str."""
test_file = os.path.join(DATA_PATH, "paper_test_file_k_units.uvh5")
test_uv_1 = UVData()
test_uv_1.read(test_file)
test_uv_2 = copy.deepcopy(test_uv_1)
beam_file = os.path.join(DATA_PATH, "test_paper_pI.beamfits")
uvb = UVBeam()
uvb.read_beamfits(beam_file)
test_uv_1.select(freq_chans=np.arange(95, 116))
test_uv_2.select(freq_chans=np.arange(95, 116))
dspec_object = DelaySpectrum(uv=[test_uv_1, test_uv_2])
dspec_object.calculate_delay_spectrum()
dspec_object.add_trcvr(144 * units.K)
assert (units.mK**2 * units.Mpc**3).is_equivalent(dspec_object.power_array.unit)
def test_delay_spectrum_power_units_input_uncalib():
"""Test the units on the output power are correct if input uncalib."""
test_file = os.path.join(DATA_PATH, "paper_test_file_uncalib_units.uvh5")
test_uv_1 = UVData()
test_uv_1.read(test_file)
test_uv_2 = copy.deepcopy(test_uv_1)
beam_file = os.path.join(DATA_PATH, "test_paper_pI.beamfits")
uvb = UVBeam()
uvb.read_beamfits(beam_file)
test_uv_1.select(freq_chans=np.arange(95, 116))
test_uv_2.select(freq_chans=np.arange(95, 116))
warn_message = [
"Data is uncalibrated. Unable to covert noise array to unicalibrated units.",
"Data is uncalibrated. Unable to covert noise array to unicalibrated units.",
]
with uvtest.check_warnings(UserWarning, warn_message):
dspec_object = DelaySpectrum([test_uv_1, test_uv_2])
dspec_object.add_trcvr(144 * units.K)
warn_message = [
"Fourier Transforming uncalibrated data. "
"Units will not have physical meaning. "
"Data will be arbitrarily scaled."
]
with uvtest.check_warnings(UserWarning, match=warn_message):
dspec_object.calculate_delay_spectrum()
assert (units.Hz**2).is_equivalent(dspec_object.power_array.unit)
def test_delay_spectrum_noise_power_units():
"""Test the units on the output noise power are correct."""
test_file = os.path.join(DATA_PATH, "paper_test_file.uvh5")
test_uv_1 = UVData()
test_uv_1.read(test_file)
test_uv_2 = copy.deepcopy(test_uv_1)
beam_file = os.path.join(DATA_PATH, "test_paper_pI.beamfits")
uvb = UVBeam()
uvb.read_beamfits(beam_file)
test_uv_1.select(freq_chans=np.arange(95, 116))
test_uv_2.select(freq_chans=np.arange(95, 116))
dspec_object = DelaySpectrum(uv=[test_uv_1, test_uv_2])
dspec_object.calculate_delay_spectrum()
dspec_object.add_trcvr(144 * units.K)
assert (units.mK**2 * units.Mpc**3).is_equivalent(dspec_object.noise_power.unit)
def test_delay_spectrum_thermal_power_units():
"""Test the units on the output thermal power are correct."""
test_file = os.path.join(DATA_PATH, "paper_test_file.uvh5")
test_uv_1 = UVData()
test_uv_1.read(test_file)
test_uv_2 = copy.deepcopy(test_uv_1)
beam_file = os.path.join(DATA_PATH, "test_paper_pI.beamfits")
uvb = UVBeam()
uvb.read_beamfits(beam_file)
test_uv_1.select(freq_chans=np.arange(95, 116))
test_uv_2.select(freq_chans=np.arange(95, 116))
dspec_object = DelaySpectrum(uv=[test_uv_1, test_uv_2])
dspec_object.calculate_delay_spectrum()
dspec_object.add_trcvr(144 * units.K)
assert (units.mK**2 * units.Mpc**3).is_equivalent(
dspec_object.thermal_power.unit
)
def test_delay_spectrum_thermal_power_shape():
"""Test the shape of the output thermal power is correct."""
test_file = os.path.join(DATA_PATH, "paper_test_file.uvh5")
test_uv_1 = UVData()
test_uv_1.read(test_file)
test_uv_2 = copy.deepcopy(test_uv_1)
beam_file = os.path.join(DATA_PATH, "test_paper_pI.beamfits")
uvb = UVBeam()
uvb.read_beamfits(beam_file)
test_uv_1.select(freq_chans=np.arange(95, 116))
test_uv_2.select(freq_chans=np.arange(95, 116))
dspec_object = DelaySpectrum(uv=[test_uv_1, test_uv_2])
dspec_object.calculate_delay_spectrum()
dspec_object.add_trcvr(144 * units.K)
assert (
dspec_object._thermal_power.expected_shape(dspec_object)
== dspec_object.thermal_power.shape
)
def test_multiple_polarization_file():
"""Test the units on cosmological parameters."""
testfile = os.path.join(DATA_PATH, "test_two_pol_array.uvfits")
test_uvb_file = os.path.join(DATA_PATH, "test_multiple_pol.beamfits")
uvd = UVData()
uvd.read(testfile)
dspec_object = DelaySpectrum(uv=[uvd])
dspec_object.select_spectral_windows([(1, 3), (4, 6)])
uvb = UVBeam()
uvb.read_beamfits(test_uvb_file)
dspec_object.add_uvbeam(uvb=uvb)
assert dspec_object.check()
dspec_object.calculate_delay_spectrum()
assert dspec_object.check()
def test_remove_cosmology():
"""Test removing cosmology does not alter data from before cosmology is applied."""
testfile = os.path.join(UVDATA_PATH, "test_redundant_array.uvfits")
test_uvb_file = os.path.join(DATA_PATH, "test_redundant_array.beamfits")
uvd = UVData()
uvd.read(testfile)
dspec_object = DelaySpectrum(uv=[uvd])
dspec_object.select_spectral_windows([(1, 3), (4, 6)])
uvb = UVBeam()
uvb.read_beamfits(test_uvb_file)
dspec_object.add_uvbeam(uvb=uvb)
dspec_object2 = copy.deepcopy(dspec_object)
dspec_object.calculate_delay_spectrum(littleh_units=True)
dspec_object.remove_cosmology()
assert dspec_object.power_array.unit.is_equivalent(units.Jy**2 * units.Hz**2)
dspec_object2.delay_transform()
dspec_object2.power_array = utils.cross_multiply_array(
array_1=dspec_object2.data_array[:, 0], axis=2
)
assert units.allclose(dspec_object2.power_array, dspec_object.power_array)
def test_remove_cosmology_no_cosmo():
"""Test removing cosmology does not alter data from before cosmology is applied."""
testfile = os.path.join(UVDATA_PATH, "test_redundant_array.uvfits")
test_uvb_file = os.path.join(DATA_PATH, "test_redundant_array.beamfits")
uvd = UVData()
uvd.read(testfile)
dspec_object = DelaySpectrum(uv=[uvd])
dspec_object.select_spectral_windows([(1, 3), (4, 6)])
uvb = UVBeam()
uvb.read_beamfits(test_uvb_file)
dspec_object.add_uvbeam(uvb=uvb)
dspec_object.delay_transform()
dspec_object.power_array = utils.cross_multiply_array(
array_1=dspec_object.data_array[:, 0], axis=2
)
dspec_object.noise_power = utils.cross_multiply_array(
array_1=dspec_object.noise_array[:, 0], axis=2
)
dspec_object2 = copy.deepcopy(dspec_object)
dspec_object.remove_cosmology()
assert dspec_object.power_array.unit.is_equivalent(units.Jy**2 * units.Hz**2)
assert units.allclose(dspec_object2.power_array, dspec_object.power_array)
def test_remove_cosmology_cosmo_none():
"""Test removing cosmology does not alter data from before cosmology is applied."""
testfile = os.path.join(UVDATA_PATH, "test_redundant_array.uvfits")
uvd = UVData()
uvd.read(testfile)
dspec_object = DelaySpectrum(uv=[uvd])
dspec_object.cosmology = None
with pytest.raises(ValueError) as cm:
dspec_object.remove_cosmology()
assert str(cm.value).startswith("Cannot remove cosmology of type")
def test_update_cosmology_units_and_shapes():
"""Test the check function on DelaySpectrum after changing cosmologies."""
testfile = os.path.join(UVDATA_PATH, "test_redundant_array.uvfits")
test_uvb_file = os.path.join(DATA_PATH, "test_redundant_array.beamfits")
test_cosmo = Planck15
uvd = UVData()
uvd.read(testfile)
dspec_object = DelaySpectrum(uv=[uvd])
dspec_object.select_spectral_windows([(1, 3), (4, 6)])
uvb = UVBeam()
uvb.read_beamfits(test_uvb_file)
dspec_object.add_uvbeam(uvb=uvb)
dspec_object.calculate_delay_spectrum()
assert dspec_object.check()
dspec_object.update_cosmology(cosmology=test_cosmo)
assert dspec_object.check()
def test_update_cosmology_error_if_not_cosmology_object():
"""Test update cosmology function errors if new cosmology is not a Cosmology object."""
testfile = os.path.join(UVDATA_PATH, "test_redundant_array.uvfits")
test_uvb_file = os.path.join(DATA_PATH, "test_redundant_array.beamfits")
bad_input = DummyClass()
uvd = UVData()
uvd.read(testfile)
dspec_object = DelaySpectrum(uv=[uvd])
dspec_object.select_spectral_windows([(1, 3), (4, 6)])
uvb = UVBeam()
uvb.read_beamfits(test_uvb_file)
dspec_object.add_uvbeam(uvb=uvb)
dspec_object.calculate_delay_spectrum()
assert dspec_object.check()
pytest.raises(ValueError, dspec_object.update_cosmology, cosmology=bad_input)
def test_update_cosmology_unit_and_shape_kelvin_sr():
"""Test the check function after changing cosmolgies, input visibility Kelvin * sr."""
test_file = os.path.join(DATA_PATH, "paper_test_file_k_units.uvh5")
test_cosmo = Planck15
test_uv_1 = UVData()
test_uv_1.read(test_file)
test_uv_2 = copy.deepcopy(test_uv_1)
beam_file = os.path.join(DATA_PATH, "test_paper_pI.beamfits")
uvb = UVBeam()
uvb.read_beamfits(beam_file)
test_uv_1.select(freq_chans=np.arange(95, 116))
test_uv_2.select(freq_chans=np.arange(95, 116))
dspec_object = DelaySpectrum(uv=[test_uv_1, test_uv_2])
dspec_object.calculate_delay_spectrum()
dspec_object.add_trcvr(144 * units.K)
assert dspec_object.check()
dspec_object.update_cosmology(cosmology=test_cosmo)
assert dspec_object.check()
def test_update_cosmology_unit_and_shape_uncalib():
"""Test the check function after changing cosmolgies, input visibility uncalibrated."""
test_file = os.path.join(DATA_PATH, "paper_test_file_uncalib_units.uvh5")
test_cosmo = Planck15
test_uv_1 = UVData()
test_uv_1.read(test_file)
test_uv_2 = copy.deepcopy(test_uv_1)
beam_file = os.path.join(DATA_PATH, "test_paper_pI.beamfits")
uvb = UVBeam()
uvb.read_beamfits(beam_file)
test_uv_1.select(freq_chans=np.arange(95, 116))
test_uv_2.select(freq_chans=np.arange(95, 116))
warn_message = [
"Data is uncalibrated. Unable to covert noise array to unicalibrated units.",
"Data is uncalibrated. Unable to covert noise array to unicalibrated units.",
]
with uvtest.check_warnings(UserWarning, warn_message):
dspec_object = DelaySpectrum([test_uv_1, test_uv_2])
dspec_object.add_trcvr(144 * units.K)
warn_message = [
"Fourier Transforming uncalibrated data. "
"Units will not have physical meaning. "
"Data will be arbitrarily scaled."
]
with uvtest.check_warnings(UserWarning, warn_message):
dspec_object.calculate_delay_spectrum()
assert dspec_object.check()
dspec_object.update_cosmology(cosmology=test_cosmo)
assert dspec_object.check()
def test_update_cosmology_littleh_units():
"""Test the units can convert to 'littleh' units in python 3."""
testfile = os.path.join(UVDATA_PATH, "test_redundant_array.uvfits")
test_uvb_file = os.path.join(DATA_PATH, "test_redundant_array.beamfits")
test_cosmo = Planck15
uvd = UVData()
uvd.read(testfile)
dspec_object = DelaySpectrum(uv=[uvd])
dspec_object.select_spectral_windows([(1, 3), (4, 6)])
uvb = UVBeam()
uvb.read_beamfits(test_uvb_file)
dspec_object.add_uvbeam(uvb=uvb)
dspec_object.calculate_delay_spectrum()
assert dspec_object.check()
dspec_object.update_cosmology(cosmology=test_cosmo, littleh_units=True)
assert dspec_object.check()
test_unit = (units.mK**2) / (littleh / units.Mpc) ** 3
    assert dspec_object.power_array.unit == test_unit
def test_update_cosmology_littleh_units_from_calc_delay_spectr():
"""Test the units can convert to 'littleh' units in python 3 passed through calculate_delay_spectrum."""
testfile = os.path.join(UVDATA_PATH, "test_redundant_array.uvfits")
test_uvb_file = os.path.join(DATA_PATH, "test_redundant_array.beamfits")
test_cosmo = Planck15
uvd = UVData()
uvd.read(testfile)
dspec_object = DelaySpectrum(uv=[uvd])
dspec_object.select_spectral_windows([(1, 3), (4, 6)])
uvb = UVBeam()
uvb.read_beamfits(test_uvb_file)
dspec_object.add_uvbeam(uvb=uvb)
dspec_object.calculate_delay_spectrum(cosmology=test_cosmo, littleh_units=True)
assert dspec_object.check()
test_unit = (units.mK**2) / (littleh / units.Mpc) ** 3
assert dspec_object.power_array.unit == test_unit
assert dspec_object.cosmology.name == "Planck15"
def test_call_update_cosmology_twice():
"""Test cosmology can be updated at least twice in a row with littleh_units."""
testfile = os.path.join(UVDATA_PATH, "test_redundant_array.uvfits")
test_uvb_file = os.path.join(DATA_PATH, "test_redundant_array.beamfits")
test_cosmo1 = WMAP9
test_cosmo2 = Planck15
uvd = UVData()
uvd.read(testfile)
dspec_object = DelaySpectrum(uv=[uvd])
dspec_object.select_spectral_windows([(1, 3), (4, 6)])
uvb = UVBeam()
uvb.read_beamfits(test_uvb_file)
dspec_object.add_uvbeam(uvb=uvb)
dspec_object.calculate_delay_spectrum(cosmology=test_cosmo1, littleh_units=True)
assert dspec_object.check()
assert dspec_object.cosmology.name == "WMAP9"
dspec_object.update_cosmology(test_cosmo2, littleh_units=True)
test_unit = (units.mK**2) / (littleh / units.Mpc) ** 3
assert dspec_object.power_array.unit == test_unit
assert dspec_object.cosmology.name == "Planck15"
assert dspec_object.check()
def test_call_update_cosmology_twice_no_littleh():
"""Test cosmology can be updated at least twice in a row without littleh_units."""
testfile = os.path.join(UVDATA_PATH, "test_redundant_array.uvfits")
test_uvb_file = os.path.join(DATA_PATH, "test_redundant_array.beamfits")
test_cosmo1 = WMAP9
test_cosmo2 = Planck15
uvd = UVData()
uvd.read(testfile)
dspec_object = DelaySpectrum(uv=[uvd])
dspec_object.select_spectral_windows([(1, 3), (4, 6)])
uvb = UVBeam()
uvb.read_beamfits(test_uvb_file)
dspec_object.add_uvbeam(uvb=uvb)
dspec_object.calculate_delay_spectrum(cosmology=test_cosmo1, littleh_units=False)
assert dspec_object.check()
assert dspec_object.cosmology.name == "WMAP9"
dspec_object.update_cosmology(test_cosmo2, littleh_units=False)
test_unit = units.mK**2 * units.Mpc**3
assert dspec_object.power_array.unit == test_unit
assert dspec_object.cosmology.name == "Planck15"
assert dspec_object.check()
def test_call_delay_spectrum_twice_no_littleh():
"""Test calculate_delay_spectrum can be called at least twice in a row without littleh_units."""
testfile = os.path.join(UVDATA_PATH, "test_redundant_array.uvfits")
test_uvb_file = os.path.join(DATA_PATH, "test_redundant_array.beamfits")
test_cosmo1 = WMAP9
test_cosmo2 = Planck15
uvd = UVData()
uvd.read(testfile)
dspec_object = DelaySpectrum(uv=[uvd])
dspec_object.select_spectral_windows([(1, 3), (4, 6)])
uvb = UVBeam()
uvb.read_beamfits(test_uvb_file)
dspec_object.add_uvbeam(uvb=uvb)
dspec_object.calculate_delay_spectrum(cosmology=test_cosmo1, littleh_units=False)
assert dspec_object.check()
assert dspec_object.cosmology.name == "WMAP9"
dspec_object.calculate_delay_spectrum(cosmology=test_cosmo2, littleh_units=False)
test_unit = units.mK**2 * units.Mpc**3
assert dspec_object.power_array.unit == test_unit
assert dspec_object.cosmology.name == "Planck15"
assert dspec_object.check()
def test_call_delay_spectrum_twice():
"""Test calculate_delay_spectrum can be called at least twice in a row."""
testfile = os.path.join(UVDATA_PATH, "test_redundant_array.uvfits")
test_uvb_file = os.path.join(DATA_PATH, "test_redundant_array.beamfits")
test_cosmo1 = WMAP9
test_cosmo2 = Planck15
uvd = UVData()
uvd.read(testfile)
dspec_object = DelaySpectrum(uv=[uvd])
dspec_object.select_spectral_windows([(1, 3), (4, 6)])
uvb = UVBeam()
uvb.read_beamfits(test_uvb_file)
dspec_object.add_uvbeam(uvb=uvb)
dspec_object.calculate_delay_spectrum(cosmology=test_cosmo1, littleh_units=True)
assert dspec_object.check()
assert dspec_object.cosmology.name == "WMAP9"
dspec_object.calculate_delay_spectrum(cosmology=test_cosmo2, littleh_units=True)
test_unit = units.mK**2 * units.Mpc**3 / littleh**3
assert dspec_object.power_array.unit == test_unit
assert dspec_object.cosmology.name == "Planck15"
assert dspec_object.check()
@pytest.mark.parametrize(
"input,err_type,err_message",
[
({"antenna_nums": [-1]}, ValueError, "Antenna -1 is not present in either"),
({"bls": []}, ValueError, "bls must be a list of tuples of antenna numbers"),
(
{"bls": [(0, 44), "test"]},
ValueError,
"bls must be a list of tuples of antenna numbers",
),
(
{"bls": [(1, "2")]},
ValueError,
"bls must be a list of tuples of antenna numbers",
),
(
{"bls": [("1", 2)]},
ValueError,
"bls must be a list of tuples of antenna numbers",
),
(
{"bls": [(1, 2, "xx")], "polarizations": "yy"},
ValueError,
"Cannot provide length-3 tuples and also",
),
(
{"bls": [(1, 2, 3)]},
ValueError,
"The third element in each bl must be a polarization",
),
({"bls": [(2, 3)]}, ValueError, "Baseline (2, 3) has no data associate"),
({"spws": ["pi"]}, ValueError, "Input spws must be an array_like of integers"),
({"spws": [5]}, ValueError, "Input spectral window values must be less"),
(
{"frequencies": [12] * units.Hz},
ValueError,
"Frequency 12.0 Hz not present in the frequency array.",
),
(
{"frequencies": [146798030.15625, 147290641.0, 151724138.59375] * units.Hz},
ValueError,
"Frequencies provided for selection will result in a non-rectangular",
),
(
{"delays": [12] * units.ns},
ValueError,
"The input delay 12.0 ns is not present in the delay_array.",
),
(
{"lsts": [7] * units.rad},
ValueError,
"The input lst 7.0 rad is not present in the lst_array.",
),
(
{"lst_range": [0, 2, 3] * units.rad},
ValueError,
"Parameter lst_range must be an Astropy Quantity object with size 2 ",
),
(
{"polarizations": ["pU"]},
ValueError,
"Polarization 3 not present in polarization_array.",
),
(
{"delay_chans": np.arange(11).tolist(), "delays": -96.66666543 * units.ns},
ValueError,
"The intersection of the input delays and delay_chans ",
),
(
{"uv_index": np.arange(5).tolist()},
ValueError,
"The number of UVData objects in this DelaySpectrum object",
),
],
)
def test_select_preprocess_errors(ds_from_uvfits, input, err_type, err_message):
"""Test Errors raised by _select_preprocess."""
ds = ds_from_uvfits
ds.delay_transform()
with pytest.raises(err_type) as cm:
ds.select(**input)
assert str(cm.value).startswith(err_message)
@pytest.mark.parametrize(
"input",
[
{"antenna_nums": [0, 44]},
{"bls": (0, 26)}, # if statement looking for just one input that is a tuple
{"bls": (26, 0)}, # reverse the baseline to see if it is taken
{"bls": [(0, 26), (1, 4)]},
{"bls": [(0, 26), 69637]},
{"bls": [(0, 26, "pI"), (1, 4, "pI")]},
{"antenna_nums": [0, 44], "bls": [157697]}, # Mix bls and antenna_nums
{"freq_chans": | np.arange(11) | numpy.arange |
#!python3
## create and write the grid file for HYCOM
## includes utility functions
import sys
import os
from os.path import join
### might not need next line in older or newer version of proplot
os.environ['PROJ_LIB'] = '/Users/abozec/opt/anaconda3/share/proj'
import proplot as plot
import numpy as np
iodir='/Users/abozec/Documents/GitHub/BB86_PACKAGE/PYTHON/'
sys.path.append(iodir)
from hycom.io import write_hycom_grid
# define path
io=join(iodir+'../topo/')
file_grid_new='regional.grid.BB86.python'
## size of the domain
idm = 101 ; jdm = 101
## longitude/latitude starting point + resolution (in degrees) (NB: those
## variables are not used in HYCOM)
ini_lon = 0. ; ini_lat = 0.
res = 0.20
## scale dx and dy (in m) (used in HYCOM)
dx = 20e3 ; dy = dx
## grid type
mapflg=0 ## uniform or mercator
######### END of the USER inputs #############
## missing value in HYCOM
vmiss = 2.**100
## Get the p-point the grid
plon = np.zeros([jdm, idm])
plat = np.zeros([jdm, idm])
## Longitude
plon[:, 0] = ini_lon
for i in np.arange(idm-1)+1:
plon[:,i] = plon[:, i-1] + res
## Latitude
plat[0, :] = ini_lat
for j in np.arange(jdm-1)+1:
plat[j, :] = plat[j-1, :] + res
print('p-points grid OK')
## Declaration of the grid tabs
qlon = np.zeros([jdm,idm])
qlat = np.zeros([jdm,idm])
ulon = np.zeros([jdm,idm])
ulat = np.zeros([jdm,idm])
vlon = np.zeros([jdm,idm])
vlat = np.zeros([jdm,idm])
pang = np.zeros([jdm,idm])
pscx = np.zeros([jdm,idm])
pscy = np.zeros([jdm,idm])
qscx = np.zeros([jdm,idm])
qscy = np.zeros([jdm,idm])
uscx = np.zeros([jdm,idm])
uscy = np.zeros([jdm,idm])
vscx = np.zeros([jdm,idm])
vscy = np.zeros([jdm,idm])
cori = np.zeros([jdm,idm])
pasp = np.zeros([jdm,idm])
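# NOTE (added illustration, not part of the original script): for a uniform
# BB86-style grid the remaining fields would typically be filled with the
# constant spacings, e.g. something along the lines of
#   pscx[:, :] = dx ; pscy[:, :] = dy
# with the q-, u- and v-point longitudes/latitudes offset from the p-points by
# half a grid cell before writing everything out with write_hycom_grid.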
'''
Created on Jul 3, 2019
@author: Hazem
'''
import numpy as np
import time
from numba import njit,guvectorize,float64
import scipy.optimize as opt
from matplotlib import pyplot as plt
import pdb
import csv
#Set
t = np.arange(1, 101)
NT = len(t)
#Parameters
fosslim = 6000 # Maximum cumulative extraction fossil fuels (GtC); denoted by CCum
tstep = 5 # Years per Period
ifopt = 0 # Indicator where optimized is 1 and base is 0
#Preferences
elasmu = 1.45 # Elasticity of marginal utility of consumption
prstp = 0.015 # Initial rate of social time preference per year
#** Population and technology
gama = 0.300 # Capital elasticity in production function /.300 /
pop0 = 7403 # Initial world population 2015 (millions) /7403 /
popadj = 0.134 # Growth rate to calibrate to 2050 pop projection /0.134/
popasym = 11500 # Asymptotic population (millions) /11500/
dk = 0.100 # Depreciation rate on capital (per year) /.100 /
q0 = 105.5 # Initial world gross output 2015 (trill 2010 USD) /105.5/
k0 = 223 # Initial capital value 2015 (trill 2010 USD) /223 /
a0 = 5.115 # Initial level of total factor productivity /5.115/
ga0 = 0.076 # Initial growth rate for TFP per 5 years /0.076/
dela = 0.005 # Decline rate of TFP per 5 years /0.005/
#** Emissions parameters
gsigma1 = -0.0152 # Initial growth of sigma (per year) /-0.0152/
dsig = -0.001 # Decline rate of decarbonization (per period) /-0.001 /
eland0 = 2.6 # Carbon emissions from land 2015 (GtCO2 per year) / 2.6 /
deland = 0.115 # Decline rate of land emissions (per period) / .115 /
e0 = 35.85 # Industrial emissions 2015 (GtCO2 per year) /35.85 /
miu0 = 0.03 # Initial emissions control rate for base case 2015 /.03 /
#** Carbon cycle
#* Initial Conditions
mat0 = 851 # Initial Concentration in atmosphere 2015 (GtC) /851 /
mu0 = 460 # Initial Concentration in upper strata 2015 (GtC) /460 /
ml0 = 1740 # Initial Concentration in lower strata 2015 (GtC) /1740 /
mateq = 588 # mateq Equilibrium concentration atmosphere (GtC) /588 /
mueq = 360 # mueq Equilibrium concentration in upper strata (GtC) /360 /
mleq = 1720 # mleq Equilibrium concentration in lower strata (GtC) /1720 /
#* Flow paramaters, denoted by Phi_ij in the model
b12 = 0.12 # Carbon cycle transition matrix /.12 /
b23 = 0.007 # Carbon cycle transition matrix /0.007/
#* These are for declaration and are defined later
b11 = None # Carbon cycle transition matrix
b21 = None # Carbon cycle transition matrix
b22 = None # Carbon cycle transition matrix
b32 = None # Carbon cycle transition matrix
b33 = None # Carbon cycle transition matrix
sig0 = None # Carbon intensity 2010 (kgCO2 per output 2005 USD 2010)
#** Climate model parameters
t2xco2 = 3.1 # Equilibrium temp impact (oC per doubling CO2) / 3.1 /
fex0 = 0.5 # 2015 forcings of non-CO2 GHG (Wm-2) / 0.5 /
fex1 = 1.0 # 2100 forcings of non-CO2 GHG (Wm-2) / 1.0 /
tocean0 = 0.0068 # Initial lower stratum temp change (C from 1900) /.0068/
tatm0 = 0.85 # Initial atmospheric temp change (C from 1900) /0.85/
c1 = 0.1005 # Climate equation coefficient for upper level /0.1005/
c3 = 0.088 # Transfer coefficient upper to lower stratum /0.088/
c4 = 0.025 # Transfer coefficient for lower level /0.025/
fco22x = 3.6813 # eta in the model; Eq.22 : Forcings of equilibrium CO2 doubling (Wm-2) /3.6813 /
#** Climate damage parameters
a10 = 0 # Initial damage intercept /0 /
a20 = None # Initial damage quadratic term
a1 = 0 # Damage intercept /0 /
a2 = 0.00236 # Damage quadratic term /0.00236/
a3 = 2.00 # Damage exponent /2.00 /
#** Abatement cost
expcost2 = 2.6 # Theta2 in the model, Eq. 10 Exponent of control cost function / 2.6 /
pback = 550 # Cost of backstop 2010$ per tCO2 2015 / 550 /
gback = 0.025 # Initial cost decline backstop cost per period / .025/
limmiu = 1.2 # Upper limit on control rate after 2150 / 1.2 /
tnopol = 45 # Period before which no emissions controls base / 45 /
cprice0 = 2 # Initial base carbon price (2010$ per tCO2) / 2 /
gcprice = 0.02 # Growth rate of base carbon price per year /.02 /
#** Scaling and inessential parameters
#* Note that these are unnecessary for the calculations
#* They ensure that MU of first period's consumption =1 and PV cons = PV utilty
scale1 = 0.0302455265681763 # Multiplicative scaling coefficient /0.0302455265681763 /
scale2 = -10993.704 # Additive scaling coefficient /-10993.704/;
#* Parameters for long-run consistency of carbon cycle
#(Question)
b11 = 1 - b12
b21 = b12*mateq/mueq
b22 = 1 - b21 - b23
b32 = b23*mueq/mleq
b33 = 1 - b32
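# Added illustrative check (not part of the original DICE code): written as a
# transition matrix acting on the reservoir vector [MAT, MU, ML], each column
# of Phi sums to one, so the carbon cycle conserves total carbon apart from
# the emission term added to the atmosphere box.
_phi = np.array([[b11, b21, 0.0],
                 [b12, b22, b32],
                 [0.0, b23, b33]])
assert np.allclose(_phi.sum(axis=0), 1.0)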
#* Further definitions of parameters
a20 = a2
sig0 = e0/(q0*(1-miu0)) #From Eq. 14
lam = fco22x/ t2xco2 #From Eq. 25
l = np.zeros(NT)
l[0] = pop0 #Labor force
al = np.zeros(NT)
al[0] = a0
gsig = np.zeros(NT)
gsig[0] = gsigma1
sigma = np.zeros(NT)
sigma[0]= sig0
ga = ga0 * np.exp(-dela*5*(t-1)) #TFP growth rate dynamics, Eq. 7
pbacktime = pback * (1-gback)**(t-1) #Backstop price
etree = eland0*(1-deland)**(t-1) #Emissions from deforestation
rr = 1/((1+prstp)**(tstep*(t-1))) #Eq. 3
#The following three equations define the exogenous radiative forcing; used in Eq. 23
forcoth = np.full(NT,fex0)
forcoth[0:18] = forcoth[0:18] + (1/17)*(fex1-fex0)*(t[0:18]-1)
forcoth[18:NT] = forcoth[18:NT] + (fex1-fex0)
optlrsav = (dk + .004)/(dk + .004*elasmu + prstp)*gama #Optimal long-run savings rate used for transversality (Question)
cost1 = np.zeros(NT)
cumetree = np.zeros(NT)
cumetree[0] = 100
cpricebase = cprice0*(1+gcprice)**(5*(t-1))
@njit('(float64[:], int32)')
def InitializeLabor(il,iNT):
for i in range(1,iNT):
il[i] = il[i-1]*(popasym / il[i-1])**popadj
@njit('(float64[:], int32)')
def InitializeTFP(ial,iNT):
for i in range(1,iNT):
ial[i] = ial[i-1]/(1-ga[i-1])
@njit('(float64[:], int32)')
def InitializeGrowthSigma(igsig,iNT):
for i in range(1,iNT):
igsig[i] = igsig[i-1]*((1+dsig)**tstep)
@njit('(float64[:], float64[:],float64[:],int32)')
def InitializeSigma(isigma,igsig,icost1,iNT):
for i in range(1,iNT):
isigma[i] = isigma[i-1] * np.exp(igsig[i-1] * tstep)
icost1[i] = pbacktime[i] * isigma[i] / expcost2 /1000
@njit('(float64[:], int32)')
def InitializeCarbonTree(icumetree,iNT):
for i in range(1,iNT):
icumetree[i] = icumetree[i-1] + etree[i-1]*(5/3.666)
"""
Functions of the model
"""
"""
First: Functions related to emissions of carbon and weather damages
"""
# Retuns the total carbon emissions; Eq. 18
@njit('float64(float64[:],int32)')
def fE(iEIND,index):
return iEIND[index] + etree[index]
#Eq.14: Determines the emission of carbon by industry EIND
@njit('float64(float64[:],float64[:],float64[:],int32)')
def fEIND(iYGROSS, iMIU, isigma,index):
return isigma[index] * iYGROSS[index] * (1 - iMIU[index])
#Cumulative industrial emission of carbon
@njit('float64(float64[:],float64[:],int32)')
def fCCA(iCCA,iEIND,index):
return iCCA[index-1] + iEIND[index-1] * 5 / 3.666
#Cumulative total carbon emission
@njit('float64(float64[:],float64[:],int32)')
def fCCATOT(iCCA,icumetree,index):
return iCCA[index] + icumetree[index]
#Eq. 22: the dynamics of the radiative forcing
@njit('float64(float64[:],int32)')
def fFORC(iMAT,index):
return fco22x * np.log(iMAT[index]/588.000)/np.log(2) + forcoth[index]
# Dynamics of Omega; Eq.9
@njit('float64(float64[:],int32)')
def fDAMFRAC(iTATM,index):
return a1*iTATM[index] + a2*iTATM[index]**a3
#Calculate damages as a function of Gross industrial production; Eq.8
@njit('float64(float64[:],float64[:],int32)')
def fDAMAGES(iYGROSS,iDAMFRAC,index):
return iYGROSS[index] * iDAMFRAC[index]
#Dynamics of Lambda; Eq. 10 - cost of the reduction of carbon emission (Abatement cost)
@njit('float64(float64[:],float64[:],float64[:],int32)')
def fABATECOST(iYGROSS,iMIU,icost1,index):
return iYGROSS[index] * icost1[index] * iMIU[index]**expcost2
#Marginal Abatement cost
@njit('float64(float64[:],int32)')
def fMCABATE(iMIU,index):
return pbacktime[index] * iMIU[index]**(expcost2-1)
#Price of carbon reduction
@njit('float64(float64[:],int32)')
def fCPRICE(iMIU,index):
return pbacktime[index] * (iMIU[index])**(expcost2-1)
#Eq. 19: Dynamics of the carbon concentration in the atmosphere
@njit('float64(float64[:],float64[:],float64[:],int32)')
def fMAT(iMAT,iMU,iE,index):
if(index == 0):
return mat0
else:
return iMAT[index-1]*b11 + iMU[index-1]*b21 + iE[index-1] * 5 / 3.666
#Eq. 21: Dynamics of the carbon concentration in the ocean LOW level
@njit('float64(float64[:],float64[:],int32)')
def fML(iML,iMU,index):
if(index == 0):
return ml0
else:
return iML[index-1] * b33 + iMU[index-1] * b23
#Eq. 20: Dynamics of the carbon concentration in the ocean UP level
@njit('float64(float64[:],float64[:],float64[:],int32)')
def fMU(iMAT,iMU,iML,index):
if(index == 0):
return mu0
else:
return iMAT[index-1]*b12 + iMU[index-1]*b22 + iML[index-1]*b32
#Eq. 23: Dynamics of the atmospheric temperature
@njit('float64(float64[:],float64[:],float64[:],int32)')
def fTATM(iTATM,iFORC,iTOCEAN,index):
if(index == 0):
return tatm0
else:
return iTATM[index-1] + c1 * (iFORC[index] - (fco22x/t2xco2) * iTATM[index-1] - c3 * (iTATM[index-1] - iTOCEAN[index-1]))
#Eq. 24: Dynamics of the ocean temperature
@njit('float64(float64[:],float64[:],int32)')
def fTOCEAN(iTATM,iTOCEAN,index):
if(index == 0):
return tocean0
else:
return iTOCEAN[index-1] + c4 * (iTATM[index-1] - iTOCEAN[index-1])
"""
Second: Function related to economic variables
"""
#The total production without climate losses denoted previously by YGROSS
@njit('float64(float64[:],float64[:],float64[:],int32)')
def fYGROSS(ial,il,iK,index):
return ial[index] * ((il[index]/1000)**(1-gama)) * iK[index]**gama
#The production under the climate damages cost
@njit('float64(float64[:],float64[:],int32)')
def fYNET(iYGROSS, iDAMFRAC, index):
return iYGROSS[index] * (1 - iDAMFRAC[index])
#Production after abatement cost
@njit('float64(float64[:],float64[:],int32)')
def fY(iYNET,iABATECOST,index):
return iYNET[index] - iABATECOST[index]
#Consumption Eq. 11
@njit('float64(float64[:],float64[:],int32)')
def fC(iY,iI,index):
return iY[index] - iI[index]
#Per capita consumption, Eq. 12
@njit('float64(float64[:],float64[:],int32)')
def fCPC(iC,il,index):
return 1000 * iC[index] / il[index]
#Saving policy: investment
@njit('float64(float64[:],float64[:],int32)')
def fI(iS,iY,index):
return iS[index] * iY[index]
#Capital dynamics Eq. 13
@njit('float64(float64[:],float64[:],int32)')
def fK(iK,iI,index):
if(index == 0):
return k0
else:
return (1-dk)**tstep * iK[index-1] + tstep * iI[index-1]
#Interest rate equation; Eq. 26 added in personal notes
@njit('float64(float64[:],int32)')
def fRI(iCPC,index):
return (1 + prstp) * (iCPC[index+1]/iCPC[index])**(elasmu/tstep) - 1
#Periodic utility: A form of Eq. 2
@njit('float64(float64[:],float64[:],int32)')
def fCEMUTOTPER(iPERIODU,il,index):
return iPERIODU[index] * il[index] * rr[index]
#The term between brackets in Eq. 2
@njit('float64(float64[:],float64[:],int32)')
def fPERIODU(iC,il,index):
return ((iC[index]*1000/il[index])**(1-elasmu) - 1) / (1 - elasmu) - 1
#utility function
@guvectorize([(float64[:], float64[:])], '(n), (m)')
def fUTILITY(iCEMUTOTPER, resUtility):
resUtility[0] = tstep * scale1 * np.sum(iCEMUTOTPER) + scale2
"""
In this part we implement the objective function
"""
# * Control rate limits
MIU_lo = np.full(NT,0.01)
MIU_up = np.full(NT,limmiu)
MIU_up[0:29] = 1
MIU_lo[0] = miu0
MIU_up[0] = miu0
MIU_lo[MIU_lo==MIU_up] = 0.99999*MIU_lo[MIU_lo==MIU_up]
bnds1=[]
for i in range(NT):
bnds1.append((MIU_lo[i],MIU_up[i]))
# * Control variables
lag10 = t > NT - 10
S_lo = np.full(NT,1e-1)
S_lo[lag10] = optlrsav
S_up = np.full(NT,0.9)
S_up[lag10] = optlrsav
S_lo[S_lo==S_up] = 0.99999*S_lo[S_lo==S_up]
bnds2=[]
for i in range(NT):
bnds2.append((S_lo[i],S_up[i]))
# Arbitrary starting values for the control variables:
S_start = np.full(NT,0.2)
S_start[S_start < S_lo] = S_lo[S_start < S_lo]
S_start[S_start > S_up] = S_up[S_start > S_up]
MIU_start = 0.99*MIU_up
MIU_start[MIU_start < MIU_lo] = MIU_lo[MIU_start < MIU_lo]
MIU_start[MIU_start > MIU_up] = MIU_up[MIU_start > MIU_up]
K = np.zeros(NT)
YGROSS = np.zeros(NT)
EIND = np.zeros(NT)
E = np.zeros(NT)
CCA = np.zeros(NT)
CCATOT = np.zeros(NT)
MAT = np.zeros(NT)
ML = np.zeros(NT)
MU = np.zeros(NT)
FORC = np.zeros(NT)
TATM = np.zeros(NT)
TOCEAN = np.zeros(NT)
DAMFRAC = np.zeros(NT)
DAMAGES = np.zeros(NT)
ABATECOST = np.zeros(NT)
MCABATE = np.zeros(NT)
CPRICE = np.zeros(NT)
YNET = np.zeros(NT)
Y = np.zeros(NT)
I = np.zeros(NT)
'''This file contains a set of utility functions that
help with positioning, building a game board, and
encoding data to be used later'''
import itertools
import json
import random
import os
import copy
from jsonmerge import Merger
from gym import spaces
import numpy as np
from . import constants
class PommermanJSONEncoder(json.JSONEncoder):
'''A helper class to encode state data into a json object'''
def default(self, obj):
if isinstance(obj, np.ndarray):
return obj.tolist()
elif isinstance(obj, constants.Item):
return obj.value
elif isinstance(obj, constants.Action):
return obj.value
elif isinstance(obj, np.int64):
return int(obj)
elif hasattr(obj, 'to_json'):
return obj.to_json()
elif isinstance(obj, spaces.Discrete):
return obj.n
elif isinstance(obj, spaces.Tuple):
return [space.n for space in obj.spaces]
return json.JSONEncoder.default(self, obj)
def make_board(size, num_rigid=0, num_wood=0):
"""Make the random but symmetric board.
The numbers refer to the Item enum in constants. This is:
0 - passage
1 - rigid wall
2 - wood wall
3 - bomb
4 - flames
5 - fog
6 - extra bomb item
7 - extra firepower item
8 - kick
9 - skull
10 - 13: agents
Args:
size: The dimension of the board, i.e. it's sizeXsize.
num_rigid: The number of rigid walls on the board. This should be even.
num_wood: Similar to above but for wood walls.
Returns:
board: The resulting random board.
"""
def lay_wall(value, num_left, coordinates, board):
'''Lays all of the walls on a board'''
x, y = random.sample(coordinates, 1)[0]
coordinates.remove((x, y))
coordinates.remove((y, x))
board[x, y] = value
board[y, x] = value
num_left -= 2
return num_left
def make(size, num_rigid, num_wood):
'''Constructs a game/board'''
# Initialize everything as a passage.
board = np.ones((size,
size)).astype(np.uint8) * constants.Item.Passage.value
# Gather all the possible coordinates to use for walls.
coordinates = set([
(x, y) for x, y in \
itertools.product(range(size), range(size)) \
if x != y])
# Set the players down. Exclude them from coordinates.
# Agent0 is in top left. Agent1 is in bottom left.
# Agent2 is in bottom right. Agent 3 is in top right.
board[1, 1] = constants.Item.Agent0.value
board[size - 2, 1] = constants.Item.Agent1.value
board[size - 2, size - 2] = constants.Item.Agent2.value
board[1, size - 2] = constants.Item.Agent3.value
agents = [(1, 1), (size - 2, 1), (1, size - 2), (size - 2, size - 2)]
for position in agents:
if position in coordinates:
coordinates.remove(position)
# Exclude breathing room on either side of the agents.
for i in range(2, 4):
coordinates.remove((1, i))
coordinates.remove((i, 1))
coordinates.remove((1, size - i - 1))
coordinates.remove((size - i - 1, 1))
coordinates.remove((size - 2, size - i - 1))
coordinates.remove((size - i - 1, size - 2))
coordinates.remove((i, size - 2))
coordinates.remove((size - 2, i))
# Lay down wooden walls providing guaranteed passage to other agents.
wood = constants.Item.Wood.value
for i in range(4, size - 4):
board[1, i] = wood
board[size - i - 1, 1] = wood
board[size - 2, size - i - 1] = wood
board[size - i - 1, size - 2] = wood
coordinates.remove((1, i))
coordinates.remove((size - i - 1, 1))
coordinates.remove((size - 2, size - i - 1))
coordinates.remove((size - i - 1, size - 2))
num_wood -= 4
# Lay down the rigid walls.
while num_rigid > 0:
num_rigid = lay_wall(constants.Item.Rigid.value, num_rigid,
coordinates, board)
# Lay down the wooden walls.
while num_wood > 0:
num_wood = lay_wall(constants.Item.Wood.value, num_wood,
coordinates, board)
return board, agents
assert (num_rigid % 2 == 0)
assert (num_wood % 2 == 0)
board, agents = make(size, num_rigid, num_wood)
# Make sure it's possible to reach most of the passages.
while len(inaccessible_passages(board, agents)) > 4:
board, agents = make(size, num_rigid, num_wood)
return board
def make_items(board, num_items):
'''Lays all of the items on the board'''
item_positions = {}
while num_items > 0:
row = random.randint(0, len(board) - 1)
col = random.randint(0, len(board[0]) - 1)
if board[row, col] != constants.Item.Wood.value:
continue
if (row, col) in item_positions:
continue
item_positions[(row, col)] = random.choice([
constants.Item.ExtraBomb, constants.Item.IncrRange,
constants.Item.Kick
]).value
num_items -= 1
return item_positions
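# Illustrative usage (the 11x11 size and the wall/item counts below are
# example values, not constants taken from this module):
#   board = make_board(11, num_rigid=36, num_wood=36)
#   item_positions = make_items(board, num_items=20)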
def inaccessible_passages(board, agent_positions):
"""Return inaccessible passages on this board."""
seen = set()
agent_position = agent_positions.pop()
passage_positions = np.where(board == constants.Item.Passage.value)
positions = list(zip(passage_positions[0], passage_positions[1]))
Q = [agent_position]
while Q:
row, col = Q.pop()
for (i, j) in [(1, 0), (-1, 0), (0, 1), (0, -1)]:
next_position = (row + i, col + j)
if next_position in seen:
continue
if not position_on_board(board, next_position):
continue
if position_is_rigid(board, next_position):
continue
if next_position in positions:
positions.pop(positions.index(next_position))
if not len(positions):
return []
seen.add(next_position)
Q.append(next_position)
return positions
def is_valid_direction(board, position, direction, invalid_values=None):
    '''Determines if a move is in a valid direction'''
row, col = position
if invalid_values is None:
invalid_values = [item.value for item in \
[constants.Item.Rigid, constants.Item.Wood]]
if constants.Action(direction) == constants.Action.Stop:
return True
if constants.Action(direction) == constants.Action.Up:
return row - 1 >= 0 and board[row - 1][col] not in invalid_values
if constants.Action(direction) == constants.Action.Down:
return row + 1 < len(board) and board[row +
1][col] not in invalid_values
if constants.Action(direction) == constants.Action.Left:
return col - 1 >= 0 and board[row][col - 1] not in invalid_values
if constants.Action(direction) == constants.Action.Right:
return col + 1 < len(board[0]) and \
board[row][col+1] not in invalid_values
raise constants.InvalidAction("We did not receive a valid direction: ",
direction)
def _position_is_item(board, position, item):
    '''Determines if a position holds an item'''
return board[position] == item.value
def position_is_flames(board, position):
    '''Determines if a position has flames'''
return _position_is_item(board, position, constants.Item.Flames)
def position_is_bomb(bombs, position):
"""Check if a given position is a bomb.
We don't check the board because that is an unreliable source. An agent
may be obscuring the bomb on the board.
"""
for bomb in bombs:
if position == bomb.position:
return True
return False
def position_is_powerup(board, position):
    '''Determines if a position has a powerup present'''
powerups = [
constants.Item.ExtraBomb, constants.Item.IncrRange, constants.Item.Kick
]
item_values = [item.value for item in powerups]
return board[position] in item_values
def position_is_wall(board, position):
    '''Determines if a position is a wall tile'''
return position_is_rigid(board, position) or \
position_is_wood(board, position)
def position_is_passage(board, position):
    '''Determines if a position is a passage tile'''
return _position_is_item(board, position, constants.Item.Passage)
def position_is_rigid(board, position):
    '''Determines if a position has a rigid tile'''
return _position_is_item(board, position, constants.Item.Rigid)
def position_is_wood(board, position):
    '''Determines if a position has a wood tile'''
return _position_is_item(board, position, constants.Item.Wood)
def position_is_agent(board, position):
    '''Determines if a position has an agent present'''
return board[position] in [
constants.Item.Agent0.value, constants.Item.Agent1.value,
constants.Item.Agent2.value, constants.Item.Agent3.value
]
def position_is_enemy(board, position, enemies):
    '''Determines if a position is an enemy'''
return constants.Item(board[position]) in enemies
# TODO: Fix this so that it includes the teammate.
def position_is_passable(board, position, enemies):
    '''Determines if a position can be passed'''
return all([
any([
position_is_agent(board, position),
position_is_powerup(board, position),
position_is_passage(board, position)
]), not position_is_enemy(board, position, enemies)
])
def position_is_fog(board, position):
    '''Determines if a position is fog'''
return _position_is_item(board, position, constants.Item.Fog)
def agent_value(id_):
    '''Gets the state value based on the agent's id'''
return getattr(constants.Item, 'Agent%d' % id_).value
def position_in_items(board, position, items):
    '''Determines if the current position has one of the given items'''
return any([_position_is_item(board, position, item) for item in items])
def position_on_board(board, position):
    '''Determines if a position is on the board'''
x, y = position
return all([len(board) > x, len(board[0]) > y, x >= 0, y >= 0])
def get_direction(position, next_position):
"""Get the direction such that position --> next_position.
We assume that they are adjacent.
"""
x, y = position
next_x, next_y = next_position
if x == next_x:
if y < next_y:
return constants.Action.Right
else:
return constants.Action.Left
elif y == next_y:
if x < next_x:
return constants.Action.Down
else:
return constants.Action.Up
raise constants.InvalidAction(
"We did not receive a valid position transition.")
def get_next_position(position, direction):
'''Returns the next position coordinates'''
x, y = position
if direction == constants.Action.Right:
return (x, y + 1)
elif direction == constants.Action.Left:
return (x, y - 1)
elif direction == constants.Action.Down:
return (x + 1, y)
elif direction == constants.Action.Up:
return (x - 1, y)
elif direction == constants.Action.Stop:
return (x, y)
raise constants.InvalidAction("We did not receive a valid direction.")
def make_np_float(feature):
    '''Converts an integer feature space into floats'''
return np.array(feature).astype(np.float32)
def join_json_state(record_json_dir, agents, finished_at, config, info):
'''Combines all of the json state files into one'''
json_schema = {"properties": {"state": {"mergeStrategy": "append"}}}
json_template = {
"agents": agents,
"finished_at": finished_at,
"config": config,
"result": {
"name": info['result'].name,
"id": info['result'].value
}
}
if info['result'] is not constants.Result.Tie:
json_template['winners'] = info['winners']
json_template['state'] = []
merger = Merger(json_schema)
base = merger.merge({}, json_template)
for root, dirs, files in os.walk(record_json_dir):
for name in files:
path = os.path.join(record_json_dir, name)
if name.endswith('.json') and "game_state" not in name:
with open(path) as data_file:
data = json.load(data_file)
head = {"state": [data]}
base = merger.merge(base, head)
with open(os.path.join(record_json_dir, 'game_state.json'), 'w') as f:
f.write(json.dumps(base, sort_keys=True, indent=4))
for root, dirs, files in os.walk(record_json_dir):
for name in files:
if "game_state" not in name:
os.remove(os.path.join(record_json_dir, name))
def softmax(x):
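    """Numerically stable softmax over the last axis of x."""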
x = np.array(x)
x -= np.max(x, axis=-1, keepdims=True) # For numerical stability
exps = np.exp(x)
return exps / np.sum(exps, axis=-1, keepdims=True)
def update_agent_memory(cur_memory, cur_obs):
"""
Update agent's memory of the board
:param cur_memory: Current memory including board, bomb_life, and bomb_blast_strength
:param cur_obs: Current observation of the agent, including the above
:return: The new memory
"""
def _get_explosion_range(row, col, blast_strength_map):
strength = int(blast_strength_map[row, col])
indices = {
'up': ([row - i, col] for i in range(1, strength)),
'down': ([row + i, col] for i in range(strength)),
'left': ([row, col - i] for i in range(1, strength)),
'right': ([row, col + i] for i in range(1, strength))
}
return indices
# Note: all three 11x11 boards are numpy arrays
if cur_memory is None:
new_memory = {
'bomb_life': np.copy(cur_obs['bomb_life']),
'bomb_blast_strength': np.copy(cur_obs['bomb_blast_strength']),
'board': np.copy(cur_obs['board'])
}
return new_memory
# Work on a copy and keep original unchanged
cur_memory = copy.deepcopy(cur_memory)
# Update history by incrementing timestep by 1
board = cur_memory['board']
bomb_life = cur_memory['bomb_life']
bomb_blast_strength = cur_memory['bomb_blast_strength']
# Decrease bomb_life by 1
original_bomb_life = np.copy(bomb_life)
np.putmask(bomb_life, bomb_life > 0, bomb_life - 1)
# Find out which bombs are going to explode
exploding_bomb_pos = np.logical_xor(original_bomb_life, bomb_life)
non_exploding_bomb_pos = np.logical_and(bomb_life, np.ones_like(bomb_life))
has_explosions = exploding_bomb_pos.any()
# Map to record which positions will become flames
flamed_positions = np.zeros_like(board)
while has_explosions:
has_explosions = False
# For each bomb
for row, col in zip(*exploding_bomb_pos.nonzero()):
# For each direction
for direction, indices in _get_explosion_range(row, col, bomb_blast_strength).items():
# For each location along that direction
for r, c in indices:
if not position_on_board(board, (r, c)):
break
# Stop when reaching a wall
if board[r, c] == constants.Item.Rigid.value:
break
# Otherwise the position is flamed
flamed_positions[r, c] = 1
# Stop when reaching a wood
if board[r, c] == constants.Item.Wood.value:
break
# Check if other non-exploding bombs are triggered
exploding_bomb_pos = np.zeros_like(exploding_bomb_pos)
for row, col in zip(*non_exploding_bomb_pos.nonzero()):
if flamed_positions[row, col]:
has_explosions = True
exploding_bomb_pos[row, col] = True
non_exploding_bomb_pos[row, col] = False
new_memory = dict()
# Update bomb_life map
new_memory['bomb_life'] = np.where(flamed_positions == 0, bomb_life, 0)
# Update bomb_strength map
new_memory['bomb_blast_strength'] = np.where(flamed_positions == 0, bomb_blast_strength, 0)
# Update Board
# If board from observation has fog value, do nothing &
# keep original updated history.
# Otherwise, overwrite history by observation.
new_memory['board'] = np.where(flamed_positions == 0, cur_memory['board'], constants.Item.Passage.value)
# Overlay agent's newest observations onto the memory
obs_board = cur_obs['board']
for r, c in zip(*np.where(obs_board != constants.Item.Fog.value)):
# board[r, c] = obs_board[r, c] if obs_board[r, c] in self.memory_values else 0
new_memory['board'][r, c] = obs_board[r, c]
new_memory['bomb_life'][r, c] = cur_obs['bomb_life'][r, c]
new_memory['bomb_blast_strength'][r, c] = cur_obs['bomb_blast_strength'][r, c]
# For invisible parts of the memory, only keep useful information
for r, c in zip(*np.where(obs_board == constants.Item.Fog.value)):
if new_memory['board'][r, c] not in constants.MEMORY_VALS:
new_memory['board'][r, c] = constants.Item.Passage.value
return new_memory
def combine_agent_obs_and_memory(memory, cur_obs):
"""
Returns an extended observation of the agent
by incorporating its memory.
NOTE: Assumes the memory is up-to-date (i.e. called `update_agent_memory()`)
:param memory: The agent's memory of the game, including board, bomb life, and blast strength
:param cur_obs: The agent's current observation object
:return: The new, extended observation
"""
if memory is None:
return cur_obs
extended_obs = copy.deepcopy(cur_obs)
for map in ['bomb_life', 'bomb_blast_strength', 'board']:
extended_obs[map] = np.copy(memory[map])
return extended_obs
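# Typical per-step usage (illustrative sketch; the variable names are
# assumptions, not part of this module's API):
#   memory = update_agent_memory(memory, obs)
#   extended_obs = combine_agent_obs_and_memory(memory, obs)
#   model_input = convert_to_model_input(agent_id, extended_obs)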
def convert_to_model_input(agent_id, history):
# History Observation
board_obs = history['board']
bomb_blast_strength_obs = history['bomb_blast_strength']
bomb_life_obs = history['bomb_life']
# Model Input
passage_input = np.zeros([11,11])
wall_input = np.zeros([11,11])
wood_input = np.zeros([11,11])
self_input = np.zeros([11,11])
friend_input = np.zeros([11,11])
enemy_input = np.zeros([11,11])
flame_input = np.zeros([11,11])
extra_bomb_input = np.zeros([11,11])
increase_range_input = np.zeros([11,11])
kick_input = np.zeros([11,11])
fog_input = np.zeros([11,11])
can_kick_input = np.zeros([11,11])
self_ammo_input = np.zeros([11,11])
passage_input[board_obs==0] = 1
wall_input[board_obs==1] = 1
wood_input[board_obs==2] = 1
self_input[board_obs==agent_id+10] = 1
friend_input[board_obs == (agent_id+2)%4 + 10] = 1
enemy_input[board_obs == (agent_id+1)%4 + 10] = 1
enemy_input[board_obs == (agent_id+3)%4 + 10] = 1
flame_input[board_obs==4] = 1
extra_bomb_input[board_obs==6] = 1
increase_range_input[board_obs==7] = 1
kick_input[board_obs==8] = 1
fog_input[board_obs==5] = 1
if history['can_kick']:
can_kick_input = np.ones([11,11])
self_blast_strength_input = np.full((11,11), history['blast_strength'])
self_ammo_input = np.full((11,11), history['ammo'])
bomb_input = get_bomb_input(bomb_blast_strength_obs, bomb_life_obs, board_obs)
# Stack Inputs
model_input = []
model_input.append(passage_input)
model_input.append(wall_input)
model_input.append(wood_input)
model_input.append(bomb_input)
model_input.append(flame_input)
model_input.append(fog_input)
model_input.append(extra_bomb_input)
model_input.append(increase_range_input)
model_input.append(kick_input)
model_input.append(can_kick_input)
model_input.append(self_blast_strength_input)
model_input.append(self_ammo_input)
model_input.append(self_input)
model_input.append(friend_input)
model_input.append(enemy_input)
return np.array(model_input)
def get_bomb_input(bomb_blast_strength_obs, bomb_life_obs, board_obs):
bomb_input = np.zeros([11,11])
bomb_set = {}
for i in range(10,0,-1):
bomb_x_list, bomb_y_list = np.where(bomb_life_obs==i)
for j in range(len(bomb_x_list)):
bomb_x = bomb_x_list[j]
bomb_y = bomb_y_list[j]
strength = bomb_blast_strength_obs[bomb_x, bomb_y]
life = bomb_life_obs[bomb_x, bomb_y]
bomb_set[(bomb_x,bomb_y)] = (strength, life)
bomb_expand(bomb_input, (bomb_x,bomb_y), strength, life, bomb_set, board_obs)
return bomb_input
def bomb_expand(bomb_input, bomb_pos, strength, life, bomb_set, board_obs):
bomb_x, bomb_y = bomb_pos
bomb_input[bomb_x, bomb_y] = life
# Up Expand
bomb_expand_direction(bomb_input, bomb_pos, strength, life, bomb_set, board_obs, (-1, 0))
# Down Expand
bomb_expand_direction(bomb_input, bomb_pos, strength, life, bomb_set, board_obs, (1, 0))
# Left Expand
bomb_expand_direction(bomb_input, bomb_pos, strength, life, bomb_set, board_obs, (0, -1))
# Right Expand
bomb_expand_direction(bomb_input, bomb_pos, strength, life, bomb_set, board_obs, (0, 1))
def bomb_expand_direction(bomb_input, bomb_pos, strength, life, bomb_set, board_obs, direction):
bomb_x, bomb_y = bomb_pos
for i in range(1, int(strength)):
bomb_new_x = bomb_x + i * direction[0]
bomb_new_y = bomb_y + i * direction[1]
if bomb_new_x < 0 or bomb_new_x >= bomb_input.shape[0]:
break
if bomb_new_y < 0 or bomb_new_y >= bomb_input.shape[1]:
break
if board_obs[bomb_new_x, bomb_new_y] == 1:
break
elif board_obs[bomb_new_x, bomb_new_y] == 2:
bomb_input[bomb_new_x, bomb_new_y] = life
break
elif bomb_input[bomb_new_x, bomb_new_y] > life or bomb_input[bomb_new_x, bomb_new_y] == 0:
bomb_input[bomb_new_x, bomb_new_y] = life
if (bomb_new_x, bomb_new_y) in bomb_set:
old_bomb_strength = bomb_set[(bomb_new_x, bomb_new_y)][0]
bomb_expand(bomb_input, (bomb_new_x, bomb_new_y), old_bomb_strength, life, bomb_set, board_obs)
def augment_data(X, y):
# Up Down Flip
up_down_flip_X = np.copy(X)
for i in range(X.shape[0]):
up_down_flip_X[i,:,:] = np.flipud(X[i,:,:])
up_down_flip_y = np.copy(y)
up_down_flip_y[1] = y[2]
up_down_flip_y[2] = y[1]
# Left Right Flip
left_right_flip_X = np.copy(X)
for i in range(X.shape[0]):
left_right_flip_X[i,:,:] = np.fliplr(X[i,:,:])
left_right_flip_y = np.copy(y)
left_right_flip_y[3] = y[4]
left_right_flip_y[4] = y[3]
# up left to right down diagonal flip
diag_filp1_X = np.copy(X)
for i in range(X.shape[0]):
diag_filp1_X[i,:,:] = np.rot90(np.fliplr(X[i,:,:]))
    diag_filp1_y = np.copy(y)
# -*- coding: utf-8 -*-
__all__ = ["TransitModel", "setup_fit"]
import numpy as np
from scipy.stats import beta
import matplotlib.pyplot as pl
from scipy.optimize import minimize
import george
from george import kernels
import transit
from .catalogs import KOICatalog
from .data import load_light_curves_for_kic
class TransitModel(object):
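    # Beta-distribution prior on orbital eccentricity; the (1.12, 3.09) shape
    # parameters match the long-period fit of Kipping (2013).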
eb = beta(1.12, 3.09)
def __init__(self, spec, gps, system, smass, smass_err, srad, srad_err,
fit_lcs, other_lcs):
self.spec = spec
# Put a prior range on the reference time.
t0 = system.bodies[0].t0
self.t0rng = t0 + np.array([-0.5, 0.5])
# Put a minimum prior bound on the period.
tmn = np.min([np.min(lc.time) for lc in np.append(other_lcs, fit_lcs)])
tmx = np.max([np.max(lc.time) for lc in np.append(other_lcs, fit_lcs)])
self.min_period = np.max([np.abs(t0 - tmn), np.abs(tmx - t0)])
if system.bodies[0].period < self.min_period:
self.min_period = 0.8 * system.bodies[0].period
self.gps = gps
self.system = system
self.smass = smass
self.smass_err = smass_err
self.srad = srad
self.srad_err = srad_err
self.fit_lcs = fit_lcs
self.other_lcs = other_lcs
body = self.system.bodies[0]
mx = (
self.lnprob(self.system.get_vector())[0],
(body.b, body.radius, body.period, body.e, body.omega)
)
prng = np.exp(np.append(np.linspace(np.log(0.5), np.log(2.0), 6), 0))
rrng = body.radius * np.append(np.linspace(0.8, 1.2, 6), 1.0)
rstar = self.system.central.radius
for ecc in np.linspace(0, 0.8, 4):
body.e = ecc
for w in np.linspace(-np.pi, np.pi, 4):
body.omega = w
for per in body.period*prng:
body.period = per
for rad in rrng:
body.radius = rad
for b in np.linspace(0, 1.0+0.9*rad/rstar, 6):
body.b = b
ll, blob = self.lnprob(self.system.get_vector())
if ll > mx[0] and blob[0] == 0:
mx = (ll, (b, rad, per, ecc, w))
body.e = mx[1][3]
body.omega = mx[1][4]
body.period = mx[1][2]
body.b = mx[1][0]
body.radius = mx[1][1]
# Probabilistic model:
def lnprior(self):
if not (self.t0rng[0] < self.system.bodies[0].t0 < self.t0rng[1]):
return -np.inf
star = self.system.central
if np.any([b.radius > star.radius for b in self.system.bodies]):
return -np.inf
lp = 0.0
# planet
body = self.system.bodies[0]
# Minimum period.
if body.period < self.min_period:
return -np.inf
# Impact parameter.
if body.b < 0.0:
return -np.inf
elif body.b > 2.0:
return -np.inf
elif body.b > 1.0:
lp += np.log(np.cos(0.5*np.pi*(1.0 - body.b)))
# lp -= body.b - 1.0
# Flat in log a (and period)
lp -= np.log(body.a)
# Beta in eccen
lp += self.eb.logpdf(body.e)
# stellar parameters
lp -= 0.5 * (
((star.mass - self.smass) / self.smass_err) ** 2 +
((star.radius - self.srad) / self.srad_err) ** 2
)
# limb darkening etc.
lp += self.system.jacobian()
return lp
def lnlike(self, compute_blob=True):
system = self.system
ll = 0.0
for gp, lc in zip(self.gps, self.fit_lcs):
mu = system.light_curve(lc.time, texp=lc.texp, maxdepth=2)
r = (lc.flux - mu) * 1e3
ll += gp.lnlikelihood(r, quiet=True)
if not (np.any(mu < system.central.flux) and np.isfinite(ll)):
return -np.inf, (0, 0.0)
y = system.light_curve(system.bodies[0].t0, texp=lc.texp, maxdepth=2)
f = system.central.flux
depth = float((f - y) / f)
if not compute_blob:
return ll, (0, depth)
# # Compute number of cadences with transits in the other light curves.
# ncad = sum((system.light_curve(lc.time) < system.central.flux).sum()
# for lc in self.other_lcs)
ncad = 0
return ll, (ncad, depth)
def _update_params(self, theta):
self.system.set_vector(theta[:len(self.system)])
def lnprob(self, theta, compute_blob=True):
blob = [None, 0, 0.0, 0.0]
try:
self._update_params(theta)
except ValueError:
return -np.inf, blob
blob[0] = (
self.system.bodies[0].period,
self.system.bodies[0].e,
self.system.bodies[0].b,
)
blob[3] = lp = self.lnprior()
if not np.isfinite(lp):
return -np.inf, blob
ll, (blob[1], blob[2]) = self.lnlike(compute_blob=compute_blob)
if not np.isfinite(ll):
return -np.inf, blob
# reject samples with other transits.
if blob[1] > 0:
return -np.inf, blob
return lp + ll, blob
def __call__(self, theta):
return self.lnprob(theta)
def optimize(self, niter=3):
self.system.freeze_parameter("central:*")
self.system.freeze_parameter("bodies*t0")
p0 = self.system.get_vector()
r = minimize(self._nll, p0, jac=self._grad_nll, method="L-BFGS-B")
if r.success:
self.system.set_vector(r.x)
else:
self.system.set_vector(p0)
self.system.bodies[0].b = np.abs(self.system.bodies[0].b)
self.system.thaw_parameter("central:*")
self.system.thaw_parameter("bodies*t0")
if not niter > 1:
self.system.freeze_parameter("central:*")
p0 = self.system.get_vector()
r = minimize(self._nll, p0, jac=self._grad_nll, method="L-BFGS-B")
# Thaw the stellar parameters.
self.system.thaw_parameter("central:*")
self.system.freeze_parameter("central:dilution")
# Final optimization.
p0 = self.system.get_vector()
r = minimize(self._nlp, p0, method="L-BFGS-B")
return
for gp, lc in zip(self.gps, self.fit_lcs):
mu = self.system.light_curve(lc.time, texp=lc.texp, maxdepth=2)
r = (lc.flux - mu) * 1e3
p0 = gp.get_vector()
r = minimize(gp.nll, p0, jac=gp.grad_nll, args=(r, ))
if r.success:
gp.set_vector(r.x)
else:
gp.set_vector(p0)
self.optimize(niter=niter - 1)
def _nlp(self, theta):
lnp, blob = self.lnprob(theta)
if blob[1] > 0 or not np.isfinite(lnp):
return 1e10
return -lnp
def _nll(self, theta):
try:
self.system.set_vector(theta)
except ValueError:
return 1e10
if self.system.bodies[0].period < self.min_period:
return 1e10
nll = 0.0
system = self.system
for gp, lc in zip(self.gps, self.fit_lcs):
mu = system.light_curve(lc.time, texp=lc.texp, maxdepth=2)
r = (lc.flux - mu) * 1e3
nll -= gp.lnlikelihood(r, quiet=True)
if not (np.any(mu < system.central.flux) and np.isfinite(nll)):
return 1e10
return nll
def _grad_nll(self, theta):
try:
self.system.set_vector(theta)
except ValueError:
return np.zeros_like(theta)
system = self.system
g = np.zeros_like(theta)
for gp, lc in zip(self.gps, self.fit_lcs):
mu = system.light_curve(lc.time, texp=lc.texp, maxdepth=2)
gmu = 1e3 * system.get_gradient(lc.time, texp=lc.texp, maxdepth=2)
r = (lc.flux - mu) * 1e3
alpha = gp.apply_inverse(r)
g -= np.dot(gmu, alpha)
if not (np.any(mu < system.central.flux)
and np.all(np.isfinite(g))):
return np.zeros_like(theta)
return g
def plot(self):
fig, ax = pl.subplots(1, 1)
t0 = self.system.bodies[0].t0
period = self.system.bodies[0].period
for gp, lc in zip(self.gps, self.fit_lcs):
t = (lc.time - t0 + 0.5*period) % period - 0.5*period
mu = self.system.light_curve(lc.time, texp=lc.texp, maxdepth=2)
r = (lc.flux - mu) * 1e3
p = gp.predict(r, lc.time, return_cov=False) * 1e-3
ax.plot(t, lc.flux, "k")
ax.plot(t, p + self.system.central.flux, "g")
ax.plot(t, mu, "r")
return fig
def setup_fit(args, fit_kois=False, max_points=300):
kicid = args["kicid"]
# Initialize the system.
system = transit.System(transit.Central(
flux=1.0, radius=args["srad"], mass=args["smass"], q1=0.5, q2=0.5,
))
system.add_body(transit.Body(
radius=args["radius"],
period=args["period"],
t0=args["t0"],
b=args["impact"],
e=1.123e-7,
omega=0.0,
))
if fit_kois:
kois = KOICatalog().df
kois = kois[kois.kepid == kicid]
for _, row in kois.iterrows():
system.add_body(transit.Body(
radius=float(row.koi_ror) * args["srad"],
period=float(row.koi_period),
t0=float(row.koi_time0bk) % float(row.koi_period),
b=float(row.koi_impact),
e=1.123e-7,
omega=0.0,
))
system.thaw_parameter("*")
system.freeze_parameter("bodies*ln_mass")
# Load the light curves.
lcs, _ = load_light_curves_for_kic(kicid, remove_kois=not fit_kois)
# Which light curves should be fit?
fit_lcs = []
other_lcs = []
gps = []
for lc in lcs:
f = system.light_curve(lc.time, lc.texp)
if np.any(f < 1.0):
            i = np.argmin(f)
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Oct 20 11:09:03 2020
@author: <NAME>
"""
import sys, os
import numpy as np
from math import ceil
import xarray as xr
import multiprocessing as mpi
import time
from joblib import Parallel, delayed
from tqdm import tqdm
import scipy.stats as st
from scipy.signal import filtfilt, cheby1, argrelmax, find_peaks
def get_vector_list_index_of_extreme_events(event_data):
extreme_event_index_matrix=[]
for i, e in enumerate(event_data):
ind_list= np.where(e>0)[0]
extreme_event_index_matrix.append(ind_list)
return np.array(extreme_event_index_matrix, dtype=object)
def remove_consecutive_days(event_data, event_data_idx):
"""
Consecutive days with rainfall above the threshold are considered as single events
and placed on the first day of occurrence.
Example:
event_series_matrix = compute_event_time_series(fully_proccessed_data, var)
all_event_series=flatten_lat_lon_array(event_series_matrix)
extreme_event_index_matrix=es.get_vector_list_index_of_extreme_events(all_event_series)
this_series_idx=extreme_event_index_matrix[index]
print(this_series_idx)
corr_all_event_series=es.remove_consecutive_days(all_event_series, extreme_event_index_matrix)
corr_extreme_event_index_matrix=es.get_vector_list_index_of_extreme_events(corr_all_event_series)
this_series_idx=corr_extreme_event_index_matrix[index]
print(this_series_idx)
Parameters
----------
event_data : Array
Array containing event_data
event_data_idx : Array
Array containing indices of all events in event_data
Returns
-------
event_data : Array
Corrected array of event data.
"""
if len(event_data) != len(event_data_idx):
raise ValueError("ERROR! Event data and list of idx event data are not of the same length!")
for i, e in enumerate(event_data):
this_series_idx = event_data_idx[i]
this_series_idx_1nb = event_data_idx[i] + 1
this_series_idx_2nb = event_data_idx[i] + 2
# this_series_idx_3nb=extreme_event_index_matrix[i] +3
intersect_1nb=np.intersect1d(this_series_idx, this_series_idx_1nb )
intersect_2nb=np.intersect1d(intersect_1nb, this_series_idx_2nb )
# intersect_3nb=np.intersect1d(intersect_2nb,this_series_idx_3nb )
e[intersect_1nb]=0
e[intersect_2nb]=0
return event_data
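# --- Illustrative sketch (not part of the original module) ---------------------------
# A minimal numeric check of remove_consecutive_days: the 3-day run on days 3-5 is
# collapsed onto day 3, while isolated events are untouched. All names below are only
# for demonstration.
def _demo_remove_consecutive_days():
    e1 = np.zeros(12)
    e1[[3, 4, 5, 9]] = 1                      # a 3-day run plus one isolated event
    e2 = np.zeros(12)
    e2[[0, 6]] = 1                            # two isolated events, nothing to remove
    event_data = np.array([e1, e2])
    idx = get_vector_list_index_of_extreme_events(event_data)
    cleaned = remove_consecutive_days(event_data, idx)
    # expected remaining event days: row 0 -> [3, 9], row 1 -> [0, 6]
    return [np.where(row > 0)[0] for row in cleaned]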
def randomize_e_matrix(e_matrix):
for idx, ts in enumerate(e_matrix):
e_matrix[idx] = np.random.permutation(ts)
return e_matrix
def event_synchronization(event_data, taumax=10, min_num_sync_events=10, randomize_ts=False):
num_time_series = len(event_data)
adj_matrix = np.zeros((num_time_series,num_time_series),dtype=int)
double_taumax = 2*taumax
extreme_event_index_matrix = get_vector_list_index_of_extreme_events(event_data)
event_data = remove_consecutive_days(event_data, extreme_event_index_matrix)
extreme_event_index_matrix = get_vector_list_index_of_extreme_events(event_data)
if randomize_ts is True:
extreme_event_index_matrix=randomize_e_matrix(extreme_event_index_matrix)
start=time.time()
print(f"Start computing event synchronization!")
for i, ind_list_e1 in enumerate(extreme_event_index_matrix):
# Get indices of event series 1
#ind_list_e1= np.where(e1>0)[0]
for j, ind_list_e2 in enumerate(extreme_event_index_matrix):
if i == j:
continue
sync_event=0
for m, e1_ind in enumerate(ind_list_e1[1:-1], start=1):
d_11_past = e1_ind-ind_list_e1[m-1]
d_11_next = ind_list_e1[m+1]-e1_ind
for n,e2_ind in enumerate(ind_list_e2[1:-1], start=1):
d_12_now = (e1_ind-e2_ind)
if d_12_now > taumax:
continue
d_22_past = e2_ind-ind_list_e2[n-1]
d_22_next = ind_list_e2[n+1]-e2_ind
tau = min(d_11_past, d_11_next, d_22_past, d_22_next, double_taumax) / 2
#print(tau, d_11_past, d_11_next, d_22_past, d_22_next, double_taumax)
if d_12_now <= tau and d_12_now >= 0:
sync_event += 1
#print("Sync: ", d_12_now, e1_ind, e2_ind, sync_event,n)
if d_12_now < -taumax:
#print('break!', d_12_now, e1_ind, e2_ind, )
break
            # Criterion: check whether the number of synchronous events is relevant
if sync_event >= min_num_sync_events:
#print(i,j, sync_event)
adj_matrix[i, j] = 1
end = time.time()
print(end - start)
np.save('adj_matrix_gpcp.npy', adj_matrix)
print(adj_matrix)
return adj_matrix
def event_synchronization_one_series(extreme_event_index_matrix, ind_list_e1, i, taumax=10, min_num_sync_events=10):
double_taumax = 2*taumax
sync_time_series_indicies = []
# Get indices of event series 1
# ind_list_e1= np.where(e1>0)[0]
for j, ind_list_e2 in enumerate(extreme_event_index_matrix):
if i == j:
continue
sync_events = event_sync(ind_list_e1, ind_list_e2, taumax, double_taumax)
        # Criterion: check whether the number of synchronous events is relevant
if sync_events >= min_num_sync_events:
# print(i,j, sync_event)
num_events_i = len(ind_list_e1)
num_events_j = len(ind_list_e2)
sync_time_series_indicies.append((j, num_events_i, num_events_j, sync_events))
return (i, sync_time_series_indicies)
def event_sync(ind_list_e1, ind_list_e2, taumax, double_taumax):
# Get indices of event series 2
# ind_list_e2=np.where(e2>0)[0]
sync_events = 0
#print(ind_list_e1)
#print(ind_list_e2)
for m, e1_ind in enumerate(ind_list_e1[1:-1], start=1):
d_11_past = e1_ind-ind_list_e1[m-1]
d_11_next = ind_list_e1[m+1]-e1_ind
for n, e2_ind in enumerate(ind_list_e2[1:-1], start=1):
d_12_now = (e1_ind-e2_ind)
if d_12_now > taumax:
continue
d_22_past = e2_ind-ind_list_e2[n-1]
d_22_next = ind_list_e2[n+1]-e2_ind
tau = min(d_11_past, d_11_next, d_22_past, d_22_next, double_taumax) / 2
#print(tau, d_11_past, d_11_next, d_22_past, d_22_next, double_taumax)
if d_12_now <= tau and d_12_now >= 0:
sync_events += 1
# print("Sync: ", d_12_now, e1_ind, e2_ind, sync_event,n)
if d_12_now < -taumax:
#print('break!', d_12_now, e1_ind, e2_ind, )
break
return sync_events
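# --- Illustrative sketch (not part of the original module) ---------------------------
# Toy check of event_sync on two hand-made index lists. With taumax=10 the event pairs
# (10, 9) and (20, 19) fall inside the adaptive tau window, so two synchronous events
# are expected. Names below are only for demonstration.
def _demo_event_sync():
    ind_e1 = np.array([2, 10, 20, 30])
    ind_e2 = np.array([1, 9, 19, 31])
    taumax = 10
    return event_sync(ind_e1, ind_e2, taumax, 2 * taumax)   # expected: 2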
def prepare_es_input_data(event_data, rcd=True):
"""
    Creates an array of event-index lists (one list per time series) and, if rcd is True,
    removes consecutive event days first.
"""
extreme_event_index_matrix = get_vector_list_index_of_extreme_events(event_data)
if rcd is True:
print("Start removing consecutive days...")
event_data = remove_consecutive_days(event_data, extreme_event_index_matrix)
extreme_event_index_matrix = get_vector_list_index_of_extreme_events(event_data)
print("End removing consecutive days!")
return extreme_event_index_matrix
def parallel_event_synchronization(event_data, taumax=10, min_num_sync_events=1, job_id=0, num_jobs=1, savepath="./E_matrix.npy", null_model=None,
                                   weighted=False, med=None, hq=None, lq=None):
    # weighted, med, hq and lq are assumed parameters: the original signature appears
    # truncated, and these defaults only make the weighted branch below runnable.
num_time_series = len(event_data)
one_array_length = int(num_time_series/num_jobs) +1
extreme_event_index_matrix = prepare_es_input_data(event_data)
start_arr_idx = job_id*one_array_length
end_arr_idx = (job_id+1)*one_array_length
print(f"Start computing event synchronization for event data from {start_arr_idx} to {end_arr_idx}!")
# For parallel Programming
num_cpus_avail = mpi.cpu_count()
print(f"Number of available CPUs: {num_cpus_avail}")
parallelArray = []
start = time.time()
# Parallelizing by using joblib
backend = 'multiprocessing'
# backend='loky'
#backend='threading'
parallelArray = (Parallel(n_jobs=num_cpus_avail, backend=backend)
(delayed(event_synchronization_one_series)
(extreme_event_index_matrix, e1, start_arr_idx + i, taumax, min_num_sync_events)
for i, e1 in enumerate(tqdm(extreme_event_index_matrix[start_arr_idx:end_arr_idx]))
)
)
    # Store output of parallel processes in the adjacency edge list
adj_matrix_edge_list = []
print("Now store results in numpy array to hard drive!")
for process in tqdm(parallelArray):
i, list_sync_event_series = process
for sync_event in list_sync_event_series:
j, num_events_i, num_events_j, num_sync_events_ij = sync_event
thresh_null_model = null_model[num_events_i, num_events_j]
# Check if number of synchronous events is significant according to the null model
            # The observed count must be strictly greater than the threshold (not >=)
if num_sync_events_ij > thresh_null_model:
# print(
# f'i {i} {num_events_i}, j {j} {num_events_j} Sync_events {num_sync_events_ij} > {int(thresh_null_model)}')
if weighted is True:
if np.abs(hq - lq) < 0.001:
print(f'WARNING, hq{hq}=lq{lq}')
weight = 0
else:
weight = (num_sync_events_ij - med) / (hq - lq)
else:
weight = 1 # All weights are set to 1
adj_matrix_edge_list.append((int(i), int(j), weight))
# print(i, list_sync_event_series)
end = time.time()
print(end - start)
np.save(savepath, adj_matrix_edge_list)
print(f'Finished for job ID {job_id}')
return adj_matrix_edge_list
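# --- Illustrative usage sketch (not part of the original module) ----------------------
# Splitting the computation over several jobs: each job processes roughly 1/num_jobs of
# the time series and writes its own edge list, which can later be merged into one
# adjacency matrix. The paths, job count and the null-model matrix (e.g. the thresholds
# produced by null_model_distribution) are assumptions for illustration.
def _demo_parallel_event_synchronization(event_data, null_model_matrix):
    edge_lists = []
    for job_id in range(4):
        edges = parallel_event_synchronization(
            event_data, taumax=10, min_num_sync_events=1,
            job_id=job_id, num_jobs=4,
            savepath='./E_matrix_job{}.npy'.format(job_id),
            null_model=null_model_matrix)
        edge_lists.append(edges)
    return edge_lists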
def event_sync_reg(ind_list_e1, ind_list_e2, taumax, double_taumax):
"""
ES for regional analysis that delivers specific timings.
    It returns the timings of all synchronous events (t_lst), the events where series 1
    leads series 2 (t12_lst), the events where series 2 leads series 1 (t21_lst), and the
    corresponding dynamical delays d_12 (dyn_delay_lst).
"""
sync_events = 0
t12_lst = []
t21_lst = []
t_lst = []
dyn_delay_lst = []
for m, e1_ind in enumerate(ind_list_e1[1:-1], start=1):
d_11_past = e1_ind-ind_list_e1[m-1]
d_11_next = ind_list_e1[m+1]-e1_ind
for n, e2_ind in enumerate(ind_list_e2[1:-1], start=1):
d_12_now = (e1_ind-e2_ind)
if d_12_now > taumax:
continue
d_22_past = e2_ind-ind_list_e2[n-1]
d_22_next = ind_list_e2[n+1]-e2_ind
tau = min(d_11_past, d_11_next, d_22_past, d_22_next, double_taumax) / 2
if abs(d_12_now) <= tau:
sync_events += 1
dyn_delay_lst.append(d_12_now)
if d_12_now < 0:
t12_lst.append(e1_ind)
t_lst.append(e1_ind)
elif d_12_now > 0:
t21_lst.append(e2_ind)
t_lst.append(e2_ind)
else:
t12_lst.append(e1_ind)
t21_lst.append(e2_ind)
t_lst.append(e2_ind)
if d_12_now < -taumax:
# print('break!', d_12_now, e1_ind, e2_ind, )
break
return (t_lst, t12_lst, t21_lst, dyn_delay_lst)
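# --- Illustrative sketch (not part of the original module) ---------------------------
# Directionality of event_sync_reg: series 1 fires one day before series 2, so all
# synchronous events are attributed to t12 ("1 leads 2") and recorded at the times of
# the leading events. Expected output: t=[10, 20], t12=[10, 20], t21=[], delays=[-1, -1].
# Names below are only for demonstration.
def _demo_event_sync_reg():
    ind_e1 = np.array([2, 10, 20, 30])
    ind_e2 = np.array([3, 11, 21, 31])
    taumax = 10
    return event_sync_reg(ind_e1, ind_e2, taumax, 2 * taumax)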
def es_reg(es_r1, es_r2, taumax):
    """
    Event synchronization between two regions: returns per-time-point counts of all
    synchronous events (t), events where region 1 leads (t12), and events where
    region 2 leads (t21).
    """
from itertools import product
if es_r1.shape[1] != es_r2.shape[1]:
raise ValueError("The number of time points of ts1 and ts 2 are not identical!")
num_tp = es_r1.shape[1]
es_r1 = prepare_es_input_data(es_r1)
es_r2 = prepare_es_input_data(es_r2)
comb_e12 = np.array(list(product(es_r1, es_r2)),dtype=object)
backend = 'multiprocessing'
# backend='loky'
# backend='threading'
num_cpus_avail = mpi.cpu_count()
print(f"Number of available CPUs: {num_cpus_avail}")
parallelArray = (Parallel(n_jobs=num_cpus_avail, backend=backend)
(delayed(event_sync_reg)
(e1, e2, taumax, 2*taumax)
for (e1, e2) in tqdm(comb_e12)
)
)
t12 = np.zeros(num_tp, dtype=int)
t21 = np.zeros(num_tp, dtype=int)
t = np.zeros(num_tp)
for (t_e, t12_e, t21_e, _) in parallelArray:
t[t_e] += 1
t12[t12_e] += 1
t21[t21_e] += 1
return t, t12, t21
def get_network_comb(c_indices1, c_indices2, adjacency=None):
from itertools import product
comb_c12 = np.array(list(product(c_indices1, c_indices2)), dtype=object)
if adjacency is None:
return comb_c12
else:
comb_c12_in_network = []
for (c1, c2) in tqdm(comb_c12) :
if adjacency[c1][c2] == 1 or adjacency[c2][c1] == 1:
comb_c12_in_network.append([c1, c2])
if len(comb_c12) == len(comb_c12_in_network):
print("WARNING! All links in network seem to be connected!")
return np.array(comb_c12_in_network, dtype=object)
def get_network_comb_es(c_indices1, c_indices2, ind_ts_dict1, ind_ts_dict2, adjacency=None):
comb_c12_in_network = get_network_comb(c_indices1, c_indices2, adjacency=adjacency)
print("Get combinations!")
comb_e12 = []
for (c1, c2) in comb_c12_in_network:
e1 = ind_ts_dict1[c1]
e2 = ind_ts_dict2[c2]
comb_e12.append([e1, e2])
comb_e12 = np.array(comb_e12, dtype=object)
return comb_e12
def es_reg_network(ind_ts_dict1, ind_ts_dict2, taumax, adjacency=None):
"""
    ES between two regions. Only links that were found to be statistically significant
    (according to the provided adjacency matrix) are considered.
"""
from itertools import product
c_indices1 = ind_ts_dict1.keys()
c_indices2 = ind_ts_dict2.keys()
es1 = np.array(list(ind_ts_dict1.values()))
es2 = np.array(list(ind_ts_dict2.values()))
if es1.shape[1] != es2.shape[1]:
raise ValueError("The number of time points of ts1 and ts 2 are not identical!")
num_tp = es1.shape[1]
es_r1 = prepare_es_input_data(es1)
es_r2 = prepare_es_input_data(es2)
ind_ts_dict1 = dict(zip(c_indices1, es_r1))
ind_ts_dict2 = dict(zip(c_indices2, es_r2))
backend = 'multiprocessing'
comb_c12_in_network = get_network_comb(c_indices1, c_indices2, adjacency=adjacency)
print("Get combinations!")
comb_e12 = []
for (c1, c2) in comb_c12_in_network:
e1 = ind_ts_dict1[c1]
e2 = ind_ts_dict2[c2]
comb_e12.append([e1, e2])
comb_e12 = np.array(comb_e12, dtype=object)
# print(comb_e12)
num_cpus_avail = mpi.cpu_count()
print(f"Number of available CPUs: {num_cpus_avail}")
parallelArray = (
Parallel(n_jobs=num_cpus_avail, backend=backend)
(delayed(event_sync_reg)
(e1, e2, taumax, 2*taumax)
for(e1, e2) in tqdm(comb_e12)
)
)
t12 = np.zeros(num_tp, dtype=int)
t21 = np.zeros(num_tp, dtype=int)
t = np.zeros(num_tp)
# dyn_delay_arr=np.array([])
dyn_delay_arr = []
for (t_e, t12_e, t21_e, dyn_delay) in tqdm(parallelArray):
t[t_e] += 1
t12[t12_e] += 1
t21[t21_e] += 1
dyn_delay_arr.append(dyn_delay)
# dyn_delay_arr=np.concatenate([dyn_delay_arr, np.array(dyn_delay)], axis=0 )
dyn_delay_arr = np.concatenate(dyn_delay_arr, axis=0)
return t, t12, t21, dyn_delay_arr
# %% Null model
def get_null_model_adj_matrix_from_E_files(E_matrix_folder, num_time_series,
savepath=None):
if os.path.exists(E_matrix_folder):
path = E_matrix_folder
E_matrix_files = [os.path.join(path, fn) for fn in next(os.walk(path))[2]]
else:
raise ValueError(f"E_matrix Folder {E_matrix_folder} does not exist!")
adj_matrix = np.zeros((num_time_series, num_time_series), dtype=int)
weight_matrix = np.zeros((num_time_series, num_time_series))
for filename in tqdm(E_matrix_files):
print(f"Read Matrix with name {filename}")
if os.path.isfile(filename):
this_E_matrix = np.load(filename)
else:
raise ValueError(f"WARNING! File does not exist {filename}!")
for adj_list in tqdm(this_E_matrix):
            i, j = int(adj_list[0]), int(adj_list[1])  # edge lists may carry an extra weight column
adj_matrix[i, j] = 1
if savepath is not None:
np.save(savepath, adj_matrix)
    print(f'Finished computing Adjacency Matrix for Null model with {num_time_series} time series!')
return adj_matrix
def null_model_one_series(i, min_num_events, l, num_permutations, taumax, double_taumax):
list_thresholds_i = []
for j in range(min_num_events, i + 1):
season1 = np.zeros(l, dtype="bool")
season2 = np.zeros(l, dtype="bool")
season1[:i] = 1
season2[:j] = 1
dat = np.zeros((2, l), dtype="bool")
cor = np.zeros(num_permutations)
for k in range(num_permutations):
dat[0] = np.random.permutation(season1)
dat[1] = np.random.permutation(season2)
ind_list_e1, ind_list_e2 = get_vector_list_index_of_extreme_events(dat)
cor[k] = event_sync(ind_list_e1, ind_list_e2, taumax, double_taumax)
th05 = np.quantile(cor, 0.95)
th02 = np.quantile(cor, 0.98)
th01 = np.quantile(cor, 0.99)
th005 = np.quantile(cor, 0.995)
th001 = np.quantile(cor, 0.999)
list_thresholds_i.append([j, th05, th02, th01, th005, th001])
return i, list_thresholds_i
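# --- Illustrative sketch (not part of the original module) ---------------------------
# How the null-model threshold matrices are meant to be consumed: P[i, j] holds the
# permutation-based quantile of synchronous-event counts for two series with i and j
# events, and an observed count is significant only if it is strictly larger than that
# threshold (mirroring the check in parallel_event_synchronization). Names are
# assumptions for illustration.
def _demo_threshold_lookup(P, num_events_i, num_events_j, observed_sync_count):
    threshold = P[num_events_i, num_events_j]
    return observed_sync_count > threshold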
def null_model_distribution(length_time_series, taumax=10,
min_num_events=10, max_num_events=1000,
num_permutations=3000, savepath=None):
print("Start creating Null model of Event time series!")
print(f"Model distribution size: {num_permutations}")
l = length_time_series
double_taumax = 2*taumax
size = max_num_events-min_num_events
# num_ij_pairs = ceil(size*(size + 1) / 2) # "Kleiner Gauss"
print(f"Size of Null_model Matrix: {size}")
size = max_num_events
P1 = np.zeros((size, size))
P2 = np.zeros((size, size))
P3 = np.zeros((size, size))
P4 = np.zeros((size, size))
P5 = np.zeros((size, size))
# For parallel Programming
num_cpus_avail = mpi.cpu_count()
# num_cpus_avail=1
print(f"Number of available CPUs: {num_cpus_avail}")
backend = 'multiprocessing'
# backend='loky'
# backend='threading'
# Parallelizing by using joblib
parallelArray = (Parallel(n_jobs=num_cpus_avail, backend=backend)
(delayed(null_model_one_series)
(i, min_num_events, l, num_permutations, taumax, double_taumax)
for i in tqdm(range(min_num_events, max_num_events))
)
)
print("Now store results in numpy array to hard drive!")
for process in tqdm(parallelArray):
i, list_thresholds_i = process
for j_thresholds in list_thresholds_i:
j, th05, th02, th01, th005, th001 = j_thresholds
P1[i, j] = P1[j, i] = th05
P2[i, j] = P2[j, i] = th02
P3[i, j] = P3[j, i] = th01
P4[i, j] = P4[j, i] = th005
P5[i, j] = P5[j, i] = th001
    # Fill P with NaN for event counts below the minimum number of events
for i in range(0, min_num_events):
for j in range(0, max_num_events):
P1[i, j] = P1[j, i] = np.nan
P2[i, j] = P2[j, i] = np.nan
P3[i, j] = P3[j, i] = np.nan
P4[i, j] = P4[j, i] = np.nan
P5[i, j] = P5[j, i] = np.nan
    np.save(savepath + '_threshold_05.npy', P1)
import logging
import os
import traceback
from argparse import ArgumentParser
from typing import List
import numpy as np
import pandas as pd
from scipy import stats
from record import Record, record_factory, EXPECTED_SUBGRAPH_NUMBER, convert_subgraph_index_to_label
from visualize import boxplot, lineplot, heatmap, scatterplot, MultiPageContext, errorbar
def rankdata_greater(row):
return stats.rankdata(-row, method="ordinal")
def get_consecutive_rank_tau(df):
ret = np.zeros((len(df) - 1,))
for i in range(1, len(df)):
ret[i - 1], _ = stats.kendalltau(df.iloc[i - 1], df.iloc[i])
return ret
def get_tau_curves_by_groups(df, gt, group_table, groups):
return {cur: get_tau_along_epochs(df, gt, np.where(group_table == cur)[0]) for cur in groups}
def get_tau_along_epochs(df, gt, group):
return np.array([stats.kendalltau(row[group].values, gt[group])[0] for _, row in df.iterrows()])
def get_tau_along_epochs_combining_best_groups(df, gt, group_table, groups, universe):
tau_curves_by_groups = get_tau_curves_by_groups(df, gt, group_table, groups)
ref_gt_acc = np.zeros((len(df), EXPECTED_SUBGRAPH_NUMBER))
for cur in groups:
# for each group, enumerate the epochs from the most obedient to most rebellious
for i, loc in enumerate(np.argsort(-tau_curves_by_groups[cur])):
group_mask = np.where(group_table == cur)[0]
ref_gt_acc[i][group_mask] = df[group_mask].iloc[loc]
ref_gt_acc_tau = np.array([stats.kendalltau(acc[universe], gt[universe])[0] for acc in ref_gt_acc])
return ref_gt_acc, ref_gt_acc_tau
def get_top_k_acc_rank(acc_table, acc_gt):
gt_rank = rankdata_greater(acc_gt)
idx = np.stack([np.argsort(-row) for row in acc_table])
top_acc = np.maximum.accumulate(acc_gt[idx], 1)
top_rank = np.minimum.accumulate(gt_rank[idx], 1)
return top_acc, top_rank
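# --- Illustrative sketch (not part of the original module) ---------------------------
# rankdata_greater assigns rank 1 to the largest value, and get_top_k_acc_rank reports,
# for each epoch (row), the best ground-truth accuracy/rank among that epoch's k
# highest-predicted subgraphs, for every k. For the toy values below the expected
# outputs are top_acc = [[0.9, 0.9, 0.9], [0.5, 0.5, 0.9]] and
# top_rank = [[1, 1, 1], [2, 2, 1]]. Names below are only for demonstration.
def _demo_top_k_acc_rank():
    acc_gt = np.array([0.2, 0.9, 0.5])            # rankdata_greater(acc_gt) -> [3, 1, 2]
    acc_table = np.array([[0.1, 0.5, 0.3],
                          [0.4, 0.2, 0.6]])       # predicted accuracies per epoch
    return get_top_k_acc_rank(acc_table, acc_gt)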
def report_mean_std_max_min(analysis_dir, logger, name, arr):
np.savetxt(os.path.join(analysis_dir, "METRICS-{}.txt".format(name)),
np.array([np.mean(arr), np.std(arr), np.max(arr), np.min(arr)]))
logger.info("{}: mean={:.4f}, std={:.4f}, max={:.4f}, min={:.4f}".format(name, np.mean(arr), np.std(arr),
np.max(arr), np.min(arr)))
def stack_with_index(index, row):
return np.stack([index, row]).T
def plot_top_k_variance_chart(filepath, index, top_acc, top_rank, gt_acc, topk):
gt_acc_index = np.argsort(-gt_acc)
curves = []
for k in topk:
curves.append(stack_with_index(index, np.array([gt_acc[gt_acc_index[k - 1]]] * top_acc.shape[0])))
curves.append(stack_with_index(index, top_acc[:, k - 1]))
lineplot(curves, filepath=filepath + "_acc")
curves = []
for k in topk:
curves.append(stack_with_index(index, np.array([k] * top_acc.shape[0])))
curves.append(stack_with_index(index, top_rank[:, k - 1]))
lineplot(curves, filepath=filepath + "_rank", inverse_y=True)
def pipeline_for_single_instance(logger, analysis_dir, main: Record, finetune: List[Record], by: str, gt: np.ndarray):
logger.info("Analysing results for {}".format(analysis_dir))
main_df = main.validation_acc_dataframe(by)
main_archit = main.grouping_subgraph_training_dataframe(by)
main_grouping = main.grouping_numpy
os.makedirs(analysis_dir, exist_ok=True)
# Save raw data
main_df.to_csv(os.path.join(analysis_dir, "val_acc_all_epochs.csv"), index=True)
np.savetxt(os.path.join(analysis_dir, "group_info.txt"), main_grouping, "%d")
# correlation between subgraphs
corr_matrix = main_df.corr().values
heatmap(corr_matrix, filepath=os.path.join(analysis_dir, "corr_heatmap"))
np.savetxt(os.path.join(analysis_dir, "corr_heatmap.txt"), corr_matrix)
# Consecutive tau (single)
consecutive_taus = get_consecutive_rank_tau(main_df)
lineplot([np.array(list(zip(main_df.index[1:], consecutive_taus)))],
filepath=os.path.join(analysis_dir, "consecutive_tau_single"))
# GT rank (for color reference)
gt_rank = rankdata_greater(gt)
gt_rank_color = 1 - gt_rank / EXPECTED_SUBGRAPH_NUMBER
# in some cases, it could be a subset of 64 subgraphs; process this later
# Acc variance (lineplot)
acc_curves = [np.array(list(zip(main_df.index, main_df[i]))) for i in main_df.columns]
subgraph_markers = [[] for _ in range(EXPECTED_SUBGRAPH_NUMBER)]
if len(main.groups) != len(main.columns): # hide it for ground truth
for i, (_, row) in enumerate(main_archit.iterrows()):
for k in filter(lambda k: k >= 0, row.values):
subgraph_markers[k].append(i)
else:
logger.info("Markers hidden because groups == columns")
lineplot(acc_curves, filepath=os.path.join(analysis_dir, "acc_curve_along_epochs"),
color=[gt_rank_color[i] for i in main_df.columns], alpha=0.7,
markers=[subgraph_markers[i] for i in main_df.columns],
fmt=["-D"] * len(acc_curves))
# Rank version of df
df_rank = main_df.apply(rankdata_greater, axis=1, result_type="expand")
df_rank.columns = main_df.columns
# Rank variance (lineplot)
rank_curves = [np.array(list(zip(df_rank.index, df_rank[i]))) for i in df_rank.columns]
lineplot(rank_curves, filepath=os.path.join(analysis_dir, "rank_curve_along_epochs"),
color=[gt_rank_color[i] for i in df_rank.columns], alpha=0.7, inverse_y=True, markers=subgraph_markers)
# Rank variance for top-5 subgraphs found at half and end
# recalculate for original order
for loc in [len(main_df) // 2, len(main_df) - 1]:
selected_rank_curves = [rank_curves[i] for i in np.argsort(-main_df.iloc[loc])[:5]]
lineplot(selected_rank_curves, inverse_y=True,
filepath=os.path.join(analysis_dir, "rank_curves_along_epochs_for_ep{}".format(main_df.index[loc])))
# Rank variance (boxplot), sorted by the final rank
boxplot(sorted(df_rank.values.T, key=lambda d: d[-1]),
filepath=os.path.join(analysis_dir, "rank_boxplot_along_epochs_sorted_final_rank"),
inverse_y=True)
gt_order = np.argsort(-gt)
# Group info
np.savetxt(os.path.join(analysis_dir, "group_info_sorted_gt.txt"), main_grouping[gt_order], "%d")
# Rank variance (boxplot), sorted by ground truth
boxplot([df_rank[i] for i in gt_order if i in df_rank.columns], inverse_y=True,
filepath=os.path.join(analysis_dir, "rank_boxplot_along_epochs_sorted_gt_rank"))
boxplot([df_rank[i][-10:] for i in gt_order if i in df_rank.columns], inverse_y=True,
filepath=os.path.join(analysis_dir, "rank_boxplot_along_epochs_sorted_gt_rank_last_10"))
# Tau every epoch
gt_tau_data = get_tau_along_epochs(main_df, gt, main.columns)
report_mean_std_max_min(analysis_dir, logger, "GT-Tau-In-Window", gt_tau_data)
lineplot([stack_with_index(main_df.index, gt_tau_data)],
filepath=os.path.join(analysis_dir, "tau_curve_along_epochs"))
if finetune:
# Finetune curves
for data in finetune:
try:
finetune_step = data.finetune_step
if by == "epochs":
finetune_step //= 196
half_length = len(main_df.loc[main_df.index <= finetune_step])
finetune_df = data.validation_acc_dataframe(by, cutoff=finetune_step).iloc[:half_length]
if finetune_step < min(main_df.index) - 1 or finetune_step > max(main_df.index) + 1:
continue
finetune_df.index += finetune_step
finetune_curves = [np.array([[finetune_step, main_df.loc[finetune_step, i]]] +
list(zip(finetune_df.index, finetune_df[i])))
for i in main_df.columns]
finetune_tau_curve = get_tau_along_epochs(finetune_df, gt, data.columns)
finetune_colors = [gt_rank_color[i] for i in finetune_df.columns]
logger.info("Finetune step {}, found {} finetune curves".format(finetune_step, len(finetune_curves)))
lineplot([c[:half_length] for c in acc_curves] + finetune_curves,
filepath=os.path.join(analysis_dir,
"acc_curve_along_epochs_finetune_{}".format(finetune_step)),
color=[gt_rank_color[i] for i in main_df.columns] + finetune_colors, alpha=0.7,
fmt=["-"] * len(acc_curves) + [":"] * len(finetune_curves))
lineplot([stack_with_index(main_df.index, gt_tau_data)[:half_length],
np.concatenate((np.array([[finetune_step, gt_tau_data[half_length - 1]]]),
stack_with_index(finetune_df.index, finetune_tau_curve)))],
filepath=os.path.join(analysis_dir,
"tau_curve_along_epochs_finetune_{}".format(finetune_step)),
color=["tab:blue", "tab:blue"], alpha=1, fmt=["-", ":"])
except ValueError:
pass
# Tau every epoch group by groups
grouping_info_backup = main.grouping_info.copy()
divide_group = main.group_number == 1 and len(main.columns) == 64
for partition_file in [None] + list(os.listdir("assets")):
suffix = ""
if partition_file is not None:
if not partition_file.startswith("partition"):
continue
if not divide_group:
continue
suffix = "_" + os.path.splitext(partition_file)[0]
# regrouping
main.grouping_info = {idx: g for idx, g in enumerate(np.loadtxt(os.path.join("assets", partition_file),
dtype=np.int))}
tau_curves_by_groups = get_tau_curves_by_groups(main_df, gt, main.grouping_numpy, main.groups)
tau_curves_by_groups_mean = [np.mean(tau_curves_by_groups[cur]) for cur in main.groups]
tau_curves_by_groups_std = [np.std(tau_curves_by_groups[cur]) for cur in main.groups]
report_mean_std_max_min(analysis_dir, logger, "GT-Tau-By-Groups-Mean{}".format(suffix),
np.array(tau_curves_by_groups_mean))
report_mean_std_max_min(analysis_dir, logger, "GT-Tau-By-Groups-Std{}".format(suffix),
np.array(tau_curves_by_groups_std))
tau_curves_by_groups_for_plt = [stack_with_index(main_df.index, tau_curves_by_groups[cur])
for cur in main.groups]
pd.DataFrame(tau_curves_by_groups, columns=main.groups, index=main_df.index).to_csv(
os.path.join(analysis_dir, "tau_curves_by_groups{}.csv".format(suffix))
)
lineplot(tau_curves_by_groups_for_plt,
filepath=os.path.join(analysis_dir, "tau_curves_by_groups{}".format(suffix)))
# Acc curves (by group)
with MultiPageContext(os.path.join(analysis_dir, "acc_curve_along_epochs_group_each{}".format(suffix))) as pdf:
for g in range(main.group_number):
subgraphs = np.where(main.grouping_numpy == g)[0]
gt_rank_group = [gt_rank_color[i] for i in subgraphs]
subgraph_names = list(map(convert_subgraph_index_to_label, subgraphs))
subgraph_names_ranks = ["{} (Rank {})".format(name, gt_rank[i])
for name, i in zip(subgraph_names, subgraphs)]
# cannot leverage acc_curves, because it's a list, this can be a subset, which cannot be used as index
lineplot([np.array(list(zip(main_df.index, main_df[i]))) for i in subgraphs] +
[stack_with_index(main_df.index, [gt[i]] * len(main_df.index)) for i in subgraphs],
context=pdf, color=gt_rank_group * 2, alpha=0.8, labels=subgraph_names_ranks,
fmt=["-D"] * len(subgraphs) + ["--"] * len(subgraphs),
markers=[subgraph_markers[i] for i in subgraphs] + [[]] * len(subgraphs),
title="Group {}, Subgraph {} -- {}".format(g, "/".join(map(str, subgraphs)),
"/".join(subgraph_names)))
main.grouping_info = grouping_info_backup
# Tau among steps
for k in (10, 64):
max_tau_calc = min(k, len(main_df))
tau_correlation = np.zeros((max_tau_calc, max_tau_calc))
for i in range(max_tau_calc):
for j in range(max_tau_calc):
tau_correlation[i][j] = stats.kendalltau(main_df.iloc[-i - 1], main_df.iloc[-j - 1])[0]
heatmap(tau_correlation, filepath=os.path.join(analysis_dir, "tau_correlation_last_{}".format(k)))
np.savetxt(os.path.join(analysis_dir, "tau_correlation_last_{}.txt".format(k)), tau_correlation)
tau_correlation = tau_correlation[np.triu_indices_from(tau_correlation, k=1)]
report_mean_std_max_min(analysis_dir, logger, "Tau-as-Corr-Last-{}".format(k), tau_correlation)
# Calculate best tau and log
ref_gt_acc, ref_gt_acc_tau = get_tau_along_epochs_combining_best_groups(main_df, gt, main_grouping, main.groups,
main.columns)
pd.DataFrame(ref_gt_acc).to_csv(os.path.join(analysis_dir,
"acc_epochs_combining_different_epochs_sorted_gt.csv"))
lineplot([stack_with_index(np.arange(len(ref_gt_acc_tau)), ref_gt_acc_tau)],
filepath=os.path.join(analysis_dir, "tau_curve_epochs_sorted_combining_different_epochs"))
# Show subgraph for each batch
scatterplot([stack_with_index(main_archit.index, main_archit[col]) for col in main_archit.columns],
filepath=os.path.join(analysis_dir, "subgraph_id_for_each_batch_validated"))
# Substituted with ground truth rank
scatterplot([stack_with_index(main_archit.index, gt_rank[main_archit[col]]) for col in main_archit.columns],
filepath=os.path.join(analysis_dir, "subgraph_rank_for_each_batch_validated"),
inverse_y=True)
# Top-K-Rank
top_acc, top_rank = get_top_k_acc_rank(main_df.values, gt)
plot_top_k_variance_chart(os.path.join(analysis_dir, "top_k_along_epochs"), main_df.index,
top_acc, top_rank, gt, (1, 3))
# Observe last window (for diff. epochs)
for k in (10, 64,):
report_mean_std_max_min(analysis_dir, logger, "GT-Tau-In-Window-Last-{}".format(k), gt_tau_data[-k:])
for v in (1, 3):
report_mean_std_max_min(analysis_dir, logger, "Top-{}-Rank-Last-{}".format(v, k), top_rank[-k:, v - 1])
def pipeline_for_inter_instance(logger, analysis_dir, data, by, gt):
logger.info("Analysing results for {}".format(analysis_dir))
data_as_df = [d.validation_acc_dataframe(by) for d in data]
os.makedirs(analysis_dir, exist_ok=True)
subgraphs = data[0].columns
for d in data:
assert d.columns == subgraphs
final_acc = np.zeros((len(data), len(subgraphs)))
for i, df in enumerate(data_as_df):
final_acc[i] = df.iloc[-1]
# Consecutive tau (multi)
lineplot([np.array(list(zip(df.index[1:], get_consecutive_rank_tau(df)))) for df in data_as_df],
filepath=os.path.join(analysis_dir, "taus_consecutive_epochs"))
# Final acc distribution
boxplot(final_acc, filepath=os.path.join(analysis_dir, "final_acc"))
# Final rank distribution
final_rank = np.stack([rankdata_greater(row) for row in final_acc])
boxplot(final_rank, filepath=os.path.join(analysis_dir, "final_rank_boxplot"), inverse_y=True)
# GT-Tau
gt_tau = np.array([stats.kendalltau(row, gt[subgraphs])[0] for row in final_acc])
np.savetxt(os.path.join(analysis_dir, "inst_gt_tau.txt"), gt_tau)
report_mean_std_max_min(analysis_dir, logger, "GT-Tau", gt_tau)
# Tau every epoch
tau_data = [get_tau_along_epochs(df, gt, subgraphs) for df in data_as_df]
tau_data_mean_over_instances = np.mean(np.stack(tau_data, axis=0), axis=0)
report_mean_std_max_min(analysis_dir, logger, "GT-Tau-In-Window", np.concatenate(tau_data))
tau_curves = [stack_with_index(df.index, tau_d) for df, tau_d in zip(data_as_df, tau_data)]
lineplot(tau_curves, filepath=os.path.join(analysis_dir, "tau_curve_along_epochs"))
for k in (10, 64):
tau_data_clip = [t[-k:] for t in tau_data]
report_mean_std_max_min(analysis_dir, logger, "GT-Tau-In-Window-Last-{}-Mean".format(k),
np.array([np.mean(t) for t in tau_data_clip]))
report_mean_std_max_min(analysis_dir, logger, "GT-Tau-In-Window-Last-{}-Std".format(k),
np.array([np.std(t) for t in tau_data_clip]))
report_mean_std_max_min(analysis_dir, logger, "GT-Tau-In-Window-Last-{}-Max".format(k),
np.array([np.max(t) for t in tau_data_clip]))
report_mean_std_max_min(analysis_dir, logger, "GT-Tau-In-Window-Last-{}-Min".format(k),
np.array([np.min(t) for t in tau_data_clip]))
acc_data = [np.mean(df.iloc[-k:].values, axis=0) for df in data_as_df]
report_mean_std_max_min(analysis_dir, logger, "Acc-Mean-In-Window-Last-{}-Mean".format(k),
np.array([np.mean(x) for x in acc_data]))
report_mean_std_max_min(analysis_dir, logger, "Acc-Mean-In-Window-Last-{}-Std".format(k),
np.array([np.std(x) for x in acc_data]))
# S-Tau (last 5 epochs)
s_tau = np.zeros((min(map(lambda d: len(d), data_as_df)), len(data), len(data)))
for k in range(len(s_tau)):
for i, table1 in enumerate(data_as_df):
for j, table2 in enumerate(data_as_df):
s_tau[k][i][j], _ = stats.kendalltau(table1.iloc[k], table2.iloc[k])
np.savetxt(os.path.join(analysis_dir, "inter_inst_s_tau.txt"), s_tau[-1])
heatmap(s_tau[0], filepath=os.path.join(analysis_dir, "inter_inst_last_s_tau_heatmap"), figsize=(10, 10))
if len(data) > 1:
upper = np.triu_indices_from(s_tau[0], k=1)
report_mean_std_max_min(analysis_dir, logger, "S-Tau-Last", s_tau[-1][upper])
s_tau_mean = np.mean(s_tau[:, upper[0], upper[1]], axis=1)
s_tau_std = np.std(s_tau[:, upper[0], upper[1]], axis=1)
report_mean_std_max_min(analysis_dir, logger, "S-Tau-Min", s_tau[ | np.argmin(s_tau_mean) | numpy.argmin |
import os
import numpy as np
import xarray as xr
import zarr
import time
from dask.distributed import wait, Client, progress, Future
from typing import Union, Callable, Tuple, Any
from itertools import groupby, count
import shutil
from HSTB.kluster import kluster_variables
from HSTB.kluster.backends._base import BaseBackend
class ZarrBackend(BaseBackend):
"""
Backend for writing data to disk, used with fqpr_generation.Fqpr and xarray_conversion.BatchRead.
"""
def __init__(self, output_folder: str = None):
super().__init__(output_folder)
def _get_zarr_path(self, dataset_name: str, sys_id: str = None):
"""
Get the path to the zarr folder based on the dataset name that we provide. Ping zarr folders are based on
the serial number of the system, and that must be provided here as the sys_id
"""
if self.output_folder is None:
return None
if dataset_name == 'ping':
if not sys_id:
raise ValueError('Zarr Backend: No system id provided, cannot build ping path')
return os.path.join(self.output_folder, 'ping_' + sys_id + '.zarr')
elif dataset_name == 'navigation':
return os.path.join(self.output_folder, 'navigation.zarr')
elif dataset_name == 'ppnav':
return os.path.join(self.output_folder, 'ppnav.zarr')
elif dataset_name == 'attitude':
return os.path.join(self.output_folder, 'attitude.zarr')
else:
raise ValueError('Zarr Backend: Not a valid dataset name: {}'.format(dataset_name))
def _get_zarr_indices(self, zarr_path: str, time_array: list, append_dim: str):
"""
Get the chunk indices (based on the time dimension) using the proivded time arrays
"""
return get_write_indices_zarr(zarr_path, time_array, append_dim)
def _get_chunk_sizes(self, dataset_name: str):
"""
Pull from kluster_variables to get the correct chunk size for each dataset
"""
if dataset_name == 'ping':
return kluster_variables.ping_chunks
elif dataset_name in ['navigation', 'ppnav']:
return kluster_variables.nav_chunks
elif dataset_name == 'attitude':
return kluster_variables.att_chunks
else:
raise ValueError('Zarr Backend: Not a valid dataset name: {}'.format(dataset_name))
def _autodetermine_times(self, data: list, time_array: list = None, append_dim: str = 'time'):
"""
Get the time arrays for the dataset depending on the dataset type.
"""
if time_array:
return time_array
elif any([isinstance(d, Future) for d in data]):
raise ValueError('Zarr Backend: cannot autodetermine times from Futures')
else:
return [d[append_dim] for d in data]
def delete(self, dataset_name: str, variable_name: str, sys_id: str = None):
"""
Delete the provided variable name from the datastore on disk. var_path will be a directory of chunked files, so
we use rmtree to remove all files in the var_path directory.
"""
zarr_path = self._get_zarr_path(dataset_name, sys_id)
var_path = os.path.join(zarr_path, variable_name)
if not os.path.exists(var_path):
print('Unable to remove variable {}, path does not exist: {}'.format(variable_name, var_path))
else:
shutil.rmtree(var_path)
def write(self, dataset_name: str, data: Union[list, xr.Dataset, Future], time_array: list = None, attributes: dict = None,
sys_id: str = None, append_dim: str = 'time', skip_dask: bool = False):
"""
Write the provided data to disk, finding the correct zarr folder using dataset_name. We need time_array to get
the correct write indices for the data. If attributes are provided, we write those as well as xarray Dataset
attributes.
"""
if not isinstance(data, list):
data = [data]
if attributes is None:
attributes = {}
time_array = self._autodetermine_times(data, time_array, append_dim)
zarr_path = self._get_zarr_path(dataset_name, sys_id)
data_indices, final_size, push_forward = self._get_zarr_indices(zarr_path, time_array, append_dim)
chunks = self._get_chunk_sizes(dataset_name)
fpths = distrib_zarr_write(zarr_path, data, attributes, chunks, data_indices, final_size, push_forward, self.client,
skip_dask=skip_dask, show_progress=self.show_progress,
write_in_parallel=self.parallel_write)
return zarr_path, fpths
def write_attributes(self, dataset_name: str, attributes: dict, sys_id: str = None):
"""
If the data is written to disk, we write the attributes to the zarr store as attributes of the dataset_name record.
"""
zarr_path = self._get_zarr_path(dataset_name, sys_id)
if zarr_path is not None:
zarr_write_attributes(zarr_path, attributes)
else:
print('Writing attributes is disabled for in-memory processing')
def remove_attribute(self, dataset_name: str, attribute: str, sys_id: str = None):
"""
Remove the attribute matching name provided in the dataset_name_sys_id folder
"""
zarr_path = self._get_zarr_path(dataset_name, sys_id)
if zarr_path is not None:
zarr_remove_attribute(zarr_path, attribute)
else:
print('Removing attributes is disabled for in-memory processing')
def _get_indices_dataset_notexist(input_time_arrays):
"""
Build a list of [start,end] indices that match the input_time_arrays, starting at zero.
Parameters
----------
input_time_arrays
list of 1d xarray dataarrays or numpy arrays for the input time values
Returns
-------
list
list of [start,end] indexes for the indices of input_time_arrays
"""
running_total = 0
write_indices = []
for input_time in input_time_arrays:
write_indices.append([0 + running_total, len(input_time) + running_total])
running_total += len(input_time)
return write_indices
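# --- Illustrative sketch (not part of the original module) ---------------------------
# When no zarr store exists yet, the write indices are simply stacked ranges: two
# incoming arrays of 3 and 2 time values map to [[0, 3], [3, 5]]. Names below are only
# for demonstration.
def _demo_indices_dataset_notexist():
    times_a = np.array([0.0, 1.0, 2.0])
    times_b = np.array([3.0, 4.0])
    return _get_indices_dataset_notexist([times_a, times_b])   # [[0, 3], [3, 5]]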
def _get_indices_dataset_exists(input_time_arrays: list, zarr_time: zarr.Array):
"""
    I am so sorry for whoever finds this. I had this 'great' idea a while ago to concatenate all the multibeam
lines into daily datasets. Overall this has been a great thing, except for the sorting. We have to figure out
how to assemble daily datasets from lines applied in any order imaginable, with overlap and all kinds of things.
This function should provide the indices that allow this to happen.
    Recommend examining the test_backend tests if you want to understand this a bit more.
    Build the indices for where the input_time_arrays fit within the existing zarr_time. We have four ways to proceed
    within this function:
1. input time arrays are entirely within the existing zarr_time, we build a numpy array of indices that describe
where the input_time_arrays will overwrite the zarr_time
2. input time arrays are entirely outside the existing zarr_time, we just build a 2 element list describing the
start and end index to append the data to zarr_time
    3. input time arrays are before and might overlap existing data, we build a 2 element list starting with zero
describing the start and end index and return a push_forward value, letting us know how much the zarr data
needs to be pushed forward. If there is overlap, the last index is a numpy array of indices.
    4. input time arrays are after and might overlap existing data, we build a 2 element list starting with the
index of the start of overlap. If there is overlap, the last index is a numpy array of indices.
Parameters
----------
input_time_arrays
list of 1d xarray dataarrays or numpy arrays for the input time values
zarr_time
zarr array 1d for the existing time values saved to disk
Returns
-------
list
list of either [start,end] indexes or numpy arrays for the indices of input_time_arrays in zarr_time
list
list of [index of push, total amount to push] for each push
int
how many values need to be inserted to make room for this new data at the beginning of the zarr rootgroup
"""
running_total = 0
push_forward = []
total_push = 0
write_indices = []
# time arrays must be in order in case you have to do the 'partly in datastore' workaround
input_time_arrays.sort(key=lambda x: x[0])
min_zarr_time = zarr_time[0]
max_zarr_time = zarr_time[-1]
zarr_time_len = len(zarr_time)
for input_time in input_time_arrays: # for each chunk of data that we are wanting to write, look at the times to see where it fits
input_time_len = len(input_time)
input_is_in_zarr = np.isin(input_time, zarr_time) # where is there overlap with existing data
if isinstance(input_time, xr.DataArray):
input_time = input_time.values
if input_is_in_zarr.any(): # this input array is at least partly in this datastore already
if not input_is_in_zarr.all(): # this input array is only partly in this datastore
starter_indices = np.full_like(input_time, -1) # -1 for values that are outside the existing values
inside_indices = search_not_sorted(zarr_time, input_time[input_is_in_zarr]) # get the indices for overwriting where there is overlap
starter_indices[input_is_in_zarr] = inside_indices
count_outside = len(starter_indices) - len(inside_indices) # the number of values that do not overlap
if starter_indices[-1] == -1: # this input_time contains times after the existing values
max_inside_index = inside_indices[-1]
# now add in a range starting with the last index for all values outside the zarr time range
starter_indices[~input_is_in_zarr] = np.arange(max_inside_index + 1, max_inside_index + count_outside + 1)
if input_time[-1] < max_zarr_time: # data partially overlaps and is after existing data, but not at the end of the existing dataset
push_forward.append([max_inside_index + 1 + total_push, count_outside])
else:
running_total += count_outside
write_indices.append(starter_indices + total_push)
elif starter_indices[0] == -1: # this input_time contains times before the existing values
if input_time[0] < min_zarr_time:
starter_indices = np.arange(input_time_len)
push_forward.append([total_push, count_outside])
else:
min_inside_index = inside_indices[0]
                        starter_indices = np.arange(input_time_len)
import numpy as np
import pandas as pd
import plotly.graph_objects as go
import toolsClass
import multiprocessing
import time
from scipy.interpolate import interp1d
import scipy.integrate as integrate
#from tqdm.contrib.concurrent import process_map #for process bar. very slow...
tools = toolsClass.tools()
import logging
log = logging.getLogger(__name__)
class GYRO:
def __init__(self, rootDir, dataPath):
"""
rootDir is root location of python modules (where dashGUI.py lives)
dataPath is the location where we write all output to
"""
self.rootDir = rootDir
tools.rootDir = self.rootDir
self.dataPath = dataPath
tools.dataPath = self.dataPath
return
def allowed_class_vars(self):
"""
Writes a list of recognized class variables to HEAT object
Used for error checking input files and for initialization
Here is a list of variables with description:
testvar dummy for testing
"""
self.allowed_vars = [
'N_gyroSteps',
'gyroDeg',
'gyroT_eV',
'N_vSlice',
'N_vPhase',
'N_gyroPhase',
'ionMassAMU',
'vMode',
'ionFrac'
]
return
def setTypes(self):
"""
Set variable types for the stuff that isnt a string from the input file
"""
integers = [
'N_gyroSteps',
'gyroDeg',
'N_vSlice',
'N_vPhase',
'N_gyroPhase',
]
floats = [
'ionFrac',
'gyroT_eV',
'ionMassAMU',
]
for var in integers:
if (getattr(self, var) is not None) and (~np.isnan(float(getattr(self, var)))):
try:
setattr(self, var, int(getattr(self, var)))
except:
print("Error with input file var "+var+". Perhaps you have invalid input values?")
log.info("Error with input file var "+var+". Perhaps you have invalid input values?")
for var in floats:
if var is not None:
if (getattr(self, var) is not None) and (~np.isnan(float(getattr(self, var)))):
try:
setattr(self, var, float(getattr(self, var)))
except:
print("Error with input file var "+var+". Perhaps you have invalid input values?")
log.info("Error with input file var "+var+". Perhaps you have invalid input values?")
return
def setupConstants(self, ionMassAMU=2.014):
"""
Sets up constants
default mass is deuterium 2.014 MeV/c^2
"""
#unit conversions
self.kg2eV = 5.609e35 #1kg = 5.609e35 eV/c^2
self.eV2K = 1.160e4 #1ev=1.160e4 K
#constants
self.AMU = 931.494e6 #ev/c^2
self.kB = 8.617e-5 #ev/K
self.e = 1.602e-19 # C
self.c = 299792458 #m/s
self.diamag = -1 #diamagnetism = -1 for ions, 1 for electrons
self.mass_eV = ionMassAMU * self.AMU
self.Z=1 #assuming isotopes of hydrogen here
return
def temp2thermalVelocity(self, T_eV):
"""
Calculates thermal velocity from a temperature, where thermal velocity
is defined as the most probable speed
T_eV is temperature in eV
        can also be found by solving d/dv( v^2 * f(v) ) = 0, i.e. the maximum of the speed distribution
note that this is for v, not vPerp or v||
"""
return np.sqrt(2.0*T_eV/(self.mass_eV/self.c**2))
def setupFreqs(self, B):
"""
Calculates frequencies, periods, that are dependent upon B
These definitions follow Freidberg Section 7.7.
B is magnetic field magnitude
"""
self.omegaGyro = self.Z * self.e * B / (self.mass_eV / self.kg2eV)
if np.isscalar(self.omegaGyro):
self.omegaGyro = np.array([self.omegaGyro])
self.fGyro = np.abs(self.omegaGyro)/(2*np.pi)
self.TGyro = 1.0/self.fGyro
return
def setupRadius(self, vPerp):
"""
calculates gyro radius.
rGyro has a column for each MC run (N_MC columns), and a
row for each point on the PFC (N_pts), so it is a matrix
of shape: N_pts X N_MC
"""
N_pts = len(self.omegaGyro)
#get number of vPerps
if np.isscalar(vPerp):
vPerp = np.array([vPerp])
N_MC = 1
else:
N_MC = len(vPerp)
self.rGyro = np.zeros((N_pts,N_MC))
for i in range(N_MC):
self.rGyro[:,i] = vPerp[i] / np.abs(self.omegaGyro)
return
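    @staticmethod
    def _demo_gyro_numbers():
        # --- Illustrative sketch (not part of the original class) --------------------
        # Rough worked numbers for a 10 eV deuteron in a 2 T field: thermal speed
        # ~3.1e4 m/s, cyclotron frequency ~15 MHz, gyro radius ~0.3 mm. The constructor
        # arguments are placeholders, not real HEAT paths.
        gyro = GYRO(rootDir='.', dataPath='.')
        gyro.setupConstants()                        # deuterium by default
        vThermal = gyro.temp2thermalVelocity(10.0)   # ~3.1e4 m/s
        gyro.setupFreqs(np.array([2.0]))             # fGyro ~1.5e7 Hz
        gyro.setupRadius(vThermal)                   # rGyro ~3e-4 m
        return vThermal, gyro.fGyro, gyro.rGyro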
def setupVelocities(self, N):
"""
sets up velocities based upon vMode input from GUI
N is the number of source mesh elements (ie len(PFC.centers) )
len(self.t1) is number of points in divertor we are calculating HF on
"""
#get velocity space phase angles
self.uniformVelPhaseAngle()
if self.vMode == 'single':
print("Gyro orbit calculation from single plasma temperature")
log.info("Gyro orbit calculation from single plasma temperature")
self.T0 = np.ones((N))*self.gyroT_eV
#get average velocity for each temperature point
self.vThermal = self.temp2thermalVelocity(self.T0)
#set upper bound of v*f(v) (note that this cuts off high energy particles)
self.vMax = 5 * self.vThermal
#get 100 points to initialize functional form of f(v) (note this is a 2D matrix cause vMax is 2D)
self.vScan = np.linspace(0,self.vMax,10000).T
#get velocity slices for each T0
self.pullEqualProbabilityVelocities()
else:
#TO ADD THIS YOU WILL NEED TO PASS IN XYZ COORDINATES OF CTRS AND INTERPOLATE
print("3D plasma temperature interpolation from file not yet supported. Run gyro orbits in single mode")
log.info("3D plasma temperature interpolation from file not yet supported. Run gyro orbits in single mode")
return
def pullEqualProbabilityVelocities(self):
"""
        Creates vSlices: an array of velocities indexed to match the T0 array (or PFC.centers).
        Each vSlice is positioned at a place in the PDF so that it has an equal probability
        of occurring, i.e. the area under the PDF curve between each pair of vSlices is equal.
        In the loop, i is the mesh element index.
"""
self.vSlices = np.ones((len(self.T0),self.N_vSlice))*np.nan
self.energySlices = np.zeros((len(self.T0),self.N_vSlice))
self.energyIntegrals = np.zeros((len(self.T0),self.N_vSlice))
self.energyFracs = np.zeros((len(self.T0),self.N_vSlice))
self.vBounds = np.zeros((len(self.T0),self.N_vSlice+1))
for i in range(len(self.T0)):
#get speed range for this T0
v = self.vScan[i,:]
#generate the (here maxwellian) velocity vector PDF
#pdf = lambda x: (self.mass_eV/self.c**2) / (self.T0[i]) * np.exp(-(self.mass_eV/self.c**2 * x**2) / (2*self.T0[i]) )
pdf = lambda x: ( (self.mass_eV/self.c**2) / (2 * np.pi * self.T0[i]) )**(3.0/2.0) * np.exp(-(self.mass_eV/self.c**2 * x**2) / (2*self.T0[i]) )
#speed pdf (integrate over solid angle)
v_pdf = 4*np.pi * v**2 * pdf(v)
#generate the CDF
v_cdf = np.cumsum(v_pdf[1:])*np.diff(v)
v_cdf = np.insert(v_cdf, 0, 0)
#create bspline interpolators for the cdf and cdf inverse
inverseCDF = interp1d(v_cdf, v, kind='linear')
forwardCDF = interp1d(v, v_cdf, kind='linear')
#CDF location of vSlices and bin boundaries
cdfBounds = np.linspace(0,v_cdf[-1],self.N_vSlice+1)
#CDF location of velocity bin bounds omitting 0 and 1
#old method does not make vSlices truly bin centers
#cdfBounds = np.linspace(0,1,self.N_vSlice+1)[1:-1]
#old method 2 spaces centers uniformly
# #calculate N_vSlice velocities for each pdf each with equal area (probability)
# cdfMax = v_cdf[-1]
# cdfMin = v_cdf[0]
# sliceWidth = cdfMax / (self.N_vSlice+1)
# #CDF location of vSlices omitting 0 and 1
# cdfSlices = np.linspace(0,1,self.N_vSlice+2)[1:-1]
# #CDF location of velocity bin bounds omitting 0 and 1
# #old method does not make vSlices truly bin centers
# #cdfBounds = np.linspace(0,1,self.N_vSlice+1)[1:-1]
# #new method makes vSlices bin centers, except for the end bins
# cdfBounds = np.diff(cdfSlices)/2.0 + cdfSlices[:-1]
# #vSlices are Maxwellian distribution sample locations (@ bin centers)
# self.vSlices[i,:] = inverseCDF(cdfSlices)
# vBounds = inverseCDF(cdfBounds)
# vBounds = np.insert(vBounds,0,0)
# vBounds = np.append(vBounds,self.vMax[i])
#new method spaces bins uniformly, then makes vSlices center of these bins in CDF space
cdfSlices = np.diff(cdfBounds)/2.0 + cdfBounds[:-1]
#vSlices are Maxwellian distribution sample locations (@ bin centers)
self.vSlices[i,:] = inverseCDF(cdfSlices)
vBounds = inverseCDF(cdfBounds)
self.vBounds[i,:] = vBounds
#print(cdfBounds)
#print(cdfSlices)
#print(self.vBounds)
#print(self.vSlices)
#Now find energies that correspond to these vSlices
#we integrate: v**2 * f(v)
#energy pdf (missing 1/2*mass but that gets divided out later anyways )
#EofV = lambda x: x**2 * pdf(x)
#EofV = lambda x: 4*np.pi * x**4 * pdf(x)
f_E = lambda x: 2 * np.sqrt(x / np.pi) * (self.T0[i])**(-3.0/2.0) * np.exp(-x / self.T0[i])
#energy slices that correspond to velocity slices
self.energySlices[i,:] = f_E(0.5 * (self.mass_eV/self.c**2) * self.vSlices[i,:]**2)
#energy integrals
for j in range(self.N_vSlice):
Elo = 0.5 * (self.mass_eV/self.c**2) * vBounds[j]**2
Ehi = 0.5 * (self.mass_eV/self.c**2) * vBounds[j+1]**2
self.energyIntegrals[i,j] = integrate.quad(f_E, Elo, Ehi)[0]
energyTotal = self.energyIntegrals[i,:].sum()
#for testing
#if i==0:
# print("Integral Test===")
# print(energyTotal)
# print(integrate.quad(f_E, 0.0, self.vMax[i])[0])
#energy fractions
for j in range(self.N_vSlice):
self.energyFracs[i,j] = self.energyIntegrals[i,j] / energyTotal
print("Found N_vPhase velocities of equal probability")
log.info("Found N_vPhase velocities of equal probability")
return
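    @staticmethod
    def _demo_equal_probability_slices(T_eV=10.0, n_slices=4):
        # --- Illustrative sketch (not part of the original class) --------------------
        # The idea behind pullEqualProbabilityVelocities in a few lines: build the speed
        # CDF numerically, split it into n_slices bins of equal probability, and sample
        # the speeds at the bin centres via the inverse CDF. A pure-numpy inverse
        # (np.interp) stands in for interp1d here; names and defaults are assumptions.
        mass_eV_c2 = 2.014 * 931.494e6 / 299792458.0**2
        vMax = 5.0 * np.sqrt(2.0 * T_eV / mass_eV_c2)
        v = np.linspace(0.0, vMax, 10000)
        pdf = (mass_eV_c2 / (2 * np.pi * T_eV))**1.5 * np.exp(-mass_eV_c2 * v**2 / (2 * T_eV))
        v_pdf = 4 * np.pi * v**2 * pdf
        v_cdf = np.insert(np.cumsum(v_pdf[1:]) * np.diff(v), 0, 0.0)
        cdfBounds = np.linspace(0.0, v_cdf[-1], n_slices + 1)
        cdfCenters = np.diff(cdfBounds) / 2.0 + cdfBounds[:-1]
        return np.interp(cdfCenters, v_cdf, v)       # speeds at the equal-probability bin centres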
def uniformGyroPhaseAngle(self):
"""
        Uniformly samples gyro-phase angles between 0 and 2*pi (excluding 2*pi itself).
        Returns angles in radians.
"""
self.gyroPhases = np.linspace(0,2*np.pi,self.N_gyroPhase+1)[:-1]
return
def uniformVelPhaseAngle(self):
"""
Sampling of a uniform distribution between 0 and pi/2 (only forward velocities)
vPerp is x-axis of velocity space
vParallel is y-axis of velocity space
returns angles in radians
"""
self.vPhases = np.linspace(0.0,np.pi/2,self.N_vPhase+2)[1:-1]
return
def singleGyroTrace(self,vPerp,vParallel,gyroPhase,N_gyroSteps,
BtraceXYZ,controlfilePath,TGyro,rGyro,omegaGyro,
verbose=True):
"""
Calculates the gyro-Orbit path and saves to .csv and .vtk
vPerp and vParallel [m/s] are in velocities
gyroPhase [degrees] is initial orbit phase angle
N_gyroSteps is number of discrete line segments per gyro period
BtraceXYZ is the points of the Bfield trace that we will gyrate about
"""
print("Calculating gyro trace...")
#Loop thru B field trace while tracing gyro orbit
helixTrace = None
for i in range(len(BtraceXYZ)-1):
#points in this iteration
p0 = BtraceXYZ[i,:]
p1 = BtraceXYZ[i+1,:]
#vector
delP = p1 - p0
#magnitude or length of line segment
magP = np.sqrt(delP[0]**2 + delP[1]**2 + delP[2]**2)
#time it takes to transit line segment
delta_t = magP / (vParallel)
#Number of steps in line segment
Tsample = self.TGyro / N_gyroSteps
Nsteps = int(delta_t / Tsample)
#length (in time) along guiding center
t = np.linspace(0,delta_t,Nsteps+1)
#guiding center location
xGC = np.linspace(p0[0],p1[0],Nsteps+1)
yGC = np.linspace(p0[1],p1[1],Nsteps+1)
zGC = np.linspace(p0[2],p1[2],Nsteps+1)
# construct orthogonal system for coordinate transformation
w = delP
if np.all(w==[0,0,1]):
u = np.cross(w,[0,1,0]) #prevent failure if bhat = [0,0,1]
else:
u = np.cross(w,[0,0,1]) #this would fail if bhat = [0,0,1] (rare)
v = np.cross(w,u)
#normalize
u = u / np.sqrt(u.dot(u))
v = v / np.sqrt(v.dot(v))
w = w / np.sqrt(w.dot(w))
xfm = np.vstack([u,v,w]).T
#get helix path along (proxy) z axis reference frame
x_helix = self.rGyro*np.cos(self.omegaGyro*t + gyroPhase)
y_helix = self.diamag*self.rGyro*np.sin(self.omegaGyro*t + gyroPhase)
z_helix = np.zeros((len(t)))
#perform rotation to field line reference frame
helix = np.vstack([x_helix,y_helix,z_helix]).T
helix_rot = np.zeros((len(helix),3))
for j,coord in enumerate(helix):
helix_rot[j,:] = helix[j,0]*u + helix[j,1]*v + helix[j,2]*w
#perform translation to field line reference frame
helix_rot[:,0] += xGC
helix_rot[:,1] += yGC
helix_rot[:,2] += zGC
#update gyroPhase variable so next iteration starts here
gyroPhase = self.omegaGyro*t[-1] + gyroPhase
#append to helix trace
if helixTrace is None:
helixTrace = helix_rot
else:
helixTrace = np.vstack([helixTrace,helix_rot])
helixTrace*=1000.0 #scale for ParaView
print("Saving data to CSV and VTK formats")
#save data to csv format
head = 'X[mm],Y[mm],Z[mm]'
np.savetxt(controlfilePath+'helix.csv', helixTrace, delimiter=',', header=head)
#save data to vtk format
tools.createVTKOutput(controlfilePath+'helix.csv', 'trace', 'Gyro_trace')
if verbose==True:
print("V_perp = {:f} [m/s]".format(vPerp))
print("V_parallel = {:f} [m/s]".format(vParallel))
print("Cyclotron Freq = {:f} [rad/s]".format(self.omegaGyro[0]))
print("Cyclotron Freq = {:f} [Hz]".format(self.fGyro[0]))
print("Gyro Radius = {:f} [m]".format(self.rGyro[0][0]))
print("Number of gyro points = {:f}".format(len(helixTrace)))
print("Longitudinal dist between gyro points = {:f} [m]".format(magP/float(Nsteps)))
print("Each line segment length ~ {:f} [m]".format(magP))
return
def gyroTraceParallel(self, i, mode='MT'):
"""
parallelized gyro trace. called by multiprocessing.pool.map()
i is index of parallel run from multiprocessing, corresponds to a mesh face
we are tracing in the ROI
writes helical trace to self.helixTrace[i] in 2D matrix format:
columns = X,Y,Z
rows = steps up helical trace
also updates self.lastPhase for use in next iteration step
mode options are:
-Signed Volume Loop: 'SigVolLoop'
-Signed Volume Matrix: 'SigVolMat'
-Moller-Trumbore Algorithm: 'MT'
"""
#vector
delP = self.p1[i] - self.p0[i]
#magnitude
magP = np.sqrt(delP[0]**2 + delP[1]**2 + delP[2]**2)
#time it takes to transit line segment
delta_t = magP / (self.vParallelMC[self.GYRO_HLXmap][i])
#Number of steps in line segment
Tsample = self.TGyro[self.GYRO_HLXmap][i] / self.N_gyroSteps
Nsteps = int(delta_t / Tsample)
#length (in time) along guiding center
t = np.linspace(0,delta_t,Nsteps+1)
#guiding center location
xGC = np.linspace(self.p0[i,0],self.p1[i,0],Nsteps+1)
        yGC = np.linspace(self.p0[i,1],self.p1[i,1],Nsteps+1)
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
import warnings
from numpy import asarray, arange, empty
from nipy.core.image.image import rollaxis as image_rollaxis
from nipy.core.api import ImageList, Image, \
CoordinateMap, AffineTransform, CoordinateSystem
class FmriImageList(ImageList):
"""
Class to implement image list interface for FMRI time series
Allows metadata such as volume and slice times
"""
def __init__(self, images=None, volume_start_times=None, slice_times=None):
"""
A lightweight implementation of an fMRI image as in ImageList
Parameters
----------
images: a sliceable object whose items are meant to be images,
this is checked by asserting that each has a `coordmap` attribute
volume_start_times: start time of each frame. It can be specified
either as an ndarray with len(images) elements
or as a single float, the TR. Defaults
to arange(len(images)).astype(np.float)
slice_times: ndarray specifying offset for each slice of each frame
See Also
--------
nipy.core.image_list.ImageList
Examples
--------
>>> from numpy import asarray
>>> from nipy.testing import funcfile
>>> from nipy.io.api import load_image
>>> # fmrilist and ilist represent the same data
>>> funcim = load_image(funcfile)
>>> fmrilist = FmriImageList.from_image(funcim)
>>> ilist = FmriImageList(funcim)
>>> print asarray(ilist).shape
(20, 2, 20, 20)
>>> print asarray(ilist[4]).shape
(2, 20, 20)
"""
ImageList.__init__(self, images=images)
if volume_start_times is None:
volume_start_times = 1.
        v = asarray(volume_start_times)
"""#########################################################################
Author: <NAME>
Institute: Stony Brook University
Descriptions: generate midi music by the models.
----2017.12.27
#########################################################################"""
from Projects.AudioEffects.fetchData import fetchData
from dl4s import gaussCGRNN, gaussSRNN, gaussVRNN, ssRNNRBM
from dl4s import configCGRNN, configSRNN, configVRNN, configssRNNRBM
import os
import numpy as np
import matplotlib.pyplot as plt
import librosa
# CGRNN
configCGRNN.mode = 'full'
configCGRNN.Opt = 'SGD'
#configCGRNN.recType = 'GRU'
configCGRNN.aisLevel = 100
configCGRNN.aisRun = 100
configCGRNN.dimRec = [500]
configCGRNN.dimMlp = [400, 400]
configCGRNN.dimInput = 150
configCGRNN.dimState = 250
configCGRNN.init_scale = 0.01
configCGRNN.Gibbs = 1
configCGRNN.W_Norm = False
configCGRNN.muTrain = True
configCGRNN.alphaTrain = True
configCGRNN.phiTrain = False
configCGRNN.eventPath = './audioCGRNN-f-new2/'
configCGRNN.savePath = './audioCGRNN-f-new2/'
configCGRNN.loadPath = os.path.join(configCGRNN.savePath, 'CGRNN-f')
CGRNN = gaussCGRNN(configCGRNN)
# SRNN
configSRNN.Opt = 'SGD'
configSRNN.unitType = 'GRU'
configSRNN.mode = 'smooth'
configSRNN.dimRecD = [500]
configSRNN.dimRecA = [500]
configSRNN.dimEnc = [400]
configSRNN.dimDec = [400]
configSRNN.dimMLPx = [400]
configSRNN.dimInput = 150
configSRNN.dimState = 500
configSRNN.init_scale = 0.01
configSRNN.eventPath = './audioSRNN-s/'
configSRNN.savePath = './audioSRNN-s/'
#configSRNN.loadPath = os.path.join(configSRNN.savePath, 'SRNN-s')
SRNN = gaussSRNN(configSRNN)
# VRNN
configVRNN.Opt = 'SGD'
configVRNN.unitType = 'GRU'
configVRNN.dimRec = [500]
configVRNN.dimForX = [400]
configVRNN.dimForZ = [400]
dimForEnc = [400]
dimForDec = [400]
configVRNN.dimInput = 150
configVRNN.dimState = 500
configVRNN.init_scale = 0.01
configVRNN.eventPath = './audioVRNN/'
configVRNN.savePath = './audioVRNN/'
#configVRNN.loadPath = os.path.join(configVRNN.savePath, 'VRNN-I')
VRNN = gaussVRNN(configVRNN)
# ssRNN-RBM
configssRNNRBM.Opt = 'SGD'
configssRNNRBM.unitType = 'GRU'
configssRNNRBM.aisLevel = 1
configssRNNRBM.aisRun = 1
configssRNNRBM.dimRec = [500]
configssRNNRBM.dimMlp = [400, 400]
configssRNNRBM.dimInput = 150
configssRNNRBM.dimState = 250
configssRNNRBM.init_scale = 0.01
configssRNNRBM.Gibbs = 1
configssRNNRBM.W_Norm = False
configssRNNRBM.muTrain = True
configssRNNRBM.alphaTrain = True
configssRNNRBM.eventPath = './audiossRNNRBM/'
configssRNNRBM.savePath = './audiossRNNRBM/'
#configssRNNRBM.loadPath = os.path.join(configssRNNRBM.savePath, 'ssRNNRBM')
ssRnnRbm = ssRNNRBM(configssRNNRBM)
#
CGRNN_FOLDER = "Samples/CGRNN/"
SRNN_FOLDER = "Samples/SRNN/"
VRNN_FOLDER = "Samples/VRNN/"
ssRnnRbm_FOLDER = "Samples/ssRnnRbm/"
Ground_FOLDER = "Samples/"
# Check whether the target path exists.
if not os.path.exists(Ground_FOLDER):
os.makedirs(Ground_FOLDER)
if not os.path.exists(CGRNN_FOLDER):
os.makedirs(CGRNN_FOLDER)
if not os.path.exists(SRNN_FOLDER):
os.makedirs(SRNN_FOLDER)
if not os.path.exists(VRNN_FOLDER):
os.makedirs(VRNN_FOLDER)
if not os.path.exists(ssRnnRbm_FOLDER):
os.makedirs(ssRnnRbm_FOLDER)
#
# dataset.
Dataset = fetchData()
testSet = Dataset['test']
for i in range(10):
print('The ' + str(i) + '-th graph.')
CGRNN_sample = CGRNN.gen_function(x=testSet[i: i + 2])
SRNN_sample = SRNN.output_function(input=testSet[i: i + 2])
VRNN_sample = VRNN.output_function(input=testSet[i: i + 2])
ssRnnRbm_sample = ssRnnRbm.gen_function(x=testSet[i: i + 2])
# reshape them.
length = | np.shape(CGRNN_sample[0]) | numpy.shape |
from dataclasses import dataclass
from typing import List, Union, Optional, Tuple
import numpy
from .solver import Solver
from .solver_interface.solver_interface_utils import SolverOutput
from .utils.chebyshev_ball import chebyshev_ball
from .utils.constraint_utilities import constraint_norm, is_full_rank, \
detect_implicit_equalities, find_redundant_constraints
from .utils.general_utils import make_column, latex_matrix, select_not_in_list, ppopt_block, remove_size_zero_matrices
def calc_weakly_redundant(A, b, equality_set: List[int] = None, deterministic_solver='gurobi'):
if equality_set is None:
equality_set = []
kept_indices = []
for i in range(len(equality_set), A.shape[0]):
sol = chebyshev_ball(A, b, [*equality_set, i], deterministic_solver=deterministic_solver)
if sol is not None:
if sol.sol[-1] >= 1e-12:
kept_indices.append(i)
return [*equality_set, *kept_indices]
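# Editor's note (illustrative example, not from the package): for the unit box
# -1 <= x <= 1, -1 <= y <= 1 with an extra row x + y <= 2, the extra constraint
# touches the feasible set only at the corner (1, 1); holding it as an equality
# leaves no room for a Chebyshev ball (radius below 1e-12), so this routine
# would not keep its index.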
# noinspection GrazieInspection
@dataclass
class MPLP_Program:
r"""
The standard class for linear multiparametric programming
.. math::
\min \theta^TH^Tx + c^Tx
.. math::
\begin{align}
Ax &\leq b + F\theta\\
A_{eq}x &= b_{eq}\\
A_\theta \theta &\leq b_\theta\\
x &\in R^n\\
\end{align}
"""
# uses dataclass to create the __init__ with post-processing in the __post_init__
# member variables of the MPLP_Program class
A: numpy.ndarray
b: numpy.ndarray
c: numpy.ndarray
H: numpy.ndarray
A_t: numpy.ndarray
b_t: numpy.ndarray
F: numpy.ndarray
c_c: numpy.ndarray
c_t: numpy.ndarray
Q_t: numpy.ndarray
equality_indices: Union[List[int], numpy.ndarray]
solver: Solver = Solver()
def __init__(self, A, b, c, H, A_t, b_t, F, c_c=None, c_t=None, Q_t=None, equality_indices=None, solver=None):
self.A = A
self.b = b
self.c = c
self.H = H
self.A_t = A_t
self.b_t = b_t
self.F = F
if c_c is None:
c_c = numpy.array([[0.0]])
self.c_c = c_c
if c_t is None:
c_t = numpy.zeros((self.num_t(), 1))
self.c_t = c_t
if Q_t is None:
Q_t = numpy.zeros((self.num_t(), self.num_t()))
self.Q_t = Q_t
if equality_indices is None:
equality_indices = []
self.equality_indices = equality_indices
if solver is None:
solver = Solver()
self.solver = solver
def __post_init__(self):
"""Called after __init__ this is used as a post-processing step after the dataclass generated __init__."""
if self.equality_indices is None:
self.equality_indices = []
if len(self.equality_indices) != 0:
# move all equality constraints to the top
self.A = numpy.block(
[[self.A[self.equality_indices]], [numpy.delete(self.A, self.equality_indices, axis=0)]])
self.b = numpy.block(
[[self.b[self.equality_indices]], [numpy.delete(self.b, self.equality_indices, axis=0)]])
self.F = numpy.block(
[[self.F[self.equality_indices]], [numpy.delete(self.F, self.equality_indices, axis=0)]])
# reassign the equality constraint indices to the top indices after move
self.equality_indices = [i for i in range(len(self.equality_indices))]
        # ensures the problem data are consistent, then preprocesses the constraints
self.warnings()
self.process_constraints(find_implicit_equalities=True)
    def num_x(self) -> int:
        """Returns the number of decision variables x."""
return self.A.shape[1]
    def num_t(self) -> int:
        """Returns the number of uncertain parameters (theta)."""
return self.F.shape[1]
def num_constraints(self) -> int:
"""Returns number of constraints."""
return self.A.shape[0]
def num_inequality_constraints(self) -> int:
return self.A.shape[0] - len(self.equality_indices)
def num_equality_constraints(self) -> int:
return len(self.equality_indices)
def evaluate_objective(self, x: numpy.ndarray, theta_point: numpy.ndarray):
return theta_point.T @ self.H.T @ x + self.c.T @ x + self.c_c + self.c_t.T @ theta_point + 0.5 * theta_point.T @ self.Q_t @ theta_point
def warnings(self) -> List[str]:
"""Checks the dimensions of the matrices to ensure consistency."""
warning_list = list()
# check if b is a column vector
if len(self.b.shape) != 2:
warning_list.append(f'The b matrix is not a column vector b{self.b.shape}')
self.b = make_column(self.b)
warning_list.append('This has been corrected')
# check if c is a column matrix
if len(self.c.shape) != 2:
warning_list.append(f'The c vector is not a column vector c{self.c.shape}')
self.c = make_column(self.c)
warning_list.append('This has been corrected')
# check if c and A have consistent dimensions
if self.A.shape[1] != self.c.shape[0]:
warning_list.append(
                f'The A and c matrices disagree in the number of variables A{self.A.shape}, c{self.c.shape}')
        # check if A and b agree with each other
if self.A.shape[0] != self.b.shape[0]:
warning_list.append(f'The A and b matrices disagree in vertical dimension A{self.A.shape}, b{self.b.shape}')
        # check if A_t and b_t agree with each other
        if self.A_t.shape[0] != self.b_t.shape[0]:
            warning_list.append(
                f'The A_t and b_t matrices disagree in vertical dimension A_t{self.A_t.shape}, b_t{self.b_t.shape}')
# check dimensions of A and F matrix
if self.A.shape[0] != self.F.shape[0]:
warning_list.append(
f"The A and F matrices disagree in vertical dimension A{self.A.shape}, F {self.F.shape}")
# return warnings
return warning_list
# Checks warnings again and prints warnings
    def display_warnings(self) -> None:
        """Displays the warnings."""
print(self.warnings())
    def display_latex(self) -> None:
        """Displays the LaTeX text of the multiparametric problem."""
output = self.latex()
for i in output:
print(i)
def latex(self) -> List[str]:
"""
Generates latex of the multiparametric problem
        :return: a list of LaTeX strings describing the problem
"""
output = list()
# create string variables for x and theta
x = ['x_{' + f'{i}' + '}' for i in range(self.num_x())]
theta = ['\\theta_{' + f'{i}' + '}' for i in range(self.num_t())]
# create the latex matrices that represent x and theta
# using the latex_matrix function from utils.general_utils
x_latex = latex_matrix(x)
theta_latex = latex_matrix(theta)
# builds the objective latex
added_term = ''
if not numpy.allclose(self.H, numpy.zeros_like(self.H)):
added_term = " + " + theta_latex + '^{T}' + latex_matrix(self.H) + x_latex
obj = "$$" + "\\min_{x}" + latex_matrix(self.c) + "^T" + x_latex + added_term + "$$"
output.append(obj)
# adds the inequality constraint latex if applicable
if self.num_constraints() - len(self.equality_indices) > 0:
A_ineq = latex_matrix(select_not_in_list(self.A, self.equality_indices))
b_ineq = latex_matrix(select_not_in_list(self.b, self.equality_indices))
F_ineq = latex_matrix(select_not_in_list(self.F, self.equality_indices))
output.append("$$" + ''.join([A_ineq, x_latex, '\\leq', b_ineq, '+', F_ineq, theta_latex]) + "$$")
# adds the equality constraint latex if applicable
if len(self.equality_indices) > 0:
A_eq = latex_matrix(self.A[self.equality_indices])
b_eq = latex_matrix(self.b[self.equality_indices])
F_eq = latex_matrix(self.F[self.equality_indices])
output.append("$$" + ''.join([A_eq, x_latex, '=', b_eq, '+', F_eq, theta_latex]) + "$$")
# adds the theta constraint latex
output.append("$$" + latex_matrix(self.A_t) + theta_latex + '\\leq' + latex_matrix(self.b_t) + "$$")
return output
def scale_constraints(self) -> None:
"""Rescales the constraints of the multiparametric problem to ||[A|-F]||_i = 1, in the L2 sense."""
# scale the [A| b, F] constraint by the H = [A|-F] rows
H = numpy.block([self.A, -self.F])
norm = constraint_norm(H)
self.A = self.A / norm
self.b = self.b / norm
self.F = self.F / norm
        # scale the A_t constraint by the norm of its rows
norm = constraint_norm(self.A_t)
self.A_t = self.A_t / norm
self.b_t = self.b_t / norm
def process_constraints(self, find_implicit_equalities=True) -> None:
"""Removes redundant constraints from the multiparametric programming problem."""
self.constraint_datatype_conversion()
self.scale_constraints()
if find_implicit_equalities:
problem_A = ppopt_block([[self.A, -self.F]])
problem_b = ppopt_block([[self.b]])
constraint_pairs = detect_implicit_equalities(problem_A, problem_b)
keep = [i[0] for i in constraint_pairs]
remove = [i[1] for i in constraint_pairs]
keep = list(set(keep))
keep.sort()
remove = list(set(remove))
remove.sort()
# make sure to only remove the unneeded inequalities -> only for duplicate constraints
remove = [i for i in remove if i not in keep]
# our temporary new active set for the problem
temp_active_set = [*self.equality_indices, *keep]
# what we are keeping
survive = lambda x: x not in temp_active_set and x not in remove
kept_ineqs = [i for i in range(self.num_constraints()) if survive(i)]
# data marshaling
A_eq = self.A[temp_active_set]
b_eq = self.b[temp_active_set]
F_eq = self.F[temp_active_set]
A_ineq = self.A[kept_ineqs]
b_ineq = self.b[kept_ineqs]
F_ineq = self.F[kept_ineqs]
self.A = ppopt_block([[A_eq], [A_ineq]])
self.b = ppopt_block([[b_eq], [b_ineq]])
self.F = ppopt_block([[F_eq], [F_ineq]])
# update problem active set
self.equality_indices = [i for i in range(len(temp_active_set))]
# recalculate bc we have moved everything around
problem_A = ppopt_block([[self.A, -self.F], [numpy.zeros((self.A_t.shape[0], self.A.shape[1])), self.A_t]])
problem_b = ppopt_block([[self.b], [self.b_t]])
saved_indices = find_redundant_constraints(problem_A, problem_b, self.equality_indices,
solver=self.solver.solvers['lp'])
# saved_indices = calculate_redundant_constraints(problem_A, problem_b)
saved_upper = [i for i in saved_indices if i < self.A.shape[0]]
# saved_lower = [i - self.A.shape[0] for i in saved_indices if i >= self.A.shape[0]]
self.A = self.A[saved_upper]
self.F = self.F[saved_upper]
self.b = self.b[saved_upper]
# recalculate bc we have moved everything around
problem_A = ppopt_block([[self.A, -self.F], [numpy.zeros((self.A_t.shape[0], self.A.shape[1])), self.A_t]])
problem_b = ppopt_block([[self.b], [self.b_t]])
# saved_indices = calc_weakly_redundant(problem_A, problem_b, self.equality_indices)
# saved_indices = calculate_redundant_constraints(problem_A, problem_b)
saved_upper = [i for i in saved_indices if i < self.A.shape[0]]
# saved_lower = [i - self.A.shape[0] for i in saved_indices if i >= self.A.shape[0]]
self.A = self.A[saved_upper]
self.F = self.F[saved_upper]
self.b = self.b[saved_upper]
# print(f'Removed {self.A.shape[0] - len(saved_upper)} Weakly Redundant Constraints')
self.scale_constraints()
def constraint_datatype_conversion(self) -> None:
"""
Makes sure that all the data types of the problem are in fp64, this is important as some solvers do not accept integral data types
"""
self.A = self.A.astype('float64')
self.c = self.c.astype('float64')
self.b = self.b.astype('float64')
self.F = self.F.astype('float64')
self.A_t = self.A_t.astype('float64')
self.b_t = self.b_t.astype('float64')
self.H = self.H.astype('float64')
self.c_c = self.c_c.astype('float64')
self.c_t = self.c_t.astype('float64')
self.Q_t = self.Q_t.astype('float64')
def solve_theta(self, theta_point: numpy.ndarray, deterministic_solver='gurobi') -> Optional[SolverOutput]:
r"""
Substitutes theta into the multiparametric problem and solves the following optimization problem
.. math::
\min_{x} \tilde{c}^Tx
.. math::
\begin{align}
Ax &\leq \tilde{b}\\
A_{eq}x &= \tilde{b}_{eq}\\
x &\in R^n\\
\end{align}
:param theta_point: An uncertainty realization
        :param deterministic_solver: Deterministic solver to use to solve the above linear program
:return: The Solver output of the substituted problem, returns None if not solvable
"""
if not numpy.all(self.A_t @ theta_point <= self.b_t):
return None
sol_obj = self.solver.solve_lp(c=self.H @ theta_point + self.c, A=self.A, b=self.b + self.F @ theta_point,
equality_constraints=self.equality_indices)
if sol_obj is not None:
sol_obj.obj += self.c_c + self.c_t.T @ theta_point + 0.5 * theta_point.T @ self.Q_t @ theta_point
return sol_obj
return None
def solve_theta_variable(self) -> Optional[SolverOutput]:
"""
Leaves Theta as an optimization variable, solves the following problem
define y' = [x^T theta^T]^T
min [c^T 0]^Ty'
s.t. [A -F]y' <= b
:return: the Solver output of the substituted problem, returns None if not solvable
"""
A_prime = numpy.block([self.A, -self.F])
c_prime = numpy.block([[self.c], [numpy.zeros((self.num_t(), 1))]])
return self.solver.solve_lp(c=c_prime, A=A_prime, b=self.b, equality_constraints=self.equality_indices)
def optimal_control_law(self, active_set: List[int]) -> Tuple:
r"""
This function calculates the optimal control law corresponding to an active set combination
:param active_set: an active set combination
        :return: a tuple of the optimal x* and λ* functions in the following form (A_x, b_x, A_l, b_l)
.. math::
\begin{align*}
x^*(\theta) &= A_x\theta + b_x\\
\lambda^*(\theta) &= A_l\theta + b_l\\
\end{align*}
"""
aux = numpy.linalg.pinv(self.A[active_set])
parameter_A = aux @ self.F[active_set]
parameter_b = aux @ self.b[active_set]
lagrange_A = -aux.T @ self.H
lagrange_b = -aux.T @ self.c
return parameter_A, parameter_b, lagrange_A, lagrange_b
# noinspection SpellCheckingInspection
def check_active_set_rank(self, active_set):
r"""
Checks the rank of the matrix is equal to the cardinality of the active set
.. math::
\textrm{Rank}(A_{\mathcal{A}}) = |\mathcal{A}|
:param active_set:
:return: True if full rank otherwise false
"""
return is_full_rank(self.A, active_set)
def check_feasibility(self, active_set, check_rank=True) -> bool:
r"""
Checks the feasibility of an active set combination w.r.t. a multiparametric program.
.. math::
\min_{x,\theta} 0
.. math::
\begin{align}
Ax &\leq b + F\theta\\
A_{i}x &= b_{i} + F_{i}\theta, \quad \forall i \in \mathcal{A}\\
A_\theta \theta &\leq b_\theta\\
x &\in R^n\\
\theta &\in R^m
\end{align}
:param active_set: an active set
        :param check_rank: Checks the rank of the LHS matrix for a violation of LICQ if True (default)
:return: True if active set feasible else False
"""
if check_rank:
if not is_full_rank(self.A, active_set):
return False
A = ppopt_block([[self.A, -self.F], [numpy.zeros((self.A_t.shape[0], self.num_x())), self.A_t]])
b = ppopt_block([[self.b], [self.b_t]])
c = numpy.zeros((self.num_x() + self.num_t(), 1))
return self.solver.solve_lp(c, A, b, active_set) is not None
def check_optimality(self, active_set):
r"""
Tests if the active set is optimal for the provided mpLP program
.. math::
\max_{x, \theta, \lambda, s, t} \quad t
.. math::
\begin{align*}
H \theta + (A_{A_i})^T \lambda_{A_i} + c &= 0\\
            A_{A_i}x - b_{A_i} - F_{A_i}\theta &= 0\\
            A_{A_j}x - b_{A_j} - F_{A_j}\theta + s_{J_j} &= 0\\
t*e_1 &\leq \lambda_{A_i}\\
t*e_2 &\leq s_{J_i}\\
t &\geq 0\\
\lambda_{A_i} &\geq 0\\
s_{J_i} &\geq 0\\
A_t\theta &\leq b_t
\end{align*}
:param active_set: active set being considered in the optimality test
:return: dictionary of parameters, or None if active set is not optimal
"""
if len(active_set) != self.num_x():
return False
zeros = lambda x, y: numpy.zeros((x, y))
num_x = self.num_x()
num_constraints = self.num_constraints()
num_active = len(active_set)
num_theta_c = self.A_t.shape[0]
num_activated = len(active_set) - len(self.equality_indices)
inactive = [i for i in range(num_constraints) if i not in active_set]
num_inactive = num_constraints - num_active
num_theta = self.num_t()
# this will be used to build the optimality expression
A_list = list()
b_list = list()
# 1) Qu + H theta + (A_Ai)^T lambda_Ai + c = 0
# if num_active > 0:
# A_list.append([program.Q, zeros(num_x, num_theta), program.A[equality_indices].T, zeros(num_x, num_inactive), zeros(num_x, 1)])
A_list.append([zeros(num_x, num_x), self.H, self.A[active_set].T, zeros(num_x, num_inactive), zeros(num_x, 1)])
b_list.append([-self.c])
# 2) A_Ai*u - b_ai-F_ai*theta = 0
A_list.append([self.A[active_set], -self.F[active_set], zeros(num_active, num_constraints + 1)])
b_list.append([self.b[active_set]])
# 3) A_Aj*u - b_aj-F_aj*theta + sj_k= 0
A_list.append(
[self.A[inactive], -self.F[inactive], zeros(num_inactive, num_active), numpy.eye(num_inactive),
zeros(num_inactive, 1)])
b_list.append([self.b[inactive]])
# 4) t*e_1 <= lambda_Ai
# edited on 2/19/2021 to remove the positivity constraint on the equality constraints
if num_activated >= 0:
A_list.append(
[zeros(num_activated, num_x + num_theta + num_active - num_activated), -numpy.eye(num_activated),
zeros(num_activated, num_inactive), numpy.ones((num_activated, 1))])
# A_list.append([zeros(num_active, num_x + num_theta), -numpy.eye(num_active), zeros(num_active, num_inactive),numpy.ones((num_active, 1))])
b_list.append([zeros(num_activated, 1)])
# b_list.append([zeros(num_active, 1)])
# 5) t*e_2 <= s_Ji
A_list.append([zeros(num_inactive, num_x + num_theta + num_active), - | numpy.eye(num_inactive) | numpy.eye |
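# Editor's note: a minimal numpy sketch (not part of the package above) of the algebra
# behind MPLP_Program.optimal_control_law.  With active constraints A_A x = b_A + F_A theta
# and A_A square and invertible, pinv(A_A) recovers the affine law x*(theta) = A_x theta + b_x.
# The matrices below are made up for illustration.
import numpy
A_A = numpy.array([[1.0, 0.0], [0.0, 1.0]])   # active constraint rows
F_A = numpy.array([[1.0], [0.0]])
b_A = numpy.array([[2.0], [3.0]])
aux = numpy.linalg.pinv(A_A)
A_x = aux @ F_A            # [[1.], [0.]] -> only x1 depends on theta
b_x = aux @ b_A            # [[2.], [3.]]
# so x*(theta) = (theta + 2, 3): the second variable is pinned by its active constraint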
from hokuyolx import HokuyoLX
import numpy as np
track_width = 2.0 # wheel center to center distance of car
forward_constant = 1.0 # multiplier for speed of car, adjust for proper braking distance
car_length = 6.0 # length of car from front to back wheels, center to center
graph = True
if graph:
import matplotlib.pyplot as plt
from matplotlib.patches import Rectangle
from matplotlib.patches import Arc
def in_path(points, speed, angle):
"""
Given an array of x, y points, the speed of the cart, and the front steering angle, returns whether or not the points are in the cart's predicted path.
:param points: np.ndarray of shape (n, 2) and dtype float64 - Array of x, y data points returned from the LIDAR scanner.
:param speed: float - Speed of the golf cart
:param angle: float - Steering angle of the golf cart, in degrees. 0 is straight, positive is left and negative is right
:return: Boolean - whether there are any data points in the cart's predicted path
"""
# workaround for angle of 0
    if abs(angle) < 1e-4:
        angle = 1e-4
r_center = car_length * np.tan(np.radians(90 - angle)) # left turn = positive angle
# transform points to match new origin at turn center
points[:, 0] += r_center
points[:, 1] += car_length
r_cf = np.hypot(r_center, car_length) # front center radius
r_left = np.hypot(r_center - track_width / 2, car_length) # left wheel turn radius
r_right = np.hypot(r_center + track_width / 2, car_length) # right wheel turn radius
y_max = car_length + forward_constant * speed
# check if y_max is past the turning circle
y_large = np.minimum( | np.fabs(r_left) | numpy.fabs |
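# Editor's sketch (illustrative values only, not from the original script): the turn-centre
# geometry computed inside in_path for a 15-degree left steering angle, using the module
# constants above (car_length = 6.0, track_width = 2.0).
import numpy as np
angle = 15.0
r_center = 6.0 * np.tan(np.radians(90 - angle))   # ~22.39
r_cf = np.hypot(r_center, 6.0)                    # ~23.18  (front-centre radius)
r_left = np.hypot(r_center - 2.0 / 2, 6.0)        # ~22.22  (left wheel radius)
r_right = np.hypot(r_center + 2.0 / 2, 6.0)       # ~24.15  (right wheel radius)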
from sklearn.ensemble import RandomForestClassifier
from sklearn.utils import shuffle
from sklearn.preprocessing import LabelBinarizer
from sklearn.model_selection import cross_validate
from sklearn.metrics import (make_scorer,
accuracy_score,
recall_score,
precision_score)
import numpy as np
from collections import Counter
import pickle
import sqlite3
import logging
import argparse
import re
logger = logging.getLogger(__name__)
def load_data(db, reactions, topologies):
np.random.seed(2)
query = '''
SELECT fingerprint, topology, collapsed
FROM cages
WHERE
reaction IN ({}) AND
collapsed IS NOT NULL AND
topology IN ({})
'''.format(', '.join('?'*len(reactions)),
', '.join('?'*len(topologies)))
results = ((eval(fp), top, label) for fp, top, label in
db.execute(query, reactions+topologies))
fps, tops, labels = zip(*results)
tops = LabelBinarizer().fit_transform(tops)
fps = np.concatenate((fps, tops), axis=1)
fps, labels = shuffle(fps, labels)
return np.array(fps), np.array(labels)
def train(db, cv, reactions, topologies, table, save):
if table:
logger.setLevel(logging.ERROR)
logger.debug(f'Reactions: {reactions}.')
logger.debug(f'Topologies: {topologies}.')
fingerprints, labels = load_data(db=db,
reactions=reactions,
topologies=topologies)
logger.debug(f'Fingerprint shape is {fingerprints.shape}.')
logger.debug(f'Collected labels:\n{Counter(labels)}')
clf = RandomForestClassifier(
n_estimators=100,
n_jobs=-1,
class_weight='balanced')
| np.random.seed(4) | numpy.random.seed |
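# Editor's sketch (toy data; the topology names are placeholders): how load_data above
# binarizes the topology column and appends it to the fingerprint bits before training.
import numpy as np
from sklearn.preprocessing import LabelBinarizer
fps = np.array([[1, 0, 1], [0, 1, 1], [1, 1, 0]])
tops = LabelBinarizer().fit_transform(['top_A', 'top_B', 'top_C'])  # 3x3 one-hot block
features = np.concatenate((fps, tops), axis=1)                      # shape (3, 6)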
'''
Defines the UnitaryDefinitions class.
'''
import numpy as np
import scipy.linalg
from .unitary import Unitary
from .unitary_sequence_entry import UnitarySequenceEntry
class UnitaryDefinitions:
'''
Provides methods to create several commonly-used unitaries.
'''
@staticmethod
def rx(theta: float) -> Unitary:
'''
Rotation around the x-axis.
:param theta: Angle of rotation.
:type theta: float
:return: The unitary object.
:rtype: Unitary
'''
dimension = 2
parameter_dict = {"θ": (theta, True)}
operation_name = "Rx"
return Unitary(dimension, np.array(
[[np.cos(theta / 2), -1j * np.sin(theta / 2)],
[-1j * np.sin(theta / 2), np.cos(theta / 2)]]
), operation_name, parameter_dict)
@staticmethod
def ry(theta: float) -> Unitary:
'''
Rotation around the y-axis.
:param theta: Angle of rotation.
:type theta: float
:return: The unitary object.
:rtype: Unitary
'''
dimension = 2
parameter_dict = {"θ": (theta, True)}
operation_name = "Ry"
return Unitary(dimension, np.array(
[[np.cos(theta / 2), -np.sin(theta / 2)],
[np.sin(theta / 2), | np.cos(theta / 2) | numpy.cos |
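# Editor's sketch (standalone check, not part of the class above): the Rx(theta) matrix
# defined in UnitaryDefinitions.rx is unitary for any angle.
import numpy as np
theta = 0.7
rx = np.array([[np.cos(theta / 2), -1j * np.sin(theta / 2)],
               [-1j * np.sin(theta / 2), np.cos(theta / 2)]])
assert np.allclose(rx @ rx.conj().T, np.eye(2))   # U U^dagger = I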
from __future__ import print_function
import numpy as np
import matplotlib.pyplot as plt
class ISO:
"""
Reads in MIST isochrone files.
"""
def __init__(self, filename, verbose=True):
"""
Args:
filename: the name of .iso file.
Usage:
>> iso = read_mist_models.ISO('MIST_v1.0_feh_p0.00_afe_p0.0_vvcrit0.4.iso')
>> age_ind = iso.age_index(8.0)
>> logTeff = iso.isos[age_ind]['log_Teff']
>> logL = iso.isos[age_ind]['log_L']
>> plt.plot(logTeff, logL) #plot the HR diagram for logage = 8.0
Attributes:
version Dictionary containing the MIST and MESA version numbers.
abun Dictionary containing Yinit, Zinit, [Fe/H], and [a/Fe] values.
rot Rotation in units of surface v/v_crit.
ages List of ages.
num_ages Number of isochrones.
hdr_list List of column headers.
isos Data.
"""
self.filename = filename
if verbose:
print('Reading in: ' + self.filename)
self.version, self.abun, self.rot, self.ages, self.num_ages, self.hdr_list, self.isos = self.read_iso_file()
def read_iso_file(self):
"""
Reads in the isochrone file.
Args:
filename: the name of .iso file.
"""
#open file and read it in
with open(self.filename) as f:
content = [line.split() for line in f]
version = {'MIST': content[0][-1], 'MESA': content[1][-1]}
abun = {content[3][i]:float(content[4][i]) for i in range(1,5)}
rot = float(content[4][-1])
num_ages = int(content[6][-1])
#read one block for each isochrone
iso_set = []
ages = []
counter = 0
data = content[8:]
for i_age in range(num_ages):
#grab info for each isochrone
num_eeps = int(data[counter][-2])
num_cols = int(data[counter][-1])
hdr_list = data[counter+2][1:]
formats = tuple([np.int32]+[np.float64 for i in range(num_cols-1)])
iso = np.zeros((num_eeps),{'names':tuple(hdr_list),'formats':tuple(formats)})
#read through EEPs for each isochrone
for eep in range(num_eeps):
iso_chunk = data[3+counter+eep]
iso[eep]=tuple(iso_chunk)
iso_set.append(iso)
ages.append(iso[0][1])
counter+= 3+num_eeps+2
return version, abun, rot, ages, num_ages, hdr_list, iso_set
def age_index(self, age):
"""
Returns the index for the user-specified age.
Args:
age: the age of the isochrone.
"""
diff_arr = abs(np.array(self.ages) - age)
age_index = np.where(diff_arr == min(diff_arr))[0][0]
if ((age > max(self.ages)) | (age < min(self.ages))):
print('The requested age is outside the range. Try between ' + str(min(self.ages)) + ' and ' + str(max(self.ages)))
return age_index
class ISOCMD:
"""
Reads in MIST CMD files.
"""
def __init__(self, filename, verbose=True):
"""
Args:
filename: the name of .iso.cmd file.
Usage:
>> isocmd = read_mist_models.ISOCMD('MIST_v1.0_feh_p0.00_afe_p0.0_vvcrit0.4.iso.cmd')
>> age_ind = isocmd.age_index(7.0)
>> B = isocmd.isocmds[age_ind]['Bessell_B']
>> V = isocmd.isocmds[age_ind]['Bessell_V']
>> plt.plot(B-V, V) #plot the CMD for logage = 7.0
Attributes:
version Dictionary containing the MIST and MESA version numbers.
photo_sys Photometric system.
abun Dictionary containing Yinit, Zinit, [Fe/H], and [a/Fe] values.
Av_extinction Av for CCM89 extinction.
rot Rotation in units of surface v/v_crit.
ages List of ages.
num_ages Number of ages.
hdr_list List of column headers.
isocmds Data.
"""
self.filename = filename
if verbose:
print('Reading in: ' + self.filename)
self.version, self.photo_sys, self.abun, self.Av_extinction, self.rot, self.ages, self.num_ages, self.hdr_list, self.isocmds = self.read_isocmd_file()
def read_isocmd_file(self):
"""
Reads in the cmd file.
Args:
filename: the name of .iso.cmd file.
"""
#open file and read it in
with open(self.filename) as f:
content = [line.split() for line in f]
version = {'MIST': content[0][-1], 'MESA': content[1][-1]}
photo_sys = ' '.join(content[2][4:])
abun = {content[4][i]:float(content[5][i]) for i in range(1,5)}
rot = float(content[5][-1])
num_ages = int(content[7][-1])
Av_extinction = float(content[8][-1])
#read one block for each isochrone
isocmd_set = []
ages = []
counter = 0
data = content[10:]
for i_age in range(num_ages):
#grab info for each isochrone
num_eeps = int(data[counter][-2])
num_cols = int(data[counter][-1])
hdr_list = data[counter+2][1:]
formats = tuple([np.int32]+[np.float64 for i in range(num_cols-1)])
isocmd = np.zeros((num_eeps),{'names':tuple(hdr_list),'formats':tuple(formats)})
#read through EEPs for each isochrone
for eep in range(num_eeps):
isocmd_chunk = data[3+counter+eep]
isocmd[eep]=tuple(isocmd_chunk)
isocmd_set.append(isocmd)
ages.append(isocmd[0][1])
counter+= 3+num_eeps+2
return version, photo_sys, abun, Av_extinction, rot, ages, num_ages, hdr_list, isocmd_set
def age_index(self, age):
"""
Returns the index for the user-specified age.
Args:
age: the age of the isochrone.
"""
diff_arr = abs(np.array(self.ages) - age)
age_index = np.where(diff_arr == min(diff_arr))[0][0]
if ((age > max(self.ages)) | (age < min(self.ages))):
print('The requested age is outside the range. Try between ' + str(min(self.ages)) + ' and ' + str(max(self.ages)))
return age_index
class EEP:
"""
Reads in and plots MESA EEP files.
"""
def __init__(self, filename, verbose=True):
"""
Args:
filename: the name of .track.eep file.
Usage:
>> eep = read_mist_models.EEP('00200M.track.eep')
        >> logTeff, center_h1, mdot = eep.eeps['log_Teff'], eep.eeps['center_h1'], eep.eeps['star_mdot']
Attributes:
version Dictionary containing the MIST and MESA version numbers.
abun Dictionary containing Yinit, Zinit, [Fe/H], and [a/Fe] values.
rot Rotation in units of surface v/v_crit.
minit Initial mass in solar masses.
hdr_list List of column headers.
eeps Data.
"""
self.filename = filename
if verbose:
print('Reading in: ' + self.filename)
self.version, self.abun, self.rot, self.minit, self.hdr_list, self.eeps = self.read_eep_file()
def read_eep_file(self):
"""
Reads in the EEP file.
Args:
filename: the name of .track.eep file.
"""
eeps = | np.genfromtxt(self.filename, skip_header=11, names=True) | numpy.genfromtxt |
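# Editor's sketch (made-up ages): the nearest-age lookup used by the age_index methods
# above, shown on a small grid of log-ages.
import numpy as np
ages = [7.0, 7.5, 8.0, 8.5]
age = 8.1
diff_arr = abs(np.array(ages) - age)                # [1.1, 0.6, 0.1, 0.4]
age_index = np.where(diff_arr == min(diff_arr))[0][0]
assert age_index == 2                               # 8.0 is the closest grid age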
import config as conf
import os
import sys
import numpy as np
from copy import deepcopy
import fnmatch
import basic_plot_functions
import var_utils as vu
import h5py as h5
'''
Directory structure and file names for ctest (or cycling experiments):
test/
├── Data/
│ ├── sondes_obs_2018041500_m.nc4 (or sondes_obs_2018041500.h5)
│ ├── satwind_obs_2018041500_m.nc4 (or satwind_obs_2018041500.h5)
│ ├── ...
├── graphics/
│ ├── plot_obs_loc.py
│ ├── ...
How to run it:
python plot_obs_loc.py cycling 2018041500
python plot_obs_loc.py ctest 2018041500
'''
test = str(sys.argv[1])
Date = str(sys.argv[2])
def readdata():
'''
Observation file name used in ctest: | Observation file name used in cycling:
aircraft_obs_2018041500_m.nc4 | aircraft_obs_2018041500.h5
amsua_n19_obs_2018041500_m.nc4 | amsua_n19_obs_2018041500.h5
satwind_obs_2018041500_m.nc4 | satwind_obs_2018041500.h5
sondes_obs_2018041500_m.nc4 | sondes_obs_2018041500.h5
gnssro_obs_2018041500_s.nc4 | gnssro_obs_2018041500.h5
'''
obsfiles = []
if (test == 'ctest'):
suffix = '_m'
string = '_obs_'+Date+suffix+'.nc4'
nChar1 = -21 # remove "_obs_2018041500_m.nc4" to get obstype string
nChar2 = -4 # remove '.nc4' to get output string
elif (test == 'cycling'):
suffix = ''
string = '_obs_'+Date+suffix+'.h5'
nChar1 = -18 # remove "_obs_2018041500.h5" to get obstype string
nChar2 = -3 # remove '.h5' to get output string
PreQCMaxvalueConv = 3
PreQCMinvalueConv = -90
PreQCMaxvalueAmsua = 0
#search all obs files and get obs types from Data dir:
allobstypes = []
for files in os.listdir('../Data/'):
if fnmatch.fnmatch(files, '*'+string):
allobstypes.append(files[:nChar1])
#get obs types with 'process': True from dictionary conf.DiagSpaceConfig
ObsSpaceDict = {}
obsfiles_prefix = []
for (key,baseval) in conf.DiagSpaceConfig.items():
if baseval['process'] and baseval['DiagSpaceGrp'] != conf.model_s:
ObsSpaceDict = deepcopy(baseval)
obsfiles_prefix.append(key)
#get the file name we want to plot:
match = set(allobstypes) & set(obsfiles_prefix)
print('match=', match)
obsfiles = [x + string for x in match]
if (test == 'ctest'):
obsfiles = [f.replace('gnssro_obs_2018041500_m.nc4', 'gnssro_obs_2018041500_s.nc4') for f in obsfiles]
print(obsfiles)
for file_name in obsfiles:
nc = h5.File("../Data/"+file_name, 'r')
print('Plotting:', file_name)
varlist = []
for node in nc:
if type(nc[node]) is h5._hl.group.Group:
for var in nc[node]:
varlist += [node+'/'+var]
if 'MetaData/station_id' in varlist:
stationidnc = nc['MetaData/station_id']
#for gnss:
elif 'MetaData/occulting_sat_id' in varlist:
stationidnc = nc['MetaData/occulting_sat_id']
if (test == 'cycling'):
recordNum = nc['MetaData/record_number']
#for radiances:
else:
nstation = 0
obstype = file_name[:nChar1]
latnc = nc['MetaData/latitude']
lonnc = nc['MetaData/longitude']
ObsSpaceInfo = conf.DiagSpaceConfig.get(obstype,conf.nullDiagSpaceInfo)
channels = ObsSpaceInfo.get('channels',[vu.miss_i])
        # select variables with the prefix 'ObsValue'
obslist = [obs for obs in varlist if (obs[:8] == 'ObsValue')]
#print('obslist=',obslist)
obs_type = file_name[:nChar1]
out_name = file_name[:nChar2]
for var in obslist:
var_name = var[9:] # e.g. remove 'ObsValue/' from 'ObsValue/air_temperature'
PreQC = 'PreQC/'+var_name
if var_name == 'refractivity':
var_unit = 'N-unit'
elif var_name == 'bending_angle':
var_unit = 'Radians'
else:
var_unit = vu.varDictObs[var_name][0]
levbin='all'
if channels[0] == vu.miss_i:
obsnc = nc[var]
obsnc = np.asarray(obsnc)
stationidnc_array = []
recordnc_array = []
if (obstype == 'gnssro'):
obsnc[ | np.less(obsnc, -999) | numpy.less |
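# Editor's sketch (standalone): how the negative-index slicing above recovers the obs type
# and the output prefix from a ctest file name.
fname = 'sondes_obs_2018041500_m.nc4'
assert fname[:-21] == 'sondes'                    # strip '_obs_2018041500_m.nc4'
assert fname[:-4] == 'sondes_obs_2018041500_m'    # strip '.nc4'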
import os
import numpy as np
import tensorflow as tf
import cv2
from utils.load_config import load_config
from utils.load_data import load_data
from utils.extraction_model import load_extraction_model
np.random.seed(0)
np.set_printoptions(precision=3, suppress=True, linewidth=150)
"""
test script to try the computations of feature positions within a feature map
run: python -m tests.CNN.t04_optical_flow_on_ft
"""
# define configuration
config_path = 'CNN_t04_optical_flow_m0001.json'
# load config
config = load_config(config_path, path='configs/CNN')
# create save folder in case
path = os.path.join("models/saved", config["config_name"])
if not os.path.exists(path):
os.mkdir(path)
# choose feature map index
eyebrow_ft_idx = 148 # the index comes from t02_find_semantic_units, it is the highest IoU score for eyebrow
# load and define model
model = load_extraction_model(config, input_shape=tuple(config["input_shape"]))
model = tf.keras.Model(inputs=model.input, outputs=model.get_layer(config['v4_layer']).output)
size_ft = tuple(np.shape(model.output)[1:3])
size_ft = (280, 280)
print("size_ft", size_ft)
# load morphing sequence
data = load_data(config)
# raw_seq = load_data(config, get_raw=True)[0]
print("[DATA] loaded sequence")
# predict
preds = model.predict(data)[..., eyebrow_ft_idx]
preds = np.expand_dims(preds, axis=3)
preds = preds / np.amax(preds) # normalize so we can compare with the positions
print("[PRED] Shape predictions", np.shape(preds))
print("[PRED] Finish to predict")
print()
# ----------------------------------------------------------------------------------------------------------------------
# test 1 - compute optical flow on eye brow feature map
# code taken frm: https://nanonets.com/blog/optical-flow/
# transform to gray images
predictions = preds[..., 0]
# plot first feature map
pred0 = np.array(predictions[0]) * 255
print("shape pred0", np.shape(pred0))
pred0_fm148 = cv2.cvtColor(pred0.astype(np.uint8), cv2.COLOR_GRAY2BGR)
cv2.imwrite(os.path.join(path, 'pred0_fm148.png'), pred0_fm148)
# Creates an image filled with zero intensities with the same dimensions as the frame
mask = np.zeros(size_ft + (3, )) # add tuple to set up size of rgb image
print("[TEST 1] shape mask", np.shape(mask))
# Sets image saturation to maximum
mask[..., 1] = 255
# initialize video writer
fourcc = cv2.VideoWriter_fourcc(*'mp4v')
fps = 30
of_video_filename = os.path.join(path, 'optical_flow_fm148.mp4')
of_out = cv2.VideoWriter(of_video_filename, fourcc, fps, size_ft)
# create raw output
fm_video_filename = os.path.join(path, 'raw_output_fm148.mp4')
fm_out = cv2.VideoWriter(fm_video_filename, fourcc, fps, size_ft)
# for i in range(1, np.shape(predictions)[0]):
for i in range(1, 5):
# get current and previous frame
prev_frame = np.array(np.array(predictions[i - 1]) * 255).astype(np.uint8)
curr_frame = np.array(np.array(predictions[i]) * 255).astype(np.uint8)
# tests
# curr_frame = np.zeros(size_ft).astype(np.uint8)
prev_frame = np.zeros(size_ft).astype(np.uint8)
curr_frame = np.zeros(size_ft).astype(np.uint8)
# single dot
# prev_frame[100:101, (30+i):(31+i)] = 255 # for size_ft (280, 280)
# curr_frame[100:101, (31+i):(32+i)] = 255
# cube
prev_frame[100:110, (30+i):(40+i)] = 255 # for size_ft (280, 280)
curr_frame[100:110, (31+i):(41+i)] = 255
# single dot
# prev_frame[10:11, (3+i):(4+i)] = 255 # for size_ft (28, 28)
# curr_frame[10:11, (4+i):(5+i)] = 255
# cube
# prev_frame[10:14, (3+i):(7+i)] = 255 # for size_ft (28, 28)
# curr_frame[10:14, (4+i):(8+i)] = 255
print("shape curr_frame", np.shape(curr_frame))
print("min max curr_frame", np.amin(curr_frame), np.amax(curr_frame))
# transform current frame to BGR for visualization
fm = cv2.cvtColor(curr_frame, cv2.COLOR_GRAY2BGR)
print("min max fm", np.amin(fm), np.amax(fm))
# compute optical flow
# parameters explanation: https://www.geeksforgeeks.org/opencv-the-gunnar-farneback-optical-flow/
    # - winsize: the averaging window size; a larger window makes the estimate more robust
    #   to noise and better at catching fast motion, but yields a more blurred flow field.
    # - poly_n: the size of the pixel neighbourhood used to fit the polynomial expansion
    #   between pixels; typically 5 or 7.
flow = cv2.calcOpticalFlowFarneback(prev_frame, curr_frame,
flow=None,
pyr_scale=0.5,
levels=1,
winsize=3, # 15
iterations=5, # 3
poly_n=5, # 5
poly_sigma=1.2,
flags=0)
# build image
# Computes the magnitude and angle of the 2D vectors
magnitude, angle = cv2.cartToPolar(flow[..., 0], flow[..., 1])
print("shape magnitude", np.shape(magnitude))
print("min max magnitude", np.amin(magnitude), np.amax(magnitude))
print("shape angle", np.shape(angle))
print("min max angle", np.amin(angle), np.amax(angle))
# Sets image hue according to the optical flow direction
mask[..., 0] = angle * 180 / np.pi / 2
# Sets image value according to the optical flow magnitude (normalized)
mask[..., 2] = cv2.normalize(magnitude, None, 0, 255, cv2.NORM_MINMAX)
# Converts HSV to RGB (BGR) color representation
print("min max mask[..., 0]", np.amin(mask[..., 0]), np.amax(mask[..., 0]))
print("min max mask[..., 1]", np.amin(mask[..., 1]), np.amax(mask[..., 1]))
print("min max mask[..., 2]", np.amin( mask[..., 2]), np.amax( mask[..., 2]))
bgr = cv2.cvtColor(mask.astype('float32'), cv2.COLOR_HSV2BGR)
# bgr = np.array(bgr).astype(np.uint8)
# bgr[bgr < 0] = 0
# bgr[:5, :5, 0] = 255
print("min max bgr", np.amin(bgr), | np.amax(bgr) | numpy.amax |
# -------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License (MIT). See LICENSE in the repo root for license information.
# -------------------------------------------------------------------------------------------
"""This submodule implements useful helper functions for characterizing whole experiments (multiple wells).
Exports:
Plate, a class for running characterization over a whole plate and storing results
"""
import collections
import logging
from pathlib import Path
from typing import Dict, Iterable, List, Literal, Optional, Sequence, Tuple, Type, TypeVar, Union, cast
import matplotlib.pyplot as plt
from matplotlib.axes import SubplotBase
import numpy as np
import pandas as pd
from scipy.stats import pearsonr
from staticchar.basic_types import TIME, ArrayLike
from staticchar.config import CharacterizationConfig, CharacterizationMethod, GrowthModelType
from staticchar.datasets import Dataset
from staticchar.gradients import transcriptional_activity_ratio
from staticchar.integrals import integrate
from staticchar.models.base import BaseModel
from staticchar.models.gompertz import GompertzModel
from staticchar.models.logistic import LogisticModel
from staticchar.plotting.core import AnnotationSpec, mark_phase
from staticchar.plotting.growth_model import plot_growth_model
from staticchar.plotting.signals import plot_integration, plot_signals_against_reference, plot_signals_against_time
from staticchar.preprocessing import BackgroundChoices, subtract_background
from psbutils.arrayshapes import Shapes
# Base names (without the ".png") for plots output by characterize() for each plate.
#
SIGNALS_VS_TIME = "signals_vs_time"
SIGNALS_VS_REFERENCE = "signals_vs_reference"
GROWTH_MODEL = "growth_model"
INTEGRATION_FMT = "integration_{}"
CHARACTERIZATIONS_CSV = "characterizations.csv"
MODEL_VALUES_CSV = "model_values.csv"
VALUE_CORRELATIONS = "value_correlations"
RANK_CORRELATIONS = "rank_correlations"
def characterize_integral(
subtracted: pd.DataFrame, config: CharacterizationConfig, reference_time: Optional[float] = None
) -> Dict[str, float]:
"""Run static characterization for a single well using the integral method.
Args:
subtracted: a data frame containing background-subtracted time-series observations
config: a characterization config instance
specified_interval: a specified TimePeriod that overrides what is in `config`
"""
if reference_time is not None:
config.time_window.reference = reference_time
result: Dict[str, float] = {}
interval = config.time_window + config.maturation_offset
result.update(integrate(subtracted, config.signals, interval=interval))
return result
def characterize_gradient(
subtracted: pd.DataFrame, config: CharacterizationConfig, model: BaseModel, sample_id: str
) -> Dict[str, float]:
"""Run static characterization for a single well using the gradient method.
Args:
subtracted: a data frame containing background-subtracted time-series observations
config: a characterization config instance
model: a BaseModel (Logistic or Gompertz)
sample_id: identifier for sample being characterized
"""
gradient_result = transcriptional_activity_ratio(
subtracted,
config.signals,
config.reference,
config.signal_properties,
model.parameters.growth_rate,
model.growth_period,
maturation_offset=config.maturation_offset,
sample_id=sample_id,
)
result = {k: v.activity_value for k, v in gradient_result.items()}
return result
def plot_timeseries(
data: pd.DataFrame,
color_dct: Dict[str, str],
annotation_spec: AnnotationSpec,
target_path: Optional[Path] = None,
ax: Optional[plt.Axes] = None,
title: Optional[str] = None,
) -> plt.Axes:
"""
Plots the provided data as a time series.
Args:
data: dataset to plot; must have a TIME ("time") column.
color_dct: color to use for each dependent variable in data
annotation_spec: specification of what annotations to put in the plot
target_path: where to save the figure to, if present
ax: Axis object to use; a new one is generated if absent.
title: the title for the plot, if any
Returns:
the Axis object for the plot
"""
def minus_mean_value(signal: str) -> float:
return -cast(float, data[signal].mean())
signals = sorted(set(data.columns).difference([TIME]), key=minus_mean_value) # type: ignore
if ax is None: # pragma: no cover
plt.figure(figsize=(6.4, 4.8))
ax = cast(plt.Axes, plt.subplot())
plot_signals_against_time(
data, signals, ax, colors=color_dct, size=2.0, annotation_spec=annotation_spec, title=title
)
# sns.despine()
if target_path is not None: # pragma: no cover
ax.get_figure().savefig(target_path) # type: ignore
return ax
def fit_model_for_well(
config: CharacterizationConfig, subtracted_data: pd.DataFrame, well: Optional[str], sample_id: str
) -> Optional[BaseModel]:
"""
Args:
config: configuration to get some parameter values from
subtracted_data: raw_data with background subtracted
sample_id: used to identify the sample if there are problems.
Returns:
the fitted model, or None if fitting failed.
"""
model_class = get_growth_model_class(config.growth_model)
time_data = cast(ArrayLike, subtracted_data[TIME])
signal_data = cast(ArrayLike, subtracted_data[config.growth_signal])
try:
model_params = model_class.fit(time_data, signal_data) # type: ignore
except RuntimeError: # pragma: no cover
logging.warning(f"Model fitting failed for well {well} (sample {sample_id})")
return None
return model_class(model_params)
def get_growth_model_class(gm_type: GrowthModelType) -> Type[BaseModel]:
"""
Returns the growth model class (not instance) corresponding to the provided enum instance.
"""
if gm_type == GrowthModelType.Logistic:
return LogisticModel # pragma: no cover
if gm_type == GrowthModelType.Gompertz:
return GompertzModel
raise ValueError(f"Unexpected model type: {gm_type}") # pragma: no cover
K = TypeVar("K")
S = TypeVar("S")
V = TypeVar("V")
def dictionary_with_subkey(subkey: S, dct: Dict[K, Dict[S, V]]) -> Dict[K, V]:
return dict((key, val[subkey]) for (key, val) in dct.items() if subkey in val)
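# Editor's note (worked example, not in the original module): with
# dct = {'A1': {'gfp': 1.0, 'rfp': 2.0}, 'A2': {'rfp': 3.0}} and subkey 'gfp',
# dictionary_with_subkey returns {'A1': 1.0}; entries lacking the subkey are skipped.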
def plot_for_sample(
config: CharacterizationConfig,
subtracted_data: pd.DataFrame,
model: Optional[BaseModel],
ax_dict: Dict[str, plt.Axes],
well_or_sample_id: str,
annotation_spec: AnnotationSpec,
) -> None:
"""
Fills in plots for the given sample.
Args:
config: configuration to get some parameter values from
subtracted_data: raw data with background subtracted
model: fitted model, whose parameters are used in the growth model plot; or None if fitting failed
ax_dict: dictionary from plot names to Axis objects
well_or_sample_id: to use in titles of plots
annotation_spec: spec of what annotations to write
"""
production_phase = config.time_window + config.maturation_offset
try:
production_phase.check_bounds_against(cast(Iterable[float], subtracted_data[TIME]), well_or_sample_id)
phase_color = "green"
except ValueError: # pragma: no cover
phase_color = "red"
color_dct = {signal: config.signal_properties[signal].color for signal in config.signal_properties}
relevant_signals = config.signals + [config.reference, config.growth_signal, TIME]
relevant_data = cast(pd.DataFrame, subtracted_data[relevant_signals])
# 8 characters should be enough to uniquely identify a sample without taking up too much space
title = well_or_sample_id[:8]
ax_timeseries = ax_dict.get(SIGNALS_VS_TIME, None)
if ax_timeseries is not None:
plot_timeseries(relevant_data, color_dct, ax=ax_timeseries, annotation_spec=annotation_spec, title=title)
# Visualise the production phase
mark_phase(ax_timeseries, interval=production_phase, color=phase_color, alpha=0.1)
ax_ref = ax_dict.get(SIGNALS_VS_REFERENCE, None)
if ax_ref is not None:
plot_signals_against_reference(
subtracted_data,
signals=config.signals,
reference=config.reference,
colors=color_dct,
ax=ax_ref,
size=2.0,
title=title,
annotation_spec=annotation_spec,
)
ax_gm = ax_dict.get(GROWTH_MODEL, None)
if ax_gm is not None:
plot_growth_model(
cast(ArrayLike, subtracted_data[TIME]),
cast(ArrayLike, subtracted_data[config.growth_signal]),
ax=ax_gm,
model=model,
growth_period_color=phase_color,
annotation_spec=annotation_spec,
title=title,
maturation_offset=config.maturation_offset,
)
for signal in config.signals:
ax_signal = ax_dict.get(INTEGRATION_FMT.format(signal), None)
if ax_signal is not None:
plot_integration(
subtracted_data,
signal,
production_phase,
ax_signal,
annotation_spec=annotation_spec,
title=title,
)
def get_relative_ranks(data: Sequence[float]) -> np.ndarray:
rank_dct1 = collections.defaultdict(list)
for index, value in enumerate(sorted(data)):
rank_dct1[value].append(index)
rank_dct = dict((key, sum(lst) / len(lst)) for key, lst in rank_dct1.items())
return np.array([rank_dct[value] for value in data]) / (len(data) - 1)
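# Editor's note (worked example, not in the original module): for data [10, 20, 20, 30]
# the sorted indices are 0..3, the tied value 20 gets the mean index 1.5, and dividing by
# (len(data) - 1) = 3 gives relative ranks [0.0, 0.5, 0.5, 1.0].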
class Plate(Dict[str, Dict[str, float]]):
"""A class representing the characterization results for a given Dataset and CharacterizationConfig.
Methods:
__getitem__, so that the results can be accessed using ``dataset[key]`` syntax
items, so that one can iterate over pairs (key, data frame) as in ``dict.items()``
"""
def __init__(self, data: Dataset, config: CharacterizationConfig):
"""Initialize a Plate instance.
Args:
data: a dataset instance
config: a characterization config instance
"""
super().__init__()
self.config = config
self.data = data
self.subtracted_columns = config.background_subtract_columns()
self.subtract_strategy = BackgroundChoices.Minimum
self.plots: Dict[str, Tuple[plt.Figure, np.ndarray]] = {}
self.layout: Optional[Tuple[List[str], List[int]]] = None
self.reference_time = self.get_reference_time()
def get_reference_time(self) -> Optional[float]:
"""If possible, identify the average time of maximal growth in the reference wells."""
if self.config.method != CharacterizationMethod.Integral or len(self.config.reference_wells) == 0:
return None
tmaxs = []
for ref_id in self.config.reference_wells:
subtracted = subtract_background(
self.data[ref_id], columns=self.subtracted_columns, strategy=self.subtract_strategy
)
growth_model = fit_model_for_well(self.config, subtracted, None, ref_id)
if growth_model is None:
logging.warning(f"Model fitting failed on reference well {ref_id}") # pragma: no cover
else:
tmaxs.append(growth_model.time_maximal_activity)
if len(tmaxs) > 0:
return float( | np.mean(tmaxs) | numpy.mean |
import numpy as np
import scipy.interpolate
import scipy.integrate
from .. import distributions as D
np.random.seed(1)
def sampltest(distr, left=None, right=None, bounds=None):
# check that mean and stddev from the generated sample
# match what we get from integrating the PDF
def FF1(x):
return distr.pdf(x) * x
def FF2(x):
return distr.pdf(x) * x**2
if left is None:
left = 0
if right is None:
right = np.inf
if bounds is None:
mom1, _ = scipy.integrate.quad(FF1, left, right)
mom2, _ = scipy.integrate.quad(FF2, left, right)
else:
mom1, mom2 = 0, 0
for curb in bounds:
cmom1, _ = scipy.integrate.quad(FF1, curb[0], curb[1])
cmom2, _ = scipy.integrate.quad(FF2, curb[0], curb[1])
mom1 += cmom1
mom2 += cmom2
std = np.sqrt(mom2 - mom1**2)
assert (mom2 > mom1**2)
N = int(1e6)
samps = distr.rvs(N)
assert ((samps.mean() - mom1) < 5 * std / np.sqrt(N))
assert ((samps.std() - std) < 20 * std / | np.sqrt(2 * (N - 1)) | numpy.sqrt |
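# Editor's sketch (separate from the test above): the same moment check applied directly
# to a known distribution -- an exponential with scale 2, whose exact mean and standard
# deviation are both 2 -- using the same tolerances as sampltest.
import numpy as np
import scipy.stats
N = int(1e6)
samps = scipy.stats.expon(scale=2.0).rvs(N, random_state=0)
assert abs(samps.mean() - 2.0) < 5 * 2.0 / np.sqrt(N)
assert abs(samps.std() - 2.0) < 20 * 2.0 / np.sqrt(2 * (N - 1))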
from operator import add
import random
from attacks import utils
import torch.nn.functional as F
import numpy as np
import torch
import scipy.sparse as sp
from attacks.attack import gcn_norm
def dice_injection(adj, n_inject, n_edge_max, origin_labels, target_idx, device):
n_classes = max(origin_labels)+1
class_pos = [[] for i in range(n_classes)]
for i in origin_labels:
class_id = origin_labels[i]
class_pos[class_id].append(i)
    direct_edges = n_edge_max // 2  # number of edges connecting to target nodes
    bridge_edges = n_edge_max - direct_edges  # number of edges bridging nodes of different classes
n_node = adj.size(0)
adj=utils.tensor_to_adj(adj)
target_idx = target_idx.cpu()
n_test = target_idx.shape[0]
new_edges_x = []
new_edges_y = []
new_data = []
# connect injected nodes to target nodes
for i in range(n_inject):
islinked = np.zeros(n_test)
for j in range(direct_edges):
x = i + n_node
yy = random.randint(0, n_test - 1)
while islinked[yy] > 0:
yy = random.randint(0, n_test - 1)
islinked[yy] = 1
y = target_idx[yy]
new_edges_x.extend([x, y])
new_edges_y.extend([y, x])
new_data.extend([1, 1])
add1 = sp.csr_matrix((n_inject, n_node))
add2 = sp.csr_matrix((n_node + n_inject, n_inject))
adj_attack = sp.vstack([adj, add1])
adj_attack = sp.hstack([adj_attack, add2])
adj_attack.row = np.hstack([adj_attack.row, new_edges_x])
adj_attack.col = np.hstack([adj_attack.col, new_edges_y])
adj_attack.data = np.hstack([adj_attack.data, new_data])
adj_attack = utils.adj_to_tensor(adj_attack).to(device)
return adj_attack
def random_class_injection(adj, n_inject, n_edge_max, origin_labels, target_idx, device, not_full=False):
n_classes = max(origin_labels)+1
class_pos = [[] for i in range(n_classes)]
min_class_len = len(target_idx)
for (i,pos) in enumerate(target_idx):
class_id = origin_labels[pos]
class_pos[class_id].append(i)
    for c in class_pos:
        min_class_len = min(min_class_len, len(c))
if not not_full:
assert min_class_len >= n_edge_max, print(f"min_class_len {min_class_len}")
n_node = adj.size(0)
adj=utils.tensor_to_adj(adj)
target_idx = target_idx.cpu()
n_test = target_idx.shape[0]
new_edges_x = []
new_edges_y = []
new_data = []
for i in range(n_inject):
islinked = np.zeros(n_test)
class_id = random.randint(0, n_classes-1)
n_connections = min(len(class_pos[class_id]),n_edge_max)
for j in range(n_connections):
x = i + n_node
yy = random.randint(0, len(class_pos[class_id]) - 1)
while islinked[class_pos[class_id][yy]] > 0:
yy = random.randint(0, len(class_pos[class_id]) - 1)
islinked[class_pos[class_id][yy]] = 1
y = target_idx[class_pos[class_id][yy]]
new_edges_x.extend([x, y])
new_edges_y.extend([y, x])
new_data.extend([1, 1])
add1 = sp.csr_matrix((n_inject, n_node))
add2 = sp.csr_matrix((n_node + n_inject, n_inject))
adj_attack = sp.vstack([adj, add1])
adj_attack = sp.hstack([adj_attack, add2])
adj_attack.row = np.hstack([adj_attack.row, new_edges_x])
adj_attack.col = | np.hstack([adj_attack.col, new_edges_y]) | numpy.hstack |
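# Editor's sketch (toy graph, illustrative only): the sparse-matrix padding idiom used by
# dice_injection/random_class_injection above -- grow the adjacency by one injected node,
# then append the new (undirected) edge coordinates.
import numpy as np
import scipy.sparse as sp
adj = sp.coo_matrix(np.array([[0, 1, 0], [1, 0, 1], [0, 1, 0]], dtype=float))
adj_pad = sp.vstack([adj, sp.csr_matrix((1, 3))])              # empty row for the new node
adj_pad = sp.hstack([adj_pad, sp.csr_matrix((4, 1))]).tocoo()  # and an empty column
adj_pad.row = np.hstack([adj_pad.row, [3, 0]])                 # connect node 3 <-> node 0
adj_pad.col = np.hstack([adj_pad.col, [0, 3]])
adj_pad.data = np.hstack([adj_pad.data, [1.0, 1.0]])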
import numpy as np
import matplotlib.pyplot as plt
import pytest
from diagnostics import (
TimeSerie,
BooleanTimeSerie,
StateChangeArray,
BooleanStateChangeArray,
Report,
Event,
)
from diagnostics import DataLossError
import datetime as dt
import pytz
def compare_timeseries(a, b):
if len(a) != len(b):
return False
data = all(a.data == b.data)
channel = a.channel == b.channel
name = a.name == b.name
class_ = type(a) == type(b)
return data & channel & name
def compare_statechangearrays(a, b):
if len(a) != len(b):
return False
data = all(a.data == b.data)
t = all(a.t == b.t)
name = a.name == b.name
class_ = type(a) == type(b)
return data & t & name
def compare_events(a, b):
value = a.value == b.value
t = a.t == b.t
name = a.name == b.name
validity = a.validity == b.validity
class_ = type(a) == type(b)
return value & t & name & validity & class_
def compare_reports(a, b):
t0 = a.t0 == b.t0
te = a.te == b.te
name = a.name == b.name
return t0 & te & name
def test_timeserie_init():
a = TimeSerie([1, 2, 3], t0=0, fs=1, name="a")
assert all(a.data == [1, 2, 3])
assert a.t0 == 0
assert a.fs == 1
assert a.name == "a"
return True
def test_timeserie_datetime_t0():
a = TimeSerie(
[1, 2, 3], t0=pytz.utc.localize(dt.datetime(2000, 1, 1)), fs=1, name="a"
)
b = TimeSerie([1, 2, 3], t0=dt.datetime(2000, 1, 1), fs=1, name="b")
assert a.t0 == 946684800.0
assert a.te == 946684802.0
assert a.dt0 == dt.datetime(2000, 1, 1)
assert a.dte == dt.datetime(2000, 1, 1, 0, 0, 2)
assert all(
a.dt
== np.array(
["2000-01-01T00:00:00", "2000-01-01T00:00:01", "2000-01-01T00:00:02"],
dtype="datetime64",
)
)
assert b.t0 == 946684800.0
assert b.te == 946684802.0
assert b.dt0 == dt.datetime(2000, 1, 1)
assert b.dte == dt.datetime(2000, 1, 1, 0, 0, 2)
assert all(
b.dt
== np.array(
["2000-01-01T00:00:00", "2000-01-01T00:00:01", "2000-01-01T00:00:02"],
dtype="datetime64",
)
)
return True
def test_timeserie_nparray():
a = TimeSerie(np.array([1, 2, 3]), t0=0, fs=1, name="a")
assert isinstance(a.data, np.ndarray)
b = TimeSerie([1, 2, 3], t0=0, fs=1, name="b")
assert isinstance(b.data, np.ndarray)
return True
def test_timeserie_properties():
a = TimeSerie([1, 2, 3], name="a", fs=2, t0=1)
assert a.hz == a.fs
a.hz = 1
assert a.fs == 1
b = TimeSerie([4, 5, 6], name="a", fs=1, t0=1)
b.data = [1, 2, 3]
assert isinstance(b.data, np.ndarray)
assert all(b.data == [1, 2, 3])
assert compare_timeseries(a, b)
with pytest.raises(ValueError):
a.channel = (1, 2, 3)
return True
def test_timeserie_resett0():
a = TimeSerie([1, 2, 3], name="a", fs=2, t0=1)
assert a.t0 == 1
a.reset_t0()
assert a.t0 == 0
return True
def test_timeserie_roundt0():
a = TimeSerie(np.arange(10), name="a", fs=1, t0=1.1)
assert a.t0 == 1.1
a.round_t0()
assert a.t0 == 1.0
a = TimeSerie(np.arange(100), name="a", fs=10, t0=1.11)
assert a.t0 == 1.11
a.round_t0()
assert a.t0 == 1.1
def test_timeserie_iter():
a = TimeSerie([1, 2, 3], fs=2, t0=1)
lst = list(a.iter())
assert lst == [(1.0, 1), (1.5, 2), (2.0, 3)]
return True
def test_timeserie_defaultsettings():
a = TimeSerie([1, 2, 3])
assert a.name == ""
assert a.t0 == 0
assert a.fs == 1
return True
def test_timeserie_fsfloat():
a = TimeSerie([1, 2, 3], fs=1.1)
assert a.fs == 1
return True
def test_timeserie_repr():
a = TimeSerie([1, 2, 3], fs=2, t0=1, name="a")
assert repr(a) == "TimeSerie([1, 2, 3], t0=1, name='a', fs=2)"
b = TimeSerie([1, 2, 3, 4, 5, 6, 7], fs=3, t0=2, name="b")
assert repr(b) == "TimeSerie([1, 2, 3, 4, 5, 6, 7], t0=2, name='b', fs=3)"
return True
def test_timeserie_len():
a = TimeSerie([1, 2, 3])
assert len(a) == 3
b = TimeSerie([1, 2, 3, 4, 5, 6, 7])
assert len(b) == 7
return True
def test_timeserie_getitem():
a = TimeSerie([1, 2, 3])
assert a[0] == 1
assert a[-1] == 3
b = TimeSerie([1, 2, 3, 4, 5, 6, 7])
assert b[1] == 2
return True
def test_timeserie_eq():
a = TimeSerie([1, 2, 3], name="a", fs=1, t0=1)
eq_1 = a == a
assert compare_timeseries(
eq_1, BooleanTimeSerie([True, True, True], t0=1, name="(a == a)", fs=1)
)
eq_2 = a == 1
assert compare_timeseries(
eq_2, BooleanTimeSerie([True, False, False], t0=1, fs=1, name="")
)
b = TimeSerie([1, 2, 3], fs=2, name="b", t0=1)
with pytest.raises(ValueError):
eq_4 = a == b
return True
def test_timeserie_neq():
a = TimeSerie([1, 2, 3], name="a", fs=1, t0=1)
eq_1 = a != a
assert compare_timeseries(
eq_1, BooleanTimeSerie([False, False, False], t0=1, name="(a != a)", fs=1)
)
eq_2 = a != 1
assert compare_timeseries(
eq_2, BooleanTimeSerie([False, True, True], t0=1, fs=1, name="")
)
b = TimeSerie([1, 2, 3], fs=2, name="b", t0=1)
with pytest.raises(ValueError):
eq_4 = a != b
return True
def test_timeserie_lessthen():
a = TimeSerie([1, 2, 3], name="a", fs=1, t0=1)
b = TimeSerie([3, 1, 5], name="b", fs=1, t0=1)
lt_1 = b < a
assert compare_timeseries(
lt_1, BooleanTimeSerie([False, True, False], t0=1, fs=1, name="(b < a)")
)
lt_2 = a < 2
assert compare_timeseries(
lt_2, BooleanTimeSerie([True, False, False], t0=1, fs=1, name="")
)
c = TimeSerie([7, 8, 9, 0], name="c", fs=1, t0=0)
with pytest.raises(ValueError):
eq_3 = a < c
return True
def test_timeserie_greaterthen():
a = TimeSerie([1, 2, 3], name="a", fs=1, t0=1)
b = TimeSerie([3, 1, 5], name="b", fs=1, t0=1)
lt_1 = a > b
assert compare_timeseries(
lt_1, BooleanTimeSerie([False, True, False], t0=1, fs=1, name="(a > b)")
)
lt_2 = a > 2
assert compare_timeseries(
lt_2, BooleanTimeSerie([False, False, True], t0=1, fs=1, name="")
)
c = TimeSerie([7, 8, 9, 0], name="c", fs=1, t0=0)
with pytest.raises(ValueError):
eq_3 = a > c
return True
def test_timeserie_lessequalthen():
a = TimeSerie([1, 2, 3], name="a", fs=1, t0=1)
b = TimeSerie([3, 1, 3], name="b", fs=1, t0=1)
lt_1 = b <= a
assert compare_timeseries(
lt_1, BooleanTimeSerie([False, True, True], t0=1, fs=1, name="(b <= a)")
)
lt_2 = a <= 2
assert compare_timeseries(
lt_2, BooleanTimeSerie([True, True, False], t0=1, fs=1, name="")
)
c = TimeSerie([7, 8, 9, 0], name="c", fs=1, t0=0)
with pytest.raises(ValueError):
eq_3 = a <= c
return True
def test_timeserie_greaterequalthen():
a = TimeSerie([1, 2, 3], name="a", fs=1, t0=1)
b = TimeSerie([3, 1, 3], name="b", fs=1, t0=1)
lt_1 = a >= b
assert compare_timeseries(
lt_1, BooleanTimeSerie([False, True, True], t0=1, fs=1, name="(a >= b)")
)
lt_2 = a >= 2
assert compare_timeseries(
lt_2, BooleanTimeSerie([False, True, True], t0=1, fs=1, name="")
)
c = TimeSerie([7, 8, 9, 0], name="c", fs=1, t0=0)
with pytest.raises(ValueError):
eq_3 = a >= c
return True
def test_timeserie_and():
a = TimeSerie([True, True, False, False], name="a", fs=1, t0=1)
b = BooleanTimeSerie([True, False, True, False], name="b", fs=1, t0=1)
c = TimeSerie([1, 2, 3, 4], name="c", fs=1, t0=1)
d = BooleanTimeSerie([True, True, True, True], name="d", fs=2, t0=3)
and_1 = a & b
assert compare_timeseries(
and_1, BooleanTimeSerie([True, False, False, False], name="(a & b)", fs=1, t0=1)
)
with pytest.raises(ValueError):
and_2 = a & c
with pytest.raises(ValueError):
and_3 = b & d
return True
def test_timeserie_or():
a = TimeSerie([True, True, False, False], name="a", fs=1, t0=1)
b = BooleanTimeSerie([True, False, True, False], name="b", fs=1, t0=1)
c = TimeSerie([1, 2, 3, 4], name="c", fs=1, t0=1)
d = BooleanTimeSerie([True, True, True, True], name="d", fs=2, t0=3)
and_1 = a | b
assert compare_timeseries(
and_1, BooleanTimeSerie([True, True, True, False], name="(a | b)", fs=1, t0=1)
)
with pytest.raises(ValueError):
and_2 = a | c
with pytest.raises(ValueError):
and_3 = b | d
return True
def test_timeserie_xor():
a = TimeSerie([True, True, False, False], name="a", fs=1, t0=1)
b = BooleanTimeSerie([True, False, True, False], name="b", fs=1, t0=1)
c = TimeSerie([1, 2, 3, 4], name="c", fs=1, t0=1)
d = BooleanTimeSerie([True, True, True, True], name="d", fs=2, t0=3)
and_1 = a ^ b
assert compare_timeseries(
and_1, BooleanTimeSerie([False, True, True, False], name="(a ^ b)", fs=1, t0=1)
)
with pytest.raises(ValueError):
and_2 = a ^ c
with pytest.raises(ValueError):
and_3 = b ^ d
return True
def test_timeserie_invert():
a = TimeSerie([True, False], name="a", fs=1, t0=1)
b = TimeSerie([1, 2, 3, 4], name="b", fs=1, t0=1)
invert_1 = ~a
assert compare_timeseries(
invert_1, BooleanTimeSerie([False, True], name="(~a)", fs=1, t0=1)
)
with pytest.raises(ValueError):
invert_2 = ~b
return True
def test_timeserie_add():
a = TimeSerie([1, 2, 3], name="a", fs=1, t0=1)
b = TimeSerie([3, 1, 3], name="b", fs=1, t0=1)
add_1 = a + b
assert compare_timeseries(add_1, TimeSerie([4, 3, 6], name="(a + b)", fs=1, t0=1))
add_2 = a + 1
assert compare_timeseries(add_2, TimeSerie([2, 3, 4], name="", fs=1, t0=1))
c = TimeSerie([7, 8, 9, 0], name="c", fs=1, t0=0)
with pytest.raises(ValueError):
add_3 = a + c
return True
def test_timeserie_radd():
a = TimeSerie([1, 2, 3], name="a", fs=1, t0=1)
add_1 = 1 + a
assert compare_timeseries(add_1, TimeSerie([2, 3, 4], name="", fs=1, t0=1))
return True
def test_timeserie_sub():
a = TimeSerie([1, 2, 3], name="a", fs=1, t0=1)
b = TimeSerie([3, 1, 3], name="b", fs=1, t0=1)
sub_1 = a - b
assert compare_timeseries(sub_1, TimeSerie([-2, 1, 0], name="(a - b)", fs=1, t0=1))
sub_2 = a - 1
assert compare_timeseries(sub_2, TimeSerie([0, 1, 2], name="", fs=1, t0=1))
c = TimeSerie([7, 8, 9, 0], name="c", fs=1, t0=0)
with pytest.raises(ValueError):
sub_3 = a - c
return True
def test_timeserie_rsub():
a = TimeSerie([1, 2, 3], name="a", fs=1, t0=1)
sub_1 = 1 - a
assert compare_timeseries(sub_1, TimeSerie([0, -1, -2], name="", fs=1, t0=1))
return True
def test_timeserie_at():
a = TimeSerie([1, 2, 3, 4, 5, 6], t0=1, fs=1, name="a")
assert a.at(2) == 2
return True
def test_timeserie_where():
a = TimeSerie([1, 2, 3, 4, 5, 6], t0=1, fs=1, name="a")
d = a.where(a.t >= 3)
d_test = np.array([3, 4, 5, 6])
assert len(d) == len(d_test)
assert all(d == d_test)
return True
def test_timeserie_empty():
a = TimeSerie.empty(1, 10, fs=10, name="a")
assert len(a) == 90
assert a.te == 9.9
b = TimeSerie.empty(2, 5, fs=4, name="b", inclusive=True)
assert len(b) == 13
assert b.te == 5.0
c = TimeSerie.empty(
dt.datetime(2018, 1, 1, 12),
dt.datetime(2018, 1, 1, 13),
fs=1,
name="c",
inclusive=True,
)
assert c.te - c.t0 == 3600.0
d = TimeSerie.empty(1.2, 4.8, fs=100, name="d")
assert d.t0 == 1.2
assert d.te == 4.79
return True
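
# A hedged sketch of the time base the asserts above imply TimeSerie.empty
# builds: samples spaced 1/fs apart starting at t0, with the end point
# included only when inclusive=True (hypothetical helper, not library code).
def _empty_time_axis_sketch(t0, te, fs, inclusive=False):
    stop = te + 1 / fs if inclusive else te
    return np.arange(t0, stop, 1 / fs)
# e.g. _empty_time_axis_sketch(1, 10, fs=10) has 90 samples ending at 9.9,
# and _empty_time_axis_sketch(2, 5, fs=4, inclusive=True) has 13 ending at 5.0.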
def test_timeserie_plot():
plt.ioff()
a = TimeSerie([-2, -1, 0, 1, 2, 3, 4, 5], name="a", fs=1, t0=1)
f = a.plot()
b = TimeSerie(
[-2, -1, 0, 1, 2, 3, 4, 5], name="b", fs=1, t0=dt.datetime(2019, 1, 1)
)
f = b.plot(as_dt=True)
return True
def test_timeserie_tochannel():
a = TimeSerie([-2, -1, 0, 1, 2, 3, 4, 5], name="a", fs=1, t0=3)
b = TimeSerie.empty(0, 20, fs=1)
a.to_channel(b)
assert len(a) == 20
assert a.t0 == 0
assert a.te == 19
assert a.at(3) == -2
assert a.at(4) == -1
assert a.at(5) == 0
assert a.at(6) == 1
assert a.at(7) == 2
assert a.at(8) == 3
assert a.at(9) == 4
assert a.at(10) == 5
c = TimeSerie([-2, -1, 0, 1, 2, 3, 4, 5], name="a", fs=1, t0=3)
d = TimeSerie.empty(0, 20, fs=2)
with pytest.raises(ValueError):
c.to_channel(d)
e = TimeSerie([-2, -1, 0, 1, 2, 3, 4, 5], name="a", fs=1, t0=3)
f = TimeSerie.empty(4, 20, fs=1)
with pytest.raises(DataLossError):
e.to_channel(f)
g = TimeSerie([-2, -1, 0, 1, 2, 3, 4, 5], name="a", fs=1, t0=3)
h = TimeSerie.empty(0, 8, fs=1)
with pytest.raises(DataLossError):
g.to_channel(h)
i = TimeSerie([-2, -1, 0, 1, 2, 3, 4, 5], name="a", fs=1, t0=3)
j = TimeSerie.empty(0.5, 20.5, fs=1)
with pytest.raises(ValueError):
i.to_channel(j)
return True
def test_timeserie_mod():
a = TimeSerie([-2, -1, 0, 1, 2, 3, 4, 5], name="a", fs=1, t0=1)
b = a.modify("default", inplace=False)
assert compare_timeseries(b, a)
a.modify("default", inplace=True)
assert compare_timeseries(a, b)
a = TimeSerie([-2, -1, 0, 1, 2, 3, 4, 5], name="a", fs=1, t0=1)
b = a.modify("zero_negatives", inplace=False)
assert compare_timeseries(
b, TimeSerie([0, 0, 0, 1, 2, 3, 4, 5], name="a", fs=1, t0=1)
)
a.modify("zero_negatives", inplace=True)
assert compare_timeseries(a, b)
a = TimeSerie([-2, -1, 0, 1, 2, 3, 4, 5], name="a", fs=1, t0=1)
b = a.modify("correct_negatives", inplace=False)
assert compare_timeseries(
b, TimeSerie([0, 1, 2, 3, 4, 5, 6, 7], name="a", fs=1, t0=1)
)
a.modify("correct_negatives", inplace=True)
assert compare_timeseries(a, b)
return True
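
# A hedged sketch of what the asserts above imply the modify modes do
# (hypothetical helper; the real logic lives inside TimeSerie.modify):
def _apply_mode_sketch(data, mode):
    data = np.asarray(data, dtype=float)
    if mode == "default":
        return data                           # leave the data unchanged
    if mode == "zero_negatives":
        return np.where(data < 0, 0.0, data)  # clamp negative samples to zero
    if mode == "correct_negatives":
        return data - min(data.min(), 0.0)    # shift so the minimum becomes zero
    raise ValueError("unknown mode: %s" % mode)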
def test_timeserie_toevents():
a = TimeSerie([1, 2, 3], t0=1, fs=2, name="a")
e1, e2, e3 = a.to_events()
assert compare_events(e1, Event(1, t=1, name="a"))
assert compare_events(e2, Event(2, 1.5, name="a"))
assert compare_events(e3, Event(3, 2.0, name="a"))
return True
def test_timeserie_tobool():
a = TimeSerie([0, 0, 0, 1, 2, 3], t0=1, fs=2, name="a")
b = a.to_bool(inplace=False)
assert compare_timeseries(
b,
BooleanTimeSerie([False, False, False, True, True, True], t0=1, fs=2, name="a"),
)
a.to_bool(inplace=True)
assert compare_timeseries(a, b)
return True
def test_timeserie_tostatechangearray():
a = TimeSerie([1, 1, 1, 2, 2, 3, 4, 4, 4], t0=1, fs=2, name="a")
sta_a = a.to_statechangearray()
assert compare_statechangearrays(
sta_a, StateChangeArray([1, 2, 3, 4], t=[1, 2.5, 3.5, 4], name="a")
)
return True
def test_timeserie_toreports():
a = TimeSerie(10 * np.sin(np.linspace(0, 2 * np.pi, 100)), t0=1, fs=10, name="a")
b = a > 4
b.name = "a > 4"
reports = b.to_reports()
assert len(reports) == 1
assert compare_reports(reports[0], Report(1.7, 5.4, name="a > 4"))
return True
def test_timeserie_interpolate():
a = TimeSerie([1, 2, 3, 4, 5, 6, 7, 8, 9, 10], t0=0, fs=1, name="a")
t_new = np.arange(0, 9 + 0.5, 0.5)
b = a.interpolate(t_new, inplace=False)
assert compare_timeseries(
b, TimeSerie(np.arange(1, 10.5, 0.5), t0=0, fs=2, name="a")
)
a.interpolate(t_new, inplace=True)
assert compare_timeseries(a, b)
c = BooleanTimeSerie(
[False, False, True, True, True, False, False], t0=1, fs=1, name="c"
)
t_new = np.arange(1, 7 + 0.5, 0.5)
d = c.interpolate(t_new, inplace=False)
assert compare_timeseries(
d,
BooleanTimeSerie(3 * [False] + 7 * [True] + 3 * [False], t0=1, fs=2, name="c"),
)
c.interpolate(t_new, inplace=True)
assert compare_timeseries(c, d)
return True
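
# A hedged sketch of the interpolation behaviour the asserts above imply:
# linear interpolation onto the new time axis, with boolean series
# interpolated as floats and re-thresholded at 0.5 (an assumption, not the
# library's actual implementation).
def _interpolate_sketch(t_old, data, t_new, boolean=False):
    new = np.interp(t_new, t_old, np.asarray(data, dtype=float))
    return new >= 0.5 if boolean else new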
def test_booleantimeserie_init():
with pytest.raises(ValueError):
a = BooleanTimeSerie([1, 2, 3], fs=1, t0=1, name="a")
return True
def test_booleantimeserie_repr():
a = BooleanTimeSerie([False, False, False, True, True, True], fs=2, t0=1, name="a")
assert (
repr(a)
== "BooleanTimeSerie([False, False, False, True, True, True], t0=1, name='a', fs=2)"
)
return True
def test_booleantimeserie_properties():
a = BooleanTimeSerie([False, True, False], name="a", fs=2, t0=1)
assert a.hz == a.fs
a.hz = 1
assert a.fs == 1
b = BooleanTimeSerie([True, True, True], name="a", fs=1, t0=1)
b.data = [False, True, False]
assert isinstance(b.data, np.ndarray)
assert all(b.data == [False, True, False])
assert compare_timeseries(a, b)
with pytest.raises(ValueError):
a.data = [1, 2, 3]
return True
def test_statechangearray_init():
a = StateChangeArray([1, 3, 5, 7], t=[1, 2, 4, 8])
assert all(a.data == [1, 3, 5, 7])
assert all(a.t == [1, 2, 4, 8])
assert a.name == ""
b = StateChangeArray([2, 4, 6, 8], t=[1, 2, 4, 8], name="b")
with pytest.raises(ValueError):
c = StateChangeArray([1, 2, 3, 4], t=[1, 2, 4], name="c")
    d = StateChangeArray(np.array([1, 3, 5, 7])

from astropy.table import QTable
import astropy.units as u
import numpy as np
def test_energy_bias_resolution():
from pyirf.benchmarks import energy_bias_resolution
np.random.seed(1337)
TRUE_RES_1 = 0.2
TRUE_RES_2 = 0.05
TRUE_BIAS_1 = 0.1
TRUE_BIAS_2 = -0.05
true_bias = np.append(np.full(1000, TRUE_BIAS_1), np.full(1000, TRUE_BIAS_2))
    true_resolution = np.append(np.full(1000, TRUE_RES_1)

'''
Created on 18 Sep 2017
@author: ywz
'''
import numpy
import tensorflow as tf
class DiagonalGaussian:
def __init__(self, dim):
self.dim = dim
def specs(self):
return [("mean", (self.dim,)), ("log_var", (self.dim,))]
def keys(self):
return ["mean", "log_var"]
def kl_numpy(self, old_dist, new_dist):
old_means = old_dist["mean"]
old_log_stds = old_dist["log_var"]
new_means = new_dist["mean"]
new_log_stds = new_dist["log_var"]
old_std = numpy.exp(old_log_stds)
new_std = numpy.exp(new_log_stds)
# means: (N*A)
# std: (N*A)
# formula:
# { (\mu_1 - \mu_2)^2 + \sigma_1^2 - \sigma_2^2 } / (2\sigma_2^2) +
# ln(\sigma_2/\sigma_1)
        numerator = numpy.square(old_means - new_means) + numpy.square(old_std)
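        # Hedged sketch of how kl_numpy presumably continues, following the
        # formula in the comment above (the original line is truncated here,
        # so the sigma_2^2 term and the reduction axis are assumptions):
        numerator = numerator - numpy.square(new_std)
        denominator = 2 * numpy.square(new_std) + 1e-8
        return numpy.sum(
            numerator / denominator + new_log_stds - old_log_stds, axis=-1)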
import multiprocessing
import os
import pickle
import numpy as np
import pandas as pd
import torch
from torch.nn import functional as F
from analysis import correlations
from experiments import spec_util
from models import infogan, load_checkpoint
from morphomnist import io, measure
DATA_ROOT = "/vol/biomedic/users/dc315/mnist"
CHECKPOINT_ROOT = "/data/morphomnist/checkpoints"
PCORR_ROOT = "/data/morphomnist/pcorr_fixed"
SPEC_TO_DATASET = {"plain": "plain",
"plain+thin+thic": "global",
"plain+swel+frac": "local"}
def encode(gan: infogan.InfoGAN, x):
with torch.no_grad():
_, hidden = gan.dis(x)
cat_logits, cont_mean, cont_logvar, bin_logit = gan.rec(hidden)
return cat_logits, cont_mean, cont_logvar, bin_logit
def interleave(arrays, which):
for a in arrays:
a[0] = a[0].copy()
for i in range(1, max(which) + 1):
idx = (which == i)
for a in arrays:
a[0][idx] = a[i][idx]
return [a[0] for a in arrays]
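
# Hypothetical usage of interleave: merge per-row data from several sources
# according to `which` (names and shapes below are illustrative only):
#   imgs = [np.zeros((4, 2)), np.ones((4, 2))]               # source 0, source 1
#   lbls = [np.array([0, 0, 0, 0]), np.array([1, 1, 1, 1])]
#   which = np.array([0, 1, 0, 1])
#   merged_imgs, merged_lbls = interleave([imgs, lbls], which)
#   # merged_lbls -> array([0, 1, 0, 1])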
def load_test_data(data_dirs, weights=None):
metrics_paths = [os.path.join(data_dir, "t10k-morpho.csv") for data_dir in data_dirs]
images_paths = [os.path.join(data_dir, "t10k-images-idx3-ubyte.gz") for data_dir in data_dirs]
labels_paths = [os.path.join(data_dir, "t10k-labels-idx1-ubyte.gz") for data_dir in data_dirs]
metrics = list(map(pd.read_csv, metrics_paths))
images = list(map(io.load_idx, images_paths))
labels = list(map(io.load_idx, labels_paths))
if len(data_dirs) > 1:
if weights is not None:
weights = np.array(weights) / np.sum(weights)
which = np.random.choice(len(data_dirs), size=len(metrics[0]), p=weights)
metrics, images, labels = interleave([metrics, images, labels], which)
return metrics, images, labels, which
else:
return metrics[0], images[0], labels[0], None
def compute_partial_correlation(gan: infogan.InfoGAN, images, metrics, cols):
cat_logits, mean, logvar, bin_logits = encode(gan, images)
phi = F.softmax(cat_logits.cpu(), dim=1).numpy()
mu = mean.cpu().numpy()
gamma = F.sigmoid(bin_logits.cpu()).numpy() \
if bin_logits is not None else np.empty([metrics.shape[0], 0])
phi_ = np.eye(phi.shape[1])[phi.argmax(1)] # One-hot
gamma_ = gamma > .5
cat_dim, cont_dim, bin_dim = phi.shape[1], mu.shape[1], gamma.shape[1]
splits = np.cumsum([cat_dim, cont_dim, bin_dim])
pcorr = np.zeros([len(cols), splits[-1]])
# Categorical codes
dvs = metrics[cols].values
for cat in range(splits[0]):
        ivs = np.column_stack([phi_[:, cat], mu, gamma_])
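
# Hedged illustration of the partial-correlation step referenced above: the
# real code presumably delegates to the imported `correlations` module (its
# API is not shown here); a generic residual-based version could look like:
def _partial_corr_sketch(y, x, controls):
    """Correlation between y and x after removing the linear effect of controls."""
    Z = np.column_stack([np.ones(len(y)), controls])
    beta_y, *_ = np.linalg.lstsq(Z, y, rcond=None)
    beta_x, *_ = np.linalg.lstsq(Z, x, rcond=None)
    return np.corrcoef(x - Z @ beta_x, y - Z @ beta_y)[0, 1]
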
# Copyright (c) 2022 CNES
#
# All rights reserved. Use of this source code is governed by a
# BSD-style license that can be found in the LICENSE file.
"""
Test partitioning by date.
==========================
"""
from typing import Iterator
import pickle
import dask.array.core
import fsspec
import numpy
import pytest
import xarray
from .. import Date, get_codecs
from ... import dataset
# pylint: disable=unused-import # Need to import for fixtures
from ...tests.cluster import dask_client, dask_cluster
# pylint: disable=disable=unused-argument
def test_split_dataset(
dask_client, # pylint: disable=redefined-outer-name,unused-argument
):
"""Test the split_dataset method."""
start_date = numpy.datetime64("2000-01-06", "us")
delta = numpy.timedelta64(1, "h")
for end_date, indices, resolution in [
(
numpy.datetime64("2001-12-31", "Y"),
slice(0, 1),
"Y",
),
(
numpy.datetime64("2000-12-31", "M"),
slice(0, 2),
"M",
),
(
numpy.datetime64("2000-12-31", "D"),
slice(0, 3),
"D",
),
(
numpy.datetime64("2000-01-31", "h"),
slice(0, 4),
"h",
),
]:
# Time delta between two partitions
timedelta = numpy.timedelta64(1, resolution)
# Temporal axis to split
dates = numpy.arange(start_date, end_date, delta)
# Measured data
observation = numpy.random.rand(dates.size) # type: ignore
# Create the dataset to split
ds = xarray.Dataset(
dict(dates=xarray.DataArray(dates, dims=("num_lines", )),
observation=xarray.DataArray(observation,
dims=("num_lines", ))))
partitioning = Date(("dates", ), resolution)
assert len(partitioning) == len(range(indices.start, indices.stop))
# Date of the current partition
date = numpy.datetime64(start_date, resolution)
# Build the test dataset
ds = dataset.Dataset.from_xarray(ds, delayed=False)
iterator = partitioning.split_dataset(ds, "num_lines")
assert isinstance(iterator, Iterator)
for partition, indexer in iterator:
subset = ds.isel(indexer)
# Cast the date to the a datetime object to extract the date
item = date.astype("datetime64[us]").item()
expected = (
f"year={item.year}",
f"month={item.month:02d}",
f"day={item.day:02d}",
f"hour={item.hour:02d}",
)
assert partition == expected[indices]
folder = "/".join(partition)
fields = partitioning.parse(folder)
parsed_date, = partitioning.encode(fields)
assert parsed_date == numpy.datetime64(date).astype(
f"datetime64[{resolution}]")
expected_selection = dates[
(dates >= parsed_date) # type: ignore
& (dates < parsed_date + timedelta)] # type: ignore
assert numpy.all(
subset.variables["dates"].values == expected_selection)
expected = (
("year", item.year),
("month", item.month),
("day", item.day),
("hour", item.hour),
)
assert fields == expected[indices]
assert partitioning.join(fields, "/") == folder
assert partitioning.join(partitioning.decode((parsed_date, )),
"/") == folder
date += timedelta
def test_construction():
"""Test the construction of the Date class."""
partitioning = Date(("dates", ), "D")
assert partitioning.resolution == "D"
assert partitioning.variables == ("dates", )
assert partitioning.dtype() == (("year", "uint16"), ("month", "uint8"),
("day", "uint8"))
assert len(partitioning) == 3
assert partitioning.get_config() == {
"id": "Date",
"resolution": "D",
"variables": ("dates", ),
}
with pytest.raises(ValueError):
Date(("dates1", "dates2"), "D")
with pytest.raises(ValueError):
Date(("dates", ), "W")
def test_config():
"""Test the configuration of the Date class."""
partitioning = Date(("dates", ), "D")
assert partitioning.dtype() == (("year", "uint16"), ("month", "uint8"),
("day", "uint8"))
config = partitioning.get_config()
partitioning = get_codecs(config)
assert isinstance(partitioning, Date)
def test_pickle():
"""Test the pickling of the Date class."""
partitioning = Date(("dates", ), "D")
other = pickle.loads(pickle.dumps(partitioning))
assert isinstance(other, Date)
assert other.resolution == "D"
assert other.variables == ("dates", )
def test_no_monotonic(
dask_client, # pylint: disable=redefined-outer-name,unused-argument
):
"""Test that the Date partitioning raises an error if the temporal axis is
not monotonic."""
dates = numpy.arange(numpy.datetime64("2000-01-01", "h"),
numpy.datetime64("2000-01-02", "h"),
numpy.timedelta64(1, "m"))
numpy.random.shuffle(dates)
partitioning = Date(("dates", ), "h")
# pylint: disable=protected-access
with pytest.raises(ValueError):
list(partitioning._split({"dates": dask.array.core.from_array(dates)}))
# pylint: enable=protected-access
def test_values_must_be_datetime64(
dask_client, # pylint: disable=redefined-outer-name,unused-argument
):
"""Test that the values must be datetime64."""
    dates = numpy.arange(numpy.datetime64("2000-01-01", "h")